use crate::{
config::DatasetConfig,
error::{FusekiError, FusekiResult},
server::AppState,
store::Store,
};
use axum::{
extract::{Path, Query, State},
http::StatusCode,
response::{Html, IntoResponse, Json, Response},
};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Instant;
use tracing::{debug, info, instrument};
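/// Request body for creating a dataset via the admin API.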
#[derive(Debug, Deserialize)]
pub struct CreateDatasetRequest {
pub name: String,
pub location: Option<String>,
pub read_only: Option<bool>,
pub description: Option<String>,
}
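/// Dataset summary returned by the dataset listing and lookup endpoints.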
#[derive(Debug, Serialize)]
pub struct DatasetInfo {
pub name: String,
pub location: String,
pub read_only: bool,
pub description: Option<String>,
pub created_at: String,
pub last_modified: String,
pub triple_count: u64,
pub size_bytes: u64,
pub services: Vec<ServiceInfo>,
}
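/// A single service (query, update, ...) exposed by a dataset.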
#[derive(Debug, Serialize)]
pub struct ServiceInfo {
pub name: String,
pub endpoint: String,
pub service_type: String,
pub description: String,
}
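/// Server-level runtime statistics returned by the stats endpoint.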
#[derive(Debug, Serialize)]
pub struct ServerStats {
pub server_name: String,
pub version: String,
pub uptime_seconds: u64,
pub total_requests: u64,
pub datasets_count: usize,
pub memory_usage_mb: f64,
pub cpu_usage_percent: f64,
pub active_connections: u32,
pub last_updated: String,
}
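/// Query parameters for dataset backups, e.g. `?format=ntriples&compress=true`.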
#[derive(Debug, Deserialize)]
pub struct BackupParams {
pub format: Option<String>,
pub compress: Option<bool>,
pub include_metadata: Option<bool>,
}
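/// Query parameters for dataset compaction, e.g. `?force=true`.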
#[derive(Debug, Deserialize)]
pub struct CompactParams {
pub force: Option<bool>,
}
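/// Serves the HTML admin UI; responds with 404 when the UI is disabled in the config.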
#[instrument(skip(state))]
pub async fn ui_handler(State(state): State<Arc<AppState>>) -> Result<Html<String>, FusekiError> {
if !state.config.server.admin_ui {
return Err(FusekiError::not_found("Admin UI is disabled"));
}
let html_content = generate_admin_ui_html(&state).await?;
Ok(Html(html_content))
}
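/// Lists all configured datasets with their statistics and services.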
#[instrument(skip(state))]
pub async fn list_datasets(
State(state): State<Arc<AppState>>,
) -> Result<Json<Vec<DatasetInfo>>, FusekiError> {
let mut datasets = Vec::new();
for (name, config) in &state.config.datasets {
let dataset_info = get_dataset_info(name, config, &state.store).await?;
datasets.push(dataset_info);
}
info!("Listed {} datasets", datasets.len());
Ok(Json(datasets))
}
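/// Looks up a single dataset by name, returning 404 if it is not configured.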
#[instrument(skip(state))]
pub async fn get_dataset(
State(state): State<Arc<AppState>>,
Path(dataset_name): Path<String>,
) -> Result<Json<DatasetInfo>, FusekiError> {
let config = state
.config
.datasets
.get(&dataset_name)
.ok_or_else(|| FusekiError::not_found(format!("Dataset '{dataset_name}' not found")))?;
let dataset_info = get_dataset_info(&dataset_name, config, &state.store).await?;
Ok(Json(dataset_info))
}
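/// Creates a dataset with default SPARQL query and update services.
/// Fails with 409 if a dataset with the same name already exists.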
#[instrument(skip(state))]
pub async fn create_dataset(
State(state): State<Arc<AppState>>,
Json(request): Json<CreateDatasetRequest>,
) -> Result<Json<DatasetInfo>, FusekiError> {
validate_dataset_name(&request.name)?;
if state.config.datasets.contains_key(&request.name) {
return Err(FusekiError::conflict(format!(
"Dataset '{}' already exists",
request.name
)));
}
let dataset_config = DatasetConfig {
name: request.name.clone(),
location: request
.location
.unwrap_or_else(|| format!("/data/{}", request.name)),
read_only: request.read_only.unwrap_or(false),
text_index: None,
shacl_shapes: Vec::new(),
services: vec![
crate::config::ServiceConfig {
name: "query".to_string(),
service_type: crate::config::ServiceType::SparqlQuery,
endpoint: format!("/{}/sparql", request.name),
auth_required: false,
rate_limit: None,
},
crate::config::ServiceConfig {
name: "update".to_string(),
service_type: crate::config::ServiceType::SparqlUpdate,
endpoint: format!("/{}/update", request.name),
auth_required: false,
rate_limit: None,
},
],
access_control: None,
backup: None,
};
create_dataset_in_store(&state.store, &request.name, &dataset_config).await?;
let dataset_info = get_dataset_info(&request.name, &dataset_config, &state.store).await?;
info!("Created dataset: {}", request.name);
Ok(Json(dataset_info))
}
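/// Deletes a dataset, returning 204 on success and 404 if it does not exist.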
#[instrument(skip(state))]
pub async fn delete_dataset(
State(state): State<Arc<AppState>>,
Path(dataset_name): Path<String>,
) -> Result<StatusCode, FusekiError> {
if !state.config.datasets.contains_key(&dataset_name) {
return Err(FusekiError::not_found(format!(
"Dataset '{dataset_name}' not found"
)));
}
delete_dataset_from_store(&state.store, &dataset_name).await?;
info!("Deleted dataset: {}", dataset_name);
Ok(StatusCode::NO_CONTENT)
}
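/// Returns general server information: version, enabled features, and dataset count.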
#[instrument(skip(state))]
pub async fn server_info(
State(state): State<Arc<AppState>>,
) -> Result<Json<serde_json::Value>, FusekiError> {
let mut info = HashMap::new();
info.insert("name", serde_json::json!("OxiRS Fuseki"));
info.insert("version", serde_json::json!(env!("CARGO_PKG_VERSION")));
info.insert(
"description",
serde_json::json!(
"SPARQL 1.1/1.2 HTTP protocol server with Fuseki-compatible configuration"
),
);
info.insert(
"built_at",
serde_json::json!(option_env!("VERGEN_BUILD_TIMESTAMP").unwrap_or("unknown")),
);
info.insert(
"features",
serde_json::json!({
"authentication": state.config.security.authentication.enabled,
"metrics": state.config.monitoring.metrics.enabled,
"admin_ui": state.config.server.admin_ui,
"cors": state.config.server.cors,
}),
);
info.insert(
"datasets_count",
serde_json::json!(state.config.datasets.len()),
);
if let Some(metrics_service) = &state.metrics_service {
let summary = metrics_service.get_summary().await;
info.insert("uptime_seconds", serde_json::json!(summary.uptime_seconds));
info.insert("requests_total", serde_json::json!(summary.requests_total));
info.insert("system_metrics", serde_json::json!(summary.system));
}
Ok(Json(serde_json::Value::Object(
info.into_iter().map(|(k, v)| (k.to_string(), v)).collect(),
)))
}
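/// Returns runtime statistics, falling back to zeroed values when the metrics
/// service is unavailable.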
#[instrument(skip(state))]
pub async fn server_stats(
State(state): State<Arc<AppState>>,
) -> Result<Json<ServerStats>, FusekiError> {
let stats = if let Some(metrics_service) = &state.metrics_service {
let summary = metrics_service.get_summary().await;
ServerStats {
server_name: "OxiRS Fuseki".to_string(),
version: env!("CARGO_PKG_VERSION").to_string(),
uptime_seconds: summary.uptime_seconds,
total_requests: summary.requests_total,
datasets_count: state.config.datasets.len(),
memory_usage_mb: summary.system.memory_usage_bytes as f64 / 1024.0 / 1024.0,
cpu_usage_percent: summary.system.cpu_usage_percent,
active_connections: summary.active_connections as u32,
last_updated: chrono::Utc::now().to_rfc3339(),
}
} else {
ServerStats {
server_name: "OxiRS Fuseki".to_string(),
version: env!("CARGO_PKG_VERSION").to_string(),
uptime_seconds: 0,
total_requests: 0,
datasets_count: state.config.datasets.len(),
memory_usage_mb: 0.0,
cpu_usage_percent: 0.0,
active_connections: 0,
last_updated: chrono::Utc::now().to_rfc3339(),
}
};
Ok(Json(stats))
}
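/// Compacts a dataset's storage and reports execution time and space saved.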
#[instrument(skip(state))]
pub async fn compact_dataset(
State(state): State<Arc<AppState>>,
Path(dataset_name): Path<String>,
Query(params): Query<CompactParams>,
) -> Result<Json<serde_json::Value>, FusekiError> {
if !state.config.datasets.contains_key(&dataset_name) {
return Err(FusekiError::not_found(format!(
"Dataset '{dataset_name}' not found"
)));
}
let start_time = Instant::now();
let result =
compact_dataset_in_store(&state.store, &dataset_name, params.force.unwrap_or(false))
.await?;
let execution_time = start_time.elapsed();
info!(
"Compacted dataset '{}' in {}ms",
dataset_name,
execution_time.as_millis()
);
Ok(Json(serde_json::json!({
"success": true,
"dataset": dataset_name,
"execution_time_ms": execution_time.as_millis(),
"size_before_bytes": result.size_before,
"size_after_bytes": result.size_after,
"space_saved_bytes": result.size_before - result.size_after,
"message": "Dataset compaction completed successfully"
})))
}
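/// Produces a dataset backup as a downloadable file in the requested format.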
#[instrument(skip(state))]
pub async fn backup_dataset(
State(state): State<Arc<AppState>>,
Path(dataset_name): Path<String>,
Query(params): Query<BackupParams>,
) -> Result<Response, FusekiError> {
if !state.config.datasets.contains_key(&dataset_name) {
return Err(FusekiError::not_found(format!(
"Dataset '{dataset_name}' not found"
)));
}
let format = params.format.as_deref().unwrap_or("turtle");
let compress = params.compress.unwrap_or(false);
let include_metadata = params.include_metadata.unwrap_or(true);
let start_time = Instant::now();
let backup_data = create_dataset_backup(
&state.store,
&dataset_name,
format,
compress,
include_metadata,
)
.await?;
let execution_time = start_time.elapsed();
info!(
"Created backup for dataset '{}' in {}ms",
dataset_name,
execution_time.as_millis()
);
let (content_type, filename) = match format {
"turtle" => ("text/turtle", format!("{dataset_name}_backup.ttl")),
"ntriples" => ("application/n-triples", format!("{dataset_name}_backup.nt")),
"rdfxml" => ("application/rdf+xml", format!("{dataset_name}_backup.rdf")),
_ => ("text/turtle", format!("{dataset_name}_backup.ttl")),
};
let headers = [
("content-type", content_type),
(
"content-disposition",
&format!("attachment; filename=\"{filename}\""),
),
];
Ok((StatusCode::OK, headers, backup_data).into_response())
}
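/// Renders the static admin UI page from the current server configuration.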
async fn generate_admin_ui_html(state: &AppState) -> FusekiResult<String> {
let datasets_count = state.config.datasets.len();
let html = format!(
r#"
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>OxiRS Fuseki Admin</title>
<style>
body {{ font-family: Arial, sans-serif; margin: 40px; background-color: #f5f5f5; }}
.container {{ max-width: 1200px; margin: 0 auto; background: white; padding: 30px; border-radius: 8px; box-shadow: 0 2px 10px rgba(0,0,0,0.1); }}
h1 {{ color: #333; border-bottom: 2px solid #007cba; padding-bottom: 10px; }}
.section {{ margin: 20px 0; padding: 20px; border-left: 4px solid #007cba; background: #f9f9f9; }}
.endpoint {{ background: #e8f4f8; padding: 10px; margin: 5px 0; border-radius: 4px; font-family: monospace; }}
.status {{ padding: 5px 10px; border-radius: 4px; color: white; }}
.status.enabled {{ background-color: #28a745; }}
.status.disabled {{ background-color: #dc3545; }}
a {{ color: #007cba; text-decoration: none; }}
a:hover {{ text-decoration: underline; }}
</style>
</head>
<body>
<div class="container">
<h1>🦀 OxiRS Fuseki Server</h1>
<div class="section">
<h2>Server Information</h2>
<p><strong>Version:</strong> {}</p>
<p><strong>Datasets:</strong> {}</p>
<p><strong>Authentication:</strong> <span class="status {}">{}</span></p>
<p><strong>Metrics:</strong> <span class="status {}">{}</span></p>
</div>
<div class="section">
<h2>API Endpoints</h2>
<div class="endpoint">GET <a href="/health">/health</a> - Health check</div>
<div class="endpoint">GET <a href="/$/server">/$/server</a> - Server information</div>
<div class="endpoint">GET <a href="/$/stats">/$/stats</a> - Server statistics</div>
<div class="endpoint">GET <a href="/$/datasets">/$/datasets</a> - List datasets</div>
<div class="endpoint">GET/POST <a href="/sparql">/sparql</a> - SPARQL Query endpoint</div>
<div class="endpoint">POST <a href="/update">/update</a> - SPARQL Update endpoint</div>
{}
</div>
<div class="section">
<h2>Quick Actions</h2>
<p><a href="/$/ping">Ping Server</a></p>
<p><a href="/metrics">View Metrics</a></p>
<p><a href="/health">Check Health</a></p>
</div>
</div>
</body>
</html>
"#,
env!("CARGO_PKG_VERSION"),
datasets_count,
if state.config.security.authentication.enabled {
"enabled"
} else {
"disabled"
},
if state.config.security.authentication.enabled {
"ENABLED"
} else {
"DISABLED"
},
if state.config.monitoring.metrics.enabled {
"enabled"
} else {
"disabled"
},
if state.config.monitoring.metrics.enabled {
"ENABLED"
} else {
"DISABLED"
},
if state.config.monitoring.metrics.enabled {
r#"<div class="endpoint">GET <a href="/metrics">/metrics</a> - Prometheus metrics</div>"#
} else {
""
}
);
Ok(html)
}
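/// Combines dataset configuration with store statistics into a `DatasetInfo`.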
async fn get_dataset_info(
name: &str,
config: &DatasetConfig,
store: &Store,
) -> FusekiResult<DatasetInfo> {
let stats = get_dataset_stats_from_store(store, name).await?;
let services = config
.services
.iter()
.map(|service| ServiceInfo {
name: service.name.clone(),
endpoint: service.endpoint.clone(),
service_type: format!("{:?}", service.service_type),
description: format!("{:?} service", service.service_type),
})
.collect();
Ok(DatasetInfo {
name: name.to_string(),
location: config.location.clone(),
read_only: config.read_only,
        description: None,
        // Placeholder timestamps: the store does not yet track creation/modification times.
        created_at: chrono::Utc::now().to_rfc3339(),
        last_modified: chrono::Utc::now().to_rfc3339(),
        triple_count: stats.triple_count,
size_bytes: stats.size_bytes,
services,
})
}
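/// Validates a dataset name: non-empty, at most 64 characters,
/// alphanumeric plus `-`/`_`, and not starting with a dash.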
fn validate_dataset_name(name: &str) -> FusekiResult<()> {
if name.is_empty() {
return Err(FusekiError::bad_request("Dataset name cannot be empty"));
}
if name.len() > 64 {
return Err(FusekiError::bad_request(
"Dataset name too long (max 64 characters)",
));
}
if !name
.chars()
.all(|c| c.is_alphanumeric() || c == '-' || c == '_')
{
return Err(FusekiError::bad_request(
"Dataset name contains invalid characters",
));
}
if name.starts_with('-') {
return Err(FusekiError::bad_request(
"Dataset name cannot start with dash",
));
}
Ok(())
}
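/// Triple count and on-disk size for a dataset.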
struct DatasetStats {
triple_count: u64,
size_bytes: u64,
}
struct CompactionResult {
size_before: u64,
size_after: u64,
}
async fn get_dataset_stats_from_store(_store: &Store, _name: &str) -> FusekiResult<DatasetStats> {
    // Placeholder statistics; real values require store integration.
    Ok(DatasetStats {
        triple_count: 1000,
        size_bytes: 50000,
    })
}
async fn create_dataset_in_store(
_store: &Store,
name: &str,
config: &DatasetConfig,
) -> FusekiResult<()> {
debug!(
"Creating dataset '{}' at location '{}'",
name, config.location
);
    // Simulated latency; store-backed dataset creation is not yet implemented.
    tokio::time::sleep(std::time::Duration::from_millis(10)).await;
Ok(())
}
async fn delete_dataset_from_store(_store: &Store, name: &str) -> FusekiResult<()> {
debug!("Deleting dataset '{}'", name);
    // Simulated latency; store-backed dataset deletion is not yet implemented.
    tokio::time::sleep(std::time::Duration::from_millis(5)).await;
Ok(())
}
async fn compact_dataset_in_store(
_store: &Store,
name: &str,
force: bool,
) -> FusekiResult<CompactionResult> {
debug!("Compacting dataset '{}' (force: {})", name, force);
    // Simulated latency; the sizes below are placeholders until store-backed compaction lands.
    tokio::time::sleep(std::time::Duration::from_millis(100)).await;
Ok(CompactionResult {
size_before: 100000,
size_after: 75000,
})
}
async fn create_dataset_backup(
_store: &Store,
name: &str,
format: &str,
compress: bool,
include_metadata: bool,
) -> FusekiResult<String> {
debug!(
"Creating backup for dataset '{}' in format '{}' (compress: {}, metadata: {})",
name, format, compress, include_metadata
);
    // Simulated latency; the canned snippets below stand in for a real store export.
    tokio::time::sleep(std::time::Duration::from_millis(50)).await;
match format {
"turtle" => Ok("@prefix ex: <http://example.org/> .\nex:subject ex:predicate \"backup data\" .".to_string()),
"ntriples" => Ok("<http://example.org/subject> <http://example.org/predicate> \"backup data\" .".to_string()),
"rdfxml" => Ok("<?xml version=\"1.0\"?>\n<rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">\n</rdf:RDF>".to_string()),
_ => Ok("# Backup data\n".to_string()),
}
}
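/// Metadata for a single backup file on disk.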
#[derive(Debug, Serialize)]
pub struct BackupFileInfo {
pub filename: String,
pub size: u64,
pub created: String,
pub dataset: Option<String>,
pub format: Option<String>,
pub compressed: bool,
}
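/// Response for the backup listing endpoint.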
#[derive(Debug, Serialize)]
pub struct BackupListResponse {
pub backups: Vec<BackupFileInfo>,
pub backup_directory: String,
pub count: usize,
pub total_size: u64,
}
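/// Lists backup files in the configured backup directory (default `./backups`),
/// sorted newest first.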
#[instrument(skip(state))]
pub async fn list_backups(
State(state): State<Arc<AppState>>,
) -> Result<Json<BackupListResponse>, FusekiError> {
info!("Listing available backups");
let backup_dir = state
.config
.server
.backup_directory
.clone()
.unwrap_or_else(|| std::path::PathBuf::from("./backups"));
let mut backups = Vec::new();
let mut total_size = 0u64;
if backup_dir.exists() && backup_dir.is_dir() {
if let Ok(entries) = std::fs::read_dir(&backup_dir) {
for entry in entries.flatten() {
let path = entry.path();
if path.is_file() {
if let Ok(metadata) = entry.metadata() {
let filename = path
.file_name()
.and_then(|n| n.to_str())
.unwrap_or("unknown")
.to_string();
let size = metadata.len();
total_size += size;
let dataset = extract_dataset_from_filename(&filename);
let (format, compressed) = detect_backup_format(&filename);
let created = metadata
.created()
.or_else(|_| metadata.modified())
.map(|t| chrono::DateTime::<chrono::Utc>::from(t).to_rfc3339())
.unwrap_or_else(|_| chrono::Utc::now().to_rfc3339());
backups.push(BackupFileInfo {
filename,
size,
created,
dataset,
format,
compressed,
});
}
}
}
}
}
backups.sort_by(|a, b| b.created.cmp(&a.created));
let count = backups.len();
Ok(Json(BackupListResponse {
backups,
backup_directory: backup_dir.to_string_lossy().to_string(),
count,
total_size,
}))
}
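/// Attempts to recover a dataset name from a backup filename, assuming the
/// `<dataset>_<timestamp>.<ext>` or `<dataset>-backup<...>` naming conventions.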
fn extract_dataset_from_filename(filename: &str) -> Option<String> {
    let name = filename.strip_suffix(".gz").unwrap_or(filename);
let name = name
.strip_suffix(".nq")
.or_else(|| name.strip_suffix(".ttl"))
.or_else(|| name.strip_suffix(".nt"))
.or_else(|| name.strip_suffix(".rdf"))
.or_else(|| name.strip_suffix(".xml"))
.or_else(|| name.strip_suffix(".trig"))
.unwrap_or(name);
if let Some(idx) = name.find('_') {
let potential_name = &name[..idx];
if !potential_name.is_empty()
&& potential_name
.chars()
.all(|c| c.is_alphanumeric() || c == '-')
{
return Some(potential_name.to_string());
}
}
if let Some(idx) = name.find("-backup") {
let potential_name = &name[..idx];
if !potential_name.is_empty() {
return Some(potential_name.to_string());
}
}
None
}
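/// Infers the RDF serialization and compression (`.gz`/`.zip`/`.zst`) from a
/// backup filename's extensions.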
fn detect_backup_format(filename: &str) -> (Option<String>, bool) {
let compressed =
filename.ends_with(".gz") || filename.ends_with(".zip") || filename.ends_with(".zst");
let name = if compressed {
filename
.strip_suffix(".gz")
.or_else(|| filename.strip_suffix(".zip"))
.or_else(|| filename.strip_suffix(".zst"))
.unwrap_or(filename)
} else {
filename
};
let format = if name.ends_with(".nq") || name.ends_with(".nquads") {
Some("N-Quads".to_string())
} else if name.ends_with(".nt") || name.ends_with(".ntriples") {
Some("N-Triples".to_string())
} else if name.ends_with(".ttl") || name.ends_with(".turtle") {
Some("Turtle".to_string())
} else if name.ends_with(".rdf") || name.ends_with(".xml") || name.ends_with(".rdfxml") {
Some("RDF/XML".to_string())
} else if name.ends_with(".trig") {
Some("TriG".to_string())
} else if name.ends_with(".jsonld") || name.ends_with(".json") {
Some("JSON-LD".to_string())
} else {
None
};
(format, compressed)
}
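/// Result of a configuration reload attempt.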
#[derive(Debug, Serialize)]
pub struct ReloadResponse {
pub success: bool,
pub message: String,
pub config_file: Option<String>,
pub changes: Vec<String>,
pub timestamp: String,
}
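/// Re-reads and parses the configuration file, reporting detected changes;
/// the new configuration is not applied to the running server.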
#[instrument(skip(state))]
pub async fn reload_config(
State(state): State<Arc<AppState>>,
) -> Result<Json<ReloadResponse>, FusekiError> {
info!("Configuration reload requested");
let config_file = state
.config
.server
.config_file
.clone()
.map(|p| p.to_string_lossy().to_string());
let config_path = match &state.config.server.config_file {
Some(path) => path.clone(),
None => {
return Ok(Json(ReloadResponse {
success: false,
                message: "No configuration file specified; the server was started without one."
                    .to_string(),
config_file: None,
changes: vec![],
timestamp: chrono::Utc::now().to_rfc3339(),
}));
}
};
if !config_path.exists() {
return Ok(Json(ReloadResponse {
success: false,
message: format!("Configuration file not found: {}", config_path.display()),
config_file,
changes: vec![],
timestamp: chrono::Utc::now().to_rfc3339(),
}));
}
let config_content = match std::fs::read_to_string(&config_path) {
Ok(content) => content,
Err(e) => {
return Ok(Json(ReloadResponse {
success: false,
message: format!("Failed to read configuration file: {}", e),
config_file,
changes: vec![],
timestamp: chrono::Utc::now().to_rfc3339(),
}));
}
};
let new_config: crate::config::ServerConfig = match toml::from_str(&config_content) {
Ok(config) => config,
Err(e) => {
return Ok(Json(ReloadResponse {
success: false,
message: format!("Failed to parse configuration: {}", e),
config_file,
changes: vec![],
timestamp: chrono::Utc::now().to_rfc3339(),
}));
}
};
let mut changes = Vec::new();
let current_datasets: std::collections::HashSet<_> = state.config.datasets.keys().collect();
let new_datasets: std::collections::HashSet<_> = new_config.datasets.keys().collect();
    for name in new_datasets.difference(&current_datasets) {
changes.push(format!("New dataset added: {}", name));
}
for name in current_datasets.difference(&new_datasets) {
changes.push(format!("Dataset removed: {}", name));
}
if state.config.server.port != new_config.server.port {
changes.push(format!(
"Port changed: {} -> {} (requires restart)",
state.config.server.port, new_config.server.port
));
}
if state.config.server.host != new_config.server.host {
changes.push(format!(
"Host changed: {} -> {} (requires restart)",
state.config.server.host, new_config.server.host
));
}
if state.config.security.authentication.enabled != new_config.security.authentication.enabled {
changes.push(format!(
"Authentication: {} -> {}",
if state.config.security.authentication.enabled {
"enabled"
} else {
"disabled"
},
if new_config.security.authentication.enabled {
"enabled"
} else {
"disabled"
}
));
}
if state.config.monitoring.metrics.enabled != new_config.monitoring.metrics.enabled {
changes.push(format!(
"Metrics: {} -> {}",
if state.config.monitoring.metrics.enabled {
"enabled"
} else {
"disabled"
},
if new_config.monitoring.metrics.enabled {
"enabled"
} else {
"disabled"
}
));
}
let message = if changes.is_empty() {
"Configuration reloaded. No changes detected.".to_string()
} else {
format!(
"Configuration parsed successfully. {} change(s) detected. Some changes may require a server restart to take effect.",
changes.len()
)
};
info!("Configuration reload completed: {}", message);
Ok(Json(ReloadResponse {
success: true,
message,
config_file,
changes,
timestamp: chrono::Utc::now().to_rfc3339(),
}))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_dataset_name_validation() {
assert!(validate_dataset_name("test").is_ok());
assert!(validate_dataset_name("test-dataset").is_ok());
assert!(validate_dataset_name("test_dataset").is_ok());
assert!(validate_dataset_name("test123").is_ok());
assert!(validate_dataset_name("").is_err());
assert!(validate_dataset_name("-test").is_err());
assert!(validate_dataset_name("test/dataset").is_err());
assert!(validate_dataset_name("test dataset").is_err());
assert!(validate_dataset_name(&"x".repeat(65)).is_err());
}
#[test]
fn test_extract_dataset_from_filename() {
assert_eq!(
extract_dataset_from_filename("mydb_2024-01-15.nq.gz"),
Some("mydb".to_string())
);
assert_eq!(
extract_dataset_from_filename("test-data_2024-01-15T10-30-00.ttl"),
Some("test-data".to_string())
);
assert_eq!(
extract_dataset_from_filename("mydb-backup-20240115.nq"),
Some("mydb".to_string())
);
assert_eq!(extract_dataset_from_filename("random-file.txt"), None);
assert_eq!(extract_dataset_from_filename("nopattern.nq"), None);
}
#[test]
fn test_detect_backup_format() {
assert_eq!(
detect_backup_format("backup.nq"),
(Some("N-Quads".to_string()), false)
);
assert_eq!(
detect_backup_format("backup.nq.gz"),
(Some("N-Quads".to_string()), true)
);
assert_eq!(
detect_backup_format("backup.nt"),
(Some("N-Triples".to_string()), false)
);
assert_eq!(
detect_backup_format("backup.ttl"),
(Some("Turtle".to_string()), false)
);
assert_eq!(
detect_backup_format("backup.ttl.gz"),
(Some("Turtle".to_string()), true)
);
assert_eq!(
detect_backup_format("backup.rdf"),
(Some("RDF/XML".to_string()), false)
);
assert_eq!(
detect_backup_format("backup.trig"),
(Some("TriG".to_string()), false)
);
assert_eq!(
detect_backup_format("backup.jsonld"),
(Some("JSON-LD".to_string()), false)
);
assert_eq!(detect_backup_format("backup.xyz"), (None, false));
assert_eq!(
detect_backup_format("backup.nq.zip"),
(Some("N-Quads".to_string()), true)
);
assert_eq!(
detect_backup_format("backup.ttl.zst"),
(Some("Turtle".to_string()), true)
);
}
#[test]
fn test_backup_file_info_serialization() {
let info = BackupFileInfo {
filename: "test_2024-01-15.nq.gz".to_string(),
size: 1024,
created: "2024-01-15T10:30:00Z".to_string(),
dataset: Some("test".to_string()),
format: Some("N-Quads".to_string()),
compressed: true,
};
let json = serde_json::to_string(&info).unwrap();
assert!(json.contains("\"filename\":\"test_2024-01-15.nq.gz\""));
assert!(json.contains("\"size\":1024"));
assert!(json.contains("\"compressed\":true"));
}
#[test]
fn test_backup_list_response_serialization() {
let response = BackupListResponse {
backups: vec![],
backup_directory: "./backups".to_string(),
count: 0,
total_size: 0,
};
let json = serde_json::to_string(&response).unwrap();
assert!(json.contains("\"backup_directory\":\"./backups\""));
assert!(json.contains("\"count\":0"));
}
#[test]
fn test_reload_response_serialization() {
let response = ReloadResponse {
success: true,
message: "Configuration reloaded".to_string(),
config_file: Some("/etc/oxirs/config.toml".to_string()),
changes: vec!["Port changed: 3030 -> 3031".to_string()],
timestamp: "2024-01-15T10:30:00Z".to_string(),
};
let json = serde_json::to_string(&response).unwrap();
assert!(json.contains("\"success\":true"));
assert!(json.contains("\"message\":\"Configuration reloaded\""));
assert!(json.contains("Port changed"));
}
}