use std::path::PathBuf;
use serde::Deserialize;
use thiserror::Error;
/// Top-level storage configuration (typically a `[storage]` config section).
///
/// Every field carries a serde default, so an absent or empty section
/// deserializes to a fully-defaulted config with the backend disabled.
#[derive(Debug, Clone, Deserialize)]
pub struct StorageConfig {
/// Which backend to activate; defaults to `StorageBackend::Disabled`.
#[serde(default)]
pub backend: StorageBackend,
/// Identifier carried into the resolved plan as `provider_id`;
/// defaults to `"default"`.
#[serde(default = "default_provider")]
pub default_provider: String,
/// Explicit acknowledgement required to run the local backend under a
/// production profile (enforced by `backend_plan`).
#[serde(default)]
pub allow_local_in_production: bool,
/// Settings for the local-filesystem backend.
#[serde(default)]
pub local: StorageLocalConfig,
/// Settings for the S3 backend.
#[serde(default)]
pub s3: StorageS3Config,
}
impl Default for StorageConfig {
    /// Mirrors the field-level `#[serde(default)]` attributes so that a
    /// config section omitted entirely behaves the same as an empty one.
    fn default() -> Self {
        let backend = StorageBackend::default();
        let local = StorageLocalConfig::default();
        let s3 = StorageS3Config::default();
        Self {
            backend,
            local,
            s3,
            default_provider: default_provider(),
            allow_local_in_production: false,
        }
    }
}
/// Selects which blob-storage implementation is active.
///
/// Deserialized from lowercase names (`"disabled"`, `"local"`, `"s3"`);
/// `#[non_exhaustive]` leaves room to add backends without a breaking change.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Deserialize)]
#[serde(rename_all = "lowercase")]
#[non_exhaustive]
pub enum StorageBackend {
/// Storage is turned off entirely (the default).
#[default]
Disabled,
/// Blobs live on the local filesystem (see `StorageLocalConfig`).
Local,
/// Blobs live in an S3-compatible bucket (see `StorageS3Config`).
S3,
}
impl StorageBackend {
    /// Parses a backend name from an environment-variable value.
    ///
    /// Matching is case-insensitive and ignores surrounding whitespace;
    /// `"off"` and `"none"` are accepted as aliases for [`Self::Disabled`].
    /// Returns `None` for anything unrecognized.
    #[must_use]
    pub fn from_env_value(value: &str) -> Option<Self> {
        let normalized = value.trim().to_ascii_lowercase();
        match normalized.as_str() {
            "local" => Some(Self::Local),
            "s3" => Some(Self::S3),
            "disabled" | "off" | "none" => Some(Self::Disabled),
            _ => None,
        }
    }
}
/// Configuration for the local-filesystem storage backend.
#[derive(Debug, Clone, Deserialize)]
pub struct StorageLocalConfig {
/// Directory blobs are stored under; defaults to `target/blobs`.
#[serde(default = "default_local_root")]
pub root: PathBuf,
/// URL path prefix blobs are served from; defaults to `/_blobs`.
#[serde(default = "default_local_mount_path")]
pub mount_path: String,
/// Default expiry for generated blob URLs, in seconds (15 minutes).
#[serde(default = "default_local_url_expiry_secs")]
pub default_url_expiry_secs: u64,
/// Optional URL-signing key; `None` by default.
/// NOTE(review): how the key is consumed is not visible in this file —
/// confirm against the local backend implementation.
#[serde(default)]
pub signing_key: Option<String>,
}
impl Default for StorageLocalConfig {
fn default() -> Self {
Self {
root: default_local_root(),
mount_path: default_local_mount_path(),
default_url_expiry_secs: default_local_url_expiry_secs(),
signing_key: None,
}
}
}
/// Configuration for the S3-compatible storage backend.
///
/// `bucket` and `region` are optional here but required by
/// `StorageConfig::backend_plan` when the S3 backend is selected.
#[derive(Debug, Clone, Default, Deserialize)]
pub struct StorageS3Config {
/// Bucket name; required when `storage.backend = "s3"`.
#[serde(default)]
pub bucket: Option<String>,
/// AWS region; required when `storage.backend = "s3"`.
#[serde(default)]
pub region: Option<String>,
/// Custom endpoint (e.g. for MinIO or another S3-compatible service).
#[serde(default)]
pub endpoint: Option<String>,
/// Base URL used when building publicly reachable blob URLs.
#[serde(default)]
pub public_base_url: Option<String>,
/// Name of the env var holding the access key id.
/// NOTE(review): not read in this file — confirm the consumer.
#[serde(default)]
pub access_key_id_env: Option<String>,
/// Name of the env var holding the secret access key.
/// NOTE(review): not read in this file — confirm the consumer.
#[serde(default)]
pub secret_access_key_env: Option<String>,
/// Use path-style addressing (often needed for S3-compatible services).
#[serde(default)]
pub force_path_style: bool,
/// Default expiry for presigned URLs, in seconds (15 minutes).
#[serde(default = "default_s3_url_expiry_secs")]
pub default_url_expiry_secs: u64,
}
/// Serde default for `StorageConfig::default_provider`: the provider id used
/// when the config does not name one explicitly.
fn default_provider() -> String {
    String::from("default")
}
/// Serde default for `StorageLocalConfig::root`: a dev-friendly location
/// under cargo's `target/` directory so blobs never pollute the source tree.
fn default_local_root() -> PathBuf {
    "target/blobs".into()
}
/// Serde default for `StorageLocalConfig::mount_path`: the URL prefix under
/// which locally stored blobs are served.
fn default_local_mount_path() -> String {
    String::from("/_blobs")
}
/// Serde default for `StorageLocalConfig::default_url_expiry_secs`.
const fn default_local_url_expiry_secs() -> u64 {
    900 // 15 minutes
}
/// Serde default for `StorageS3Config::default_url_expiry_secs`.
const fn default_s3_url_expiry_secs() -> u64 {
    900 // 15 minutes
}
/// A fully validated storage plan produced by `StorageConfig::backend_plan`.
///
/// Unlike the raw config, every required field here is present and
/// non-optional, so consumers need no further validation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum StorageBackendPlan {
/// Storage is off; no provider should be constructed.
Disabled,
/// Local-filesystem backend.
Local {
/// Provider id copied from `StorageConfig::default_provider`.
provider_id: String,
/// Directory blobs are stored under.
root: PathBuf,
/// URL path prefix blobs are served from.
mount_path: String,
/// Default expiry for generated blob URLs, in seconds.
default_url_expiry_secs: u64,
/// True when running under a production profile with the
/// `allow_local_in_production` acknowledgement set; callers may
/// emit a warning.
warn_in_production: bool,
},
/// S3-compatible backend.
S3 {
/// Provider id copied from `StorageConfig::default_provider`.
provider_id: String,
/// Validated, non-empty bucket name.
bucket: String,
/// Validated, non-empty region.
region: String,
/// Optional custom endpoint for S3-compatible services.
endpoint: Option<String>,
/// Optional base URL for building public blob URLs.
public_base_url: Option<String>,
/// Use path-style addressing.
force_path_style: bool,
/// Default expiry for presigned URLs, in seconds.
default_url_expiry_secs: u64,
},
}
/// Validation failures raised by `StorageConfig::backend_plan`.
#[derive(Debug, Error, PartialEq, Eq)]
#[non_exhaustive]
pub enum StorageBackendConfigError {
/// Local backend selected under a production profile without the
/// `allow_local_in_production` acknowledgement.
#[error(
"storage.backend=local in the prod profile is unsafe across replicas; \
set storage.allow_local_in_production=true to acknowledge"
)]
LocalInProduction,
/// S3 backend selected but no (non-blank) bucket configured.
#[error("storage.backend=s3 requires storage.s3.bucket")]
MissingS3Bucket,
/// S3 backend selected but no (non-blank) region configured.
#[error("storage.backend=s3 requires storage.s3.region")]
MissingS3Region,
}
impl StorageConfig {
    /// Resolves the configured backend into a validated [`StorageBackendPlan`].
    ///
    /// `profile` is the active deployment profile (e.g. `"dev"`, `"prod"`);
    /// it gates the local-backend production safety check.
    ///
    /// # Errors
    ///
    /// - [`StorageBackendConfigError::LocalInProduction`] when the local
    ///   backend is selected in a production profile without the explicit
    ///   `allow_local_in_production` acknowledgement.
    /// - [`StorageBackendConfigError::MissingS3Bucket`] /
    ///   [`StorageBackendConfigError::MissingS3Region`] when the S3 backend
    ///   is selected but the corresponding setting is absent or blank.
    pub fn backend_plan(
        &self,
        profile: Option<&str>,
    ) -> Result<StorageBackendPlan, StorageBackendConfigError> {
        match self.backend {
            StorageBackend::Disabled => Ok(StorageBackendPlan::Disabled),
            StorageBackend::Local => {
                let in_production = is_production_profile(profile);
                if in_production && !self.allow_local_in_production {
                    return Err(StorageBackendConfigError::LocalInProduction);
                }
                Ok(StorageBackendPlan::Local {
                    provider_id: self.default_provider.clone(),
                    root: self.local.root.clone(),
                    mount_path: self.local.mount_path.clone(),
                    default_url_expiry_secs: self.local.default_url_expiry_secs,
                    warn_in_production: in_production,
                })
            }
            StorageBackend::S3 => {
                // Trim before storing: previously a value like " my-bucket "
                // passed the non-blank check but carried its surrounding
                // whitespace into the plan, surfacing as a bogus bucket or
                // region at request time.
                let bucket = self
                    .s3
                    .bucket
                    .as_deref()
                    .map(str::trim)
                    .filter(|b| !b.is_empty())
                    .map(str::to_owned)
                    .ok_or(StorageBackendConfigError::MissingS3Bucket)?;
                let region = self
                    .s3
                    .region
                    .as_deref()
                    .map(str::trim)
                    .filter(|r| !r.is_empty())
                    .map(str::to_owned)
                    .ok_or(StorageBackendConfigError::MissingS3Region)?;
                Ok(StorageBackendPlan::S3 {
                    provider_id: self.default_provider.clone(),
                    bucket,
                    region,
                    endpoint: self.s3.endpoint.clone(),
                    public_base_url: self.s3.public_base_url.clone(),
                    force_path_style: self.s3.force_path_style,
                    default_url_expiry_secs: self.s3.default_url_expiry_secs,
                })
            }
        }
    }
}
/// Whether `profile` names a production deployment (`"prod"` or
/// `"production"`); `None` (no profile set) never counts as production.
fn is_production_profile(profile: Option<&str>) -> bool {
    profile.map_or(false, |p| p == "prod" || p == "production")
}
#[cfg(test)]
mod tests {
    use super::*;

    // A default config is fully disabled with the documented default values.
    #[test]
    fn defaults() {
        let config = StorageConfig::default();
        assert_eq!(config.backend, StorageBackend::Disabled);
        assert_eq!(config.default_provider, "default");
        assert_eq!(config.local.root, PathBuf::from("target/blobs"));
        assert_eq!(config.local.mount_path, "/_blobs");
    }

    // Env-value parsing: case-insensitive names map to variants, anything
    // unknown maps to None.
    #[test]
    fn backend_from_env_value() {
        let cases = [
            ("local", Some(StorageBackend::Local)),
            ("S3", Some(StorageBackend::S3)),
            ("disabled", Some(StorageBackend::Disabled)),
            ("memory", None),
        ];
        for (input, expected) in cases {
            assert_eq!(StorageBackend::from_env_value(input), expected);
        }
    }

    // The default (disabled) backend resolves to the Disabled plan.
    #[test]
    fn disabled_plan() {
        let config = StorageConfig::default();
        assert_eq!(
            config.backend_plan(Some("dev")),
            Ok(StorageBackendPlan::Disabled)
        );
    }

    // In dev, the local backend resolves without the production ack and
    // carries the default root/mount path.
    #[test]
    fn local_plan_in_dev() {
        let config = StorageConfig {
            backend: StorageBackend::Local,
            ..StorageConfig::default()
        };
        match config.backend_plan(Some("dev")).unwrap() {
            StorageBackendPlan::Local {
                root,
                mount_path,
                warn_in_production,
                ..
            } => {
                assert_eq!(root, PathBuf::from("target/blobs"));
                assert_eq!(mount_path, "/_blobs");
                assert!(!warn_in_production);
            }
            other => panic!("expected Local plan, got {other:?}"),
        }
    }

    // In prod, the local backend is rejected unless acknowledged.
    #[test]
    fn local_plan_rejects_prod_without_ack() {
        let config = StorageConfig {
            backend: StorageBackend::Local,
            ..StorageConfig::default()
        };
        assert_eq!(
            config.backend_plan(Some("prod")),
            Err(StorageBackendConfigError::LocalInProduction)
        );
    }

    // With the ack set, the prod plan resolves and flags the warning.
    #[test]
    fn local_plan_allows_prod_with_ack() {
        let config = StorageConfig {
            backend: StorageBackend::Local,
            allow_local_in_production: true,
            ..StorageConfig::default()
        };
        let plan = config.backend_plan(Some("prod")).unwrap();
        assert!(matches!(
            plan,
            StorageBackendPlan::Local {
                warn_in_production: true,
                ..
            }
        ));
    }

    // S3 validation errors fire in order: bucket first, then region.
    #[test]
    fn s3_plan_requires_bucket_and_region() {
        let with_s3 = |s3: StorageS3Config| StorageConfig {
            backend: StorageBackend::S3,
            s3,
            ..StorageConfig::default()
        };
        assert_eq!(
            with_s3(StorageS3Config::default()).backend_plan(Some("prod")),
            Err(StorageBackendConfigError::MissingS3Bucket)
        );
        let bucket_only = StorageS3Config {
            bucket: Some("b".into()),
            ..Default::default()
        };
        assert_eq!(
            with_s3(bucket_only).backend_plan(Some("prod")),
            Err(StorageBackendConfigError::MissingS3Region)
        );
    }

    // With bucket and region present, the S3 plan resolves.
    #[test]
    fn s3_plan_succeeds_with_bucket_and_region() {
        let config = StorageConfig {
            backend: StorageBackend::S3,
            s3: StorageS3Config {
                bucket: Some("b".into()),
                region: Some("us-east-1".into()),
                ..Default::default()
            },
            ..StorageConfig::default()
        };
        let plan = config.backend_plan(Some("prod")).unwrap();
        assert!(matches!(plan, StorageBackendPlan::S3 { .. }));
    }
}