use std::collections::HashMap;
use std::path::{Path, PathBuf};
use anyhow::{anyhow, Result};
use config::Config;
use etcetera::base_strategy::{choose_base_strategy, BaseStrategy};
use serde::Serialize;
/// Default cache TTL applied to every data source: 7 days, in seconds.
pub const DEFAULT_CACHE_TTL_SECS: u64 = 604800;

/// User-facing configuration for monocle, loaded from `monocle.toml` and
/// `MONOCLE_*` environment variables (see [`MonocleConfig::new`]).
#[derive(Debug, Clone)]
pub struct MonocleConfig {
    /// Directory where persistent data (e.g. the SQLite database) lives.
    pub data_dir: String,
    /// TTL in seconds for cached AS information.
    pub asinfo_cache_ttl_secs: u64,
    /// TTL in seconds for cached AS-relationship data.
    pub as2rel_cache_ttl_secs: u64,
    /// TTL in seconds for cached RPKI (ROA/ASPA) data.
    pub rpki_cache_ttl_secs: u64,
    /// TTL in seconds for cached prefix-to-AS mappings.
    pub pfx2as_cache_ttl_secs: u64,
    /// Optional RTR host for fetching ROAs; `None` disables RTR.
    pub rpki_rtr_host: Option<String>,
    /// RTR port (default 8282).
    pub rpki_rtr_port: u16,
    /// RTR connection timeout in seconds.
    pub rpki_rtr_timeout_secs: u64,
    /// If true, fail instead of falling back when RTR is unavailable.
    pub rpki_rtr_no_fallback: bool,
}
/// Commented-out configuration template written when no config file exists.
/// Every setting is shown with its default; nothing is active until the
/// user uncomments a line. (The string content is written verbatim to disk.)
const EMPTY_CONFIG: &str = r#"### monocle configuration file
### directory for cached data used by monocle
### defaults to $XDG_DATA_HOME/monocle or ~/.local/share/monocle
# data_dir = "~/.local/share/monocle"
### cache TTL settings (in seconds)
### all data sources default to 7 days (604800 seconds)
# asinfo_cache_ttl_secs = 604800
# as2rel_cache_ttl_secs = 604800
# rpki_cache_ttl_secs = 604800
# pfx2as_cache_ttl_secs = 604800
### RTR endpoint for ROA data (optional)
### If set, ROAs will be fetched via RTR protocol instead of Cloudflare JSON API
### ASPAs are always fetched from Cloudflare (RTR v1 doesn't support ASPA)
# rpki_rtr_host = "rtr.rpki.cloudflare.com"
# rpki_rtr_port = 8282
# rpki_rtr_timeout_secs = 10
### If true, error out instead of falling back to Cloudflare when RTR fails
# rpki_rtr_no_fallback = false
"#;
/// Filesystem locations used by monocle, resolved from the platform's
/// XDG-style base directories (see `resolve_monocle_paths`).
#[derive(Debug, Clone)]
struct MonoclePaths {
    /// Directory holding the config file.
    config_dir: PathBuf,
    /// Main configuration file (`monocle.toml`) inside `config_dir`.
    config_file: PathBuf,
    /// Default data directory.
    data_dir: PathBuf,
    /// Cache directory.
    cache_dir: PathBuf,
    /// Pre-XDG config location (`~/.monocle/monocle.toml`), used for a
    /// one-time migration.
    legacy_config_file: PathBuf,
}
/// Resolve every monocle-related path from the platform base-directory
/// strategy; errors only if user directories cannot be determined.
fn resolve_monocle_paths() -> Result<MonoclePaths> {
    let strategy = choose_base_strategy()
        .map_err(|e| anyhow!("Could not determine user directories: {}", e))?;
    let config_dir = strategy.config_dir().join("monocle");
    Ok(MonoclePaths {
        config_file: config_dir.join("monocle.toml"),
        data_dir: strategy.data_dir().join("monocle"),
        cache_dir: strategy.cache_dir().join("monocle"),
        legacy_config_file: strategy.home_dir().join(".monocle").join("monocle.toml"),
        config_dir,
    })
}
/// Ensure the default config file exists at `paths.config_file`.
///
/// - If the file already exists, nothing happens.
/// - Otherwise the config directory is created. If the directory is empty
///   and a legacy `~/.monocle/monocle.toml` exists, the legacy file is
///   copied over (one-time migration).
/// - Otherwise a commented-out template (`EMPTY_CONFIG`) is written.
fn ensure_default_config_file(paths: &MonoclePaths) -> Result<()> {
    if paths.config_file.exists() {
        return Ok(());
    }
    std::fs::create_dir_all(paths.config_dir.as_path()).map_err(|e| {
        anyhow!(
            "Unable to create config directory {}: {}",
            paths.config_dir.display(),
            e
        )
    })?;
    // Peek at the first directory entry to decide whether the directory is
    // empty; a directory created just above has no entries.
    let mut entries = std::fs::read_dir(paths.config_dir.as_path()).map_err(|e| {
        anyhow!(
            "Unable to read config directory {}: {}",
            paths.config_dir.display(),
            e
        )
    })?;
    let config_dir_is_empty = match entries.next() {
        None => true,
        Some(Ok(_)) => false,
        Some(Err(e)) => {
            return Err(anyhow!(
                "Unable to read config directory {}: {}",
                paths.config_dir.display(),
                e
            ));
        }
    };
    // Migrate the legacy file only into an otherwise-empty config directory,
    // so existing files there are never clobbered by the migration.
    if config_dir_is_empty && paths.legacy_config_file.exists() {
        std::fs::copy(
            paths.legacy_config_file.as_path(),
            paths.config_file.as_path(),
        )
        .map_err(|e| {
            anyhow!(
                "Unable to migrate config file from {} to {}: {}",
                paths.legacy_config_file.display(),
                paths.config_file.display(),
                e
            )
        })?;
        return Ok(());
    }
    // Fresh install: seed the commented-out template.
    std::fs::write(paths.config_file.as_path(), EMPTY_CONFIG).map_err(|e| {
        anyhow!(
            "Unable to create config file {}: {}",
            paths.config_file.display(),
            e
        )
    })?;
    Ok(())
}
impl Default for MonocleConfig {
fn default() -> Self {
let data_dir = resolve_monocle_paths()
.map(|paths| paths.data_dir.to_string_lossy().to_string())
.unwrap_or_else(|_| ".".to_string());
Self {
data_dir,
asinfo_cache_ttl_secs: DEFAULT_CACHE_TTL_SECS,
as2rel_cache_ttl_secs: DEFAULT_CACHE_TTL_SECS,
rpki_cache_ttl_secs: DEFAULT_CACHE_TTL_SECS,
pfx2as_cache_ttl_secs: DEFAULT_CACHE_TTL_SECS,
rpki_rtr_host: None,
rpki_rtr_port: 8282,
rpki_rtr_timeout_secs: 10,
rpki_rtr_no_fallback: false,
}
}
}
impl MonocleConfig {
    /// Load configuration from a TOML file plus `MONOCLE_*` environment
    /// variables (environment values override file values).
    ///
    /// * `path` — explicit config file path. If the file does not exist it
    ///   is created from the commented-out template and then read. With
    ///   `None`, the default XDG config file is used (created or migrated
    ///   from the legacy location on demand).
    ///
    /// Missing or unparsable values fall back to built-in defaults.
    pub fn new(path: &Option<String>) -> Result<MonocleConfig> {
        let mut builder = Config::builder();
        let paths = resolve_monocle_paths()?;
        match path {
            Some(p) => {
                let path = Path::new(p.as_str());
                if !path.exists() {
                    // Seed the file (and its parent directory) with the
                    // commented-out template so the user has a starting point.
                    if let Some(parent) = path.parent() {
                        if !parent.as_os_str().is_empty() {
                            std::fs::create_dir_all(parent).map_err(|e| {
                                anyhow!(
                                    "Unable to create config parent directory {}: {}",
                                    parent.display(),
                                    e
                                )
                            })?;
                        }
                    }
                    std::fs::write(p.as_str(), EMPTY_CONFIG)
                        .map_err(|e| anyhow!("Unable to create config file: {}", e))?;
                }
                let path_str = path
                    .to_str()
                    .ok_or_else(|| anyhow!("Could not convert path to string"))?;
                // Always register the file as a source — including one we
                // just created — so an explicit path behaves identically
                // whether or not it already existed.
                builder = builder.add_source(config::File::with_name(path_str));
            }
            None => {
                ensure_default_config_file(&paths)?;
                let p = paths.config_file.to_string_lossy().to_string();
                builder = builder.add_source(config::File::with_name(p.as_str()));
            }
        }
        // Environment variables with the MONOCLE_ prefix override the file.
        builder = builder.add_source(config::Environment::with_prefix("MONOCLE"));
        let settings = builder
            .build()
            .map_err(|e| anyhow!("Failed to build configuration: {}", e))?;
        let config = settings
            .try_deserialize::<HashMap<String, String>>()
            .map_err(|e| anyhow!("Failed to deserialize configuration: {}", e))?;

        let data_dir = match config.get("data_dir") {
            // The value originates from a String, so it is already valid
            // UTF-8; no Path round-trip is needed.
            // NOTE(review): a user-supplied data_dir is not created here —
            // only the default directory below is — confirm that is intended.
            Some(p) => p.clone(),
            None => {
                let dir = paths.data_dir.to_string_lossy().to_string();
                std::fs::create_dir_all(dir.as_str())
                    .map_err(|e| anyhow!("Unable to create data directory: {}", e))?;
                dir
            }
        };

        // All four cache TTLs share the same parse-or-default rule.
        let ttl_of = |key: &str| -> u64 {
            config
                .get(key)
                .and_then(|s| s.parse().ok())
                .unwrap_or(DEFAULT_CACHE_TTL_SECS)
        };

        let rpki_rtr_host = config.get("rpki_rtr_host").cloned();
        let rpki_rtr_port = config
            .get("rpki_rtr_port")
            .and_then(|s| s.parse().ok())
            .unwrap_or(8282);
        let rpki_rtr_timeout_secs = config
            .get("rpki_rtr_timeout_secs")
            .and_then(|s| s.parse().ok())
            .unwrap_or(10);
        // Only the (case-insensitive) string "true" enables the flag.
        let rpki_rtr_no_fallback = config
            .get("rpki_rtr_no_fallback")
            .map(|s| s.to_lowercase() == "true")
            .unwrap_or(false);

        Ok(MonocleConfig {
            data_dir,
            asinfo_cache_ttl_secs: ttl_of("asinfo_cache_ttl_secs"),
            as2rel_cache_ttl_secs: ttl_of("as2rel_cache_ttl_secs"),
            rpki_cache_ttl_secs: ttl_of("rpki_cache_ttl_secs"),
            pfx2as_cache_ttl_secs: ttl_of("pfx2as_cache_ttl_secs"),
            rpki_rtr_host,
            rpki_rtr_port,
            rpki_rtr_timeout_secs,
            rpki_rtr_no_fallback,
        })
    }

    /// Path of the SQLite database inside `data_dir`.
    pub fn sqlite_path(&self) -> String {
        let data_dir = self.data_dir.trim_end_matches('/');
        format!("{}/monocle-data.sqlite3", data_dir)
    }

    /// TTL for cached AS information.
    pub fn asinfo_cache_ttl(&self) -> std::time::Duration {
        std::time::Duration::from_secs(self.asinfo_cache_ttl_secs)
    }

    /// TTL for cached AS-relationship data.
    pub fn as2rel_cache_ttl(&self) -> std::time::Duration {
        std::time::Duration::from_secs(self.as2rel_cache_ttl_secs)
    }

    /// TTL for cached RPKI data.
    pub fn rpki_cache_ttl(&self) -> std::time::Duration {
        std::time::Duration::from_secs(self.rpki_cache_ttl_secs)
    }

    /// TTL for cached prefix-to-AS data.
    pub fn pfx2as_cache_ttl(&self) -> std::time::Duration {
        std::time::Duration::from_secs(self.pfx2as_cache_ttl_secs)
    }

    /// Whether an RTR endpoint is configured.
    pub fn has_rtr_endpoint(&self) -> bool {
        self.rpki_rtr_host.is_some()
    }

    /// Configured RTR `(host, port)` pair, if any.
    pub fn rtr_endpoint(&self) -> Option<(String, u16)> {
        self.rpki_rtr_host
            .as_ref()
            .map(|h| (h.clone(), self.rpki_rtr_port))
    }

    /// Connection timeout for an RTR session.
    pub fn rtr_timeout(&self) -> std::time::Duration {
        std::time::Duration::from_secs(self.rpki_rtr_timeout_secs)
    }

    /// Multi-line human-readable summary of the effective configuration.
    pub fn summary(&self) -> String {
        let mut lines = vec![
            format!("Data Directory: {}", self.data_dir),
            format!("SQLite Path: {}", self.sqlite_path()),
            format!("ASInfo Cache TTL: {} seconds", self.asinfo_cache_ttl_secs),
            format!("AS2Rel Cache TTL: {} seconds", self.as2rel_cache_ttl_secs),
            format!("RPKI Cache TTL: {} seconds", self.rpki_cache_ttl_secs),
            format!("Pfx2as Cache TTL: {} seconds", self.pfx2as_cache_ttl_secs),
        ];
        if let Some((host, port)) = self.rtr_endpoint() {
            lines.push(format!("RTR Endpoint: {}:{}", host, port));
        }
        // Only mention the cache directory when it actually exists on disk.
        let cache_dir = self.cache_dir();
        if std::path::Path::new(&cache_dir).exists() {
            lines.push(format!("Cache Directory: {}", cache_dir));
        }
        lines.join("\n")
    }

    /// Default config-file location; best-effort literal fallback when the
    /// platform base directories cannot be resolved.
    pub fn config_file_path() -> String {
        resolve_monocle_paths()
            .map(|paths| paths.config_file.to_string_lossy().to_string())
            .unwrap_or_else(|_| "~/.config/monocle/monocle.toml".to_string())
    }

    /// Cache directory; falls back to `<data_dir>/cache` when the platform
    /// base directories cannot be resolved.
    pub fn cache_dir(&self) -> String {
        resolve_monocle_paths()
            .map(|paths| paths.cache_dir.to_string_lossy().to_string())
            .unwrap_or_else(|_| format!("{}/cache", self.data_dir.trim_end_matches('/')))
    }
}
/// Snapshot of one data source's cache state, for display or JSON output.
#[derive(Debug, Serialize, Clone)]
pub struct DataSourceInfo {
    /// Machine-readable source name (e.g. "asinfo").
    pub name: String,
    /// Human-readable description of the source.
    pub description: String,
    /// Number of cached records, if known.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub record_count: Option<u64>,
    /// Last update time as a formatted string, if known.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_updated: Option<String>,
    /// Ready / empty / not-initialized state of the cached data.
    pub status: DataSourceStatus,
    /// True when the cached data is older than its TTL (or unavailable).
    pub is_stale: bool,
    /// Configured TTL in seconds, if applicable.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ttl_secs: Option<u64>,
}
/// Population state of a cached data source.
///
/// Note: serde serializes variants lowercased ("notinitialized"), while
/// `Display` renders "not initialized" with a space.
#[derive(Debug, Serialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum DataSourceStatus {
    // Data present (at least one record).
    Ready,
    // Table exists but holds no records.
    Empty,
    // Data was never fetched/initialized.
    NotInitialized,
}
impl std::fmt::Display for DataSourceStatus {
    /// Human-readable label: "ready", "empty", or "not initialized".
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            DataSourceStatus::Ready => "ready",
            DataSourceStatus::Empty => "empty",
            DataSourceStatus::NotInitialized => "not initialized",
        };
        f.write_str(label)
    }
}
/// On-disk state of the monocle SQLite database: file presence/size,
/// schema state, and per-table record counts with last-update timestamps.
/// `None` fields mean "unknown / not initialized".
#[derive(Debug, Serialize, Clone)]
pub struct SqliteDatabaseInfo {
    /// Filesystem path of the database file.
    pub path: String,
    /// Whether the database file exists on disk.
    pub exists: bool,
    /// File size in bytes, when the file exists and is readable.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size_bytes: Option<u64>,
    /// Whether a usable schema was found in the database.
    pub schema_initialized: bool,
    /// Schema version reported by the database, if determinable.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub schema_version: Option<u32>,
    // Per-table counts and formatted last-update timestamps follow.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub asinfo_count: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub asinfo_last_updated: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub as2rel_count: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub as2rel_last_updated: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rpki_roa_count: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rpki_aspa_count: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rpki_last_updated: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pfx2as_count: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pfx2as_last_updated: Option<String>,
}
/// The four configured cache TTLs (in seconds), for display/serialization.
#[derive(Debug, Serialize, Clone)]
pub struct CacheSettings {
    pub asinfo_ttl_secs: u64,
    pub as2rel_ttl_secs: u64,
    pub rpki_ttl_secs: u64,
    pub pfx2as_ttl_secs: u64,
}
/// Identifier for each data source monocle can cache.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DataSource {
    // AS information (BGPKIT).
    Asinfo,
    // AS-level relationships (BGPKIT).
    As2rel,
    // RPKI ROAs and ASPAs (Cloudflare).
    Rpki,
    // Prefix-to-AS mappings (BGPKIT).
    Pfx2as,
}
impl DataSource {
pub fn all() -> Vec<DataSource> {
vec![
DataSource::Asinfo,
DataSource::As2rel,
DataSource::Rpki,
DataSource::Pfx2as,
]
}
pub fn database_sources() -> Vec<DataSource> {
vec![DataSource::Asinfo, DataSource::As2rel, DataSource::Rpki]
}
#[allow(clippy::should_implement_trait)]
pub fn from_str(s: &str) -> Option<DataSource> {
match s.to_lowercase().as_str() {
"asinfo" => Some(DataSource::Asinfo),
"as2rel" => Some(DataSource::As2rel),
"rpki" => Some(DataSource::Rpki),
"pfx2as" => Some(DataSource::Pfx2as),
_ => None,
}
}
pub fn name(&self) -> &'static str {
match self {
DataSource::Asinfo => "asinfo",
DataSource::As2rel => "as2rel",
DataSource::Rpki => "rpki",
DataSource::Pfx2as => "pfx2as",
}
}
pub fn description(&self) -> &'static str {
match self {
DataSource::Asinfo => "AS information data (from BGPKIT)",
DataSource::As2rel => "AS-level relationship data (from BGPKIT)",
DataSource::Rpki => "RPKI ROAs and ASPAs (from Cloudflare)",
DataSource::Pfx2as => "Prefix-to-AS mappings (from BGPKIT)",
}
}
}
impl std::fmt::Display for DataSource {
    /// Renders the canonical lowercase name (same as [`DataSource::name`]).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.name())
    }
}
/// Inspect the SQLite database on disk and report schema state plus
/// per-table record counts and last-update timestamps.
///
/// Best effort: any open or query failure degrades the affected fields to
/// `None`/`false` rather than returning an error.
#[cfg(feature = "lib")]
pub fn get_sqlite_info(config: &MonocleConfig) -> SqliteDatabaseInfo {
    use crate::database::{MonocleDatabase, SchemaManager, SchemaStatus, SCHEMA_VERSION};
    let sqlite_path = config.sqlite_path();
    let sqlite_exists = Path::new(&sqlite_path).exists();
    let sqlite_size = if sqlite_exists {
        std::fs::metadata(&sqlite_path).ok().map(|m| m.len())
    } else {
        None
    };
    // One wide tuple gives a single assignment point for all fields across
    // the file-missing / open-failed / uninitialized / initialized cases.
    let (
        schema_initialized,
        schema_version,
        asinfo_count,
        asinfo_last_updated,
        as2rel_count,
        as2rel_last_updated,
        rpki_roa_count,
        rpki_aspa_count,
        rpki_last_updated,
        pfx2as_count,
        pfx2as_last_updated,
    ) = if sqlite_exists {
        match MonocleDatabase::open(&sqlite_path) {
            Ok(db) => {
                let conn = db.connection();
                let manager = SchemaManager::new(conn);
                // Map schema status to (initialized, version). A schema that
                // needs migration or is incompatible still counts as
                // initialized — only NotInitialized/Corrupted do not.
                let (initialized, version) = match manager.check_status() {
                    Ok(status) => match status {
                        SchemaStatus::Current => (true, Some(SCHEMA_VERSION)),
                        SchemaStatus::NeedsMigration { from, to: _ } => (true, Some(from)),
                        SchemaStatus::NotInitialized => (false, None),
                        SchemaStatus::Incompatible {
                            database_version,
                            required_version: _,
                        } => (true, Some(database_version)),
                        SchemaStatus::Corrupted => (false, None),
                    },
                    Err(_) => (false, None),
                };
                let (
                    asinfo,
                    asinfo_updated,
                    as2rel,
                    as2rel_updated,
                    rpki_roa,
                    rpki_aspa,
                    rpki_updated,
                    pfx2as,
                    pfx2as_updated,
                ) = if initialized {
                    // Per-table counts; metadata timestamps are rendered as
                    // "YYYY-MM-DD HH:MM:SS UTC" strings.
                    let asinfo = Some(db.asinfo().core_count() as u64);
                    let asinfo_meta = db.asinfo().get_metadata().ok().flatten();
                    let asinfo_updated = asinfo_meta.map(|m| {
                        let datetime =
                            chrono::DateTime::from_timestamp(m.last_updated, 0).unwrap_or_default();
                        datetime.format("%Y-%m-%d %H:%M:%S UTC").to_string()
                    });
                    let as2rel = db.as2rel().count().ok();
                    let as2rel_meta = db.as2rel().get_meta().ok().flatten();
                    let as2rel_updated = as2rel_meta.map(|m| {
                        let datetime = chrono::DateTime::from_timestamp(m.last_updated as i64, 0)
                            .unwrap_or_default();
                        datetime.format("%Y-%m-%d %H:%M:%S UTC").to_string()
                    });
                    let rpki_roa = db.rpki().roa_count().ok();
                    let rpki_aspa = db.rpki().aspa_count().ok();
                    let rpki_meta = db.rpki().get_metadata().ok().flatten();
                    let rpki_updated =
                        rpki_meta.map(|m| m.updated_at.format("%Y-%m-%d %H:%M:%S UTC").to_string());
                    let pfx2as = db.pfx2as().record_count().ok();
                    let pfx2as_meta = db.pfx2as().get_metadata().ok().flatten();
                    let pfx2as_updated = pfx2as_meta
                        .map(|m| m.updated_at.format("%Y-%m-%d %H:%M:%S UTC").to_string());
                    (
                        asinfo,
                        asinfo_updated,
                        as2rel,
                        as2rel_updated,
                        rpki_roa,
                        rpki_aspa,
                        rpki_updated,
                        pfx2as,
                        pfx2as_updated,
                    )
                } else {
                    // Schema not usable: no per-table information.
                    (None, None, None, None, None, None, None, None, None)
                };
                (
                    initialized,
                    version,
                    asinfo,
                    asinfo_updated,
                    as2rel,
                    as2rel_updated,
                    rpki_roa,
                    rpki_aspa,
                    rpki_updated,
                    pfx2as,
                    pfx2as_updated,
                )
            }
            // Database file exists but could not be opened.
            Err(_) => (
                false, None, None, None, None, None, None, None, None, None, None,
            ),
        }
    } else {
        // No database file at all.
        (
            false, None, None, None, None, None, None, None, None, None, None,
        )
    };
    SqliteDatabaseInfo {
        path: sqlite_path,
        exists: sqlite_exists,
        size_bytes: sqlite_size,
        schema_initialized,
        schema_version,
        asinfo_count,
        asinfo_last_updated,
        as2rel_count,
        as2rel_last_updated,
        rpki_roa_count,
        rpki_aspa_count,
        rpki_last_updated,
        pfx2as_count,
        pfx2as_last_updated,
    }
}
pub fn get_cache_settings(config: &MonocleConfig) -> CacheSettings {
CacheSettings {
asinfo_ttl_secs: config.asinfo_cache_ttl_secs,
as2rel_ttl_secs: config.as2rel_cache_ttl_secs,
rpki_ttl_secs: config.rpki_cache_ttl_secs,
pfx2as_ttl_secs: config.pfx2as_cache_ttl_secs,
}
}
/// Collect per-source cache status for every known data source: record
/// counts, last-update timestamps, and staleness against the configured TTLs.
#[cfg(feature = "lib")]
pub fn get_data_source_info(config: &MonocleConfig) -> Vec<DataSourceInfo> {
    use crate::database::MonocleDatabase;
    use std::time::Duration;

    let sqlite_info = get_sqlite_info(config);
    let sqlite_path = config.sqlite_path();
    // Open the database once; when it cannot be opened every source is
    // reported as stale.
    let db = if sqlite_info.exists {
        MonocleDatabase::open(&sqlite_path).ok()
    } else {
        None
    };

    // ROAs and ASPAs are stored together; report their combined count.
    let rpki_total = match (sqlite_info.rpki_roa_count, sqlite_info.rpki_aspa_count) {
        (Some(roa), Some(aspa)) => Some(roa + aspa),
        (Some(roa), None) => Some(roa),
        (None, Some(aspa)) => Some(aspa),
        (None, None) => None,
    };

    // Per-table staleness checks; stale when the database is unavailable.
    let asinfo_stale = db
        .as_ref()
        .map(|d| {
            d.asinfo()
                .needs_refresh(Duration::from_secs(config.asinfo_cache_ttl_secs))
        })
        .unwrap_or(true);
    let as2rel_stale = db
        .as_ref()
        .map(|d| {
            d.as2rel()
                .needs_refresh(Duration::from_secs(config.as2rel_cache_ttl_secs))
        })
        .unwrap_or(true);
    let rpki_stale = db
        .as_ref()
        .map(|d| {
            d.rpki()
                .needs_refresh(Duration::from_secs(config.rpki_cache_ttl_secs))
        })
        .unwrap_or(true);
    let pfx2as_stale = db
        .as_ref()
        .map(|d| {
            d.pfx2as()
                .needs_refresh(Duration::from_secs(config.pfx2as_cache_ttl_secs))
        })
        .unwrap_or(true);

    // Shared status derivation: Ready if there are records, Empty if the
    // table is present but empty, NotInitialized if it was never populated.
    let status_of = |count: Option<u64>| match count {
        Some(c) if c > 0 => DataSourceStatus::Ready,
        Some(_) => DataSourceStatus::Empty,
        None => DataSourceStatus::NotInitialized,
    };

    // (source, record_count, last_updated, ttl_secs, is_stale), in the
    // canonical display order.
    let entries = [
        (
            DataSource::Asinfo,
            sqlite_info.asinfo_count,
            sqlite_info.asinfo_last_updated.clone(),
            config.asinfo_cache_ttl_secs,
            asinfo_stale,
        ),
        (
            DataSource::As2rel,
            sqlite_info.as2rel_count,
            sqlite_info.as2rel_last_updated.clone(),
            config.as2rel_cache_ttl_secs,
            as2rel_stale,
        ),
        (
            DataSource::Rpki,
            rpki_total,
            sqlite_info.rpki_last_updated.clone(),
            config.rpki_cache_ttl_secs,
            rpki_stale,
        ),
        (
            DataSource::Pfx2as,
            sqlite_info.pfx2as_count,
            sqlite_info.pfx2as_last_updated.clone(),
            config.pfx2as_cache_ttl_secs,
            pfx2as_stale,
        ),
    ];

    entries
        .into_iter()
        .map(
            |(source, record_count, last_updated, ttl_secs, is_stale)| DataSourceInfo {
                name: source.name().to_string(),
                description: source.description().to_string(),
                record_count,
                last_updated,
                status: status_of(record_count),
                is_stale,
                ttl_secs: Some(ttl_secs),
            },
        )
        .collect()
}
/// Format a byte count as a human-readable string using 1024-based units
/// (B, KB, MB, GB) with two decimal places above one KB.
pub fn format_size(bytes: u64) -> String {
    const UNITS: [(u64, &str); 3] = [(1 << 30, "GB"), (1 << 20, "MB"), (1 << 10, "KB")];
    for (scale, unit) in UNITS {
        if bytes >= scale {
            return format!("{:.2} {}", bytes as f64 / scale as f64, unit);
        }
    }
    format!("{} B", bytes)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a config with the given data_dir and the four TTLs
    /// (asinfo, as2rel, rpki, pfx2as); RTR disabled.
    fn make_config(data_dir: &str, ttls: [u64; 4]) -> MonocleConfig {
        MonocleConfig {
            data_dir: data_dir.to_string(),
            asinfo_cache_ttl_secs: ttls[0],
            as2rel_cache_ttl_secs: ttls[1],
            rpki_cache_ttl_secs: ttls[2],
            pfx2as_cache_ttl_secs: ttls[3],
            rpki_rtr_host: None,
            rpki_rtr_port: 8282,
            rpki_rtr_timeout_secs: 10,
            rpki_rtr_no_fallback: false,
        }
    }

    #[test]
    fn test_default_config() {
        let config = MonocleConfig::default();
        // All TTLs default to 7 days.
        assert_eq!(config.asinfo_cache_ttl_secs, DEFAULT_CACHE_TTL_SECS);
        assert_eq!(config.as2rel_cache_ttl_secs, DEFAULT_CACHE_TTL_SECS);
        assert_eq!(config.rpki_cache_ttl_secs, DEFAULT_CACHE_TTL_SECS);
        assert_eq!(config.pfx2as_cache_ttl_secs, DEFAULT_CACHE_TTL_SECS);
        // RTR is disabled by default.
        assert_eq!(config.rpki_rtr_host, None);
        assert_eq!(config.rpki_rtr_port, 8282);
        assert_eq!(config.rpki_rtr_timeout_secs, 10);
        assert!(!config.rpki_rtr_no_fallback);
    }

    #[test]
    fn test_paths() {
        let config = make_config(
            "/test/dir",
            [DEFAULT_CACHE_TTL_SECS, DEFAULT_CACHE_TTL_SECS, 3600, 86400],
        );
        assert_eq!(config.sqlite_path(), "/test/dir/monocle-data.sqlite3");
        // cache_dir() prefers the platform cache directory and only falls
        // back to <data_dir>/cache when resolution fails.
        let expected_cache_dir = resolve_monocle_paths()
            .map(|paths| paths.cache_dir.to_string_lossy().to_string())
            .unwrap_or_else(|_| "/test/dir/cache".to_string());
        assert_eq!(config.cache_dir(), expected_cache_dir);
    }

    #[test]
    fn test_ttl_durations() {
        let config = make_config("/test", [1000, 2000, 7200, 3600]);
        assert_eq!(
            config.asinfo_cache_ttl(),
            std::time::Duration::from_secs(1000)
        );
        assert_eq!(
            config.as2rel_cache_ttl(),
            std::time::Duration::from_secs(2000)
        );
        assert_eq!(config.rpki_cache_ttl(), std::time::Duration::from_secs(7200));
        assert_eq!(
            config.pfx2as_cache_ttl(),
            std::time::Duration::from_secs(3600)
        );
    }

    #[test]
    fn test_rtr_endpoint() {
        // No host configured: endpoint helpers report nothing.
        let config = MonocleConfig::default();
        assert!(!config.has_rtr_endpoint());
        assert_eq!(config.rtr_endpoint(), None);

        // Host configured: endpoint and timeout are exposed.
        let mut config = make_config(
            "/test",
            [DEFAULT_CACHE_TTL_SECS, DEFAULT_CACHE_TTL_SECS, 3600, 86400],
        );
        config.rpki_rtr_host = Some("rtr.example.com".to_string());
        config.rpki_rtr_timeout_secs = 30;
        assert!(config.has_rtr_endpoint());
        assert_eq!(
            config.rtr_endpoint(),
            Some(("rtr.example.com".to_string(), 8282))
        );
        assert_eq!(config.rtr_timeout(), std::time::Duration::from_secs(30));
    }

    #[test]
    fn test_data_source_from_str() {
        assert_eq!(DataSource::from_str("asinfo"), Some(DataSource::Asinfo));
        // Parsing is case-insensitive.
        assert_eq!(DataSource::from_str("AS2REL"), Some(DataSource::As2rel));
        assert_eq!(DataSource::from_str("rpki"), Some(DataSource::Rpki));
        assert_eq!(DataSource::from_str("pfx2as"), Some(DataSource::Pfx2as));
        assert_eq!(DataSource::from_str("unknown"), None);
    }

    #[test]
    fn test_format_size() {
        assert_eq!(format_size(500), "500 B");
        assert_eq!(format_size(1024), "1.00 KB");
        assert_eq!(format_size(1536), "1.50 KB");
        assert_eq!(format_size(1048576), "1.00 MB");
        assert_eq!(format_size(1073741824), "1.00 GB");
    }

    #[test]
    fn test_data_source_status_display() {
        assert_eq!(format!("{}", DataSourceStatus::Ready), "ready");
        assert_eq!(format!("{}", DataSourceStatus::Empty), "empty");
        assert_eq!(
            format!("{}", DataSourceStatus::NotInitialized),
            "not initialized"
        );
    }
}