use crate::util::fs::{self, File};
use directories::ProjectDirs;
use regex::Regex;
use serde::de::{Deserialize, DeserializeOwned, Deserializer};
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
use serde::ser::{Serialize, Serializer};
use std::collections::HashMap;
use std::env;
use std::fmt;
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
use std::result::Result as StdResult;
use std::str::FromStr;
use std::sync::Mutex;
use crate::errors::*;
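// Lazily-initialised process-wide state for the cached config: its on-disk
// path, and the in-memory copy guarded by a mutex so concurrent callers see a
// consistent view.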
lazy_static! {
static ref CACHED_CONFIG_PATH: PathBuf = CachedConfig::file_config_path();
static ref CACHED_CONFIG: Mutex<Option<CachedFileConfig>> = Mutex::new(None);
}
const ORGANIZATION: &str = "Parity";
const APP_NAME: &str = "cachepot";
const DIST_APP_NAME: &str = "cachepot-dist-client";
const TEN_GIGS: u64 = 10 * 1024 * 1024 * 1024;
const MOZILLA_OAUTH_PKCE_CLIENT_ID: &str = "F1VVD6nRTckSVrviMRaOdLBWIk1AvHYo";
const MOZILLA_OAUTH_PKCE_AUTH_URL: &str =
"https://auth.mozilla.auth0.com/authorize?audience=cachepot&scope=openid%20profile";
const MOZILLA_OAUTH_PKCE_TOKEN_URL: &str = "https://auth.mozilla.auth0.com/oauth/token";
pub const INSECURE_DIST_CLIENT_TOKEN: &str = "dangerously_insecure_client";
pub fn default_disk_cache_dir() -> PathBuf {
ProjectDirs::from("", ORGANIZATION, APP_NAME)
.expect("Unable to retrieve disk cache directory")
.cache_dir()
.to_owned()
}
pub fn default_dist_cache_dir() -> PathBuf {
ProjectDirs::from("", ORGANIZATION, DIST_APP_NAME)
.expect("Unable to retrieve dist cache directory")
.cache_dir()
.to_owned()
}
fn default_disk_cache_size() -> u64 {
TEN_GIGS
}
fn default_toolchain_cache_size() -> u64 {
TEN_GIGS
}
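/// Parses a size in bytes from a string such as `"10G"`, accepting only the
/// binary suffixes `K`, `M`, `G` and `T`. A bare number or an unknown suffix
/// yields `None`. For example (mirroring `test_parse_size` below):
///
/// ```text
/// parse_size("2K")  == Some(2 * 1024)
/// parse_size("10G") == Some(10 * 1024 * 1024 * 1024)
/// parse_size("100") == None // a suffix is required
/// ```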
pub fn parse_size(val: &str) -> Option<u64> {
let re = Regex::new(r"^(\d+)([KMGT])$").expect("Fixed regex parse failure");
re.captures(val)
.and_then(|caps| {
caps.get(1)
.and_then(|size| u64::from_str(size.as_str()).ok())
.map(|size| (size, caps.get(2)))
})
.and_then(|(size, suffix)| match suffix.map(|s| s.as_str()) {
Some("K") => Some(1024 * size),
Some("M") => Some(1024 * 1024 * size),
Some("G") => Some(1024 * 1024 * 1024 * size),
Some("T") => Some(1024 * 1024 * 1024 * 1024 * size),
_ => None,
})
}
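/// A wrapper around `reqwest::Url` for scheduler and worker addresses. When
/// parsed or deserialized from a string it is validated by `parse_http_url`
/// to use the `http` or `https` scheme with a root path; `from_url` performs
/// no such validation, so callers are expected to pass an already-valid URL.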
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct HTTPUrl(reqwest::Url);
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
impl Serialize for HTTPUrl {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.0.as_str())
}
}
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
impl<'a> Deserialize<'a> for HTTPUrl {
fn deserialize<D>(deserializer: D) -> StdResult<Self, D::Error>
where
D: Deserializer<'a>,
{
use serde::de::Error;
let helper: String = Deserialize::deserialize(deserializer)?;
let url = parse_http_url(&helper).map_err(D::Error::custom)?;
Ok(HTTPUrl(url))
}
}
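/// Parses a URL string, requiring an `http` or `https` scheme and rejecting
/// any path other than `/`.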
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
fn parse_http_url(url: &str) -> Result<reqwest::Url> {
let url = reqwest::Url::parse(url)?;
if url.scheme() != "http" && url.scheme() != "https" {
bail!("url not http or https")
}
if url.path() != "/" {
bail!("url has a relative path (currently unsupported)")
}
Ok(url)
}
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
impl HTTPUrl {
pub fn from_url(u: reqwest::Url) -> Self {
HTTPUrl(u)
}
pub fn to_url(&self) -> &reqwest::Url {
&self.0
}
pub fn host(&self) -> url::Host<&str> {
self.0.host().expect("HTTPUrl always has a valid host; qed")
}
pub fn host_str(&self) -> &str {
self.0
.host_str()
.expect("HTTPUrl always has a valid host; qed")
}
}
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
impl FromStr for HTTPUrl {
type Err = anyhow::Error;
fn from_str(s: &str) -> ::std::result::Result<Self, Self::Err> {
let url = parse_http_url(s)?;
if !url.has_host() {
bail!("HTTPUrl should have a host");
}
Ok(HTTPUrl(url))
}
}
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
impl fmt::Display for HTTPUrl {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
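/// The address of a dist worker. It serializes as a bare `host` or
/// `host:port` string and is assumed to be reachable over `https` when read
/// back; e.g. `"192.168.1.1:10501".parse::<WorkerUrl>()` yields the URL
/// `https://192.168.1.1:10501/`.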
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct WorkerUrl(pub HTTPUrl);
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
impl Serialize for WorkerUrl {
fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
where
S: Serializer,
{
let host = self.0.host_str();
let helper = if let Some(port) = self.0 .0.port() {
format!("{}:{}", host, port)
} else {
format!("{}", host)
};
serializer.serialize_str(&helper)
}
}
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
impl<'a> Deserialize<'a> for WorkerUrl {
fn deserialize<D>(deserializer: D) -> StdResult<Self, D::Error>
where
D: Deserializer<'a>,
{
use serde::de::Error;
let helper: String = Deserialize::deserialize(deserializer)?;
let helper = format!("https://{}", helper);
let url = parse_http_url(&helper).map_err(D::Error::custom)?;
Ok(WorkerUrl(HTTPUrl(url)))
}
}
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
impl FromStr for WorkerUrl {
type Err = anyhow::Error;
fn from_str(s: &str) -> ::std::result::Result<Self, Self::Err> {
let helper = format!("https://{}", s);
Ok(WorkerUrl(HTTPUrl::from_str(&helper)?))
}
}
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
impl fmt::Display for WorkerUrl {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}",
self.0 .0.host().expect("HTTPUrl always has a host; qed")
)?;
if let Some(port) = self.0 .0.port() {
write!(f, ":{}", port)
} else {
Ok(())
}
}
}
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct AzureCacheConfig;
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
#[serde(default)]
pub struct DiskCacheConfig {
pub dir: PathBuf,
pub size: u64,
}
impl Default for DiskCacheConfig {
fn default() -> Self {
DiskCacheConfig {
dir: default_disk_cache_dir(),
size: default_disk_cache_size(),
}
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub enum GCSCacheRWMode {
#[serde(rename = "READ_ONLY")]
ReadOnly,
#[serde(rename = "READ_WRITE")]
ReadWrite,
}
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct GCSCacheConfig {
pub bucket: String,
pub cred_path: Option<PathBuf>,
pub url: Option<String>,
pub rw_mode: GCSCacheRWMode,
}
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct MemcachedCacheConfig {
pub url: String,
}
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct RedisCacheConfig {
pub url: String,
}
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct S3CacheConfig {
pub bucket: String,
#[serde(default)]
pub endpoint: Option<String>,
#[serde(default)]
pub key_prefix: Option<String>,
#[serde(default)]
pub region: Option<String>,
pub public: bool,
}
#[derive(Debug, PartialEq, Eq)]
pub enum CacheType {
Azure(AzureCacheConfig),
GCS(GCSCacheConfig),
Memcached(MemcachedCacheConfig),
Redis(RedisCacheConfig),
S3(S3CacheConfig),
}
#[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct CacheConfigs {
pub azure: Option<AzureCacheConfig>,
pub disk: Option<DiskCacheConfig>,
pub gcs: Option<GCSCacheConfig>,
pub memcached: Option<MemcachedCacheConfig>,
pub redis: Option<RedisCacheConfig>,
pub s3: Option<S3CacheConfig>,
}
impl CacheConfigs {
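    /// Splits the configuration into the ordered list of caches to try (S3,
    /// Redis, memcached, GCS, then Azure) and the disk cache to fall back to.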
fn into_vec_and_fallback(self) -> (Vec<CacheType>, DiskCacheConfig) {
let CacheConfigs {
azure,
disk,
gcs,
memcached,
redis,
s3,
} = self;
let caches = s3
.map(CacheType::S3)
.into_iter()
.chain(redis.map(CacheType::Redis))
.chain(memcached.map(CacheType::Memcached))
.chain(gcs.map(CacheType::GCS))
.chain(azure.map(CacheType::Azure))
.collect();
let fallback = disk.unwrap_or_default();
(caches, fallback)
}
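    /// Overrides each of our settings with the corresponding setting from
    /// `other`, if it is present there.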
fn merge(&mut self, other: Self) {
let CacheConfigs {
azure,
disk,
gcs,
memcached,
redis,
s3,
} = other;
if azure.is_some() {
self.azure = azure
}
if disk.is_some() {
self.disk = disk
}
if gcs.is_some() {
self.gcs = gcs
}
if memcached.is_some() {
self.memcached = memcached
}
if redis.is_some() {
self.redis = redis
}
if s3.is_some() {
self.s3 = s3
}
}
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
#[serde(tag = "type")]
pub enum DistToolchainConfig {
#[serde(rename = "no_dist")]
NoDist { compiler_executable: PathBuf },
#[serde(rename = "path_override")]
PathOverride {
compiler_executable: PathBuf,
archive: PathBuf,
archive_compiler_executable: String,
},
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
#[serde(tag = "type")]
pub enum DistAuth {
#[serde(rename = "token")]
Token { token: String },
#[serde(rename = "oauth2_code_grant_pkce")]
Oauth2CodeGrantPKCE {
client_id: String,
auth_url: String,
token_url: String,
},
#[serde(rename = "oauth2_implicit")]
Oauth2Implicit { client_id: String, auth_url: String },
}
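// A manual Deserialize impl so that config files can say `type = "mozilla"`,
// which expands to an `oauth2_code_grant_pkce` setup using the Mozilla auth0
// constants above.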
impl<'a> Deserialize<'a> for DistAuth {
fn deserialize<D>(deserializer: D) -> StdResult<Self, D::Error>
where
D: Deserializer<'a>,
{
#[derive(Deserialize)]
#[serde(deny_unknown_fields)]
#[serde(tag = "type")]
pub enum Helper {
#[serde(rename = "token")]
Token { token: String },
#[serde(rename = "mozilla")]
Mozilla,
#[serde(rename = "oauth2_code_grant_pkce")]
Oauth2CodeGrantPKCE {
client_id: String,
auth_url: String,
token_url: String,
},
#[serde(rename = "oauth2_implicit")]
Oauth2Implicit { client_id: String, auth_url: String },
}
let helper: Helper = Deserialize::deserialize(deserializer)?;
Ok(match helper {
Helper::Token { token } => DistAuth::Token { token },
Helper::Mozilla => DistAuth::Oauth2CodeGrantPKCE {
client_id: MOZILLA_OAUTH_PKCE_CLIENT_ID.to_owned(),
auth_url: MOZILLA_OAUTH_PKCE_AUTH_URL.to_owned(),
token_url: MOZILLA_OAUTH_PKCE_TOKEN_URL.to_owned(),
},
Helper::Oauth2CodeGrantPKCE {
client_id,
auth_url,
token_url,
} => DistAuth::Oauth2CodeGrantPKCE {
client_id,
auth_url,
token_url,
},
Helper::Oauth2Implicit {
client_id,
auth_url,
} => DistAuth::Oauth2Implicit {
client_id,
auth_url,
},
})
}
}
impl Default for DistAuth {
fn default() -> Self {
DistAuth::Token {
token: INSECURE_DIST_CLIENT_TOKEN.to_owned(),
}
}
}
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default)]
#[serde(deny_unknown_fields)]
pub struct DistConfig {
pub auth: DistAuth,
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
pub scheduler_url: Option<HTTPUrl>,
#[cfg(not(any(feature = "dist-client", feature = "dist-worker")))]
pub scheduler_url: Option<String>,
pub cache_dir: PathBuf,
pub toolchains: Vec<DistToolchainConfig>,
pub toolchain_cache_size: u64,
pub rewrite_includes_only: bool,
}
impl Default for DistConfig {
fn default() -> Self {
Self {
auth: Default::default(),
scheduler_url: Default::default(),
cache_dir: default_dist_cache_dir(),
toolchains: Default::default(),
toolchain_cache_size: default_toolchain_cache_size(),
rewrite_includes_only: false,
}
}
}
#[derive(Debug, Default, Serialize, Deserialize, Eq, PartialEq)]
#[serde(default)]
#[serde(deny_unknown_fields)]
pub struct FileConfig {
pub cache: CacheConfigs,
pub dist: DistConfig,
}
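/// Attempts to read a config file at `path`, parsing it as JSON when the
/// extension is `json` and as TOML otherwise. A file that cannot be opened
/// or read yields `Ok(None)`; one that fails to parse is an error.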
pub fn try_read_config_file<T: DeserializeOwned>(path: &Path) -> Result<Option<T>> {
debug!("Attempting to read config file at {:?}", path);
let mut file = match File::open(path) {
Ok(f) => f,
Err(e) => {
debug!("Couldn't open config file: {}", e);
return Ok(None);
}
};
let mut string = String::new();
    if let Err(e) = file.read_to_string(&mut string) {
        warn!("Failed to read config file: {}", e);
        return Ok(None);
    }
let res = if path.extension().map_or(false, |e| e == "json") {
serde_json::from_str(&string)
.with_context(|| format!("Failed to load json config file from {}", path.display()))?
} else {
toml::from_str(&string)
.with_context(|| format!("Failed to load toml config file from {}", path.display()))?
};
Ok(Some(res))
}
#[derive(Debug)]
pub struct EnvConfig {
cache: CacheConfigs,
}
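/// Builds an [`EnvConfig`] from `CACHEPOT_*` environment variables, e.g.
/// `CACHEPOT_BUCKET` (S3), `CACHEPOT_REDIS`, `CACHEPOT_MEMCACHED`,
/// `CACHEPOT_GCS_BUCKET`, `CACHEPOT_AZURE_CONNECTION_STRING`, and
/// `CACHEPOT_DIR`/`CACHEPOT_CACHE_SIZE` for the disk cache.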
fn config_from_env() -> EnvConfig {
let s3 = env::var("CACHEPOT_BUCKET").ok().map(|bucket| {
let endpoint = env::var("CACHEPOT_ENDPOINT").ok();
let region = env::var("CACHEPOT_REGION").ok();
let key_prefix = env::var("CACHEPOT_S3_KEY_PREFIX")
.ok()
.as_ref()
.map(|s| s.trim_end_matches('/'))
.filter(|s| !s.is_empty())
.map(|s| s.to_owned() + "/");
let public = env::var("SCCACHE_S3_PUBLIC").ok().is_some();
S3CacheConfig {
bucket,
endpoint,
key_prefix,
region,
public,
}
});
let redis = env::var("CACHEPOT_REDIS")
.ok()
.map(|url| RedisCacheConfig { url });
let memcached = env::var("CACHEPOT_MEMCACHED")
.ok()
.map(|url| MemcachedCacheConfig { url });
let gcs = env::var("CACHEPOT_GCS_BUCKET").ok().map(|bucket| {
let url = env::var("CACHEPOT_GCS_CREDENTIALS_URL").ok();
let cred_path = env::var_os("CACHEPOT_GCS_KEY_PATH").map(PathBuf::from);
if url.is_some() && cred_path.is_some() {
warn!("Both CACHEPOT_GCS_CREDENTIALS_URL and CACHEPOT_GCS_KEY_PATH are set");
warn!("You should set only one of them!");
warn!("CACHEPOT_GCS_KEY_PATH will take precedence");
}
        let rw_mode = match env::var("CACHEPOT_GCS_RW_MODE").as_deref() {
            Ok("READ_ONLY") => GCSCacheRWMode::ReadOnly,
            Ok("READ_WRITE") => GCSCacheRWMode::ReadWrite,
            Ok(_) => {
                warn!("Invalid CACHEPOT_GCS_RW_MODE; defaulting to READ_ONLY.");
                GCSCacheRWMode::ReadOnly
            }
            _ => {
                warn!("No CACHEPOT_GCS_RW_MODE specified; defaulting to READ_ONLY.");
                GCSCacheRWMode::ReadOnly
            }
        };
GCSCacheConfig {
bucket,
cred_path,
url,
rw_mode,
}
});
let azure = env::var("CACHEPOT_AZURE_CONNECTION_STRING")
.ok()
.map(|_| AzureCacheConfig);
let disk_dir = env::var_os("CACHEPOT_DIR").map(PathBuf::from);
let disk_sz = env::var("CACHEPOT_CACHE_SIZE")
.ok()
.and_then(|v| parse_size(&v));
let disk = if disk_dir.is_some() || disk_sz.is_some() {
Some(DiskCacheConfig {
dir: disk_dir.unwrap_or_else(default_disk_cache_dir),
size: disk_sz.unwrap_or_else(default_disk_cache_size),
})
} else {
None
};
let cache = CacheConfigs {
azure,
disk,
gcs,
memcached,
redis,
s3,
};
EnvConfig { cache }
}
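/// Resolves a config file path: the value of `env_var` if set, otherwise the
/// first of the project config and preference directories that already
/// contains `leaf`, defaulting to the config directory.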
fn config_file(env_var: &str, leaf: &str) -> PathBuf {
if let Some(env_value) = env::var_os(env_var) {
return env_value.into();
}
let dirs =
ProjectDirs::from("", ORGANIZATION, APP_NAME).expect("Unable to get config directory");
let path = dirs.config_dir().join(leaf);
if path.exists() {
return path;
}
let path = dirs.preference_dir().join(leaf);
if path.exists() {
return path;
}
dirs.config_dir().join(leaf)
}
#[derive(Debug, Default, PartialEq, Eq)]
pub struct Config {
pub caches: Vec<CacheType>,
pub fallback_cache: DiskCacheConfig,
pub dist: DistConfig,
}
impl Config {
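    /// Loads the configuration, layering `CACHEPOT_*` environment variables
    /// on top of the file found via `CACHEPOT_CONF` (or the default config
    /// location).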
pub fn load() -> Result<Config> {
let env_conf = config_from_env();
let file_conf_path = config_file("CACHEPOT_CONF", "config");
let file_conf = try_read_config_file(&file_conf_path)
.context("Failed to parse config file")?
.map(|config| {
info!("Parsed config file {} .", file_conf_path.display());
config
})
.unwrap_or_else(|| {
info!("Using the default configuration.");
Default::default()
});
Ok(Config::from_env_and_file_configs(env_conf, file_conf))
}
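    /// Merges the two sources; settings from the environment take precedence
    /// over those from the config file.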
fn from_env_and_file_configs(env_conf: EnvConfig, file_conf: FileConfig) -> Config {
let mut conf_caches: CacheConfigs = Default::default();
let FileConfig { cache, dist } = file_conf;
conf_caches.merge(cache);
let EnvConfig { cache } = env_conf;
conf_caches.merge(cache);
let (caches, fallback_cache) = conf_caches.into_vec_and_fallback();
Config {
caches,
fallback_cache,
dist,
}
}
}
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(default)]
#[serde(deny_unknown_fields)]
pub struct CachedDistConfig {
pub auth_tokens: HashMap<String, String>,
}
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(default)]
#[serde(deny_unknown_fields)]
pub struct CachedFileConfig {
pub dist: CachedDistConfig,
}
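/// A handle to the process-wide cached config. `load` populates the global
/// copy from disk on first use, and `with_mut` persists changes to disk
/// before updating it; `with` and `with_mut` must only be called after a
/// successful `load`.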
#[derive(Debug, Default, PartialEq, Eq)]
pub struct CachedConfig(());
impl CachedConfig {
pub fn load() -> Result<Self> {
let mut cached_file_config = CACHED_CONFIG.lock().unwrap();
if cached_file_config.is_none() {
let cfg = Self::load_file_config().context("Unable to initialise cached config")?;
*cached_file_config = Some(cfg)
}
Ok(CachedConfig(()))
}
pub fn reload() -> Result<Self> {
{
let mut cached_file_config = CACHED_CONFIG.lock().unwrap();
*cached_file_config = None;
};
Self::load()
}
pub fn with<F: FnOnce(&CachedFileConfig) -> T, T>(&self, f: F) -> T {
let cached_file_config = CACHED_CONFIG.lock().unwrap();
let cached_file_config = cached_file_config.as_ref().unwrap();
f(cached_file_config)
}
pub fn with_mut<F: FnOnce(&mut CachedFileConfig)>(&self, f: F) -> Result<()> {
let mut cached_file_config = CACHED_CONFIG.lock().unwrap();
let cached_file_config = cached_file_config.as_mut().unwrap();
let mut new_config = cached_file_config.clone();
f(&mut new_config);
Self::save_file_config(&new_config)?;
*cached_file_config = new_config;
Ok(())
}
fn file_config_path() -> PathBuf {
config_file("CACHEPOT_CACHED_CONF", "cached-config")
}
fn load_file_config() -> Result<CachedFileConfig> {
let file_conf_path = &*CACHED_CONFIG_PATH;
if !file_conf_path.exists() {
let file_conf_dir = file_conf_path
.parent()
.expect("Cached conf file has no parent directory");
if !file_conf_dir.is_dir() {
fs::create_dir_all(file_conf_dir)
.context("Failed to create dir to hold cached config")?
}
Self::save_file_config(&Default::default()).with_context(|| {
format!(
"Unable to create cached config file at {}",
file_conf_path.display()
)
})?
}
try_read_config_file(file_conf_path)
.context("Failed to load cached config file")?
.with_context(|| format!("Failed to load from {}", file_conf_path.display()))
}
    fn save_file_config(c: &CachedFileConfig) -> Result<()> {
        let file_conf_path = &*CACHED_CONFIG_PATH;
        let mut file = File::create(file_conf_path).context("Could not open config for writing")?;
        let data = toml::to_vec(c).context("Failed to serialize cached config")?;
        file.write_all(&data).map_err(Into::into)
    }
}
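/// Configuration for the dist scheduler.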
#[cfg(feature = "dist-worker")]
pub mod scheduler {
use super::HTTPUrl;
use std::path::Path;
use crate::errors::*;
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type")]
#[serde(deny_unknown_fields)]
pub enum ClientAuth {
#[serde(rename = "DANGEROUSLY_INSECURE")]
Insecure,
#[serde(rename = "token")]
Token { token: String },
#[serde(rename = "jwt_validate")]
JwtValidate {
audience: String,
issuer: String,
jwks_url: String,
},
#[serde(rename = "mozilla")]
Mozilla { required_groups: Vec<String> },
#[serde(rename = "proxy_token")]
ProxyToken {
url: String,
cache_secs: Option<u64>,
},
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type")]
#[serde(deny_unknown_fields)]
pub enum WorkerAuth {
#[serde(rename = "DANGEROUSLY_INSECURE")]
Insecure,
#[serde(rename = "jwt_hs256")]
JwtHS256 { secret_key: String },
#[serde(rename = "token")]
Token { token: String },
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct Config {
pub public_addr: HTTPUrl,
pub client_auth: ClientAuth,
pub worker_auth: WorkerAuth,
}
pub fn from_path(conf_path: &Path) -> Result<Option<Config>> {
super::try_read_config_file(conf_path).context("Failed to load scheduler config file")
}
}
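/// Configuration for a dist worker.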
#[cfg(feature = "dist-worker")]
pub mod worker {
use super::{HTTPUrl, WorkerUrl};
use std::path::{Path, PathBuf};
use crate::errors::*;
const TEN_GIGS: u64 = 10 * 1024 * 1024 * 1024;
fn default_toolchain_cache_size() -> u64 {
TEN_GIGS
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type")]
#[serde(deny_unknown_fields)]
pub enum BuilderType {
#[serde(rename = "docker")]
Docker,
#[serde(rename = "overlay")]
Overlay {
build_dir: PathBuf,
bwrap_path: PathBuf,
},
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type")]
#[serde(deny_unknown_fields)]
pub enum SchedulerAuth {
#[serde(rename = "DANGEROUSLY_INSECURE")]
Insecure,
#[serde(rename = "jwt_token")]
JwtToken { token: String },
#[serde(rename = "token")]
Token { token: String },
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct Config {
pub builder: BuilderType,
pub cache_dir: PathBuf,
pub public_addr: WorkerUrl,
pub scheduler_url: HTTPUrl,
pub scheduler_auth: SchedulerAuth,
#[serde(default = "default_toolchain_cache_size")]
pub toolchain_cache_size: u64,
}
pub fn from_path(conf_path: &Path) -> Result<Option<Config>> {
super::try_read_config_file(conf_path).context("Failed to load coordinator config file")
}
}
#[test]
fn test_parse_size() {
assert_eq!(None, parse_size(""));
assert_eq!(None, parse_size("100"));
assert_eq!(Some(2048), parse_size("2K"));
assert_eq!(Some(10 * 1024 * 1024), parse_size("10M"));
assert_eq!(Some(TEN_GIGS), parse_size("10G"));
assert_eq!(Some(1024 * TEN_GIGS), parse_size("10T"));
}
#[test]
fn config_overrides() {
let env_conf = EnvConfig {
cache: CacheConfigs {
azure: Some(AzureCacheConfig),
disk: Some(DiskCacheConfig {
dir: "/env-cache".into(),
size: 5,
}),
redis: Some(RedisCacheConfig {
url: "myotherredisurl".to_owned(),
}),
..Default::default()
},
};
let file_conf = FileConfig {
cache: CacheConfigs {
disk: Some(DiskCacheConfig {
dir: "/file-cache".into(),
size: 15,
}),
memcached: Some(MemcachedCacheConfig {
url: "memurl".to_owned(),
}),
redis: Some(RedisCacheConfig {
url: "myredisurl".to_owned(),
}),
..Default::default()
},
dist: Default::default(),
};
assert_eq!(
Config::from_env_and_file_configs(env_conf, file_conf),
Config {
caches: vec![
CacheType::Redis(RedisCacheConfig {
url: "myotherredisurl".to_owned()
}),
CacheType::Memcached(MemcachedCacheConfig {
url: "memurl".to_owned()
}),
CacheType::Azure(AzureCacheConfig),
],
fallback_cache: DiskCacheConfig {
dir: "/env-cache".into(),
size: 5,
},
dist: Default::default(),
}
);
}
#[test]
fn test_gcs_credentials_url() {
env::set_var("CACHEPOT_GCS_BUCKET", "my-bucket");
env::set_var("CACHEPOT_GCS_CREDENTIALS_URL", "http://localhost/");
env::set_var("CACHEPOT_GCS_RW_MODE", "READ_WRITE");
let env_cfg = config_from_env();
match env_cfg.cache.gcs {
Some(GCSCacheConfig {
ref bucket,
ref url,
rw_mode,
..
}) => {
assert_eq!(bucket, "my-bucket");
match url {
Some(ref url) => assert_eq!(url, "http://localhost/"),
None => panic!("URL can't be none"),
};
assert_eq!(rw_mode, GCSCacheRWMode::ReadWrite);
}
None => unreachable!(),
};
}
#[test]
fn full_toml_parse() {
const CONFIG_STR: &str = r#"
[dist]
# where to find the scheduler
scheduler_url = "http://1.2.3.4:10600"
# a set of prepackaged toolchains
toolchains = []
# the maximum size of the toolchain cache in bytes
toolchain_cache_size = 5368709120
cache_dir = "/home/user/.cache/cachepot-dist-client"
[dist.auth]
type = "token"
token = "secrettoken"
#[cache.azure]
# currently does not appear to work, so it stays commented out
[cache.disk]
dir = "/tmp/.cache/cachepot"
size = 7516192768 # 7 GiBytes
[cache.gcs]
# optional url
url = "..."
rw_mode = "READ_ONLY"
# rw_mode = "READ_WRITE"
cred_path = "/psst/secret/cred"
bucket = "bucket"
[cache.memcached]
url = "..."
[cache.redis]
url = "redis://user:passwd@1.2.3.4:6379/1"
[cache.s3]
bucket = "name"
endpoint = "s3-us-east-1.amazonaws.com"
region = "us-east-1"
key_prefix = "prefix"
public = false
"#;
let file_config: FileConfig = toml::from_str(CONFIG_STR).expect("Is valid toml.");
assert_eq!(
file_config,
FileConfig {
cache: CacheConfigs {
                azure: None,
                disk: Some(DiskCacheConfig {
dir: PathBuf::from("/tmp/.cache/cachepot"),
size: 7 * 1024 * 1024 * 1024,
}),
gcs: Some(GCSCacheConfig {
url: Some("...".to_owned()),
bucket: "bucket".to_owned(),
cred_path: Some(PathBuf::from("/psst/secret/cred")),
rw_mode: GCSCacheRWMode::ReadOnly,
}),
redis: Some(RedisCacheConfig {
url: "redis://user:passwd@1.2.3.4:6379/1".to_owned(),
}),
memcached: Some(MemcachedCacheConfig {
url: "...".to_owned(),
}),
s3: Some(S3CacheConfig {
bucket: "name".to_owned(),
endpoint: Some("s3-us-east-1.amazonaws.com".to_owned()),
key_prefix: Some("prefix".to_owned()),
region: Some("us-east-1".to_owned()),
public: false,
}),
},
dist: DistConfig {
auth: DistAuth::Token {
token: "secrettoken".to_owned()
},
#[cfg(any(feature = "dist-client", feature = "dist-worker"))]
scheduler_url: Some(
parse_http_url("http://1.2.3.4:10600")
.map(|url| { HTTPUrl::from_url(url) })
.expect("Scheduler url must be valid url str")
),
#[cfg(not(any(feature = "dist-client", feature = "dist-worker")))]
scheduler_url: Some("http://1.2.3.4:10600".to_owned()),
cache_dir: PathBuf::from("/home/user/.cache/cachepot-dist-client"),
toolchains: vec![],
toolchain_cache_size: 5368709120,
rewrite_includes_only: false,
},
}
)
}