use app_dirs::{
AppDataType,
AppInfo,
app_dir,
};
use cache::disk::DiskCache;
#[cfg(feature = "redis")]
use cache::redis::RedisCache;
#[cfg(feature = "s3")]
use cache::s3::S3Cache;
#[cfg(feature = "gcs")]
use cache::gcs::{self, GCSCache, GCSCredentialProvider, RWMode};
use futures_cpupool::CpuPool;
use regex::Regex;
#[cfg(feature = "gcs")]
use serde_json;
use std::env;
use std::fmt;
use std::io::{
self,
Read,
Seek,
Write,
};
#[cfg(feature = "gcs")]
use std::fs::File;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use tokio_core::reactor::Handle;
use zip::{CompressionMethod, ZipArchive, ZipWriter};
use zip::write::FileOptions;
use errors::*;
// Application identity handed to `app_dirs` when resolving the per-user
// cache directory (see `storage_from_environment`).
const APP_INFO: AppInfo = AppInfo {
    name: "sccache",
    author: "Mozilla",
};

// Default maximum size of the on-disk cache: 10 GiB.
const TEN_GIGS: usize = 10 * 1024 * 1024 * 1024;
/// Result of a cache lookup.
pub enum Cache {
    /// The entry was found; its contents are readable via the wrapped
    /// `CacheRead`.
    Hit(CacheRead),
    /// The entry was not present in the cache.
    Miss,
    /// NOTE(review): presumably tells the caller to ignore any cached value
    /// and regenerate/re-store the entry — confirm against call sites, which
    /// are not visible in this file.
    Recache,
}
impl fmt::Debug for Cache {
    /// Render the lookup result; `Hit` elides the archive contents since
    /// `CacheRead` has no `Debug` impl of its own.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let label = match *self {
            Cache::Hit(_) => "Cache::Hit(...)",
            Cache::Miss => "Cache::Miss",
            Cache::Recache => "Cache::Recache",
        };
        f.write_str(label)
    }
}
/// A reader that can also seek and be sent across threads, usable as a
/// boxed trait object by `CacheRead`.
pub trait ReadSeek : Read + Seek + Send {}

// Blanket impl: anything `Read + Seek + Send` is automatically a `ReadSeek`.
impl<T: Read + Seek + Send> ReadSeek for T {}
/// A cache entry opened for reading, backed by a zip archive.
pub struct CacheRead {
    // Boxed so any `ReadSeek` source (file, memory buffer, ...) can back it.
    zip: ZipArchive<Box<ReadSeek>>,
}
impl CacheRead {
    /// Open a cache entry from `reader`, which must contain a zip archive.
    pub fn from<R>(reader: R) -> Result<CacheRead>
        where R: ReadSeek + 'static,
    {
        let source: Box<ReadSeek> = Box::new(reader);
        let archive = ZipArchive::new(source)
            .chain_err(|| "Failed to parse cache entry")?;
        Ok(CacheRead { zip: archive })
    }

    /// Copy the object stored as `name` into `to`, returning the Unix
    /// permission bits recorded for it, if any.
    pub fn get_object<T>(&mut self, name: &str, to: &mut T) -> Result<Option<u32>>
        where T: Write,
    {
        let mut entry = self.zip.by_name(name)
            .chain_err(|| "Failed to read object from cache entry")?;
        io::copy(&mut entry, to)?;
        Ok(entry.unix_mode())
    }
}
/// A cache entry under construction, accumulated as an in-memory zip.
pub struct CacheWrite {
    zip: ZipWriter<io::Cursor<Vec<u8>>>,
}
impl CacheWrite {
    /// Create a new, empty cache entry backed by an in-memory buffer.
    pub fn new() -> CacheWrite
    {
        let buffer = io::Cursor::new(Vec::new());
        CacheWrite {
            zip: ZipWriter::new(buffer),
        }
    }

    /// Add an object named `name` to the entry, reading its contents from
    /// `from`. `mode` optionally supplies Unix permission bits to record
    /// alongside the object.
    pub fn put_object<T>(&mut self, name: &str, from: &mut T, mode: Option<u32>) -> Result<()>
        where T: Read,
    {
        let mut opts = FileOptions::default().compression_method(CompressionMethod::Deflated);
        if let Some(mode) = mode {
            opts = opts.unix_permissions(mode);
        }
        self.zip.start_file(name, opts).chain_err(|| {
            "Failed to start cache entry object"
        })?;
        io::copy(from, &mut self.zip)?;
        Ok(())
    }

    /// Finalize the zip archive and return its raw bytes.
    pub fn finish(self) -> Result<Vec<u8>>
    {
        let CacheWrite { mut zip } = self;
        let cursor = zip.finish().chain_err(|| "Failed to finish cache entry zip")?;
        Ok(cursor.into_inner())
    }
}
/// An interface to cache storage backends (disk, S3, Redis, GCS, ...).
pub trait Storage {
    /// Look up `key`, resolving to a `Cache` describing the outcome.
    fn get(&self, key: &str) -> SFuture<Cache>;
    /// Store `entry` under `key`.
    ///
    /// NOTE(review): the resolved `Duration` is presumably how long the
    /// store took — confirm against implementations, which are not visible
    /// in this file.
    fn put(&self, key: &str, entry: CacheWrite) -> SFuture<Duration>;
    /// A human-readable description of where this cache stores its data.
    fn location(&self) -> String;
    /// Current cache size in bytes, if the backend can report it.
    fn current_size(&self) -> Option<usize>;
    /// Maximum cache size in bytes, if the backend enforces one.
    fn max_size(&self) -> Option<usize>;
}
/// Parse a size string such as `"10G"` into a byte count.
///
/// The accepted form is one or more ASCII decimal digits followed by exactly
/// one binary-unit suffix: `K`, `M`, `G` or `T` (powers of 1024). Returns
/// `None` for any other input, and — unlike the previous implementation,
/// which could wrap on multiply — also for values whose byte count would
/// overflow `usize`.
fn parse_size(val: &str) -> Option<usize> {
    let bytes = val.as_bytes();
    // Need at least one digit plus the suffix character.
    if bytes.len() < 2 {
        return None;
    }
    // All valid suffixes are ASCII, and an ASCII byte never occurs inside a
    // multi-byte UTF-8 sequence, so inspecting the final byte is safe and
    // `val.len() - 1` is guaranteed to be a char boundary when it matches.
    let multiplier: usize = match bytes[bytes.len() - 1] {
        b'K' => 1024,
        b'M' => 1024 * 1024,
        b'G' => 1024 * 1024 * 1024,
        b'T' => 1024 * 1024 * 1024 * 1024,
        _ => return None,
    };
    // Everything before the suffix must be ASCII digits (matching the old
    // behavior: non-ASCII digits never produced a value).
    let digits = &val[..val.len() - 1];
    if !digits.bytes().all(|b| b >= b'0' && b <= b'9') {
        return None;
    }
    usize::from_str(digits)
        .ok()
        // Reject counts whose byte value would overflow rather than wrapping.
        .and_then(|size| size.checked_mul(multiplier))
}
/// Select and construct a cache backend from environment variables.
///
/// Backends are tried in order — S3 (`SCCACHE_BUCKET`), Redis
/// (`SCCACHE_REDIS`), then Google Cloud Storage (`SCCACHE_GCS_BUCKET`) —
/// each only when its cargo feature was compiled in and its configuration
/// variable is set, and each failure falls through to the next candidate.
/// The local `DiskCache` is the fallback and always succeeds.
pub fn storage_from_environment(pool: &CpuPool, _handle: &Handle) -> Arc<Storage> {
    // Each backend pairs a runtime `cfg!` test with an inner `#[cfg]`
    // attribute: `cfg!` keeps the env-var probing compiled in every build,
    // while `#[cfg]` strips out the statements that reference feature-gated
    // cache types so non-feature builds still compile.
    if cfg!(feature = "s3") {
        if let Ok(bucket) = env::var("SCCACHE_BUCKET") {
            // An explicit SCCACHE_ENDPOINT wins; otherwise derive the host
            // from the bucket name, using the region-specific form for any
            // region other than us-east-1 (which uses the legacy endpoint).
            let endpoint = match env::var("SCCACHE_ENDPOINT") {
                Ok(endpoint) => format!("{}/{}", endpoint, bucket),
                _ => match env::var("SCCACHE_REGION") {
                    Ok(ref region) if region != "us-east-1" =>
                        format!("{}.s3-{}.amazonaws.com", bucket, region),
                    _ => format!("{}.s3.amazonaws.com", bucket),
                },
            };
            debug!("Trying S3Cache({})", endpoint);
            #[cfg(feature = "s3")]
            match S3Cache::new(&bucket, &endpoint, _handle) {
                Ok(s) => {
                    trace!("Using S3Cache");
                    return Arc::new(s);
                }
                // Non-fatal: fall through to the next backend.
                Err(e) => warn!("Failed to create S3Cache: {:?}", e),
            }
        }
    }
    if cfg!(feature = "redis") {
        if let Ok(url) = env::var("SCCACHE_REDIS") {
            debug!("Trying Redis({})", url);
            #[cfg(feature = "redis")]
            match RedisCache::new(&url, pool) {
                Ok(s) => {
                    trace!("Using Redis: {}", url);
                    return Arc::new(s);
                }
                // Non-fatal: fall through to the next backend.
                Err(e) => warn!("Failed to create RedisCache: {:?}", e),
            }
        }
    }
    if cfg!(feature = "gcs") {
        if let Ok(bucket) = env::var("SCCACHE_GCS_BUCKET")
        {
            debug!("Trying GCS bucket({})", bucket);
            #[cfg(feature = "gcs")]
            {
                // Credentials are optional: without SCCACHE_GCS_KEY_PATH the
                // bucket is accessed unauthenticated.
                let cred_path_res = env::var("SCCACHE_GCS_KEY_PATH");
                if cred_path_res.is_err() {
                    warn!("No SCCACHE_GCS_KEY_PATH specified-- no authentication will be used.");
                }
                // Read and parse the service-account JSON key file, if one
                // was given; a read/parse failure degrades to unauthenticated
                // access with a warning rather than aborting.
                let service_account_key_opt: Option<gcs::ServiceAccountKey> =
                    if let Ok(cred_path) = cred_path_res
                {
                    // Immediately-invoked closure so `?` can collect the
                    // open/read/deserialize errors into one `Result`.
                    let service_account_key_res: Result<gcs::ServiceAccountKey> = (|| {
                        let mut file = File::open(&cred_path)?;
                        let mut service_account_json = String::new();
                        file.read_to_string(&mut service_account_json)?;
                        Ok(serde_json::from_str(&service_account_json)?)
                    })();
                    if let Err(ref e) = service_account_key_res {
                        warn!("Failed to parse service account credentials from file: {:?}. \
                               Continuing without authentication.", e);
                    }
                    service_account_key_res.ok()
                } else { None };
                // SCCACHE_GCS_RW_MODE selects read-only vs read-write access;
                // anything else (or unset) defaults to read-only.
                let gcs_read_write_mode = match env::var("SCCACHE_GCS_RW_MODE")
                    .as_ref().map(String::as_str)
                {
                    Ok("READ_ONLY") => RWMode::ReadOnly,
                    Ok("READ_WRITE") => RWMode::ReadWrite,
                    Ok(_) => {
                        warn!("Invalid SCCACHE_GCS_RW_MODE-- defaulting to READ_ONLY.");
                        RWMode::ReadOnly
                    },
                    _ => {
                        warn!("No SCCACHE_GCS_RW_MODE specified-- defaulting to READ_ONLY.");
                        RWMode::ReadOnly
                    }
                };
                // A credential provider is only built when a key was loaded.
                let gcs_cred_provider =
                    service_account_key_opt.map(|path|
                        GCSCredentialProvider::new(gcs_read_write_mode, path));
                match GCSCache::new(bucket, gcs_cred_provider, gcs_read_write_mode, _handle) {
                    Ok(s) => {
                        trace!("Using GCSCache");
                        return Arc::new(s);
                    }
                    // Non-fatal: fall through to the disk cache.
                    Err(e) => warn!("Failed to create GCS Cache: {:?}", e),
                }
            }
        }
    }
    // Fallback: local disk cache. Directory comes from SCCACHE_DIR, then the
    // platform per-user cache directory, then a folder under the temp dir.
    let d = env::var_os("SCCACHE_DIR")
        .map(|p| PathBuf::from(p))
        .or_else(|| app_dir(AppDataType::UserCache, &APP_INFO, "").ok())
        .unwrap_or(env::temp_dir().join("sccache_cache"));
    trace!("Using DiskCache({:?})", d);
    // Maximum size like "10G" via SCCACHE_CACHE_SIZE; defaults to 10 GiB.
    let cache_size = env::var("SCCACHE_CACHE_SIZE")
        .ok()
        .and_then(|v| parse_size(&v))
        .unwrap_or(TEN_GIGS);
    trace!("DiskCache size: {}", cache_size);
    Arc::new(DiskCache::new(&d, cache_size, pool))
}
#[test]
fn test_parse_size() {
    // Table-driven: (input, expected byte count).
    let cases: &[(&str, Option<usize>)] = &[
        ("", None),
        ("100", None),
        ("2K", Some(2048)),
        ("10M", Some(10 * 1024 * 1024)),
        ("10G", Some(TEN_GIGS)),
        ("10T", Some(1024 * TEN_GIGS)),
    ];
    for &(input, expected) in cases {
        assert_eq!(expected, parse_size(input), "parse_size({:?})", input);
    }
}