#![cfg_attr(not(feature = "std"), no_std)]
#![allow(missing_docs)]
#![allow(dead_code)]
#![allow(clippy::match_like_matches_macro)]
#![allow(clippy::expect_used)]
#![allow(clippy::type_complexity)]
#![allow(clippy::manual_div_ceil)]
#![allow(unused_variables)]
#![allow(clippy::collapsible_match)]
#![allow(async_fn_in_trait)]
#![allow(clippy::manual_strip)]
#![allow(clippy::get_first)]
#![allow(clippy::field_reassign_with_default)]
#![allow(unused_imports)]
#![allow(clippy::should_implement_trait)]
#[cfg(feature = "alloc")]
extern crate alloc;
pub mod auth;
pub mod backends;
#[cfg(feature = "cache")]
pub mod cache;
pub mod error;
#[cfg(feature = "async")]
pub mod multicloud;
#[cfg(feature = "prefetch")]
pub mod prefetch;
#[cfg(feature = "retry")]
pub mod retry;
pub use error::{CloudError, Result};
#[cfg(feature = "s3")]
pub use backends::s3::S3Backend;
#[cfg(feature = "azure-blob")]
pub use backends::azure::AzureBlobBackend;
#[cfg(feature = "gcs")]
pub use backends::gcs::GcsBackend;
#[cfg(feature = "http")]
pub use backends::http::HttpBackend;
#[cfg(feature = "async")]
pub use multicloud::{
CloudProvider, CloudProviderConfig, CloudRegion, CrossCloudTransferConfig,
CrossCloudTransferResult, MultiCloudManager, MultiCloudManagerBuilder, ProviderHealth,
RoutingStrategy, TransferCostEstimate,
};
use url::Url;
/// A parsed cloud-storage location: one backend client plus the object
/// path/key it points at. Construct with [`CloudBackend::from_url`].
///
/// Each variant is gated on the Cargo feature that pulls in its backend,
/// so the enum's shape depends on the enabled feature set.
#[derive(Debug)]
pub enum CloudBackend {
    /// Amazon S3 object, from an `s3://bucket/key` URL.
    #[cfg(feature = "s3")]
    S3 {
        backend: S3Backend,
        // Object key with the leading '/' stripped.
        key: String,
    },
    /// Azure Blob Storage blob, from an `az://account@container/blob` URL.
    #[cfg(feature = "azure-blob")]
    Azure {
        backend: AzureBlobBackend,
        // Blob name with the leading '/' stripped.
        blob: String,
    },
    /// Google Cloud Storage object, from a `gs://bucket/object` URL.
    #[cfg(feature = "gcs")]
    Gcs {
        backend: GcsBackend,
        // Object name with the leading '/' stripped.
        object: String,
    },
    /// Plain HTTP(S) resource (read-only), from an `http(s)://host/path` URL.
    #[cfg(feature = "http")]
    Http {
        backend: HttpBackend,
        // Request path with the leading '/' stripped.
        path: String,
    },
}
impl CloudBackend {
    /// Parse a cloud storage URL and construct the matching backend.
    ///
    /// Supported schemes (each gated on its Cargo feature):
    /// - `s3://bucket/key`
    /// - `az://account@container/blob` (alias `azure://`)
    /// - `gs://bucket/object` (alias `gcs://`)
    /// - `http://host[:port]/path` (and `https://`)
    ///
    /// # Errors
    ///
    /// Returns [`CloudError::InvalidUrl`] when the URL fails to parse or is
    /// missing a required component (host, or the account for Azure), and
    /// [`CloudError::UnsupportedProtocol`] for any other scheme.
    pub fn from_url(url: &str) -> Result<Self> {
        let parsed = Url::parse(url)?;
        match parsed.scheme() {
            #[cfg(feature = "s3")]
            "s3" => {
                // Host position carries the bucket name.
                let bucket = parsed.host_str().ok_or_else(|| CloudError::InvalidUrl {
                    url: url.to_string(),
                })?;
                let key = parsed.path().trim_start_matches('/').to_string();
                Ok(Self::S3 {
                    backend: S3Backend::new(bucket, ""),
                    key,
                })
            }
            #[cfg(feature = "azure-blob")]
            "az" | "azure" => {
                // Host position carries the container; the userinfo part
                // carries the storage account (az://account@container/blob).
                let container = parsed.host_str().ok_or_else(|| CloudError::InvalidUrl {
                    url: url.to_string(),
                })?;
                let account = parsed.username();
                if account.is_empty() {
                    return Err(CloudError::InvalidUrl {
                        url: url.to_string(),
                    });
                }
                let blob = parsed.path().trim_start_matches('/').to_string();
                Ok(Self::Azure {
                    backend: AzureBlobBackend::new(account, container),
                    blob,
                })
            }
            #[cfg(feature = "gcs")]
            "gs" | "gcs" => {
                // Host position carries the bucket name.
                let bucket = parsed.host_str().ok_or_else(|| CloudError::InvalidUrl {
                    url: url.to_string(),
                })?;
                let object = parsed.path().trim_start_matches('/').to_string();
                Ok(Self::Gcs {
                    backend: GcsBackend::new(bucket),
                    object,
                })
            }
            #[cfg(feature = "http")]
            "http" | "https" => {
                let host = parsed.host_str().ok_or_else(|| CloudError::InvalidUrl {
                    url: url.to_string(),
                })?;
                // Keep an explicit, non-default port in the base URL so
                // endpoints like http://localhost:9000 still work.
                // `Url::port()` is `None` for the scheme's default port,
                // so default-port URLs are unchanged.
                let base_url = match parsed.port() {
                    Some(port) => format!("{}://{}:{}", parsed.scheme(), host, port),
                    None => format!("{}://{}", parsed.scheme(), host),
                };
                let path = parsed.path().trim_start_matches('/').to_string();
                Ok(Self::Http {
                    backend: HttpBackend::new(base_url),
                    path,
                })
            }
            scheme => Err(CloudError::UnsupportedProtocol {
                protocol: scheme.to_string(),
            }),
        }
    }
    /// Fetch the object this backend points at.
    ///
    /// # Errors
    ///
    /// Propagates the underlying backend's error (network failure,
    /// missing object, authentication, ...).
    #[cfg(feature = "async")]
    pub async fn get(&self) -> Result<bytes::Bytes> {
        use backends::CloudStorageBackend;
        match self {
            #[cfg(feature = "s3")]
            Self::S3 { backend, key } => backend.get(key).await,
            #[cfg(feature = "azure-blob")]
            Self::Azure { backend, blob } => backend.get(blob).await,
            #[cfg(feature = "gcs")]
            Self::Gcs { backend, object } => backend.get(object).await,
            #[cfg(feature = "http")]
            Self::Http { backend, path } => backend.get(path).await,
        }
    }
    /// Upload `data` to the object this backend points at.
    ///
    /// # Errors
    ///
    /// Returns [`CloudError::NotSupported`] for the HTTP backend (which is
    /// read-only); otherwise propagates the underlying backend's error.
    #[cfg(feature = "async")]
    pub async fn put(&self, data: &[u8]) -> Result<()> {
        use backends::CloudStorageBackend;
        match self {
            #[cfg(feature = "s3")]
            Self::S3 { backend, key } => backend.put(key, data).await,
            #[cfg(feature = "azure-blob")]
            Self::Azure { backend, blob } => backend.put(blob, data).await,
            #[cfg(feature = "gcs")]
            Self::Gcs { backend, object } => backend.put(object, data).await,
            #[cfg(feature = "http")]
            Self::Http { .. } => Err(CloudError::NotSupported {
                operation: "HTTP backend is read-only".to_string(),
            }),
        }
    }
    /// Check whether the object this backend points at exists.
    ///
    /// # Errors
    ///
    /// Propagates the underlying backend's error when the existence check
    /// itself fails (as opposed to the object merely being absent).
    #[cfg(feature = "async")]
    pub async fn exists(&self) -> Result<bool> {
        use backends::CloudStorageBackend;
        match self {
            #[cfg(feature = "s3")]
            Self::S3 { backend, key } => backend.exists(key).await,
            #[cfg(feature = "azure-blob")]
            Self::Azure { backend, blob } => backend.exists(blob).await,
            #[cfg(feature = "gcs")]
            Self::Gcs { backend, object } => backend.exists(object).await,
            #[cfg(feature = "http")]
            Self::Http { backend, path } => backend.exists(path).await,
        }
    }
}
#[cfg(test)]
#[allow(clippy::panic)]
mod tests {
    use super::*;
    // S3 URLs: host maps to the bucket, path (sans leading '/') to the key.
    #[test]
    #[cfg(feature = "s3")]
    fn test_cloud_backend_from_url_s3() {
        let result = CloudBackend::from_url("s3://my-bucket/path/to/file.tif");
        assert!(result.is_ok());
        match result {
            Ok(CloudBackend::S3 { backend, key }) => {
                assert_eq!(backend.bucket, "my-bucket");
                assert_eq!(key, "path/to/file.tif");
            }
            _ => panic!("Expected S3 backend"),
        }
    }
    // GCS URLs: same bucket/object split as S3.
    #[test]
    #[cfg(feature = "gcs")]
    fn test_cloud_backend_from_url_gcs() {
        let result = CloudBackend::from_url("gs://my-bucket/path/to/file.tif");
        assert!(result.is_ok());
        match result {
            Ok(CloudBackend::Gcs { backend, object }) => {
                assert_eq!(backend.bucket, "my-bucket");
                assert_eq!(object, "path/to/file.tif");
            }
            _ => panic!("Expected GCS backend"),
        }
    }
    // HTTP(S) URLs: host goes into the base URL, path is kept separately.
    #[test]
    #[cfg(feature = "http")]
    fn test_cloud_backend_from_url_http() {
        let result = CloudBackend::from_url("https://example.com/path/to/file.tif");
        assert!(result.is_ok());
        match result {
            Ok(CloudBackend::Http { backend, path }) => {
                assert!(backend.base_url.contains("example.com"));
                assert_eq!(path, "path/to/file.tif");
            }
            _ => panic!("Expected HTTP backend"),
        }
    }
    // An unknown scheme must be rejected, not silently accepted.
    #[test]
    fn test_cloud_backend_from_url_invalid() {
        assert!(CloudBackend::from_url("invalid://url").is_err());
    }
}