use crate::compress::CompressionAlgorithm;
use crate::crypto::Hash;
use crate::error::BucketError;
use crate::error::BucketError::EmptyName;
use crate::msg;
use crate::msg::{ObjectResponse, PaginationConfig};
use cosmwasm_std::{ensure, ensure_ne, Addr, StdError, StdResult, Uint128};
use cw_storage_plus::{Index, IndexList, IndexedMap, Item, Map, MultiIndex};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
/// Raw object payloads, keyed by each object's content hash.
pub const DATA: Map<Hash, Vec<u8>> = Map::new("DATA");
/// Stored state describing the contract's bucket.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
pub struct Bucket {
/// Address that created the bucket (set in `Bucket::try_new`).
pub owner: Addr,
/// Bucket name with all whitespace stripped (normalized in `Bucket::try_new`).
pub name: String,
/// Hashing and compression configuration.
pub config: BucketConfig,
/// Optional caps on total size, object count, object size and pins.
pub limits: BucketLimits,
/// Page-size bounds (see `Pagination`).
pub pagination: Pagination,
/// Running usage counters; zeroed in `Bucket::try_new`.
pub stat: BucketStat,
}
/// Aggregated usage counters for the bucket; all start at zero.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
pub struct BucketStat {
/// Cumulative uncompressed size of stored objects — presumably bytes; TODO confirm.
pub size: Uint128,
/// Cumulative size of stored objects after compression.
pub compressed_size: Uint128,
/// Number of objects accounted for in the bucket.
pub object_count: Uint128,
}
impl Bucket {
pub fn try_new(
owner: Addr,
name: String,
config: BucketConfig,
limits: BucketLimits,
pagination: Pagination,
) -> Result<Self, BucketError> {
let n: String = name.split_whitespace().collect();
ensure!(!n.is_empty(), EmptyName);
Ok(Self {
owner,
name: n,
config,
limits,
pagination,
stat: BucketStat {
size: Uint128::zero(),
compressed_size: Uint128::zero(),
object_count: Uint128::zero(),
},
})
}
}
/// Hash algorithm choices stored in `BucketConfig`.
#[derive(Serialize, Copy, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema, Default)]
pub enum HashAlgorithm {
MD5,
Sha224,
/// Used when no algorithm is specified.
#[default]
Sha256,
Sha384,
Sha512,
}
impl From<msg::HashAlgorithm> for HashAlgorithm {
    /// Maps the message-level hash algorithm onto its state-level counterpart.
    fn from(algorithm: msg::HashAlgorithm) -> Self {
        match algorithm {
            msg::HashAlgorithm::MD5 => Self::MD5,
            msg::HashAlgorithm::Sha224 => Self::Sha224,
            msg::HashAlgorithm::Sha256 => Self::Sha256,
            msg::HashAlgorithm::Sha384 => Self::Sha384,
            msg::HashAlgorithm::Sha512 => Self::Sha512,
        }
    }
}
impl From<HashAlgorithm> for msg::HashAlgorithm {
    /// Maps the state-level hash algorithm back to its message-level counterpart.
    fn from(algorithm: HashAlgorithm) -> Self {
        match algorithm {
            HashAlgorithm::MD5 => Self::MD5,
            HashAlgorithm::Sha224 => Self::Sha224,
            HashAlgorithm::Sha256 => Self::Sha256,
            HashAlgorithm::Sha384 => Self::Sha384,
            HashAlgorithm::Sha512 => Self::Sha512,
        }
    }
}
impl From<msg::CompressionAlgorithm> for CompressionAlgorithm {
    /// Maps the message-level compression algorithm onto its state-level counterpart.
    fn from(algorithm: msg::CompressionAlgorithm) -> Self {
        match algorithm {
            msg::CompressionAlgorithm::Passthrough => Self::Passthrough,
            msg::CompressionAlgorithm::Snappy => Self::Snappy,
            msg::CompressionAlgorithm::Lzma => Self::Lzma,
        }
    }
}
impl From<CompressionAlgorithm> for msg::CompressionAlgorithm {
    /// Maps the state-level compression algorithm back to its message-level counterpart.
    fn from(algorithm: CompressionAlgorithm) -> Self {
        match algorithm {
            CompressionAlgorithm::Passthrough => Self::Passthrough,
            CompressionAlgorithm::Snappy => Self::Snappy,
            CompressionAlgorithm::Lzma => Self::Lzma,
        }
    }
}
/// Bucket-level hashing and compression configuration.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
pub struct BucketConfig {
/// Configured hash algorithm (`Sha256` by default per `HashAlgorithm`).
pub hash_algorithm: HashAlgorithm,
/// Compression algorithms the bucket accepts; never empty (enforced in `try_new`).
pub accepted_compression_algorithms: Vec<CompressionAlgorithm>,
}
impl BucketConfig {
fn try_new(
hash_algorithm: HashAlgorithm,
accepted_compression_algorithms: Vec<CompressionAlgorithm>,
) -> StdResult<BucketConfig> {
ensure!(
!accepted_compression_algorithms.is_empty(),
StdError::generic_err("'accepted_compression_algorithms' cannot be empty")
);
Ok(BucketConfig {
hash_algorithm,
accepted_compression_algorithms,
})
}
}
impl TryFrom<msg::BucketConfig> for BucketConfig {
type Error = StdError;
fn try_from(config: msg::BucketConfig) -> StdResult<BucketConfig> {
BucketConfig::try_new(
config.hash_algorithm.into(),
config
.accepted_compression_algorithms
.into_iter()
.map(Into::into)
.collect(),
)
}
}
impl From<BucketConfig> for msg::BucketConfig {
    /// Exposes the stored config as its message-level representation.
    fn from(config: BucketConfig) -> Self {
        let accepted = config
            .accepted_compression_algorithms
            .into_iter()
            .map(msg::CompressionAlgorithm::from)
            .collect();
        Self {
            hash_algorithm: config.hash_algorithm.into(),
            accepted_compression_algorithms: accepted,
        }
    }
}
/// Optional caps applied to the bucket. `try_new` rejects `Some(0)` for the
/// first three limits and a total-size cap below the per-object cap.
/// `None` presumably means "no limit" — confirm against the enforcement code.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
pub struct BucketLimits {
/// Cap on the cumulative size of stored objects.
pub max_total_size: Option<Uint128>,
/// Cap on the number of stored objects.
pub max_objects: Option<Uint128>,
/// Cap on the size of a single object.
pub max_object_size: Option<Uint128>,
/// Cap on the number of pins a single object may carry; zero is not rejected here.
pub max_object_pins: Option<Uint128>,
}
impl From<BucketLimits> for msg::BucketLimits {
fn from(limits: BucketLimits) -> Self {
msg::BucketLimits {
max_total_size: limits.max_total_size,
max_objects: limits.max_objects,
max_object_size: limits.max_object_size,
max_object_pins: limits.max_object_pins,
}
}
}
impl From<BucketStat> for msg::BucketStat {
fn from(stat: BucketStat) -> Self {
msg::BucketStat {
size: stat.size,
compressed_size: stat.compressed_size,
object_count: stat.object_count,
}
}
}
impl BucketLimits {
fn try_new(
max_total_size: Option<Uint128>,
max_objects: Option<Uint128>,
max_object_size: Option<Uint128>,
max_object_pins: Option<Uint128>,
) -> StdResult<BucketLimits> {
ensure_ne!(
max_total_size,
Some(Uint128::zero()),
StdError::generic_err("'max_total_size' cannot be zero")
);
ensure_ne!(
max_objects,
Some(Uint128::zero()),
StdError::generic_err("'max_objects' cannot be zero")
);
ensure_ne!(
max_object_size,
Some(Uint128::zero()),
StdError::generic_err("'max_object_size' cannot be zero")
);
ensure!(
!matches!(
(max_total_size, max_object_size),
(Some(max_total_size), Some(max_object_size)) if max_total_size < max_object_size
),
StdError::generic_err("'max_total_size' cannot be less than 'max_object_size'")
);
Ok(BucketLimits {
max_total_size,
max_objects,
max_object_size,
max_object_pins,
})
}
}
impl TryFrom<msg::BucketLimits> for BucketLimits {
    type Error = StdError;

    /// Validates and converts message-level limits into stored limits.
    fn try_from(limits: msg::BucketLimits) -> StdResult<BucketLimits> {
        let msg::BucketLimits {
            max_total_size,
            max_objects,
            max_object_size,
            max_object_pins,
        } = limits;
        BucketLimits::try_new(max_total_size, max_objects, max_object_size, max_object_pins)
    }
}
/// Bounds on page sizes used by paginated queries.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
pub struct Pagination {
/// Largest page size a caller may request; at most `MAX_PAGE_MAX_SIZE`.
pub max_page_size: u32,
/// Page size applied when the caller does not specify one; must be
/// non-zero and no greater than `max_page_size` (enforced in `try_new`).
pub default_page_size: u32,
}
// Hard ceiling for `max_page_size`, kept one below `u32::MAX` — presumably
// so pagination can compute `page_size + 1` without overflow; confirm
// against the query implementation.
const MAX_PAGE_MAX_SIZE: u32 = u32::MAX - 1;
impl Pagination {
fn try_new(max_page_size: u32, default_page_size: u32) -> StdResult<Pagination> {
ensure!(
max_page_size <= MAX_PAGE_MAX_SIZE,
StdError::generic_err("'max_page_size' cannot exceed 'u32::MAX - 1'")
);
ensure_ne!(
default_page_size,
0,
StdError::generic_err("'default_page_size' cannot be zero")
);
ensure!(
default_page_size <= max_page_size,
StdError::generic_err("'default_page_size' cannot exceed 'max_page_size'")
);
Ok(Pagination {
max_page_size,
default_page_size,
})
}
}
impl From<Pagination> for PaginationConfig {
fn from(value: Pagination) -> Self {
PaginationConfig {
max_page_size: value.max_page_size,
default_page_size: value.default_page_size,
}
}
}
impl TryFrom<PaginationConfig> for Pagination {
type Error = StdError;
fn try_from(value: PaginationConfig) -> StdResult<Pagination> {
Pagination::try_new(value.max_page_size, value.default_page_size)
}
}
/// Singleton item holding the bucket's state.
pub const BUCKET: Item<Bucket> = Item::new("bucket");
/// Stored metadata for a single object.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, JsonSchema)]
pub struct Object {
/// Object identifier — the hash keying the `DATA` map; presumably derived
/// with the bucket's configured hash algorithm — TODO confirm.
pub id: Hash,
/// Address that stored the object.
pub owner: Addr,
/// Uncompressed payload size.
pub size: Uint128,
/// Number of active pins; the object reports as pinned when > 0.
pub pin_count: Uint128,
/// Compression algorithm applied to the payload.
pub compression: CompressionAlgorithm,
/// Payload size after compression.
pub compressed_size: Uint128,
}
impl From<&Object> for ObjectResponse {
fn from(object: &Object) -> Self {
ObjectResponse {
id: object.id.to_string(),
size: object.size,
owner: object.owner.clone().into(),
is_pinned: object.pin_count > Uint128::zero(),
compressed_size: object.compressed_size,
compression_algorithm: object.compression.into(),
}
}
}
/// Secondary indexes maintained for the `objects()` map.
pub struct ObjectIndexes<'a> {
/// Indexes objects by their owner address.
pub owner: MultiIndex<'a, Addr, Object, Hash>,
}
impl IndexList<Object> for ObjectIndexes<'_> {
    /// Yields the object indexes — currently only the owner index.
    fn get_indexes(&'_ self) -> Box<dyn Iterator<Item = &'_ dyn Index<Object>> + '_> {
        Box::new(std::iter::once(&self.owner as &dyn Index<Object>))
    }
}
/// Indexed map of stored objects, keyed by hash and indexed by owner.
pub fn objects<'a>() -> IndexedMap<Hash, Object, ObjectIndexes<'a>> {
    let indexes = ObjectIndexes {
        owner: MultiIndex::new(|_, object| object.owner.clone(), "OBJECT", "OBJECT__OWNER"),
    };
    IndexedMap::new("OBJECT", indexes)
}
#[derive(Serialize, Deserialize, Clone)]
pub struct Pin {
pub id: Hash,
pub address: Addr,
}
/// Secondary indexes maintained for the `pins()` map.
pub struct PinIndexes<'a> {
/// Indexes pins by the pinned object's hash.
pub object: MultiIndex<'a, Hash, Pin, (Hash, Addr)>,
}
impl IndexList<Pin> for PinIndexes<'_> {
    /// Yields the pin indexes — currently only the object-hash index.
    fn get_indexes(&'_ self) -> Box<dyn Iterator<Item = &'_ dyn Index<Pin>> + '_> {
        Box::new(std::iter::once(&self.object as &dyn Index<Pin>))
    }
}
/// Indexed map of pins, keyed by `(object hash, address)` and indexed by object hash.
pub fn pins<'a>() -> IndexedMap<(Hash, Addr), Pin, PinIndexes<'a>> {
    let indexes = PinIndexes {
        object: MultiIndex::new(|_, pin| pin.id.clone(), "PIN", "PIN__OBJECT"),
    };
    IndexedMap::new("PIN", indexes)
}