use super::{
Error, JsonSnafu, Model, ModelListParams, ROLE_ADMIN, ROLE_SUPER_ADMIN, Schema,
SchemaAllowCreate, SchemaAllowEdit, SchemaOption, SchemaOptionValue, SchemaType, SchemaView,
SqlxSnafu, format_datetime,
};
use http::header::{HeaderMap, HeaderName, HeaderValue};
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use sqlx::FromRow;
use sqlx::types::Json;
use sqlx::{Pool, Postgres, QueryBuilder};
use std::collections::HashMap;
use std::str::FromStr;
use time::PrimitiveDateTime;
type Result<T> = std::result::Result<T, Error>;
/// Direct row mapping for the `files` table.
///
/// Field types mirror the Postgres column types:
/// `image_width`/`image_height` are nullable columns, and `metadata`
/// is a JSON column wrapped in sqlx's `Json` adapter. Converted to the
/// API-facing [`File`] via `From<FileSchema>`.
#[derive(FromRow)]
struct FileSchema {
    id: i64,
    filename: String,
    file_size: i64,
    content_type: String,
    // Quoted as "group" in SQL since it is a reserved word.
    group: String,
    image_width: Option<i32>,
    image_height: Option<i32>,
    metadata: Option<Json<serde_json::Value>>,
    uploader: String,
    created: PrimitiveDateTime,
    modified: PrimitiveDateTime,
}
/// API-facing representation of a stored file.
///
/// Unlike [`FileSchema`], timestamps are pre-formatted strings (via
/// `format_datetime`) and image dimensions are unsigned.
#[derive(Deserialize, Serialize)]
pub struct File {
    pub id: i64,
    pub filename: String,
    pub file_size: i64,
    pub content_type: String,
    pub group: String,
    pub image_width: Option<u32>,
    pub image_height: Option<u32>,
    // Arbitrary JSON; when it is an object of string values it can be
    // projected into HTTP headers via `get_metadata`.
    pub metadata: Option<serde_json::Value>,
    pub uploader: String,
    pub created: String,
    pub modified: String,
}
impl From<FileSchema> for File {
    /// Converts a database row into the API-facing `File`.
    fn from(file: FileSchema) -> Self {
        File {
            id: file.id,
            filename: file.filename,
            file_size: file.file_size,
            content_type: file.content_type,
            group: file.group,
            // `insert_file` historically stored `-1` as a "no dimension"
            // sentinel; a plain `as u32` cast would wrap that to
            // 4294967295. `try_from` maps any negative value to `None`.
            image_width: file.image_width.and_then(|w| u32::try_from(w).ok()),
            image_height: file.image_height.and_then(|h| u32::try_from(h).ok()),
            metadata: file.metadata.map(|m| m.0),
            uploader: file.uploader,
            created: format_datetime(file.created),
            modified: format_datetime(file.modified),
        }
    }
}
impl File {
    /// Projects the stored `metadata` JSON into an HTTP header map.
    ///
    /// Only entries whose value is a JSON string and whose key/value
    /// parse as a valid header name/value are kept; everything else is
    /// silently skipped. Returns `None` when there is no metadata or it
    /// is not a JSON object.
    pub fn get_metadata(&self) -> Option<HeaderMap> {
        let obj = self.metadata.as_ref()?.as_object()?;
        let mut headers = HeaderMap::with_capacity(obj.len());
        for (key, value) in obj {
            let text = match value.as_str() {
                Some(text) => text,
                None => continue,
            };
            if let (Ok(name), Ok(val)) = (HeaderName::from_str(key), HeaderValue::from_str(text)) {
                headers.insert(name, val);
            }
        }
        Some(headers)
    }
}
/// Parameters for creating a new `files` row via `FileModel::insert_file`.
#[derive(Debug, Clone, Deserialize, Default)]
pub struct FileInsertParams {
    pub group: String,
    pub filename: String,
    pub file_size: i64,
    pub content_type: String,
    pub uploader: String,
    // Image dimensions; `None` for non-image files.
    pub width: Option<i32>,
    pub height: Option<i32>,
    // Optional free-form JSON; defaults to `{}` at insert time.
    pub metadata: Option<serde_json::Value>,
}
/// Partial-update parameters for `FileModel::update_by_id`.
///
/// `None` fields are left unchanged (the UPDATE uses COALESCE).
#[derive(Debug, Clone, Deserialize, Default)]
pub struct FileUpdateParams {
    pub metadata: Option<serde_json::Value>,
    pub group: Option<String>,
}
impl From<serde_json::Value> for FileUpdateParams {
    /// Pulls the optional `metadata` and `group` fields out of a raw
    /// JSON payload; anything missing (or non-string for `group`)
    /// stays `None`.
    fn from(value: serde_json::Value) -> Self {
        let metadata = value.get("metadata").cloned();
        let group = value
            .get("group")
            .and_then(serde_json::Value::as_str)
            .map(str::to_string);
        FileUpdateParams { metadata, group }
    }
}
/// Data-access model for the `files` table; stateless, so `Default`
/// suffices as a constructor.
#[derive(Default)]
pub struct FileModel {}
impl FileModel {
    /// Inserts a new file record and returns its generated id.
    ///
    /// Missing `width`/`height` are stored as SQL NULL by binding the
    /// `Option` directly (the columns are nullable — see `FileSchema`),
    /// instead of the previous lossy `-1` sentinel. Missing `metadata`
    /// defaults to an empty JSON object.
    pub async fn insert_file(
        &self,
        pool: &Pool<Postgres>,
        params: FileInsertParams,
    ) -> Result<u64> {
        let row: (i64,) = sqlx::query_as(
            r#"
    INSERT INTO files (
        "group", filename, file_size, content_type,
        image_width, image_height, metadata, uploader
    ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING id
    "#,
        )
        .bind(params.group)
        .bind(params.filename)
        .bind(params.file_size)
        .bind(params.content_type)
        // `Option<i32>` binds as NULL when `None`.
        .bind(params.width)
        .bind(params.height)
        // Lazy default: only build the empty object when needed.
        .bind(params.metadata.unwrap_or_else(|| serde_json::json!({})))
        .bind(params.uploader)
        .fetch_one(pool)
        .await
        .context(SqlxSnafu)?;
        Ok(row.0 as u64)
    }
    /// Looks up a non-deleted file by its exact filename.
    pub async fn get_by_name(&self, pool: &Pool<Postgres>, name: &str) -> Result<Option<File>> {
        let result = sqlx::query_as::<_, FileSchema>(
            r#"SELECT * FROM files WHERE filename = $1 AND deleted_at IS NULL"#,
        )
        .bind(name)
        .fetch_optional(pool)
        .await
        .context(SqlxSnafu)?;
        Ok(result.map(|file| file.into()))
    }
}
impl Model for FileModel {
    type Output = File;
    fn new() -> Self {
        Self::default()
    }
    /// Describes the admin UI schema for files: column types, render
    /// hints, and create/edit permissions.
    async fn schema_view(&self, _pool: &Pool<Postgres>) -> SchemaView {
        // Fixed set of upload groups offered by the "group" select.
        let group_options = vec![
            SchemaOption {
                label: "Tibba".to_string(),
                value: SchemaOptionValue::String("tibba".to_string()),
            },
            SchemaOption {
                label: "Web".to_string(),
                value: SchemaOptionValue::String("web".to_string()),
            },
            SchemaOption {
                label: "Web Page Stat".to_string(),
                value: SchemaOptionValue::String("web_page_stat".to_string()),
            },
        ];
        SchemaView {
            schemas: vec![
                Schema::new_id(),
                Schema {
                    name: "filename".to_string(),
                    category: SchemaType::String,
                    identity: true,
                    read_only: true,
                    required: true,
                    fixed: true,
                    ..Default::default()
                },
                Schema {
                    name: "file_size".to_string(),
                    category: SchemaType::Bytes,
                    read_only: true,
                    required: true,
                    sortable: true,
                    ..Default::default()
                },
                Schema {
                    name: "uploader".to_string(),
                    category: SchemaType::String,
                    read_only: true,
                    required: true,
                    filterable: true,
                    ..Default::default()
                },
                Schema {
                    name: "content_type".to_string(),
                    category: SchemaType::String,
                    read_only: true,
                    required: true,
                    ..Default::default()
                },
                Schema {
                    name: "group".to_string(),
                    category: SchemaType::String,
                    // Moved (not cloned): group_options is not used again.
                    options: Some(group_options),
                    filterable: true,
                    ..Default::default()
                },
                Schema {
                    name: "image_width".to_string(),
                    category: SchemaType::Number,
                    read_only: true,
                    ..Default::default()
                },
                Schema {
                    name: "image_height".to_string(),
                    category: SchemaType::Number,
                    read_only: true,
                    ..Default::default()
                },
                Schema {
                    name: "metadata".to_string(),
                    category: SchemaType::Json,
                    span: Some(2),
                    popover: true,
                    ..Default::default()
                },
                Schema::new_created(),
                Schema::new_modified(),
            ],
            // Owners plus (super-)admins may edit; anyone may create.
            allow_edit: SchemaAllowEdit {
                owner: true,
                roles: vec![ROLE_SUPER_ADMIN.to_string(), ROLE_ADMIN.to_string()],
                ..Default::default()
            },
            allow_create: SchemaAllowCreate {
                roles: vec!["*".to_string()],
                ..Default::default()
            },
        }
    }
    /// Column used for keyword (search) matching in list queries.
    fn keyword(&self) -> String {
        "filename".to_string()
    }
    /// Appends equality filters for the supported filter keys
    /// (`group`, `uploader`) as bound parameters.
    fn push_filter_conditions<'args>(
        &self,
        qb: &mut QueryBuilder<'args, Postgres>,
        filters: &HashMap<String, String>,
    ) -> Result<()> {
        if let Some(group) = filters.get("group") {
            // "group" is a reserved word in SQL, hence the quoting.
            qb.push(" AND \"group\" = ");
            qb.push_bind(group.clone());
        }
        if let Some(uploader) = filters.get("uploader") {
            qb.push(" AND uploader = ");
            qb.push_bind(uploader.clone());
        }
        Ok(())
    }
    /// Fetches a single non-deleted file by primary key.
    async fn get_by_id(&self, pool: &Pool<Postgres>, id: u64) -> Result<Option<Self::Output>> {
        let result = sqlx::query_as::<_, FileSchema>(
            r#"SELECT * FROM files WHERE id = $1 AND deleted_at IS NULL"#,
        )
        .bind(id as i64)
        .fetch_optional(pool)
        .await
        .context(SqlxSnafu)?;
        Ok(result.map(|file| file.into()))
    }
    /// Soft-deletes a file by stamping `deleted_at`; already-deleted
    /// rows are left untouched. No error if the id does not exist.
    async fn delete_by_id(&self, pool: &Pool<Postgres>, id: u64) -> Result<()> {
        sqlx::query(
            r#"UPDATE files SET deleted_at = CURRENT_TIMESTAMP WHERE id = $1 AND deleted_at IS NULL"#
        )
        .bind(id as i64)
        .execute(pool)
        .await
        .context(SqlxSnafu)?;
        Ok(())
    }
    /// Partially updates `metadata` and/or `group`; `None` fields keep
    /// their current value via COALESCE.
    async fn update_by_id(
        &self,
        pool: &Pool<Postgres>,
        id: u64,
        data: serde_json::Value,
    ) -> Result<()> {
        let params: FileUpdateParams = serde_json::from_value(data).context(JsonSnafu)?;
        let _ = sqlx::query(
            r#"UPDATE files SET metadata = COALESCE($1, metadata), "group" = COALESCE($2, "group") WHERE id = $3 AND deleted_at IS NULL"#,
        )
        .bind(params.metadata)
        .bind(params.group)
        .bind(id as i64)
        .execute(pool)
        .await
        .context(SqlxSnafu)?;
        Ok(())
    }
    /// Counts rows matching the list filters (for pagination totals).
    async fn count(&self, pool: &Pool<Postgres>, params: &ModelListParams) -> Result<i64> {
        let mut qb = QueryBuilder::new("SELECT COUNT(*) FROM files");
        self.push_conditions(&mut qb, params)?;
        let count = qb
            .build_query_scalar::<i64>()
            .fetch_one(pool)
            .await
            .context(SqlxSnafu)?;
        Ok(count)
    }
    /// Lists files matching the filters, with pagination applied.
    async fn list(
        &self,
        pool: &Pool<Postgres>,
        params: &ModelListParams,
    ) -> Result<Vec<Self::Output>> {
        let mut qb = QueryBuilder::new("SELECT * FROM files");
        self.push_conditions(&mut qb, params)?;
        params.push_pagination(&mut qb);
        let files = qb
            .build_query_as::<FileSchema>()
            .fetch_all(pool)
            .await
            .context(SqlxSnafu)?;
        Ok(files.into_iter().map(|f| f.into()).collect())
    }
}