// tibba_model/file.rs
//
// Copyright 2025 Tree xie.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use super::{
    Error, JsonSnafu, Model, ModelListParams, ROLE_ADMIN, ROLE_SUPER_ADMIN, Schema,
    SchemaAllowCreate, SchemaAllowEdit, SchemaOption, SchemaOptionValue, SchemaType, SchemaView,
    SqlxSnafu, format_datetime,
};
use http::header::{HeaderMap, HeaderName, HeaderValue};
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use sqlx::FromRow;
use sqlx::types::Json;
use sqlx::{Pool, Postgres, QueryBuilder};
use std::collections::HashMap;
use std::str::FromStr;
use time::PrimitiveDateTime;

30type Result<T> = std::result::Result<T, Error>;
31
32#[derive(FromRow)]
33struct FileSchema {
34    id: i64,
35    filename: String,
36    file_size: i64,
37    content_type: String,
38    group: String,
39    image_width: Option<i32>,
40    image_height: Option<i32>,
41    metadata: Option<Json<serde_json::Value>>,
42    uploader: String,
43    created: PrimitiveDateTime,
44    modified: PrimitiveDateTime,
45}
46
47#[derive(Deserialize, Serialize)]
48pub struct File {
49    pub id: i64,
50    pub filename: String,
51    pub file_size: i64,
52    pub content_type: String,
53    pub group: String,
54    pub image_width: Option<u32>,
55    pub image_height: Option<u32>,
56    pub metadata: Option<serde_json::Value>,
57    pub uploader: String,
58    pub created: String,
59    pub modified: String,
60}
61
62impl From<FileSchema> for File {
63    fn from(file: FileSchema) -> Self {
64        File {
65            id: file.id,
66            filename: file.filename,
67            file_size: file.file_size,
68            content_type: file.content_type,
69            group: file.group,
70            image_width: file.image_width.map(|w| w as u32),
71            image_height: file.image_height.map(|h| h as u32),
72            metadata: file.metadata.map(|m| m.0),
73            uploader: file.uploader,
74            created: format_datetime(file.created),
75            modified: format_datetime(file.modified),
76        }
77    }
78}
79impl File {
80    pub fn get_metadata(&self) -> Option<HeaderMap> {
81        let Some(metadata) = &self.metadata else {
82            return None;
83        };
84        let obj = metadata.as_object()?;
85        let mut headers = HeaderMap::with_capacity(obj.len());
86        for (key, value) in obj.iter() {
87            let Some(value_str) = value.as_str() else {
88                continue;
89            };
90            let Ok(header_value) = HeaderValue::from_str(value_str) else {
91                continue;
92            };
93            let Ok(header_name) = HeaderName::from_str(key) else {
94                continue;
95            };
96            headers.insert(header_name, header_value);
97        }
98        Some(headers)
99    }
100}
101#[derive(Debug, Clone, Deserialize, Default)]
102pub struct FileInsertParams {
103    pub group: String,
104    pub filename: String,
105    pub file_size: i64,
106    pub content_type: String,
107    pub uploader: String,
108    pub width: Option<i32>,
109    pub height: Option<i32>,
110    pub metadata: Option<serde_json::Value>,
111}
112
113#[derive(Debug, Clone, Deserialize, Default)]
114pub struct FileUpdateParams {
115    pub metadata: Option<serde_json::Value>,
116    pub group: Option<String>,
117}
118
119impl From<serde_json::Value> for FileUpdateParams {
120    fn from(value: serde_json::Value) -> Self {
121        FileUpdateParams {
122            metadata: value.get("metadata").cloned(),
123            group: value
124                .get("group")
125                .and_then(|v| v.as_str().map(|s| s.to_string())),
126        }
127    }
128}
129
130#[derive(Default)]
131pub struct FileModel {}
132
133impl FileModel {
134    pub async fn insert_file(
135        &self,
136        pool: &Pool<Postgres>,
137        params: FileInsertParams,
138    ) -> Result<u64> {
139        let row: (i64,) = sqlx::query_as(
140            r#"
141            INSERT INTO files (
142                "group", filename, file_size, content_type,
143                image_width, image_height, metadata, uploader
144            ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING id
145            "#,
146        )
147        .bind(params.group)
148        .bind(params.filename)
149        .bind(params.file_size)
150        .bind(params.content_type)
151        .bind(params.width.unwrap_or(-1))
152        .bind(params.height.unwrap_or(-1))
153        .bind(params.metadata.unwrap_or(serde_json::json!({})))
154        .bind(params.uploader)
155        .fetch_one(pool)
156        .await
157        .context(SqlxSnafu)?;
158
159        Ok(row.0 as u64)
160    }
161    pub async fn get_by_name(&self, pool: &Pool<Postgres>, name: &str) -> Result<Option<File>> {
162        let result = sqlx::query_as::<_, FileSchema>(
163            r#"SELECT * FROM files WHERE filename = $1 AND deleted_at IS NULL"#,
164        )
165        .bind(name)
166        .fetch_optional(pool)
167        .await
168        .context(SqlxSnafu)?;
169
170        Ok(result.map(|file| file.into()))
171    }
172}
173
impl Model for FileModel {
    type Output = File;
176    fn new() -> Self {
177        Self::default()
178    }
179    async fn schema_view(&self, _pool: &Pool<Postgres>) -> SchemaView {
180        let group_options = vec![
181            SchemaOption {
182                label: "Tibba".to_string(),
183                value: SchemaOptionValue::String("tibba".to_string()),
184            },
185            SchemaOption {
186                label: "Web".to_string(),
187                value: SchemaOptionValue::String("web".to_string()),
188            },
189            SchemaOption {
190                label: "Web Page Stat".to_string(),
191                value: SchemaOptionValue::String("web_page_stat".to_string()),
192            },
193        ];
194        SchemaView {
195            schemas: vec![
196                Schema::new_id(),
197                Schema {
198                    name: "filename".to_string(),
199                    category: SchemaType::String,
200                    identity: true,
201                    read_only: true,
202                    required: true,
203                    fixed: true,
204                    ..Default::default()
205                },
206                Schema {
207                    name: "file_size".to_string(),
208                    category: SchemaType::Bytes,
209                    read_only: true,
210                    required: true,
211                    sortable: true,
212                    ..Default::default()
213                },
214                Schema {
215                    name: "uploader".to_string(),
216                    category: SchemaType::String,
217                    read_only: true,
218                    required: true,
219                    filterable: true,
220                    ..Default::default()
221                },
222                Schema {
223                    name: "content_type".to_string(),
224                    category: SchemaType::String,
225                    read_only: true,
226                    required: true,
227                    ..Default::default()
228                },
229                Schema {
230                    name: "group".to_string(),
231                    category: SchemaType::String,
232                    options: Some(group_options.clone()),
233                    filterable: true,
234                    ..Default::default()
235                },
236                Schema {
237                    name: "image_width".to_string(),
238                    category: SchemaType::Number,
239                    read_only: true,
240                    ..Default::default()
241                },
242                Schema {
243                    name: "image_height".to_string(),
244                    category: SchemaType::Number,
245                    read_only: true,
246                    ..Default::default()
247                },
248                Schema {
249                    name: "metadata".to_string(),
250                    category: SchemaType::Json,
251                    span: Some(2),
252                    popover: true,
253                    ..Default::default()
254                },
255                Schema::new_created(),
256                Schema::new_modified(),
257            ],
258            allow_edit: SchemaAllowEdit {
259                owner: true,
260                roles: vec![ROLE_SUPER_ADMIN.to_string(), ROLE_ADMIN.to_string()],
261                ..Default::default()
262            },
263            allow_create: SchemaAllowCreate {
264                roles: vec!["*".to_string()],
265                ..Default::default()
266            },
267        }
268    }
269
270    fn keyword(&self) -> String {
271        "filename".to_string()
272    }
273    fn push_filter_conditions<'args>(
274        &self,
275        qb: &mut QueryBuilder<'args, Postgres>,
276        filters: &HashMap<String, String>,
277    ) -> Result<()> {
278        if let Some(group) = filters.get("group") {
279            qb.push(" AND \"group\" = ");
280            qb.push_bind(group.clone());
281        }
282        if let Some(uploader) = filters.get("uploader") {
283            qb.push(" AND uploader = ");
284            qb.push_bind(uploader.clone());
285        }
286        Ok(())
287    }
288
289    async fn get_by_id(&self, pool: &Pool<Postgres>, id: u64) -> Result<Option<Self::Output>> {
290        let result = sqlx::query_as::<_, FileSchema>(
291            r#"SELECT * FROM files WHERE id = $1 AND deleted_at IS NULL"#,
292        )
293        .bind(id as i64)
294        .fetch_optional(pool)
295        .await
296        .context(SqlxSnafu)?;
297
298        Ok(result.map(|file| file.into()))
299    }
300
301    async fn delete_by_id(&self, pool: &Pool<Postgres>, id: u64) -> Result<()> {
302        sqlx::query(
303            r#"UPDATE files SET deleted_at = CURRENT_TIMESTAMP WHERE id = $1 AND deleted_at IS NULL"#
304        )
305            .bind(id as i64)
306            .execute(pool)
307            .await
308            .context(SqlxSnafu)?;
309        Ok(())
310    }
311
312    async fn update_by_id(
313        &self,
314        pool: &Pool<Postgres>,
315        id: u64,
316        data: serde_json::Value,
317    ) -> Result<()> {
318        let params: FileUpdateParams = serde_json::from_value(data).context(JsonSnafu)?;
319        let _ = sqlx::query(
320            r#"UPDATE files SET metadata = COALESCE($1, metadata), "group" = COALESCE($2, "group") WHERE id = $3 AND deleted_at IS NULL"#,
321        )
322            .bind(params.metadata)
323            .bind(params.group)
324            .bind(id as i64)
325            .execute(pool)
326            .await
327            .context(SqlxSnafu)?;
328        Ok(())
329    }
330
331    async fn count(&self, pool: &Pool<Postgres>, params: &ModelListParams) -> Result<i64> {
332        let mut qb = QueryBuilder::new("SELECT COUNT(*) FROM files");
333        self.push_conditions(&mut qb, params)?;
334        let count = qb
335            .build_query_scalar::<i64>()
336            .fetch_one(pool)
337            .await
338            .context(SqlxSnafu)?;
339        Ok(count)
340    }
341
342    async fn list(
343        &self,
344        pool: &Pool<Postgres>,
345        params: &ModelListParams,
346    ) -> Result<Vec<Self::Output>> {
347        let mut qb = QueryBuilder::new("SELECT * FROM files");
348        self.push_conditions(&mut qb, params)?;
349        params.push_pagination(&mut qb);
350        let files = qb
351            .build_query_as::<FileSchema>()
352            .fetch_all(pool)
353            .await
354            .context(SqlxSnafu)?;
355        Ok(files.into_iter().map(|f| f.into()).collect())
356    }
357}