Skip to main content

tauri_plugin_mongoose/db/
files.rs

1use std::path::Path;
2
3use futures::io::Cursor;
4use mongodb::{
5    bson::{doc, DateTime, Document},
6    gridfs::FilesCollectionDocument,
7    options::{GridFsBucketOptions, GridFsUploadOptions},
8};
9use serde::Deserialize;
10use serde_json::Value;
11
12use crate::db::state::{get_client, get_db_name};
13
/// Arguments accepted by the `save_file` command.
///
/// Deserialized from the frontend invoke payload; field names arrive in
/// camelCase on the wire (e.g. `chunkSizeBytes`) per the serde rename below.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SaveFileRequest {
    /// GridFS bucket / collection name. Maps to `<bucket>.files` and `<bucket>.chunks` collections.
    pub collection: String,
    /// Absolute path of a file to read from disk. Accepts either a JSON string
    /// or an object carrying a `path` / `filePath` string field. Takes
    /// precedence over `data` when both are supplied.
    pub path: Option<Value>,
    /// Raw bytes to be written. Useful when the caller already has the buffer in memory.
    /// At least one of `path` or `data` must be provided.
    pub data: Option<Vec<u8>>,
    /// Optional filename stored in GridFS. Defaults to the basename of `path` or `"untitled"`.
    pub filename: Option<String>,
    /// Arbitrary JSON metadata stored alongside the file document.
    /// Must be convertible to a BSON document (i.e. a JSON object).
    pub metadata: Option<Value>,
    /// Optional chunk size in bytes. Defaults to MongoDB GridFS default (255 KiB).
    pub chunk_size_bytes: Option<u32>,
}
31
32/// Store a file in MongoDB GridFS using either a filesystem path or an in-memory buffer.
33/// Returns the created files document as JSON (including the `_id`).
34pub async fn save_file(args: SaveFileRequest) -> Result<Value, String> {
35    let SaveFileRequest {
36        collection,
37        path,
38        data,
39        filename,
40        metadata,
41        chunk_size_bytes,
42    } = args;
43
44    if path.is_none() && data.is_none() {
45        return Err("Either 'path' or 'data' must be provided".to_string());
46    }
47
48    let client = get_client().await?;
49    let db_name = get_db_name().await;
50    let db = client.database(&db_name);
51
52    let bucket_options = GridFsBucketOptions::builder()
53        .bucket_name(collection.clone())
54        .chunk_size_bytes(chunk_size_bytes)
55        .build();
56    let bucket = db.gridfs_bucket(bucket_options);
57
58    // Load bytes and determine filename.
59    let (bytes, final_filename) = if let Some(path_value) = path {
60        let path_str_opt = match path_value {
61            Value::String(s) => Some(s),
62            Value::Object(map) => map
63                .get("path")
64                .and_then(|v| v.as_str())
65                .map(|s| s.to_string())
66                .or_else(|| {
67                    map.get("filePath")
68                        .and_then(|v| v.as_str())
69                        .map(|s| s.to_string())
70                }),
71            _ => None,
72        };
73
74        let file_path = path_str_opt.ok_or_else(|| {
75            "Invalid 'path': expected a string or object with a 'path' field".to_string()
76        })?;
77
78        let bytes = tokio::fs::read(&file_path)
79            .await
80            .map_err(|e| format!("Failed to read file at {}: {}", file_path, e))?;
81        let derived_name = Path::new(&file_path)
82            .file_name()
83            .and_then(|n| n.to_str())
84            .map(|s| s.to_string());
85        (
86            bytes,
87            filename
88                .or(derived_name)
89                .unwrap_or_else(|| "untitled".to_string()),
90        )
91    } else if let Some(bytes) = data {
92        (bytes, filename.unwrap_or_else(|| "untitled".to_string()))
93    } else {
94        // Unreachable because of the earlier check.
95        return Err("No data supplied".to_string());
96    };
97
98    let length = bytes.len() as i64;
99    let chunk_size = chunk_size_bytes.unwrap_or(255 * 1024);
100
101    let metadata_doc: Option<Document> = match metadata {
102        Some(value) => Some(
103            mongodb::bson::to_document(&value).map_err(|e| format!("Invalid metadata: {}", e))?,
104        ),
105        None => None,
106    };
107
108    let upload_options = GridFsUploadOptions::builder()
109        .chunk_size_bytes(chunk_size_bytes)
110        .metadata(metadata_doc.clone())
111        .build();
112
113    let cursor = Cursor::new(bytes);
114    let file_id = bucket
115        .upload_from_futures_0_3_reader(final_filename.clone(), cursor, upload_options)
116        .await
117        .map_err(|e| e.to_string())?;
118
119    // Try to fetch the stored files document for richer output.
120    let files_collection =
121        db.collection::<FilesCollectionDocument>(&format!("{}.files", collection));
122    let stored = files_collection
123        .find_one(doc! {"_id": file_id.clone()}, None)
124        .await
125        .map_err(|e| e.to_string())?;
126
127    let mut response_doc = if let Some(file) = stored {
128        mongodb::bson::to_document(&file).map_err(|e| e.to_string())?
129    } else {
130        // Fallback when the files document could not be read immediately.
131        let mut fallback = doc! {
132            "_id": file_id.clone(),
133            "length": length,
134            "chunkSizeBytes": chunk_size as i32,
135            "uploadDate": DateTime::now(),
136            "filename": final_filename.clone(),
137        };
138        if let Some(meta) = metadata_doc.clone() {
139            fallback.insert("metadata", meta);
140        }
141        fallback
142    };
143
144    response_doc.insert("bucket", collection.clone());
145
146    mongodb::bson::from_document::<Value>(response_doc).map_err(|e| e.to_string())
147}