datafold 0.1.55

A personal database for data sovereignty with AI-powered ingestion
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
//! Multipart form data parsing for file uploads

use crate::log_feature;
use crate::logging::features::LogFeature;
use crate::storage::UploadStorage;
use actix_multipart::Multipart;
use actix_web::HttpResponse;
use futures_util::StreamExt;
use serde_json::json;
use std::path::PathBuf;
#[cfg(feature = "aws-backend")]
use tokio::fs;

/// Data extracted from multipart upload form
#[derive(Debug)]
pub struct UploadFormData {
    /// Local filesystem path where the uploaded file can be read for processing.
    pub file_path: PathBuf,
    /// The unique filename as saved to disk (HASH_originalname format).
    /// This matches the filename in data/uploads/ directory.
    pub original_filename: String,
    /// Value of the `autoExecute` form field; defaults to `true` when absent or unparsable.
    pub auto_execute: bool,
    /// Value of the `trustDistance` form field; defaults to `0` when absent or unparsable.
    pub trust_distance: u32,
    /// Value of the `pubKey` form field; defaults to `"default"` when absent.
    pub pub_key: String,
    /// Whether this file already existed (true = duplicate upload)
    pub already_exists: bool,
}

/// Extract and parse multipart form data
pub async fn parse_multipart(
    mut payload: Multipart,
    upload_storage: &UploadStorage,
) -> Result<UploadFormData, HttpResponse> {
    let mut file_path: Option<PathBuf> = None;
    let mut original_filename: Option<String> = None;
    let mut already_exists = false;
    let mut auto_execute = true;
    let mut trust_distance = 0;
    let mut pub_key = "default".to_string();
    #[cfg(feature = "aws-backend")]
    let mut s3_file_path: Option<String> = None;

    while let Some(item) = payload.next().await {
        let mut field = match item {
            Ok(field) => field,
            Err(e) => {
                log_feature!(
                    LogFeature::Ingestion,
                    error,
                    "Failed to read multipart field: {}",
                    e
                );
                return Err(HttpResponse::BadRequest().json(json!({
                    "success": false,
                    "error": format!("Failed to read multipart data: {}", e)
                })));
            }
        };

        let field_name = field
            .content_disposition()
            .get_name()
            .map(|s| s.to_string());

        match field_name.as_deref() {
            Some("file") => {
                let (path, filename, exists) = save_uploaded_file(field, upload_storage).await?;
                file_path = Some(path);
                original_filename = Some(filename);
                already_exists = exists;
            }
            #[cfg(feature = "aws-backend")]
            Some("s3FilePath") => {
                s3_file_path = parse_field_as_string(&mut field).await;
            }
            Some("autoExecute") => {
                auto_execute = parse_field_as_bool(&mut field).await.unwrap_or(true);
            }
            Some("trustDistance") => {
                trust_distance = parse_field_as_u32(&mut field).await.unwrap_or(0);
            }
            Some("pubKey") => {
                pub_key = parse_field_as_string(&mut field)
                    .await
                    .unwrap_or_else(|| "default".to_string());
            }
            _ => {}
        }
    }

    // Handle S3 file path if provided (alternative to file upload)
    #[cfg(feature = "aws-backend")]
    if let Some(s3_path) = s3_file_path {
        if file_path.is_some() {
            log_feature!(
                LogFeature::Ingestion,
                error,
                "Both file and s3FilePath provided - only one is allowed"
            );
            return Err(HttpResponse::BadRequest().json(json!({
                "success": false,
                "error": "Cannot provide both 'file' and 's3FilePath' - use one or the other"
            })));
        }

        log_feature!(
            LogFeature::Ingestion,
            info,
            "Processing S3 file path: {}",
            s3_path
        );

        let (path, filename) = handle_s3_file_path(&s3_path, upload_storage).await?;
        file_path = Some(path);
        original_filename = Some(filename);
        already_exists = false; // S3 files are not deduplicated (already in S3)
    }

    let file_path = match file_path {
        Some(path) => path,
        None => {
            log_feature!(LogFeature::Ingestion, error, "No file provided in upload");
            return Err(HttpResponse::BadRequest().json(json!({
                "success": false,
                "error": "No file provided"
            })));
        }
    };

    let original_filename = original_filename.unwrap_or_else(|| "unknown".to_string());

    Ok(UploadFormData {
        file_path,
        original_filename,
        auto_execute,
        trust_distance,
        pub_key,
        already_exists,
    })
}

/// Save uploaded file from multipart field with content-based hash
///
/// Returns `(file_path, unique_filename, already_exists)` where:
/// - `file_path` is a local path usable for downstream processing
/// - `unique_filename` has format: HASH_originalname (first 16 chars of SHA256)
/// - `already_exists` is true if this exact file was already uploaded
///
/// # Errors
/// Returns a ready-to-send `HttpResponse` if the multipart stream cannot be
/// read, or the file cannot be persisted to storage / the temp directory.
async fn save_uploaded_file(
    mut field: actix_multipart::Field,
    upload_storage: &UploadStorage,
) -> Result<(PathBuf, String, bool), HttpResponse> {
    use sha2::{Digest, Sha256};

    // The client-supplied filename is untrusted. Keep only the final path
    // component so names like "../../etc/passwd" cannot escape the upload
    // or temp directories when joined below.
    let raw_filename = field
        .content_disposition()
        .get_filename()
        .unwrap_or("uploaded_file")
        .to_string();
    let filename = std::path::Path::new(&raw_filename)
        .file_name()
        .and_then(|n| n.to_str())
        .unwrap_or("uploaded_file")
        .to_string();

    // Read file contents and compute hash simultaneously
    let mut hasher = Sha256::new();
    let mut file_data = Vec::new();

    while let Some(chunk) = field.next().await {
        let data = match chunk {
            Ok(data) => data,
            Err(e) => {
                log_feature!(
                    LogFeature::Ingestion,
                    error,
                    "Failed to read file chunk: {}",
                    e
                );
                return Err(HttpResponse::InternalServerError().json(json!({
                    "success": false,
                    "error": format!("Failed to read file: {}", e)
                })));
            }
        };

        hasher.update(&data);
        file_data.extend_from_slice(&data);
    }

    // Generate hash-based filename (use first 16 chars of hex for readability)
    let hash_result = hasher.finalize();
    let hash_hex = format!("{:x}", hash_result);
    let short_hash = &hash_hex[..16]; // First 16 characters provides plenty of uniqueness
    let unique_filename = format!("{}_{}", short_hash, filename);

    // Atomically save file only if it doesn't exist (prevents race condition)
    // Note: user_id is None here - HTTP endpoints don't have user context
    // For multi-tenant Lambda, user_id should be extracted from request/auth
    let (storage_path, already_exists) = match upload_storage
        .save_file_if_not_exists(&unique_filename, &file_data, None)
        .await
    {
        Ok((path, exists)) => (path, exists),
        Err(e) => {
            log_feature!(LogFeature::Ingestion, error, "Failed to save file: {}", e);
            return Err(HttpResponse::InternalServerError().json(json!({
                "success": false,
                "error": format!("Failed to save file: {}", e)
            })));
        }
    };

    // Handle duplicate detection
    if already_exists {
        // File already exists (duplicate upload detected atomically)
        let filepath = storage_path; // storage_path is already the permanent path

        log_feature!(
            LogFeature::Ingestion,
            info,
            "File already exists (duplicate upload): {} at {}",
            unique_filename,
            upload_storage.get_display_path(&unique_filename, None)
        );
        return Ok((filepath, unique_filename, true));
    }

    // File was newly created, determine processing path
    let filepath = match upload_storage {
        UploadStorage::Local { .. } => {
            // For local storage: file already saved, use that path for processing
            log_feature!(
                LogFeature::Ingestion,
                info,
                "File saved to local storage: {} at {}",
                unique_filename,
                upload_storage.get_display_path(&unique_filename, None)
            );
            storage_path
        }
        #[cfg(feature = "aws-backend")]
        UploadStorage::S3 { .. } => {
            // For S3 storage: file is in S3, but file_to_json needs local file
            // Write to /tmp for processing
            let temp_path = std::env::temp_dir().join(&unique_filename);
            if let Err(e) = fs::write(&temp_path, &file_data).await {
                log_feature!(
                    LogFeature::Ingestion,
                    error,
                    "Failed to write S3-uploaded file to /tmp: {}",
                    e
                );
                return Err(HttpResponse::InternalServerError().json(json!({
                    "success": false,
                    "error": format!("Failed to write file to temp directory: {}", e)
                })));
            }
            log_feature!(
                LogFeature::Ingestion,
                info,
                "File saved to S3: {} and copied to temp for processing",
                upload_storage.get_display_path(&unique_filename, None)
            );
            temp_path
        }
    };

    log_feature!(
        LogFeature::Ingestion,
        info,
        "File ready for processing (new upload): {}",
        unique_filename
    );

    Ok((filepath, unique_filename, false))
}

/// Parse multipart field as boolean
async fn parse_field_as_bool(field: &mut actix_multipart::Field) -> Option<bool> {
    let mut bytes = Vec::new();
    while let Some(chunk) = field.next().await {
        if let Ok(data) = chunk {
            bytes.extend_from_slice(&data);
        }
    }
    String::from_utf8(bytes).ok()?.parse().ok()
}

/// Parse multipart field as u32
async fn parse_field_as_u32(field: &mut actix_multipart::Field) -> Option<u32> {
    let mut bytes = Vec::new();
    while let Some(chunk) = field.next().await {
        if let Ok(data) = chunk {
            bytes.extend_from_slice(&data);
        }
    }
    String::from_utf8(bytes).ok()?.parse().ok()
}

/// Parse multipart field as string
async fn parse_field_as_string(field: &mut actix_multipart::Field) -> Option<String> {
    let mut bytes = Vec::new();
    while let Some(chunk) = field.next().await {
        if let Ok(data) = chunk {
            bytes.extend_from_slice(&data);
        }
    }
    String::from_utf8(bytes).ok()
}

/// Handle S3 file path input
///
/// Accepts a location of the form `s3://bucket/key` (the key may itself
/// contain slashes), downloads the object, and writes it to the system
/// temp directory so file_to_json can read it locally.
/// Returns (local_path, filename).
#[cfg(feature = "aws-backend")]
async fn handle_s3_file_path(
    s3_path: &str,
    upload_storage: &UploadStorage,
) -> Result<(PathBuf, String), HttpResponse> {
    // The scheme prefix is mandatory.
    let Some(remainder) = s3_path.strip_prefix("s3://") else {
        log_feature!(
            LogFeature::Ingestion,
            error,
            "Invalid S3 path format: {}",
            s3_path
        );
        return Err(HttpResponse::BadRequest().json(json!({
            "success": false,
            "error": format!("Invalid S3 path format. Expected 's3://bucket/key', got: {}", s3_path)
        })));
    };

    // Split bucket from key at the first '/'; a missing key is rejected.
    let Some((bucket, key)) = remainder.split_once('/') else {
        log_feature!(
            LogFeature::Ingestion,
            error,
            "Invalid S3 path structure: {}",
            s3_path
        );
        return Err(HttpResponse::BadRequest().json(json!({
            "success": false,
            "error": format!("Invalid S3 path. Expected 's3://bucket/key', got: {}", s3_path)
        })));
    };

    // Last component of the key becomes the local filename.
    let filename = key.rsplit('/').next().unwrap_or(key).to_string();

    log_feature!(
        LogFeature::Ingestion,
        info,
        "Downloading S3 file: bucket={}, key={}, filename={}",
        bucket,
        key,
        filename
    );

    // Fetch the object contents from S3.
    let file_data = upload_storage
        .download_from_s3_path(bucket, key)
        .await
        .map_err(|e| {
            log_feature!(
                LogFeature::Ingestion,
                error,
                "Failed to download S3 file: {}",
                e
            );
            HttpResponse::InternalServerError().json(json!({
                "success": false,
                "error": format!("Failed to download S3 file: {}", e)
            }))
        })?;

    // Save to /tmp for processing (file_to_json needs local file)
    let temp_path = std::env::temp_dir().join(&filename);
    fs::write(&temp_path, &file_data).await.map_err(|e| {
        log_feature!(
            LogFeature::Ingestion,
            error,
            "Failed to write S3 file to /tmp: {}",
            e
        );
        HttpResponse::InternalServerError().json(json!({
            "success": false,
            "error": format!("Failed to write file to temp directory: {}", e)
        }))
    })?;

    log_feature!(
        LogFeature::Ingestion,
        info,
        "S3 file downloaded to /tmp for processing: {:?}",
        temp_path
    );

    Ok((temp_path, filename))
}

#[cfg(test)]
mod tests {
    use sha2::{Digest, Sha256};

    /// Hex-encoded SHA-256 of `data`, mirroring the production hashing scheme.
    fn sha256_hex(data: &[u8]) -> String {
        let mut hasher = Sha256::new();
        hasher.update(data);
        format!("{:x}", hasher.finalize())
    }

    #[test]
    fn test_unique_filename_format() {
        // Verify the unique filename format matches HASH_originalname pattern
        let hash_hex = sha256_hex(b"test file content");
        let short_hash = &hash_hex[..16];

        let original = "tweets.js";
        let unique = format!("{}_{}", short_hash, original);

        // Verify format
        assert!(unique.contains('_'));
        assert!(unique.ends_with("tweets.js"));

        // Verify we can extract the original name if needed
        let parts: Vec<&str> = unique.splitn(2, '_').collect();
        assert_eq!(parts.len(), 2);
        assert_eq!(parts[0].len(), 16); // Short hash is 16 chars
        assert_eq!(parts[1], original);
    }

    #[test]
    fn test_hash_consistency() {
        // Same content should produce same hash
        let content = b"identical content";
        assert_eq!(sha256_hex(content), sha256_hex(content));
    }

    #[test]
    fn test_hash_uniqueness() {
        // Different content should produce different hashes
        assert_ne!(sha256_hex(b"content one"), sha256_hex(b"content two"));
    }
}