1use base64::{engine::general_purpose::STANDARD as BASE64, Engine};
8use chrono::Utc;
9use rusqlite::{params, Connection};
10use serde::{Deserialize, Serialize};
11use sha2::{Digest, Sha256};
12use std::path::PathBuf;
13
14#[cfg(any(feature = "multimodal", feature = "cloud"))]
16use reqwest;
17
18use crate::error::{EngramError, Result};
19
/// Where uploaded images live: always a local directory, optionally mirrored
/// to an S3-compatible bucket for cloud sync.
#[derive(Debug, Clone)]
pub struct ImageStorageConfig {
    /// Root directory for locally stored image files.
    pub local_dir: PathBuf,
    /// Bucket name; must be set for cloud media sync to run.
    pub s3_bucket: Option<String>,
    /// Custom S3-compatible endpoint (e.g. R2); AWS default when absent.
    pub s3_endpoint: Option<String>,
    /// Public domain used when building served URLs, instead of the raw
    /// bucket/endpoint URL.
    pub public_domain: Option<String>,
}
32
33impl Default for ImageStorageConfig {
34 fn default() -> Self {
35 let local_dir = dirs::data_local_dir()
36 .unwrap_or_else(|| PathBuf::from("."))
37 .join("engram")
38 .join("images");
39 Self {
40 local_dir,
41 s3_bucket: None,
42 s3_endpoint: None,
43 public_domain: None,
44 }
45 }
46}
47
/// Record describing one stored image file, as returned by upload operations.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UploadedImage {
    /// Storage key relative to the storage root (e.g. `images/1/...png`).
    pub key: String,
    /// Canonical URL of the stored file (e.g. `local://<key>`).
    pub url: String,
    /// Original filename, when the upload came from a file on disk.
    pub filename: Option<String>,
    /// MIME type recorded for the stored bytes.
    pub content_type: String,
    /// Size of the stored bytes, in bytes.
    pub size: usize,
    /// SHA-256 hex digest of the stored bytes.
    pub hash: String,
}
64
/// Reference to an image attached to a memory; persisted in the memory's
/// metadata under the `"images"` key.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImageRef {
    /// URL of the stored image (`local://...` or a cloud URL).
    pub url: String,
    /// Optional caption supplied by the caller.
    pub caption: Option<String>,
    /// Position of this image within the memory's image list.
    pub index: i32,
    /// MIME type of the image.
    pub content_type: String,
    /// Image size in bytes.
    pub size: usize,
}
79
/// Summary returned by `migrate_images`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MigrationResult {
    /// Total memories examined.
    pub memories_scanned: i64,
    /// Memories containing image entries or inline `data:image/` URIs.
    pub memories_with_images: i64,
    /// Images migrated (or, in a dry run, that would be migrated).
    pub images_migrated: i64,
    /// Images that failed to parse or upload.
    pub images_failed: i64,
    /// Human-readable descriptions of individual failures.
    pub errors: Vec<String>,
    /// True when nothing was written (dry-run mode).
    pub dry_run: bool,
}
90
/// Summary returned by `sync_to_cloud`.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct MediaSyncReport {
    /// Total media asset rows examined.
    pub assets_examined: i64,
    /// Assets whose `file_path` was already a cloud URL.
    pub assets_already_synced: i64,
    /// Assets uploaded (or, in a dry run, that would be uploaded).
    pub assets_uploaded: i64,
    /// Assets that could not be read or uploaded.
    pub assets_failed: i64,
    /// Human-readable descriptions of individual failures.
    pub errors: Vec<String>,
    /// True when nothing was uploaded or updated (dry-run mode).
    pub dry_run: bool,
}
107
108fn compute_hash(data: &[u8]) -> String {
110 let mut hasher = Sha256::new();
111 hasher.update(data);
112 format!("{:x}", hasher.finalize())
113}
114
/// Maps a MIME type to its preferred file extension; unknown types get "bin".
fn extension_from_content_type(content_type: &str) -> &str {
    // (MIME type, extension) pairs for the image formats we recognize.
    const KNOWN: [(&str, &str); 7] = [
        ("image/jpeg", "jpg"),
        ("image/png", "png"),
        ("image/gif", "gif"),
        ("image/webp", "webp"),
        ("image/svg+xml", "svg"),
        ("image/bmp", "bmp"),
        ("image/tiff", "tiff"),
    ];
    KNOWN
        .iter()
        .find(|(mime, _)| *mime == content_type)
        .map(|(_, ext)| *ext)
        .unwrap_or("bin")
}
128
/// Maps a file extension (case-insensitive) to a MIME type; unknown
/// extensions get "application/octet-stream".
fn content_type_from_extension(ext: &str) -> &str {
    // Groups of equivalent extensions and the MIME type they map to.
    const KNOWN: [(&[&str], &str); 7] = [
        (&["jpg", "jpeg"], "image/jpeg"),
        (&["png"], "image/png"),
        (&["gif"], "image/gif"),
        (&["webp"], "image/webp"),
        (&["svg"], "image/svg+xml"),
        (&["bmp"], "image/bmp"),
        (&["tiff", "tif"], "image/tiff"),
    ];
    let lowered = ext.to_lowercase();
    KNOWN
        .iter()
        .find(|(exts, _)| exts.contains(&lowered.as_str()))
        .map(|(_, mime)| *mime)
        .unwrap_or("application/octet-stream")
}
142
143pub fn parse_data_uri(data_uri: &str) -> Result<(Vec<u8>, String)> {
145 if !data_uri.starts_with("data:") {
147 return Err(EngramError::InvalidInput("Not a data URI".to_string()));
148 }
149
150 let rest = &data_uri[5..];
151 let (content_type, data) = if let Some(semicolon_pos) = rest.find(';') {
152 let ct = &rest[..semicolon_pos];
153 let after_semicolon = &rest[semicolon_pos + 1..];
154
155 if let Some(stripped) = after_semicolon.strip_prefix("base64,") {
156 (ct.to_string(), stripped)
157 } else {
158 return Err(EngramError::InvalidInput(
159 "Invalid data URI encoding".to_string(),
160 ));
161 }
162 } else {
163 return Err(EngramError::InvalidInput(
164 "Invalid data URI format".to_string(),
165 ));
166 };
167
168 let bytes = BASE64
169 .decode(data)
170 .map_err(|e| EngramError::InvalidInput(format!("Failed to decode base64: {}", e)))?;
171
172 Ok((bytes, content_type))
173}
174
/// Filesystem-backed image store; keys are paths relative to `base_dir`.
pub struct LocalImageStorage {
    // Root directory under which all image files are written.
    base_dir: PathBuf,
}
179
180impl LocalImageStorage {
181 pub fn new(base_dir: PathBuf) -> Result<Self> {
182 std::fs::create_dir_all(&base_dir)
183 .map_err(|e| EngramError::Storage(format!("Failed to create image dir: {}", e)))?;
184 Ok(Self { base_dir })
185 }
186
187 fn generate_key(
189 &self,
190 memory_id: i64,
191 image_index: i32,
192 hash: &str,
193 extension: &str,
194 ) -> String {
195 let timestamp = Utc::now().timestamp();
196 let short_hash = &hash[..8.min(hash.len())];
197 format!(
198 "images/{}/{}_{}_{}.{}",
199 memory_id, timestamp, image_index, short_hash, extension
200 )
201 }
202
203 pub fn upload_image(
205 &self,
206 image_data: &[u8],
207 content_type: &str,
208 memory_id: i64,
209 image_index: i32,
210 ) -> Result<UploadedImage> {
211 let hash = compute_hash(image_data);
212 let extension = extension_from_content_type(content_type);
213 let key = self.generate_key(memory_id, image_index, &hash, extension);
214
215 let full_path = self.base_dir.join(&key);
217 if let Some(parent) = full_path.parent() {
218 std::fs::create_dir_all(parent)
219 .map_err(|e| EngramError::Storage(format!("Failed to create dir: {}", e)))?;
220 }
221
222 std::fs::write(&full_path, image_data)
224 .map_err(|e| EngramError::Storage(format!("Failed to write image: {}", e)))?;
225
226 let url = format!("local://{}", key);
227
228 Ok(UploadedImage {
229 key,
230 url,
231 filename: None,
232 content_type: content_type.to_string(),
233 size: image_data.len(),
234 hash,
235 })
236 }
237
238 pub fn upload_from_file(
240 &self,
241 file_path: &str,
242 memory_id: i64,
243 image_index: i32,
244 ) -> Result<UploadedImage> {
245 let path = std::path::Path::new(file_path);
246
247 let image_data = std::fs::read(path)
249 .map_err(|e| EngramError::Storage(format!("Failed to read file: {}", e)))?;
250
251 let extension = path.extension().and_then(|e| e.to_str()).unwrap_or("bin");
253 let content_type = content_type_from_extension(extension);
254
255 let mut result = self.upload_image(&image_data, content_type, memory_id, image_index)?;
256 result.filename = path.file_name().and_then(|n| n.to_str()).map(String::from);
257
258 Ok(result)
259 }
260
261 pub fn get_path(&self, key: &str) -> PathBuf {
263 self.base_dir.join(key)
264 }
265
266 pub fn delete_image(&self, key: &str) -> Result<bool> {
268 let path = self.get_path(key);
269 if path.exists() {
270 std::fs::remove_file(&path)
271 .map_err(|e| EngramError::Storage(format!("Failed to delete image: {}", e)))?;
272 Ok(true)
273 } else {
274 Ok(false)
275 }
276 }
277
278 pub fn delete_memory_images(&self, memory_id: i64) -> Result<i64> {
280 let dir = self.base_dir.join("images").join(memory_id.to_string());
281 if !dir.exists() {
282 return Ok(0);
283 }
284
285 let mut count = 0;
286 for entry in std::fs::read_dir(&dir)
287 .map_err(|e| EngramError::Storage(format!("Failed to read dir: {}", e)))?
288 {
289 let entry =
290 entry.map_err(|e| EngramError::Storage(format!("Failed to read entry: {}", e)))?;
291 if entry.path().is_file() {
292 std::fs::remove_file(entry.path())
293 .map_err(|e| EngramError::Storage(format!("Failed to delete file: {}", e)))?;
294 count += 1;
295 }
296 }
297
298 let _ = std::fs::remove_dir(&dir);
300
301 Ok(count)
302 }
303}
304
305pub fn upload_image(
307 conn: &Connection,
308 storage: &LocalImageStorage,
309 memory_id: i64,
310 file_path: &str,
311 image_index: i32,
312 caption: Option<&str>,
313) -> Result<ImageRef> {
314 use crate::storage::queries::get_memory;
315
316 let memory = get_memory(conn, memory_id)?;
318
319 let uploaded = storage.upload_from_file(file_path, memory_id, image_index)?;
321
322 let image_ref = ImageRef {
324 url: uploaded.url.clone(),
325 caption: caption.map(String::from),
326 index: image_index,
327 content_type: uploaded.content_type,
328 size: uploaded.size,
329 };
330
331 let mut metadata = memory.metadata.clone();
333 let images: Vec<ImageRef> = metadata
334 .get("images")
335 .and_then(|v| serde_json::from_value(v.clone()).ok())
336 .unwrap_or_default();
337
338 let mut images: Vec<ImageRef> = images
339 .into_iter()
340 .filter(|i| i.index != image_index)
341 .collect();
342 images.push(image_ref.clone());
343 images.sort_by_key(|i| i.index);
344
345 metadata.insert("images".to_string(), serde_json::to_value(&images)?);
346 let metadata_json = serde_json::to_string(&metadata)?;
347
348 conn.execute(
349 "UPDATE memories SET metadata = ?, updated_at = ? WHERE id = ?",
350 params![metadata_json, Utc::now().to_rfc3339(), memory_id],
351 )?;
352
353 Ok(image_ref)
354}
355
/// Migrates inline `data:` image URIs found in memory metadata into files
/// managed by `storage`, rewriting each memory's `"images"` metadata list.
///
/// Per-item failures are recorded in `errors` rather than aborting the run.
/// With `dry_run` set, nothing is written to disk or the database; counters
/// report what would happen.
pub fn migrate_images(
    conn: &Connection,
    storage: &LocalImageStorage,
    dry_run: bool,
) -> Result<MigrationResult> {
    use crate::storage::queries::get_memory;

    let mut result = MigrationResult {
        memories_scanned: 0,
        memories_with_images: 0,
        images_migrated: 0,
        images_failed: 0,
        errors: Vec::new(),
        dry_run,
    };

    // Only live (non-superseded) memories are scanned. The metadata column is
    // selected but only the id is consumed here; full rows are re-fetched via
    // `get_memory` below.
    let mut stmt = conn.prepare("SELECT id, metadata FROM memories WHERE valid_to IS NULL")?;

    let memory_ids: Vec<i64> = stmt
        .query_map([], |row| row.get(0))?
        .filter_map(|r| r.ok())
        .collect();

    for memory_id in memory_ids {
        result.memories_scanned += 1;

        let memory = match get_memory(conn, memory_id) {
            Ok(m) => m,
            Err(e) => {
                result
                    .errors
                    .push(format!("Failed to get memory {}: {}", memory_id, e));
                continue;
            }
        };

        // Raw JSON entries are kept as Values so each can be preserved or
        // skipped individually even if it doesn't deserialize as ImageRef.
        let images: Vec<serde_json::Value> = memory
            .metadata
            .get("images")
            .and_then(|v| v.as_array())
            .cloned()
            .unwrap_or_default();

        // Data URIs embedded directly in the content are detected (so the
        // memory is counted below) but are not rewritten by this pass.
        let content_has_data_uri = memory.content.contains("data:image/");

        if images.is_empty() && !content_has_data_uri {
            continue;
        }

        result.memories_with_images += 1;

        let mut new_images: Vec<ImageRef> = Vec::new();
        let mut image_index = 0;

        for img in images {
            let url = img.get("url").and_then(|v| v.as_str()).unwrap_or("");

            // Entries that are not data URIs (already migrated, or external
            // URLs) pass through unchanged.
            if !url.starts_with("data:") {
                if let Ok(existing) = serde_json::from_value::<ImageRef>(img.clone()) {
                    new_images.push(existing);
                }
                continue;
            }

            match parse_data_uri(url) {
                Ok((data, content_type)) => {
                    if dry_run {
                        // Count as migratable but keep the original entry.
                        result.images_migrated += 1;
                        if let Ok(existing) = serde_json::from_value::<ImageRef>(img.clone()) {
                            new_images.push(existing);
                        }
                    } else {
                        match storage.upload_image(&data, &content_type, memory_id, image_index) {
                            Ok(uploaded) => {
                                let caption = img
                                    .get("caption")
                                    .and_then(|v| v.as_str())
                                    .map(String::from);
                                new_images.push(ImageRef {
                                    url: uploaded.url,
                                    caption,
                                    index: image_index,
                                    content_type: uploaded.content_type,
                                    size: uploaded.size,
                                });
                                result.images_migrated += 1;
                            }
                            Err(e) => {
                                // Upload failed: keep the original data-URI
                                // entry so no image reference is lost.
                                result.images_failed += 1;
                                result.errors.push(format!(
                                    "Failed to upload image {} for memory {}: {}",
                                    image_index, memory_id, e
                                ));
                                if let Ok(existing) =
                                    serde_json::from_value::<ImageRef>(img.clone())
                                {
                                    new_images.push(existing);
                                }
                            }
                        }
                    }
                }
                Err(e) => {
                    result.images_failed += 1;
                    result.errors.push(format!(
                        "Failed to parse data URI for memory {}: {}",
                        memory_id, e
                    ));
                    if let Ok(existing) = serde_json::from_value::<ImageRef>(img.clone()) {
                        new_images.push(existing);
                    }
                }
            }
            // NOTE(review): image_index only advances for data-URI entries
            // (pass-through entries `continue` above), so a newly migrated
            // image can be assigned an index already held by a pass-through
            // entry — confirm this collision is intended.
            image_index += 1;
        }

        // Persist the rewritten list; skipped entirely in dry-run mode or
        // when nothing survived to write back.
        if !dry_run && !new_images.is_empty() {
            let mut metadata = memory.metadata.clone();
            metadata.insert("images".to_string(), serde_json::to_value(&new_images)?);
            let metadata_json = serde_json::to_string(&metadata)?;

            if let Err(e) = conn.execute(
                "UPDATE memories SET metadata = ?, updated_at = ? WHERE id = ?",
                params![metadata_json, Utc::now().to_rfc3339(), memory_id],
            ) {
                result
                    .errors
                    .push(format!("Failed to update memory {}: {}", memory_id, e));
            }
        }
    }

    Ok(result)
}
501
502pub fn build_cloud_key(memory_id: i64, file_hash: &str, mime_type: &str) -> String {
508 let ext = extension_from_content_type(mime_type);
509 let short_hash = &file_hash[..file_hash.len().min(16)];
511 format!("media/{}/{}.{}", memory_id, short_hash, ext)
512}
513
/// Builds the public URL for a cloud object key.
///
/// Preference order: vanity `public_domain`, then explicit S3-compatible
/// `s3_endpoint` (path-style `<endpoint>/<bucket>/<key>`), then the default
/// AWS virtual-hosted URL.
pub fn build_cloud_url(
    s3_bucket: &str,
    s3_endpoint: Option<&str>,
    public_domain: Option<&str>,
    key: &str,
) -> String {
    match (public_domain, s3_endpoint) {
        (Some(domain), _) => format!("https://{}/{}", domain.trim_end_matches('/'), key),
        (None, Some(endpoint)) => {
            format!("{}/{}/{}", endpoint.trim_end_matches('/'), s3_bucket, key)
        }
        (None, None) => format!("https://{}.s3.amazonaws.com/{}", s3_bucket, key),
    }
}
534
/// True when `file_path` points at remote storage (http(s), s3, or r2).
pub fn is_cloud_url(file_path: &str) -> bool {
    ["https://", "http://", "s3://", "r2://"]
        .iter()
        .any(|scheme| file_path.starts_with(scheme))
}
542
/// Uploads every local media asset in `media_assets` to the configured S3
/// bucket and rewrites each asset's `file_path` to its public cloud URL.
///
/// Requires `config.s3_bucket`. Per-asset read/upload failures are recorded
/// in the returned `MediaSyncReport` rather than aborting; with `dry_run`
/// set, candidates are only counted — nothing is read, uploaded, or updated.
#[cfg(feature = "cloud")]
pub fn sync_to_cloud(
    conn: &Connection,
    config: &ImageStorageConfig,
    dry_run: bool,
) -> crate::error::Result<MediaSyncReport> {
    let bucket = match &config.s3_bucket {
        Some(b) => b.clone(),
        None => {
            return Err(crate::error::EngramError::Config(
                "s3_bucket must be configured for cloud media sync".to_string(),
            ));
        }
    };

    let mut report = MediaSyncReport {
        dry_run,
        ..Default::default()
    };

    let mut stmt = conn.prepare(
        "SELECT id, memory_id, file_hash, file_path, mime_type FROM media_assets",
    )?;

    // Minimal projection of a media_assets row; NULL path/mime are handled
    // per-asset below.
    struct AssetRow {
        id: i64,
        memory_id: i64,
        file_hash: String,
        file_path: Option<String>,
        mime_type: Option<String>,
    }

    let assets: Vec<AssetRow> = stmt
        .query_map([], |row| {
            Ok(AssetRow {
                id: row.get(0)?,
                memory_id: row.get(1)?,
                file_hash: row.get(2)?,
                file_path: row.get(3)?,
                mime_type: row.get(4)?,
            })
        })?
        .filter_map(|r| r.ok())
        .collect();

    report.assets_examined = assets.len() as i64;

    for asset in assets {
        let file_path = match &asset.file_path {
            Some(p) => p.clone(),
            None => {
                report.assets_failed += 1;
                report
                    .errors
                    .push(format!("Asset id={} has no file_path", asset.id));
                continue;
            }
        };

        // Already pointing at a cloud URL — nothing to do.
        if is_cloud_url(&file_path) {
            report.assets_already_synced += 1;
            continue;
        }

        let mime_type = asset.mime_type.as_deref().unwrap_or("application/octet-stream");
        let cloud_key = build_cloud_key(asset.memory_id, &asset.file_hash, mime_type);
        let cloud_url = build_cloud_url(
            &bucket,
            config.s3_endpoint.as_deref(),
            config.public_domain.as_deref(),
            &cloud_key,
        );

        if dry_run {
            // Counted as "would upload"; the local file is not even read.
            report.assets_uploaded += 1;
            continue;
        }

        // Stored paths may carry a "local://" prefix; strip it for fs access.
        let local_path = file_path
            .strip_prefix("local://")
            .unwrap_or(&file_path);

        let file_data = match std::fs::read(local_path) {
            Ok(d) => d,
            Err(e) => {
                report.assets_failed += 1;
                report.errors.push(format!(
                    "Failed to read '{}' for asset id={}: {}",
                    local_path, asset.id, e
                ));
                continue;
            }
        };

        match upload_bytes_to_s3_blocking(&file_data, &bucket, &cloud_key, mime_type, config) {
            Ok(()) => {
                // NOTE(review): a database failure here aborts the whole sync
                // via `?`, unlike upload failures which are merely recorded —
                // confirm that asymmetry is intended.
                conn.execute(
                    "UPDATE media_assets SET file_path = ? WHERE id = ?",
                    rusqlite::params![cloud_url, asset.id],
                )?;
                report.assets_uploaded += 1;
            }
            Err(e) => {
                report.assets_failed += 1;
                report.errors.push(format!(
                    "Failed to upload asset id={}: {}",
                    asset.id, e
                ));
            }
        }
    }

    Ok(report)
}
678
/// Loads the raw bytes behind a stored media URL.
///
/// Supported schemes:
/// - `local://<path>` — read directly from disk.
/// - `http://` / `https://` — downloaded via reqwest when the `cloud` or
///   `multimodal` feature is enabled; otherwise a Config error is returned.
///
/// Any other scheme yields an `InvalidInput` error. HTTP downloads create a
/// fresh tokio runtime per call, so this is a blocking, relatively
/// heavyweight operation.
pub fn download_from_cloud(file_url: &str) -> crate::error::Result<Vec<u8>> {
    if file_url.starts_with("local://") {
        let path = file_url.strip_prefix("local://").unwrap_or(file_url);
        return std::fs::read(path).map_err(|e| {
            crate::error::EngramError::Storage(format!(
                "Failed to read local media file '{}': {}",
                path, e
            ))
        });
    }

    if file_url.starts_with("https://") || file_url.starts_with("http://") {
        #[cfg(any(feature = "cloud", feature = "multimodal"))]
        {
            let url = file_url.to_string();
            // One-shot runtime to drive the async HTTP client from sync code.
            let rt = tokio::runtime::Runtime::new().map_err(|e| {
                crate::error::EngramError::Storage(format!(
                    "Failed to create async runtime for download: {}",
                    e
                ))
            })?;
            return rt.block_on(async {
                let client = reqwest::Client::new();
                let response = client.get(&url).send().await.map_err(|e| {
                    crate::error::EngramError::Storage(format!(
                        "Failed to download '{}': {}",
                        url, e
                    ))
                })?;
                // Non-2xx responses are surfaced as storage errors.
                if !response.status().is_success() {
                    return Err(crate::error::EngramError::Storage(format!(
                        "HTTP {} downloading '{}'",
                        response.status(),
                        url
                    )));
                }
                response.bytes().await.map(|b| b.to_vec()).map_err(|e| {
                    crate::error::EngramError::Storage(format!(
                        "Failed to read response body from '{}': {}",
                        url, e
                    ))
                })
            });
        }
        #[cfg(not(any(feature = "cloud", feature = "multimodal")))]
        {
            return Err(crate::error::EngramError::Config(
                "Downloading from cloud URLs requires the 'cloud' or 'multimodal' feature".to_string(),
            ));
        }
    }

    Err(crate::error::EngramError::InvalidInput(format!(
        "Unsupported media URL scheme: '{}'",
        file_url
    )))
}
740
/// Blocking S3 upload of `data` to `bucket`/`key` with the given content type.
///
/// Credentials and region are resolved from the ambient AWS environment via
/// `aws_config::load_defaults`; a fresh tokio runtime is created per call to
/// drive the async SDK.
///
/// NOTE(review): `_config.s3_endpoint` is not applied to the client here, so
/// custom S3-compatible endpoints (e.g. R2) may not be honored on upload even
/// though `build_cloud_url` uses them — confirm intended.
#[cfg(feature = "cloud")]
fn upload_bytes_to_s3_blocking(
    data: &[u8],
    bucket: &str,
    key: &str,
    content_type: &str,
    _config: &ImageStorageConfig,
) -> crate::error::Result<()> {
    let rt = tokio::runtime::Runtime::new().map_err(|e| {
        crate::error::EngramError::Storage(format!(
            "Failed to create async runtime for S3 upload: {}",
            e
        ))
    })?;

    rt.block_on(async {
        let sdk_config = aws_config::load_defaults(aws_config::BehaviorVersion::latest()).await;
        let client = aws_sdk_s3::Client::new(&sdk_config);

        let body = aws_sdk_s3::primitives::ByteStream::from(data.to_vec());
        client
            .put_object()
            .bucket(bucket)
            .key(key)
            .content_type(content_type)
            .body(body)
            .send()
            .await
            .map_err(|e| {
                crate::error::EngramError::Storage(format!(
                    "S3 PutObject failed for key '{}': {}",
                    key, e
                ))
            })?;

        Ok(())
    })
}
781
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    #[test]
    fn test_parse_data_uri() {
        // A 1x1 PNG encoded as a base64 data URI.
        let data_uri = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==";
        let (bytes, content_type) = parse_data_uri(data_uri).unwrap();
        assert_eq!(content_type, "image/png");
        assert!(!bytes.is_empty());
    }

    #[test]
    fn test_local_storage() {
        let dir = tempdir().unwrap();
        let storage = LocalImageStorage::new(dir.path().to_path_buf()).unwrap();

        // PNG magic bytes are enough: storage never decodes image contents.
        let png_data = [0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A];

        let result = storage.upload_image(&png_data, "image/png", 1, 0).unwrap();
        assert!(result.url.starts_with("local://"));
        assert_eq!(result.content_type, "image/png");
        assert_eq!(result.size, png_data.len());

        let path = storage.get_path(&result.key);
        assert!(path.exists());

        assert!(storage.delete_image(&result.key).unwrap());
        assert!(!path.exists());
    }

    #[test]
    fn test_content_type_detection() {
        // Extension lookup is case-insensitive.
        assert_eq!(content_type_from_extension("jpg"), "image/jpeg");
        assert_eq!(content_type_from_extension("PNG"), "image/png");
        assert_eq!(content_type_from_extension("webp"), "image/webp");
    }

    #[test]
    fn test_build_cloud_key_image() {
        let key = build_cloud_key(42, "abcdef1234567890", "image/png");
        assert_eq!(key, "media/42/abcdef1234567890.png");
    }

    #[test]
    fn test_build_cloud_key_audio() {
        // Non-image MIME types fall back to the "bin" extension.
        let key = build_cloud_key(7, "feedbeef12345678", "audio/mpeg");
        assert_eq!(key, "media/7/feedbeef12345678.bin");
    }

    #[test]
    fn test_build_cloud_url_with_public_domain() {
        let url = build_cloud_url(
            "my-bucket",
            None,
            Some("media.example.com"),
            "media/42/abc.png",
        );
        assert_eq!(url, "https://media.example.com/media/42/abc.png");
    }

    #[test]
    fn test_build_cloud_url_with_s3_endpoint() {
        let url = build_cloud_url(
            "my-bucket",
            Some("https://r2.example.com"),
            None,
            "media/42/abc.png",
        );
        assert_eq!(url, "https://r2.example.com/my-bucket/media/42/abc.png");
    }

    #[test]
    fn test_build_cloud_url_default_s3() {
        let url = build_cloud_url("my-bucket", None, None, "media/42/abc.png");
        assert_eq!(
            url,
            "https://my-bucket.s3.amazonaws.com/media/42/abc.png"
        );
    }

    #[test]
    fn test_is_cloud_url() {
        assert!(is_cloud_url("https://cdn.example.com/file.png"));
        assert!(is_cloud_url("http://cdn.example.com/file.png"));
        assert!(is_cloud_url("s3://my-bucket/file.png"));
        assert!(is_cloud_url("r2://my-bucket/file.png"));
        assert!(!is_cloud_url("local:///tmp/file.png"));
        assert!(!is_cloud_url("/tmp/file.png"));
    }

    #[cfg(feature = "cloud")]
    #[test]
    fn test_sync_to_cloud_no_bucket_returns_error() {
        use crate::storage::migrations::run_migrations;

        let conn = rusqlite::Connection::open_in_memory().expect("in-memory db");
        run_migrations(&conn).expect("migrations");

        let config = ImageStorageConfig {
            local_dir: std::path::PathBuf::from("/tmp"),
            s3_bucket: None, s3_endpoint: None,
            public_domain: None,
        };

        let result = sync_to_cloud(&conn, &config, true);
        assert!(result.is_err(), "should fail without bucket configured");
    }

    #[cfg(feature = "cloud")]
    #[test]
    fn test_sync_to_cloud_empty_table_dry_run() {
        use crate::storage::migrations::run_migrations;

        let conn = rusqlite::Connection::open_in_memory().expect("in-memory db");
        run_migrations(&conn).expect("migrations");

        let config = ImageStorageConfig {
            local_dir: std::path::PathBuf::from("/tmp"),
            s3_bucket: Some("test-bucket".to_string()),
            s3_endpoint: None,
            public_domain: None,
        };

        let report = sync_to_cloud(&conn, &config, true).expect("sync report");
        assert_eq!(report.assets_examined, 0);
        assert_eq!(report.assets_uploaded, 0);
        assert_eq!(report.assets_failed, 0);
        assert!(report.dry_run);
    }
}