use anyhow::{Context, Result};
use async_trait::async_trait;
use heed::{Database, EnvOpenOptions};
use heed::types::*;
use hashtree_fs::FsBlobStore;
#[cfg(feature = "lmdb")]
use hashtree_lmdb::LmdbBlobStore;
use hashtree_core::{
    HashTree, HashTreeConfig, Cid,
    sha256, to_hex, from_hex, TreeNode, DirEntry as HashTreeDirEntry,
    types::Hash,
};
use hashtree_core::store::{Store, StoreError};
use hashtree_config::StorageBackend;
use serde::{Deserialize, Serialize};
use std::path::Path;
use std::collections::HashSet;
use std::io::Read;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use futures::executor::block_on as sync_block_on;

/// Eviction priority for trees synced from unrelated users.
pub const PRIORITY_OTHER: u8 = 64;
/// Eviction priority for trees owned by followed users.
pub const PRIORITY_FOLLOWED: u8 = 128;
/// Eviction priority for the local user's own trees.
pub const PRIORITY_OWN: u8 = 255;

/// Metadata recorded for every indexed tree, used for accounting and eviction.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TreeMeta {
    /// Identifier of the tree owner (typically a hex-encoded pubkey).
    pub owner: String,
    /// Optional human-readable name for the tree.
    pub name: Option<String>,
    /// Unix timestamp (seconds) of the last sync.
    pub synced_at: u64,
    /// Total size in bytes of all blobs belonging to the tree.
    pub total_size: u64,
    /// Eviction priority; see the PRIORITY_* constants.
    pub priority: u8,
}

/// A cached tree root for a given pubkey and tree name.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CachedRoot {
    /// Hex-encoded root hash.
    pub hash: String,
    /// Optional key material (present for encrypted trees).
    pub key: Option<String>,
    /// Unix timestamp (seconds) of the last update.
    pub updated_at: u64,
    /// Visibility string, e.g. "public".
    pub visibility: String,
}

/// Aggregate statistics for a local blob store.
#[derive(Debug, Clone)]
pub struct LocalStoreStats {
    pub count: usize,
    pub total_bytes: u64,
}

/// Local blob storage backend: plain filesystem, or LMDB when the feature is enabled.
pub enum LocalStore {
    Fs(FsBlobStore),
    #[cfg(feature = "lmdb")]
    Lmdb(LmdbBlobStore),
}

impl LocalStore {
    pub fn new<P: AsRef<Path>>(path: P, backend: &StorageBackend) -> Result<Self, StoreError> {
        match backend {
            StorageBackend::Fs => {
                Ok(LocalStore::Fs(FsBlobStore::new(path)?))
            }
            #[cfg(feature = "lmdb")]
            StorageBackend::Lmdb => {
                Ok(LocalStore::Lmdb(LmdbBlobStore::new(path)?))
            }
            #[cfg(not(feature = "lmdb"))]
            StorageBackend::Lmdb => {
                tracing::warn!("LMDB backend requested but lmdb feature not enabled, using filesystem storage");
                Ok(LocalStore::Fs(FsBlobStore::new(path)?))
            }
        }
    }

    pub fn put_sync(&self, hash: Hash, data: &[u8]) -> Result<bool, StoreError> {
        match self {
            LocalStore::Fs(store) => store.put_sync(hash, data),
            #[cfg(feature = "lmdb")]
            LocalStore::Lmdb(store) => store.put_sync(hash, data),
        }
    }

    pub fn get_sync(&self, hash: &Hash) -> Result<Option<Vec<u8>>, StoreError> {
        match self {
            LocalStore::Fs(store) => store.get_sync(hash),
            #[cfg(feature = "lmdb")]
            LocalStore::Lmdb(store) => store.get_sync(hash),
        }
    }

    pub fn exists(&self, hash: &Hash) -> Result<bool, StoreError> {
        match self {
            LocalStore::Fs(store) => Ok(store.exists(hash)),
            #[cfg(feature = "lmdb")]
            LocalStore::Lmdb(store) => store.exists(hash),
        }
    }

    pub fn delete_sync(&self, hash: &Hash) -> Result<bool, StoreError> {
        match self {
            LocalStore::Fs(store) => store.delete_sync(hash),
            #[cfg(feature = "lmdb")]
            LocalStore::Lmdb(store) => store.delete_sync(hash),
        }
    }

    pub fn stats(&self) -> Result<LocalStoreStats, StoreError> {
        match self {
            LocalStore::Fs(store) => {
                let stats = store.stats()?;
                Ok(LocalStoreStats {
                    count: stats.count,
                    total_bytes: stats.total_bytes,
                })
            }
            #[cfg(feature = "lmdb")]
            LocalStore::Lmdb(store) => {
                let stats = store.stats()?;
                Ok(LocalStoreStats {
                    count: stats.count,
                    total_bytes: stats.total_bytes,
                })
            }
        }
    }

    pub fn list(&self) -> Result<Vec<Hash>, StoreError> {
        match self {
            LocalStore::Fs(store) => store.list(),
            #[cfg(feature = "lmdb")]
            LocalStore::Lmdb(store) => store.list(),
        }
    }
}

#[async_trait]
impl Store for LocalStore {
    async fn put(&self, hash: Hash, data: Vec<u8>) -> Result<bool, StoreError> {
        self.put_sync(hash, &data)
    }

    async fn get(&self, hash: &Hash) -> Result<Option<Vec<u8>>, StoreError> {
        self.get_sync(hash)
    }

    async fn has(&self, hash: &Hash) -> Result<bool, StoreError> {
        self.exists(hash)
    }

    async fn delete(&self, hash: &Hash) -> Result<bool, StoreError> {
        self.delete_sync(hash)
    }
}
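
// A minimal usage sketch for `LocalStore` (a sketch only: it assumes a
// writable `/tmp/blobs` directory, and error handling is elided):
//
// ```ignore
// let store = LocalStore::new("/tmp/blobs", &StorageBackend::Fs)?;
// let data = b"hello";
// let hash = sha256(data);
// store.put_sync(hash, data)?;            // returns true if the blob was new
// assert_eq!(store.get_sync(&hash)?, Some(data.to_vec()));
// ```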

#[cfg(feature = "s3")]
use tokio::sync::mpsc;

use crate::config::S3Config;

/// Work items handed to the background S3 replication task.
#[cfg(feature = "s3")]
enum S3SyncMessage {
    Upload { hash: Hash, data: Vec<u8> },
    Delete { hash: Hash },
}

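/// Routes blob operations to the local store and, when the `s3` feature is
/// enabled and configured, replicates writes and deletes to S3 in the
/// background. Reads fall back to S3 on a local miss and re-populate the
/// local store.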
pub struct StorageRouter {
    /// Always-present local blob store.
    local: Arc<LocalStore>,
    #[cfg(feature = "s3")]
    s3_client: Option<aws_sdk_s3::Client>,
    #[cfg(feature = "s3")]
    s3_bucket: Option<String>,
    #[cfg(feature = "s3")]
    s3_prefix: String,
    /// Sender for the background S3 replication task.
    #[cfg(feature = "s3")]
    sync_tx: Option<mpsc::UnboundedSender<S3SyncMessage>>,
}

impl StorageRouter {
    /// Local-only router with no S3 replication.
    pub fn new(local: Arc<LocalStore>) -> Self {
        Self {
            local,
            #[cfg(feature = "s3")]
            s3_client: None,
            #[cfg(feature = "s3")]
            s3_bucket: None,
            #[cfg(feature = "s3")]
            s3_prefix: String::new(),
            #[cfg(feature = "s3")]
            sync_tx: None,
        }
    }

    /// Router backed by the local store plus an S3 bucket. Spawns a background
    /// task that drains the replication queue with bounded concurrency.
    #[cfg(feature = "s3")]
    pub async fn with_s3(local: Arc<LocalStore>, config: &S3Config) -> Result<Self, anyhow::Error> {
        use aws_sdk_s3::Client as S3Client;

        let mut aws_config_loader = aws_config::from_env();
        aws_config_loader = aws_config_loader.region(aws_sdk_s3::config::Region::new(config.region.clone()));
        let aws_config = aws_config_loader.load().await;

        let mut s3_config_builder = aws_sdk_s3::config::Builder::from(&aws_config);
        s3_config_builder = s3_config_builder
            .endpoint_url(&config.endpoint)
            .force_path_style(true);

        let s3_client = S3Client::from_conf(s3_config_builder.build());
        let bucket = config.bucket.clone();
        let prefix = config.prefix.clone().unwrap_or_default();

        let (sync_tx, mut sync_rx) = mpsc::unbounded_channel::<S3SyncMessage>();

        let sync_client = s3_client.clone();
        let sync_bucket = bucket.clone();
        let sync_prefix = prefix.clone();

        tokio::spawn(async move {
            use aws_sdk_s3::primitives::ByteStream;

            tracing::info!("S3 background sync task started");

            // Cap the number of concurrent in-flight S3 requests.
            let semaphore = std::sync::Arc::new(tokio::sync::Semaphore::new(32));
            let client = std::sync::Arc::new(sync_client);
            let bucket = std::sync::Arc::new(sync_bucket);
            let prefix = std::sync::Arc::new(sync_prefix);

            while let Some(msg) = sync_rx.recv().await {
                let client = client.clone();
                let bucket = bucket.clone();
                let prefix = prefix.clone();
                let semaphore = semaphore.clone();

                tokio::spawn(async move {
                    let _permit = semaphore.acquire().await;

                    match msg {
                        S3SyncMessage::Upload { hash, data } => {
                            let key = format!("{}{}.bin", prefix, to_hex(&hash));
                            tracing::debug!("S3 uploading {} ({} bytes)", &key, data.len());

                            match client
                                .put_object()
                                .bucket(bucket.as_str())
                                .key(&key)
                                .body(ByteStream::from(data))
                                .send()
                                .await
                            {
                                Ok(_) => tracing::debug!("S3 upload succeeded: {}", &key),
                                Err(e) => tracing::error!("S3 upload failed {}: {}", &key, e),
                            }
                        }
                        S3SyncMessage::Delete { hash } => {
                            let key = format!("{}{}.bin", prefix, to_hex(&hash));
                            tracing::debug!("S3 deleting {}", &key);

                            if let Err(e) = client
                                .delete_object()
                                .bucket(bucket.as_str())
                                .key(&key)
                                .send()
                                .await
                            {
                                tracing::error!("S3 delete failed {}: {}", &key, e);
                            }
                        }
                    }
                });
            }
        });

        tracing::info!("S3 storage initialized: bucket={}, prefix={}", bucket, prefix);

        Ok(Self {
            local,
            s3_client: Some(s3_client),
            s3_bucket: Some(bucket),
            s3_prefix: prefix,
            sync_tx: Some(sync_tx),
        })
    }

    /// Write a blob locally and queue S3 replication if configured.
    pub fn put_sync(&self, hash: Hash, data: &[u8]) -> Result<bool, StoreError> {
        let is_new = self.local.put_sync(hash, data)?;

        #[cfg(feature = "s3")]
        if let Some(ref tx) = self.sync_tx {
            tracing::info!("Queueing S3 upload for {} ({} bytes, is_new={})",
                &to_hex(&hash)[..16], data.len(), is_new);
            if let Err(e) = tx.send(S3SyncMessage::Upload { hash, data: data.to_vec() }) {
                tracing::error!("Failed to queue S3 upload: {}", e);
            }
        }

        Ok(is_new)
    }

    /// Read a blob, falling back to S3 on a local miss.
    pub fn get_sync(&self, hash: &Hash) -> Result<Option<Vec<u8>>, StoreError> {
        if let Some(data) = self.local.get_sync(hash)? {
            return Ok(Some(data));
        }

        #[cfg(feature = "s3")]
        if let (Some(client), Some(bucket)) = (&self.s3_client, &self.s3_bucket) {
            let key = format!("{}{}.bin", self.s3_prefix, to_hex(hash));

            match sync_block_on(async {
                client.get_object()
                    .bucket(bucket)
                    .key(&key)
                    .send()
                    .await
            }) {
                Ok(output) => {
                    if let Ok(body) = sync_block_on(output.body.collect()) {
                        let data = body.into_bytes().to_vec();
                        // Re-populate the local store so the next read is local.
                        let _ = self.local.put_sync(*hash, &data);
                        return Ok(Some(data));
                    }
                }
                Err(e) => {
                    let service_err = e.into_service_error();
                    if !service_err.is_no_such_key() {
                        tracing::warn!("S3 get failed: {}", service_err);
                    }
                }
            }
        }

        Ok(None)
    }

    pub fn exists(&self, hash: &Hash) -> Result<bool, StoreError> {
        if self.local.exists(hash)? {
            return Ok(true);
        }

        #[cfg(feature = "s3")]
        if let (Some(client), Some(bucket)) = (&self.s3_client, &self.s3_bucket) {
            let key = format!("{}{}.bin", self.s3_prefix, to_hex(hash));

            match sync_block_on(async {
                client.head_object()
                    .bucket(bucket)
                    .key(&key)
                    .send()
                    .await
            }) {
                Ok(_) => return Ok(true),
                Err(e) => {
                    let service_err = e.into_service_error();
                    if !service_err.is_not_found() {
                        tracing::warn!("S3 head failed: {}", service_err);
                    }
                }
            }
        }

        Ok(false)
    }

    /// Delete locally and queue the S3 delete if configured.
    pub fn delete_sync(&self, hash: &Hash) -> Result<bool, StoreError> {
        let deleted = self.local.delete_sync(hash)?;

        #[cfg(feature = "s3")]
        if let Some(ref tx) = self.sync_tx {
            let _ = tx.send(S3SyncMessage::Delete { hash: *hash });
        }

        Ok(deleted)
    }

    /// Delete only the local copy, leaving any S3 replica intact.
    pub fn delete_local_only(&self, hash: &Hash) -> Result<bool, StoreError> {
        self.local.delete_sync(hash)
    }

    pub fn stats(&self) -> Result<LocalStoreStats, StoreError> {
        self.local.stats()
    }

    pub fn list(&self) -> Result<Vec<Hash>, StoreError> {
        self.local.list()
    }

    pub fn local_store(&self) -> Arc<LocalStore> {
        Arc::clone(&self.local)
    }
}

#[async_trait]
impl Store for StorageRouter {
    async fn put(&self, hash: Hash, data: Vec<u8>) -> Result<bool, StoreError> {
        self.put_sync(hash, &data)
    }

    async fn get(&self, hash: &Hash) -> Result<Option<Vec<u8>>, StoreError> {
        self.get_sync(hash)
    }

    async fn has(&self, hash: &Hash) -> Result<bool, StoreError> {
        self.exists(hash)
    }

    async fn delete(&self, hash: &Hash) -> Result<bool, StoreError> {
        self.delete_sync(hash)
    }
}

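/// Top-level content store: a heed/LMDB environment for metadata (pins,
/// ownership, tree indexes, cached roots) in front of a [`StorageRouter`]
/// for blob bytes.
///
/// A minimal usage sketch (paths and names are illustrative only, and error
/// handling is elided):
///
/// ```ignore
/// let store = HashtreeStore::new("./data")?;
/// let root_hex = store.upload_file("./photo.jpg")?;   // stores, chunks, and pins
/// let hash: Hash = from_hex(&root_hex)?;
/// assert!(store.blob_exists(&hash)?);
/// ```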
pub struct HashtreeStore {
    env: heed::Env,
    /// Pinned root hashes (key: 32-byte hash, no value).
    pins: Database<Bytes, Unit>,
    /// Blossom ownership index (key: sha256 || pubkey, 64 bytes).
    blob_owners: Database<Bytes, Unit>,
    /// Per-pubkey blob listings (key: pubkey, value: JSON `Vec<BlobMetadata>`).
    pubkey_blobs: Database<Bytes, Bytes>,
    /// Tree metadata (key: root hash, value: msgpack `TreeMeta`).
    tree_meta: Database<Bytes, Bytes>,
    /// Reverse index from blob to tree (key: blob hash || root hash).
    blob_trees: Database<Bytes, Unit>,
    /// Named refs to tree roots (key: ref string, value: root hash).
    tree_refs: Database<Str, Bytes>,
    /// Cached roots (key: "pubkey/tree_name", value: msgpack `CachedRoot`).
    cached_roots: Database<Str, Bytes>,
    router: Arc<StorageRouter>,
    /// Soft cap on local storage; eviction targets 90% of this.
    max_size_bytes: u64,
}

impl HashtreeStore {
    /// Open a store with default options (local-only, 10 GiB cap).
    pub fn new<P: AsRef<Path>>(path: P) -> Result<Self> {
        Self::with_options(path, None, 10 * 1024 * 1024 * 1024)
    }

    /// Open a store with optional S3 replication and the default 10 GiB cap.
    pub fn with_s3<P: AsRef<Path>>(path: P, s3_config: Option<&S3Config>) -> Result<Self> {
        Self::with_options(path, s3_config, 10 * 1024 * 1024 * 1024)
    }

    pub fn with_options<P: AsRef<Path>>(path: P, s3_config: Option<&S3Config>, max_size_bytes: u64) -> Result<Self> {
        let path = path.as_ref();
        std::fs::create_dir_all(path)?;

        let env = unsafe {
            EnvOpenOptions::new()
                .map_size(10 * 1024 * 1024 * 1024) // 10 GiB LMDB map
                .max_dbs(8)
                .open(path)?
        };

        let mut wtxn = env.write_txn()?;
        let pins = env.create_database(&mut wtxn, Some("pins"))?;
        let blob_owners = env.create_database(&mut wtxn, Some("blob_owners"))?;
        let pubkey_blobs = env.create_database(&mut wtxn, Some("pubkey_blobs"))?;
        let tree_meta = env.create_database(&mut wtxn, Some("tree_meta"))?;
        let blob_trees = env.create_database(&mut wtxn, Some("blob_trees"))?;
        let tree_refs = env.create_database(&mut wtxn, Some("tree_refs"))?;
        let cached_roots = env.create_database(&mut wtxn, Some("cached_roots"))?;
        wtxn.commit()?;

        let config = hashtree_config::Config::load_or_default();
        let backend = &config.storage.backend;

        let local_store = Arc::new(LocalStore::new(path.join("blobs"), backend)
            .map_err(|e| anyhow::anyhow!("Failed to create blob store: {}", e))?);

        #[cfg(feature = "s3")]
        let router = Arc::new(if let Some(s3_cfg) = s3_config {
            tracing::info!("Initializing S3 storage backend: bucket={}, endpoint={}",
                s3_cfg.bucket, s3_cfg.endpoint);

            sync_block_on(async {
                StorageRouter::with_s3(local_store, s3_cfg).await
            })?
        } else {
            StorageRouter::new(local_store)
        });

        #[cfg(not(feature = "s3"))]
        let router = Arc::new({
            if s3_config.is_some() {
                tracing::warn!("S3 config provided but S3 feature not enabled. Using local storage only.");
            }
            StorageRouter::new(local_store)
        });

        Ok(Self {
            env,
            pins,
            blob_owners,
            pubkey_blobs,
            tree_meta,
            blob_trees,
            tree_refs,
            cached_roots,
            router,
            max_size_bytes,
        })
    }

    pub fn router(&self) -> &StorageRouter {
        &self.router
    }

    pub fn store_arc(&self) -> Arc<StorageRouter> {
        Arc::clone(&self.router)
    }

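    /// Chunk and store a file from disk, pin its root, and return the root
    /// hash as hex. `upload_file_no_pin` is identical but skips the pin.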
    pub fn upload_file<P: AsRef<Path>>(&self, file_path: P) -> Result<String> {
        self.upload_file_internal(file_path, true)
    }

    pub fn upload_file_no_pin<P: AsRef<Path>>(&self, file_path: P) -> Result<String> {
        self.upload_file_internal(file_path, false)
    }

    fn upload_file_internal<P: AsRef<Path>>(&self, file_path: P, pin: bool) -> Result<String> {
        let file_path = file_path.as_ref();
        let file_content = std::fs::read(file_path)?;

        let store = self.store_arc();
        let tree = HashTree::new(HashTreeConfig::new(store).public());

        let (cid, _size) = sync_block_on(async {
            tree.put(&file_content).await
        }).context("Failed to store file")?;

        if pin {
            let mut wtxn = self.env.write_txn()?;
            self.pins.put(&mut wtxn, cid.hash.as_slice(), &())?;
            wtxn.commit()?;
        }

        Ok(to_hex(&cid.hash))
    }

    /// Read an entire stream into memory, store it, invoke `callback` with the
    /// root hash, then pin the root.
    pub fn upload_file_stream<R: Read, F>(
        &self,
        mut reader: R,
        _file_name: impl Into<String>,
        mut callback: F,
    ) -> Result<String>
    where
        F: FnMut(&str),
    {
        let mut data = Vec::new();
        reader.read_to_end(&mut data)?;

        let store = self.store_arc();
        let tree = HashTree::new(HashTreeConfig::new(store).public());

        let (cid, _size) = sync_block_on(async {
            tree.put(&data).await
        }).context("Failed to store file")?;

        let root_hex = to_hex(&cid.hash);
        callback(&root_hex);

        let mut wtxn = self.env.write_txn()?;
        self.pins.put(&mut wtxn, cid.hash.as_slice(), &())?;
        wtxn.commit()?;

        Ok(root_hex)
    }

    /// Upload a directory tree, respecting .gitignore rules, and pin the root.
    pub fn upload_dir<P: AsRef<Path>>(&self, dir_path: P) -> Result<String> {
        self.upload_dir_with_options(dir_path, true)
    }

    pub fn upload_dir_with_options<P: AsRef<Path>>(&self, dir_path: P, respect_gitignore: bool) -> Result<String> {
        let dir_path = dir_path.as_ref();

        let store = self.store_arc();
        let tree = HashTree::new(HashTreeConfig::new(store).public());

        let root_cid = sync_block_on(async {
            self.upload_dir_recursive(&tree, dir_path, dir_path, respect_gitignore).await
        }).context("Failed to upload directory")?;

        let root_hex = to_hex(&root_cid.hash);

        let mut wtxn = self.env.write_txn()?;
        self.pins.put(&mut wtxn, root_cid.hash.as_slice(), &())?;
        wtxn.commit()?;

        Ok(root_hex)
    }

    async fn upload_dir_recursive<S: Store>(
        &self,
        tree: &HashTree<S>,
        _root_path: &Path,
        current_path: &Path,
        respect_gitignore: bool,
    ) -> Result<Cid> {
        use ignore::WalkBuilder;
        use std::collections::HashMap;

        // Map of relative directory path -> (file name, cid) entries.
        // The empty key represents the root directory.
        let mut dir_contents: HashMap<String, Vec<(String, Cid)>> = HashMap::new();
        dir_contents.insert(String::new(), Vec::new());

        let walker = WalkBuilder::new(current_path)
            .git_ignore(respect_gitignore)
            .git_global(respect_gitignore)
            .git_exclude(respect_gitignore)
            .hidden(false)
            .build();

        for result in walker {
            let entry = result?;
            let path = entry.path();

            if path == current_path {
                continue;
            }

            let relative = path.strip_prefix(current_path)
                .unwrap_or(path);

            if path.is_file() {
                let content = std::fs::read(path)?;
                let (cid, _size) = tree.put(&content).await
                    .map_err(|e| anyhow::anyhow!("Failed to upload file {}: {}", path.display(), e))?;

                let parent = relative.parent()
                    .map(|p| p.to_string_lossy().to_string())
                    .unwrap_or_default();
                let name = relative.file_name()
                    .map(|n| n.to_string_lossy().to_string())
                    .unwrap_or_default();

                dir_contents.entry(parent).or_default().push((name, cid));
            } else if path.is_dir() {
                // Record empty directories too.
                let dir_path = relative.to_string_lossy().to_string();
                dir_contents.entry(dir_path).or_default();
            }
        }

        self.build_directory_tree(tree, &mut dir_contents).await
    }

    async fn build_directory_tree<S: Store>(
        &self,
        tree: &HashTree<S>,
        dir_contents: &mut std::collections::HashMap<String, Vec<(String, Cid)>>,
    ) -> Result<Cid> {
        // Process deepest directories first so each parent can link its children.
        let mut dirs: Vec<String> = dir_contents.keys().cloned().collect();
        dirs.sort_by(|a, b| {
            let depth_a = a.matches('/').count() + if a.is_empty() { 0 } else { 1 };
            let depth_b = b.matches('/').count() + if b.is_empty() { 0 } else { 1 };
            depth_b.cmp(&depth_a)
        });

        let mut dir_cids: std::collections::HashMap<String, Cid> = std::collections::HashMap::new();

        for dir_path in dirs {
            let files = dir_contents.get(&dir_path).cloned().unwrap_or_default();

            let mut entries: Vec<HashTreeDirEntry> = files.into_iter()
                .map(|(name, cid)| HashTreeDirEntry::from_cid(name, &cid))
                .collect();

            // Link any already-built subdirectories of this directory.
            for (subdir_path, cid) in &dir_cids {
                let parent = std::path::Path::new(subdir_path)
                    .parent()
                    .map(|p| p.to_string_lossy().to_string())
                    .unwrap_or_default();

                if parent == dir_path {
                    let name = std::path::Path::new(subdir_path)
                        .file_name()
                        .map(|n| n.to_string_lossy().to_string())
                        .unwrap_or_default();
                    entries.push(HashTreeDirEntry::from_cid(name, cid));
                }
            }

            let cid = tree.put_directory(entries).await
                .map_err(|e| anyhow::anyhow!("Failed to create directory node: {}", e))?;

            dir_cids.insert(dir_path, cid);
        }

        // The empty key is the root directory.
        dir_cids.get("")
            .cloned()
            .ok_or_else(|| anyhow::anyhow!("No root directory"))
    }

    /// Like `upload_file`, but without `.public()`, so the tree is stored in
    /// encrypted mode; returns the full CID string rather than a bare hash.
    pub fn upload_file_encrypted<P: AsRef<Path>>(&self, file_path: P) -> Result<String> {
        let file_path = file_path.as_ref();
        let file_content = std::fs::read(file_path)?;

        let store = self.store_arc();
        let tree = HashTree::new(HashTreeConfig::new(store));

        let (cid, _size) = sync_block_on(async {
            tree.put(&file_content).await
        }).map_err(|e| anyhow::anyhow!("Failed to encrypt file: {}", e))?;

        let cid_str = cid.to_string();

        let mut wtxn = self.env.write_txn()?;
        self.pins.put(&mut wtxn, cid.hash.as_slice(), &())?;
        wtxn.commit()?;

        Ok(cid_str)
    }

    pub fn upload_dir_encrypted<P: AsRef<Path>>(&self, dir_path: P) -> Result<String> {
        self.upload_dir_encrypted_with_options(dir_path, true)
    }

    pub fn upload_dir_encrypted_with_options<P: AsRef<Path>>(&self, dir_path: P, respect_gitignore: bool) -> Result<String> {
        let dir_path = dir_path.as_ref();
        let store = self.store_arc();

        let tree = HashTree::new(HashTreeConfig::new(store));

        let root_cid = sync_block_on(async {
            self.upload_dir_recursive(&tree, dir_path, dir_path, respect_gitignore).await
        }).context("Failed to upload encrypted directory")?;

        let cid_str = root_cid.to_string();

        let mut wtxn = self.env.write_txn()?;
        self.pins.put(&mut wtxn, root_cid.hash.as_slice(), &())?;
        wtxn.commit()?;

        Ok(cid_str)
    }

    pub fn get_tree_node(&self, hash: &[u8; 32]) -> Result<Option<TreeNode>> {
        let store = self.store_arc();
        let tree = HashTree::new(HashTreeConfig::new(store).public());

        sync_block_on(async {
            tree.get_tree_node(hash).await
                .map_err(|e| anyhow::anyhow!("Failed to get tree node: {}", e))
        })
    }

    /// Store a raw blob and return its sha256 as hex.
    pub fn put_blob(&self, data: &[u8]) -> Result<String> {
        let hash = sha256(data);
        self.router.put_sync(hash, data)
            .map_err(|e| anyhow::anyhow!("Failed to store blob: {}", e))?;
        Ok(to_hex(&hash))
    }

    pub fn get_blob(&self, hash: &[u8; 32]) -> Result<Option<Vec<u8>>> {
        self.router.get_sync(hash)
            .map_err(|e| anyhow::anyhow!("Failed to get blob: {}", e))
    }

    pub fn blob_exists(&self, hash: &[u8; 32]) -> Result<bool> {
        self.router.exists(hash)
            .map_err(|e| anyhow::anyhow!("Failed to check blob: {}", e))
    }

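    /// Composite key for the `blob_owners` table: the 32-byte blob sha256
    /// followed by the 32-byte owner pubkey. Storing owners this way lets
    /// `prefix_iter` over the first 32 bytes enumerate all owners of a blob.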
    fn blob_owner_key(sha256: &[u8; 32], pubkey: &[u8; 32]) -> [u8; 64] {
        let mut key = [0u8; 64];
        key[..32].copy_from_slice(sha256);
        key[32..].copy_from_slice(pubkey);
        key
    }

    /// Record `pubkey` as an owner of the blob and update the pubkey's blob
    /// listing if the blob is not already present there.
    pub fn set_blob_owner(&self, sha256: &[u8; 32], pubkey: &[u8; 32]) -> Result<()> {
        let key = Self::blob_owner_key(sha256, pubkey);
        let mut wtxn = self.env.write_txn()?;

        self.blob_owners.put(&mut wtxn, &key[..], &())?;

        let sha256_hex = to_hex(sha256);

        let mut blobs: Vec<BlobMetadata> = self
            .pubkey_blobs
            .get(&wtxn, pubkey)?
            .and_then(|b| serde_json::from_slice(b).ok())
            .unwrap_or_default();

        if !blobs.iter().any(|b| b.sha256 == sha256_hex) {
            let now = SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap()
                .as_secs();

            let size = self
                .get_blob(sha256)?
                .map(|data| data.len() as u64)
                .unwrap_or(0);

            blobs.push(BlobMetadata {
                sha256: sha256_hex,
                size,
                mime_type: "application/octet-stream".to_string(),
                uploaded: now,
            });

            let blobs_json = serde_json::to_vec(&blobs)?;
            self.pubkey_blobs.put(&mut wtxn, pubkey, &blobs_json)?;
        }

        wtxn.commit()?;
        Ok(())
    }

    pub fn is_blob_owner(&self, sha256: &[u8; 32], pubkey: &[u8; 32]) -> Result<bool> {
        let key = Self::blob_owner_key(sha256, pubkey);
        let rtxn = self.env.read_txn()?;
        Ok(self.blob_owners.get(&rtxn, &key[..])?.is_some())
    }

    pub fn get_blob_owners(&self, sha256: &[u8; 32]) -> Result<Vec<[u8; 32]>> {
        let rtxn = self.env.read_txn()?;

        let mut owners = Vec::new();
        for item in self.blob_owners.prefix_iter(&rtxn, &sha256[..])? {
            let (key, _) = item?;
            if key.len() == 64 {
                let mut pubkey = [0u8; 32];
                pubkey.copy_from_slice(&key[32..64]);
                owners.push(pubkey);
            }
        }
        Ok(owners)
    }

    pub fn blob_has_owners(&self, sha256: &[u8; 32]) -> Result<bool> {
        let rtxn = self.env.read_txn()?;

        for item in self.blob_owners.prefix_iter(&rtxn, &sha256[..])? {
            if item.is_ok() {
                return Ok(true);
            }
        }
        Ok(false)
    }

    pub fn get_blob_owner(&self, sha256: &[u8; 32]) -> Result<Option<[u8; 32]>> {
        Ok(self.get_blob_owners(sha256)?.into_iter().next())
    }

    /// Remove `pubkey` from the blob's owners. If no owners remain, delete the
    /// blob itself. Returns `true` if the blob was deleted.
    pub fn delete_blossom_blob(&self, sha256: &[u8; 32], pubkey: &[u8; 32]) -> Result<bool> {
        let key = Self::blob_owner_key(sha256, pubkey);
        let mut wtxn = self.env.write_txn()?;

        self.blob_owners.delete(&mut wtxn, &key[..])?;

        let sha256_hex = to_hex(sha256);

        if let Some(blobs_bytes) = self.pubkey_blobs.get(&wtxn, pubkey)? {
            if let Ok(mut blobs) = serde_json::from_slice::<Vec<BlobMetadata>>(blobs_bytes) {
                blobs.retain(|b| b.sha256 != sha256_hex);
                let blobs_json = serde_json::to_vec(&blobs)?;
                self.pubkey_blobs.put(&mut wtxn, pubkey, &blobs_json)?;
            }
        }

        // Check (within this txn, so the delete above is visible) whether any
        // other owner entries remain.
        let mut has_other_owners = false;
        for item in self.blob_owners.prefix_iter(&wtxn, &sha256[..])? {
            if item.is_ok() {
                has_other_owners = true;
                break;
            }
        }

        if has_other_owners {
            wtxn.commit()?;
            tracing::debug!(
                "Removed {} from blob {} owners, other owners remain",
                &to_hex(pubkey)[..8],
                &sha256_hex[..8]
            );
            return Ok(false);
        }

        tracing::info!(
            "All owners removed from blob {}, deleting",
            &sha256_hex[..8]
        );

        let _ = self.router.delete_sync(sha256);

        wtxn.commit()?;
        Ok(true)
    }

    pub fn list_blobs_by_pubkey(&self, pubkey: &[u8; 32]) -> Result<Vec<crate::server::blossom::BlobDescriptor>> {
        let rtxn = self.env.read_txn()?;

        let blobs: Vec<BlobMetadata> = self
            .pubkey_blobs
            .get(&rtxn, pubkey)?
            .and_then(|b| serde_json::from_slice(b).ok())
            .unwrap_or_default();

        Ok(blobs
            .into_iter()
            .map(|b| crate::server::blossom::BlobDescriptor {
                url: format!("/{}", b.sha256),
                sha256: b.sha256,
                size: b.size,
                mime_type: b.mime_type,
                uploaded: b.uploaded,
            })
            .collect())
    }

    pub fn get_chunk(&self, hash: &[u8; 32]) -> Result<Option<Vec<u8>>> {
        self.router.get_sync(hash)
            .map_err(|e| anyhow::anyhow!("Failed to get chunk: {}", e))
    }

    /// Read a complete file, reassembling chunks if necessary.
    pub fn get_file(&self, hash: &[u8; 32]) -> Result<Option<Vec<u8>>> {
        let store = self.store_arc();
        let tree = HashTree::new(HashTreeConfig::new(store).public());

        sync_block_on(async {
            tree.read_file(hash).await
                .map_err(|e| anyhow::anyhow!("Failed to read file: {}", e))
        })
    }

    pub fn get_file_by_cid(&self, cid: &Cid) -> Result<Option<Vec<u8>>> {
        let store = self.store_arc();
        let tree = HashTree::new(HashTreeConfig::new(store).public());

        sync_block_on(async {
            tree.get(cid).await
                .map_err(|e| anyhow::anyhow!("Failed to read file: {}", e))
        })
    }

    /// Describe how a file is stored: its total size and, if chunked, the
    /// hashes and sizes of its chunks. Returns `None` for unknown hashes and
    /// for directories.
    pub fn get_file_chunk_metadata(&self, hash: &[u8; 32]) -> Result<Option<FileChunkMetadata>> {
        let store = self.store_arc();
        let tree = HashTree::new(HashTreeConfig::new(store.clone()).public());

        sync_block_on(async {
            let exists = store.has(hash).await
                .map_err(|e| anyhow::anyhow!("Failed to check existence: {}", e))?;

            if !exists {
                return Ok(None);
            }

            let total_size = tree.get_size(hash).await
                .map_err(|e| anyhow::anyhow!("Failed to get size: {}", e))?;

            let is_tree_node = tree.is_tree(hash).await
                .map_err(|e| anyhow::anyhow!("Failed to check tree: {}", e))?;

            if !is_tree_node {
                // Single-blob file: no chunk list needed.
                return Ok(Some(FileChunkMetadata {
                    total_size,
                    chunk_hashes: vec![],
                    chunk_sizes: vec![],
                    is_chunked: false,
                }));
            }

            let node = match tree.get_tree_node(hash).await
                .map_err(|e| anyhow::anyhow!("Failed to get tree node: {}", e))? {
                Some(n) => n,
                None => return Ok(None),
            };

            let is_directory = tree.is_directory(hash).await
                .map_err(|e| anyhow::anyhow!("Failed to check directory: {}", e))?;

            if is_directory {
                // Directories have no byte-range representation.
                return Ok(None);
            }

            let chunk_hashes: Vec<Hash> = node.links.iter().map(|l| l.hash).collect();
            let chunk_sizes: Vec<u64> = node.links.iter().map(|l| l.size).collect();

            Ok(Some(FileChunkMetadata {
                total_size,
                chunk_hashes,
                chunk_sizes,
                is_chunked: !node.links.is_empty(),
            }))
        })
    }

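    /// Read the inclusive byte range `[start, end]` of a file, where `end`
    /// defaults to EOF and is clamped to `total_size - 1`. Returns the bytes
    /// plus the file's total size (e.g. for HTTP range responses), or `None`
    /// if the hash is unknown or `start` is past EOF.
    ///
    /// For example, for a 10-byte file, `start = 2, end = Some(5)` returns
    /// bytes 2..=5 (4 bytes) together with the total size of 10.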
    pub fn get_file_range(&self, hash: &[u8; 32], start: u64, end: Option<u64>) -> Result<Option<(Vec<u8>, u64)>> {
        let metadata = match self.get_file_chunk_metadata(hash)? {
            Some(m) => m,
            None => return Ok(None),
        };

        if metadata.total_size == 0 {
            return Ok(Some((Vec::new(), 0)));
        }

        if start >= metadata.total_size {
            return Ok(None);
        }

        let end = end.unwrap_or(metadata.total_size - 1).min(metadata.total_size - 1);

        // Unchunked file: slice the whole content directly.
        if !metadata.is_chunked {
            let content = self.get_file(hash)?.unwrap_or_default();
            let range_content = if start < content.len() as u64 {
                content[start as usize..=(end as usize).min(content.len() - 1)].to_vec()
            } else {
                Vec::new()
            };
            return Ok(Some((range_content, metadata.total_size)));
        }

        // Chunked file: walk the chunk list and copy only the overlapping spans.
        let mut result = Vec::new();
        let mut current_offset = 0u64;

        for (i, chunk_hash) in metadata.chunk_hashes.iter().enumerate() {
            let chunk_size = metadata.chunk_sizes[i];
            let chunk_end = current_offset + chunk_size - 1;

            // Does this chunk overlap [start, end]?
            if chunk_end >= start && current_offset <= end {
                let chunk_content = match self.get_chunk(chunk_hash)? {
                    Some(content) => content,
                    None => {
                        return Err(anyhow::anyhow!("Chunk {} not found", to_hex(chunk_hash)));
                    }
                };

                let chunk_read_start = if current_offset >= start {
                    0
                } else {
                    (start - current_offset) as usize
                };

                let chunk_read_end = if chunk_end <= end {
                    chunk_size as usize - 1
                } else {
                    (end - current_offset) as usize
                };

                result.extend_from_slice(&chunk_content[chunk_read_start..=chunk_read_end]);
            }

            current_offset += chunk_size;

            if current_offset > end {
                break;
            }
        }

        Ok(Some((result, metadata.total_size)))
    }

    /// Owned iterator variant of a ranged read: yields one `Vec<u8>` per
    /// overlapping chunk instead of buffering the whole range.
    pub fn stream_file_range_chunks_owned(
        self: Arc<Self>,
        hash: &[u8; 32],
        start: u64,
        end: u64,
    ) -> Result<Option<FileRangeChunksOwned>> {
        let metadata = match self.get_file_chunk_metadata(hash)? {
            Some(m) => m,
            None => return Ok(None),
        };

        if metadata.total_size == 0 || start >= metadata.total_size {
            return Ok(None);
        }

        let end = end.min(metadata.total_size - 1);

        Ok(Some(FileRangeChunksOwned {
            store: self,
            metadata,
            start,
            end,
            current_chunk_idx: 0,
            current_offset: 0,
        }))
    }

    pub fn get_directory_listing(&self, hash: &[u8; 32]) -> Result<Option<DirectoryListing>> {
        let store = self.store_arc();
        let tree = HashTree::new(HashTreeConfig::new(store).public());

        sync_block_on(async {
            let is_dir = tree.is_directory(hash).await
                .map_err(|e| anyhow::anyhow!("Failed to check directory: {}", e))?;

            if !is_dir {
                return Ok(None);
            }

            let cid = hashtree_core::Cid::public(*hash);
            let tree_entries = tree.list_directory(&cid).await
                .map_err(|e| anyhow::anyhow!("Failed to list directory: {}", e))?;

            let entries: Vec<DirEntry> = tree_entries.into_iter().map(|e| DirEntry {
                name: e.name,
                cid: to_hex(&e.hash),
                is_directory: e.link_type.is_tree(),
                size: e.size,
            }).collect();

            Ok(Some(DirectoryListing {
                dir_name: String::new(),
                entries,
            }))
        })
    }

    pub fn pin(&self, hash: &[u8; 32]) -> Result<()> {
        let mut wtxn = self.env.write_txn()?;
        self.pins.put(&mut wtxn, hash.as_slice(), &())?;
        wtxn.commit()?;
        Ok(())
    }

    pub fn unpin(&self, hash: &[u8; 32]) -> Result<()> {
        let mut wtxn = self.env.write_txn()?;
        self.pins.delete(&mut wtxn, hash.as_slice())?;
        wtxn.commit()?;
        Ok(())
    }

    pub fn is_pinned(&self, hash: &[u8; 32]) -> Result<bool> {
        let rtxn = self.env.read_txn()?;
        Ok(self.pins.get(&rtxn, hash.as_slice())?.is_some())
    }

    pub fn list_pins_raw(&self) -> Result<Vec<[u8; 32]>> {
        let rtxn = self.env.read_txn()?;
        let mut pins = Vec::new();

        for item in self.pins.iter(&rtxn)? {
            let (hash_bytes, _) = item?;
            if hash_bytes.len() == 32 {
                let mut hash = [0u8; 32];
                hash.copy_from_slice(hash_bytes);
                pins.push(hash);
            }
        }

        Ok(pins)
    }

    pub fn list_pins_with_names(&self) -> Result<Vec<PinnedItem>> {
        let rtxn = self.env.read_txn()?;
        let store = self.store_arc();
        let tree = HashTree::new(HashTreeConfig::new(store).public());
        let mut pins = Vec::new();

        for item in self.pins.iter(&rtxn)? {
            let (hash_bytes, _) = item?;
            if hash_bytes.len() != 32 {
                continue;
            }
            let mut hash = [0u8; 32];
            hash.copy_from_slice(hash_bytes);

            let is_directory = sync_block_on(async {
                tree.is_directory(&hash).await.unwrap_or(false)
            });

            pins.push(PinnedItem {
                cid: to_hex(&hash),
                name: "Unknown".to_string(),
                is_directory,
            });
        }

        Ok(pins)
    }

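    /// Index a synced tree for quota accounting and eviction: walk the tree,
    /// record a `blob_hash || root_hash` entry for every blob it references,
    /// store its `TreeMeta`, and optionally update a named ref. If the ref
    /// previously pointed at a different root, the old tree is unindexed first.
    ///
    /// A sketch of indexing a followed user's tree under a ref (all names are
    /// illustrative):
    ///
    /// ```ignore
    /// store.index_tree(&root_hash, &owner_pubkey_hex, Some("photos"),
    ///                  PRIORITY_FOLLOWED, Some("npub1.../photos"))?;
    /// ```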
    pub fn index_tree(
        &self,
        root_hash: &Hash,
        owner: &str,
        name: Option<&str>,
        priority: u8,
        ref_key: Option<&str>,
    ) -> Result<()> {
        let root_hex = to_hex(root_hash);

        // If this ref already points at an older root, drop that tree first.
        if let Some(key) = ref_key {
            let rtxn = self.env.read_txn()?;
            if let Some(old_hash_bytes) = self.tree_refs.get(&rtxn, key)? {
                if old_hash_bytes != root_hash.as_slice() {
                    let old_hash: Hash = old_hash_bytes.try_into()
                        .map_err(|_| anyhow::anyhow!("Invalid hash in tree_refs"))?;
                    drop(rtxn);
                    let _ = self.unindex_tree(&old_hash);
                    tracing::debug!("Replaced old tree for ref {}", key);
                }
            }
        }

        let store = self.store_arc();
        let tree = HashTree::new(HashTreeConfig::new(store).public());

        let (blob_hashes, total_size) = sync_block_on(async {
            self.collect_tree_blobs(&tree, root_hash).await
        })?;

        let mut wtxn = self.env.write_txn()?;

        // Record blob -> tree membership for shared-blob refcounting.
        for blob_hash in &blob_hashes {
            let mut key = [0u8; 64];
            key[..32].copy_from_slice(blob_hash);
            key[32..].copy_from_slice(root_hash);
            self.blob_trees.put(&mut wtxn, &key[..], &())?;
        }

        let meta = TreeMeta {
            owner: owner.to_string(),
            name: name.map(|s| s.to_string()),
            synced_at: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap()
                .as_secs(),
            total_size,
            priority,
        };
        let meta_bytes = rmp_serde::to_vec(&meta)
            .map_err(|e| anyhow::anyhow!("Failed to serialize TreeMeta: {}", e))?;
        self.tree_meta.put(&mut wtxn, root_hash.as_slice(), &meta_bytes)?;

        if let Some(key) = ref_key {
            self.tree_refs.put(&mut wtxn, key, root_hash.as_slice())?;
        }

        wtxn.commit()?;

        tracing::debug!(
            "Indexed tree {} ({} blobs, {} bytes, priority {})",
            &root_hex[..8],
            blob_hashes.len(),
            total_size,
            priority
        );

        Ok(())
    }

    /// Depth-first walk of a tree, returning the hashes and total size of all
    /// locally present leaf blobs.
    async fn collect_tree_blobs<S: Store>(
        &self,
        tree: &HashTree<S>,
        root: &Hash,
    ) -> Result<(Vec<Hash>, u64)> {
        let mut blobs = Vec::new();
        let mut total_size = 0u64;
        let mut stack = vec![*root];

        while let Some(hash) = stack.pop() {
            let is_tree = tree.is_tree(&hash).await
                .map_err(|e| anyhow::anyhow!("Failed to check tree: {}", e))?;

            if is_tree {
                if let Some(node) = tree.get_tree_node(&hash).await
                    .map_err(|e| anyhow::anyhow!("Failed to get tree node: {}", e))?
                {
                    for link in &node.links {
                        stack.push(link.hash);
                    }
                }
            } else {
                if let Some(data) = self.router.get_sync(&hash)
                    .map_err(|e| anyhow::anyhow!("Failed to get blob: {}", e))?
                {
                    total_size += data.len() as u64;
                    blobs.push(hash);
                }
            }
        }

        Ok((blobs, total_size))
    }

    /// Remove a tree from the index, deleting any of its blobs that are not
    /// referenced by another indexed tree. Returns the number of bytes freed.
    pub fn unindex_tree(&self, root_hash: &Hash) -> Result<u64> {
        let root_hex = to_hex(root_hash);

        let store = self.store_arc();
        let tree = HashTree::new(HashTreeConfig::new(store).public());

        let (blob_hashes, _) = sync_block_on(async {
            self.collect_tree_blobs(&tree, root_hash).await
        })?;

        let mut wtxn = self.env.write_txn()?;
        let mut freed = 0u64;

        for blob_hash in &blob_hashes {
            let mut key = [0u8; 64];
            key[..32].copy_from_slice(blob_hash);
            key[32..].copy_from_slice(root_hash);
            self.blob_trees.delete(&mut wtxn, &key[..])?;

            // Iterate within the write txn so the deletion above is visible;
            // any remaining entry means another tree still uses this blob.
            let mut has_other_tree = false;
            for item in self.blob_trees.prefix_iter(&wtxn, &blob_hash[..])? {
                if item.is_ok() {
                    has_other_tree = true;
                    break;
                }
            }

            if !has_other_tree {
                if let Some(data) = self.router.get_sync(blob_hash)
                    .map_err(|e| anyhow::anyhow!("Failed to get blob: {}", e))?
                {
                    freed += data.len() as u64;
                    self.router.delete_local_only(blob_hash)
                        .map_err(|e| anyhow::anyhow!("Failed to delete blob: {}", e))?;
                }
            }
        }

        // Drop the root tree node itself.
        if let Some(data) = self.router.get_sync(root_hash)
            .map_err(|e| anyhow::anyhow!("Failed to get tree node: {}", e))?
        {
            freed += data.len() as u64;
            self.router.delete_local_only(root_hash)
                .map_err(|e| anyhow::anyhow!("Failed to delete tree node: {}", e))?;
        }

        self.tree_meta.delete(&mut wtxn, root_hash.as_slice())?;

        wtxn.commit()?;

        tracing::debug!(
            "Unindexed tree {} ({} bytes freed)",
            &root_hex[..8],
            freed
        );

        Ok(freed)
    }

    pub fn get_tree_meta(&self, root_hash: &Hash) -> Result<Option<TreeMeta>> {
        let rtxn = self.env.read_txn()?;
        if let Some(bytes) = self.tree_meta.get(&rtxn, root_hash.as_slice())? {
            let meta: TreeMeta = rmp_serde::from_slice(bytes)
                .map_err(|e| anyhow::anyhow!("Failed to deserialize TreeMeta: {}", e))?;
            Ok(Some(meta))
        } else {
            Ok(None)
        }
    }

    pub fn list_indexed_trees(&self) -> Result<Vec<(Hash, TreeMeta)>> {
        let rtxn = self.env.read_txn()?;
        let mut trees = Vec::new();

        for item in self.tree_meta.iter(&rtxn)? {
            let (hash_bytes, meta_bytes) = item?;
            let hash: Hash = hash_bytes.try_into()
                .map_err(|_| anyhow::anyhow!("Invalid hash in tree_meta"))?;
            let meta: TreeMeta = rmp_serde::from_slice(meta_bytes)
                .map_err(|e| anyhow::anyhow!("Failed to deserialize TreeMeta: {}", e))?;
            trees.push((hash, meta));
        }

        Ok(trees)
    }

    /// Sum of `total_size` over all indexed trees.
    pub fn tracked_size(&self) -> Result<u64> {
        let rtxn = self.env.read_txn()?;
        let mut total = 0u64;

        for item in self.tree_meta.iter(&rtxn)? {
            let (_, bytes) = item?;
            let meta: TreeMeta = rmp_serde::from_slice(bytes)
                .map_err(|e| anyhow::anyhow!("Failed to deserialize TreeMeta: {}", e))?;
            total += meta.total_size;
        }

        Ok(total)
    }

    /// All indexed trees sorted into eviction order: lowest priority first,
    /// then oldest sync first.
    fn get_evictable_trees(&self) -> Result<Vec<(Hash, TreeMeta)>> {
        let mut trees = self.list_indexed_trees()?;

        trees.sort_by(|a, b| {
            match a.1.priority.cmp(&b.1.priority) {
                std::cmp::Ordering::Equal => a.1.synced_at.cmp(&b.1.synced_at),
                other => other,
            }
        });

        Ok(trees)
    }

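    /// If local usage exceeds `max_size_bytes`, free space down to a 90%
    /// watermark: first delete orphaned blobs (neither pinned nor referenced
    /// by an indexed tree), then unindex whole trees in the order produced by
    /// `get_evictable_trees`, skipping pinned roots. Returns bytes freed.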
    pub fn evict_if_needed(&self) -> Result<u64> {
        let stats = self.router.stats()
            .map_err(|e| anyhow::anyhow!("Failed to get stats: {}", e))?;
        let current = stats.total_bytes;

        if current <= self.max_size_bytes {
            return Ok(0);
        }

        // Evict down to 90% of the cap to avoid thrashing at the limit.
        let target = self.max_size_bytes * 90 / 100;
        let mut freed = 0u64;
        let mut current_size = current;

        // Pass 1: orphaned blobs.
        let orphan_freed = self.evict_orphaned_blobs()?;
        freed += orphan_freed;
        current_size = current_size.saturating_sub(orphan_freed);

        if orphan_freed > 0 {
            tracing::info!("Evicted orphaned blobs: {} bytes freed", orphan_freed);
        }

        if current_size <= target {
            if freed > 0 {
                tracing::info!("Eviction complete: {} bytes freed", freed);
            }
            return Ok(freed);
        }

        // Pass 2: whole trees, lowest priority and oldest first.
        let evictable = self.get_evictable_trees()?;

        for (root_hash, meta) in evictable {
            if current_size <= target {
                break;
            }

            let root_hex = to_hex(&root_hash);

            if self.is_pinned(&root_hash)? {
                continue;
            }

            let tree_freed = self.unindex_tree(&root_hash)?;
            freed += tree_freed;
            current_size = current_size.saturating_sub(tree_freed);

            tracing::info!(
                "Evicted tree {} (owner={}, priority={}, {} bytes)",
                &root_hex[..8],
                &meta.owner[..8.min(meta.owner.len())],
                meta.priority,
                tree_freed
            );
        }

        if freed > 0 {
            tracing::info!("Eviction complete: {} bytes freed", freed);
        }

        Ok(freed)
    }

    /// Delete blobs that are neither pinned nor referenced by any indexed tree.
    fn evict_orphaned_blobs(&self) -> Result<u64> {
        let mut freed = 0u64;

        let all_hashes = self.router.list()
            .map_err(|e| anyhow::anyhow!("Failed to list hashes: {}", e))?;

        let rtxn = self.env.read_txn()?;
        let pinned: HashSet<Hash> = self.pins.iter(&rtxn)?
            .filter_map(|item| item.ok())
            .filter_map(|(hash_bytes, _)| {
                if hash_bytes.len() == 32 {
                    let mut hash = [0u8; 32];
                    hash.copy_from_slice(hash_bytes);
                    Some(hash)
                } else {
                    None
                }
            })
            .collect();

        // Every blob referenced by at least one indexed tree (first 32 bytes
        // of each blob_trees key).
        let mut blobs_in_trees: HashSet<Hash> = HashSet::new();
        for item in self.blob_trees.iter(&rtxn)? {
            if let Ok((key_bytes, _)) = item {
                if key_bytes.len() >= 32 {
                    let blob_hash: Hash = key_bytes[..32].try_into().unwrap();
                    blobs_in_trees.insert(blob_hash);
                }
            }
        }
        drop(rtxn);

        for hash in all_hashes {
            if pinned.contains(&hash) {
                continue;
            }

            if blobs_in_trees.contains(&hash) {
                continue;
            }

            if let Ok(Some(data)) = self.router.get_sync(&hash) {
                freed += data.len() as u64;
                let _ = self.router.delete_local_only(&hash);
                tracing::debug!("Deleted orphaned blob {} ({} bytes)", &to_hex(&hash)[..8], data.len());
            }
        }

        Ok(freed)
    }

    pub fn max_size_bytes(&self) -> u64 {
        self.max_size_bytes
    }

    /// Break tracked storage down by the PRIORITY_* bands.
    pub fn storage_by_priority(&self) -> Result<StorageByPriority> {
        let rtxn = self.env.read_txn()?;
        let mut own = 0u64;
        let mut followed = 0u64;
        let mut other = 0u64;

        for item in self.tree_meta.iter(&rtxn)? {
            let (_, bytes) = item?;
            let meta: TreeMeta = rmp_serde::from_slice(bytes)
                .map_err(|e| anyhow::anyhow!("Failed to deserialize TreeMeta: {}", e))?;

            if meta.priority >= PRIORITY_OWN {
                own += meta.total_size;
            } else if meta.priority >= PRIORITY_FOLLOWED {
                followed += meta.total_size;
            } else {
                other += meta.total_size;
            }
        }

        Ok(StorageByPriority { own, followed, other })
    }

    pub fn get_storage_stats(&self) -> Result<StorageStats> {
        let rtxn = self.env.read_txn()?;
        let total_pins = self.pins.len(&rtxn)? as usize;

        let stats = self.router.stats()
            .map_err(|e| anyhow::anyhow!("Failed to get stats: {}", e))?;

        Ok(StorageStats {
            total_dags: stats.count,
            pinned_dags: total_pins,
            total_bytes: stats.total_bytes,
        })
    }

    /// Look up the cached root stored under "pubkey_hex/tree_name".
    pub fn get_cached_root(&self, pubkey_hex: &str, tree_name: &str) -> Result<Option<CachedRoot>> {
        let key = format!("{}/{}", pubkey_hex, tree_name);
        let rtxn = self.env.read_txn()?;
        if let Some(bytes) = self.cached_roots.get(&rtxn, &key)? {
            let root: CachedRoot = rmp_serde::from_slice(bytes)
                .map_err(|e| anyhow::anyhow!("Failed to deserialize CachedRoot: {}", e))?;
            Ok(Some(root))
        } else {
            Ok(None)
        }
    }

    pub fn set_cached_root(
        &self,
        pubkey_hex: &str,
        tree_name: &str,
        hash: &str,
        key: Option<&str>,
        visibility: &str,
        updated_at: u64,
    ) -> Result<()> {
        let db_key = format!("{}/{}", pubkey_hex, tree_name);
        let root = CachedRoot {
            hash: hash.to_string(),
            key: key.map(|k| k.to_string()),
            updated_at,
            visibility: visibility.to_string(),
        };
        let bytes = rmp_serde::to_vec(&root)
            .map_err(|e| anyhow::anyhow!("Failed to serialize CachedRoot: {}", e))?;
        let mut wtxn = self.env.write_txn()?;
        self.cached_roots.put(&mut wtxn, &db_key, &bytes)?;
        wtxn.commit()?;
        Ok(())
    }

    pub fn list_cached_roots(&self, pubkey_hex: &str) -> Result<Vec<(String, CachedRoot)>> {
        let prefix = format!("{}/", pubkey_hex);
        let rtxn = self.env.read_txn()?;
        let mut results = Vec::new();

        for item in self.cached_roots.iter(&rtxn)? {
            let (key, bytes) = item?;
            if key.starts_with(&prefix) {
                let tree_name = key.strip_prefix(&prefix).unwrap_or(key);
                let root: CachedRoot = rmp_serde::from_slice(bytes)
                    .map_err(|e| anyhow::anyhow!("Failed to deserialize CachedRoot: {}", e))?;
                results.push((tree_name.to_string(), root));
            }
        }

        Ok(results)
    }

    pub fn delete_cached_root(&self, pubkey_hex: &str, tree_name: &str) -> Result<bool> {
        let key = format!("{}/{}", pubkey_hex, tree_name);
        let mut wtxn = self.env.write_txn()?;
        let deleted = self.cached_roots.delete(&mut wtxn, &key)?;
        wtxn.commit()?;
        Ok(deleted)
    }

    /// Aggressive garbage collection: delete every blob that is not pinned.
    /// Unlike `evict_orphaned_blobs`, this ignores tree membership.
    pub fn gc(&self) -> Result<GcStats> {
        let rtxn = self.env.read_txn()?;

        let pinned: HashSet<Hash> = self.pins.iter(&rtxn)?
            .filter_map(|item| item.ok())
            .filter_map(|(hash_bytes, _)| {
                if hash_bytes.len() == 32 {
                    let mut hash = [0u8; 32];
                    hash.copy_from_slice(hash_bytes);
                    Some(hash)
                } else {
                    None
                }
            })
            .collect();

        drop(rtxn);

        let all_hashes = self.router.list()
            .map_err(|e| anyhow::anyhow!("Failed to list hashes: {}", e))?;

        let mut deleted = 0;
        let mut freed_bytes = 0u64;

        for hash in all_hashes {
            if !pinned.contains(&hash) {
                if let Ok(Some(data)) = self.router.get_sync(&hash) {
                    freed_bytes += data.len() as u64;
                    let _ = self.router.delete_local_only(&hash);
                    deleted += 1;
                }
            }
        }

        Ok(GcStats {
            deleted_dags: deleted,
            freed_bytes,
        })
    }

    /// Re-hash every locally stored blob and compare against its key.
    /// With `delete = true`, corrupted or unreadable entries are removed.
    pub fn verify_lmdb_integrity(&self, delete: bool) -> Result<VerifyResult> {
        let all_hashes = self.router.list()
            .map_err(|e| anyhow::anyhow!("Failed to list hashes: {}", e))?;

        let total = all_hashes.len();
        let mut valid = 0;
        let mut corrupted = 0;
        let mut deleted = 0;
        let mut corrupted_hashes = Vec::new();

        for hash in &all_hashes {
            let hash_hex = to_hex(hash);

            match self.router.get_sync(hash) {
                Ok(Some(data)) => {
                    let actual_hash = sha256(&data);

                    if actual_hash == *hash {
                        valid += 1;
                    } else {
                        corrupted += 1;
                        let actual_hex = to_hex(&actual_hash);
                        println!("  CORRUPTED: key={} actual={} size={}",
                            &hash_hex[..16], &actual_hex[..16], data.len());
                        corrupted_hashes.push(*hash);
                    }
                }
                Ok(None) => {
                    corrupted += 1;
                    println!("  MISSING: key={}", &hash_hex[..16]);
                    corrupted_hashes.push(*hash);
                }
                Err(e) => {
                    corrupted += 1;
                    println!("  ERROR: key={} err={}", &hash_hex[..16], e);
                    corrupted_hashes.push(*hash);
                }
            }
        }

        if delete {
            for hash in &corrupted_hashes {
                match self.router.delete_sync(hash) {
                    Ok(true) => deleted += 1,
                    Ok(false) => {} // already gone
                    Err(e) => {
                        let hash_hex = to_hex(hash);
                        println!("  Failed to delete {}: {}", &hash_hex[..16], e);
                    }
                }
            }
        }

        Ok(VerifyResult {
            total,
            valid,
            corrupted,
            deleted,
        })
    }

    /// Verify every `.bin` object in the configured S3 bucket by re-hashing
    /// its contents against the hash encoded in its key. With `delete = true`,
    /// corrupted objects are removed.
    #[cfg(feature = "s3")]
    pub async fn verify_r2_integrity(&self, delete: bool) -> Result<VerifyResult> {
        use aws_sdk_s3::Client as S3Client;

        let config = crate::config::Config::load()?;
        let s3_config = config.storage.s3
            .ok_or_else(|| anyhow::anyhow!("S3 not configured"))?;

        let aws_config = aws_config::from_env()
            .region(aws_sdk_s3::config::Region::new(s3_config.region.clone()))
            .load()
            .await;

        let s3_client = S3Client::from_conf(
            aws_sdk_s3::config::Builder::from(&aws_config)
                .endpoint_url(&s3_config.endpoint)
                .force_path_style(true)
                .build()
        );

        let bucket = &s3_config.bucket;
        let prefix = s3_config.prefix.as_deref().unwrap_or("");

        let mut total = 0;
        let mut valid = 0;
        let mut corrupted = 0;
        let mut deleted = 0;
        let mut corrupted_keys = Vec::new();

        // Page through the bucket listing.
        let mut continuation_token: Option<String> = None;

        loop {
            let mut list_req = s3_client.list_objects_v2()
                .bucket(bucket)
                .prefix(prefix);

            if let Some(ref token) = continuation_token {
                list_req = list_req.continuation_token(token);
            }

            let list_resp = list_req.send().await
                .map_err(|e| anyhow::anyhow!("Failed to list S3 objects: {}", e))?;

            for object in list_resp.contents() {
                let key = object.key().unwrap_or("");

                if !key.ends_with(".bin") {
                    continue;
                }

                total += 1;

                // The key encodes the expected hash: "<prefix><hex>.bin".
                let filename = key.strip_prefix(prefix).unwrap_or(key);
                let expected_hash_hex = filename.strip_suffix(".bin").unwrap_or(filename);

                if expected_hash_hex.len() != 64 {
                    corrupted += 1;
                    println!("  INVALID KEY: {}", key);
                    corrupted_keys.push(key.to_string());
                    continue;
                }

                let expected_hash = match from_hex(expected_hash_hex) {
                    Ok(h) => h,
                    Err(_) => {
                        corrupted += 1;
                        println!("  INVALID HEX: {}", key);
                        corrupted_keys.push(key.to_string());
                        continue;
                    }
                };

                match s3_client.get_object()
                    .bucket(bucket)
                    .key(key)
                    .send()
                    .await
                {
                    Ok(resp) => {
                        match resp.body.collect().await {
                            Ok(bytes) => {
                                let data = bytes.into_bytes();
                                let actual_hash = sha256(&data);

                                if actual_hash == expected_hash {
                                    valid += 1;
                                } else {
                                    corrupted += 1;
                                    let actual_hex = to_hex(&actual_hash);
                                    println!("  CORRUPTED: key={} actual={} size={}",
                                        &expected_hash_hex[..16], &actual_hex[..16], data.len());
                                    corrupted_keys.push(key.to_string());
                                }
                            }
                            Err(e) => {
                                corrupted += 1;
                                println!("  READ ERROR: {} - {}", key, e);
                                corrupted_keys.push(key.to_string());
                            }
                        }
                    }
                    Err(e) => {
                        corrupted += 1;
                        println!("  FETCH ERROR: {} - {}", key, e);
                        corrupted_keys.push(key.to_string());
                    }
                }

                if total % 100 == 0 {
                    println!("  Progress: {} objects checked, {} corrupted so far", total, corrupted);
                }
            }

            if list_resp.is_truncated() == Some(true) {
                continuation_token = list_resp.next_continuation_token().map(|s| s.to_string());
            } else {
                break;
            }
        }

        if delete {
            for key in &corrupted_keys {
                match s3_client.delete_object()
                    .bucket(bucket)
                    .key(key)
                    .send()
                    .await
                {
                    Ok(_) => deleted += 1,
                    Err(e) => {
                        println!("  Failed to delete {}: {}", key, e);
                    }
                }
            }
        }

        Ok(VerifyResult {
            total,
            valid,
            corrupted,
            deleted,
        })
    }

    #[cfg(not(feature = "s3"))]
    pub async fn verify_r2_integrity(&self, _delete: bool) -> Result<VerifyResult> {
        Err(anyhow::anyhow!("S3 feature not enabled"))
    }
}

/// Outcome of an integrity verification pass.
#[derive(Debug, Clone)]
pub struct VerifyResult {
    pub total: usize,
    pub valid: usize,
    pub corrupted: usize,
    pub deleted: usize,
}

#[derive(Debug)]
pub struct StorageStats {
    pub total_dags: usize,
    pub pinned_dags: usize,
    pub total_bytes: u64,
}

/// Tracked bytes split by priority band.
#[derive(Debug, Clone)]
pub struct StorageByPriority {
    /// Bytes in trees with priority >= PRIORITY_OWN.
    pub own: u64,
    /// Bytes in trees with priority >= PRIORITY_FOLLOWED.
    pub followed: u64,
    /// Bytes in all remaining trees.
    pub other: u64,
}

#[derive(Debug, Clone)]
pub struct FileChunkMetadata {
    pub total_size: u64,
    pub chunk_hashes: Vec<Hash>,
    pub chunk_sizes: Vec<u64>,
    pub is_chunked: bool,
}

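/// Chunk-by-chunk iterator over a byte range of a chunked file, created by
/// `HashtreeStore::stream_file_range_chunks_owned`. Holding an `Arc` to the
/// store lets the iterator outlive the caller's borrow, which suits streaming
/// response bodies.
///
/// A usage sketch (names are illustrative; error handling is elided):
///
/// ```ignore
/// let store: Arc<HashtreeStore> = Arc::new(HashtreeStore::new("./data")?);
/// if let Some(chunks) = store.clone().stream_file_range_chunks_owned(&hash, 0, 1023)? {
///     for chunk in chunks {
///         body.extend_from_slice(&chunk?); // each item is one trimmed chunk slice
///     }
/// }
/// ```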
pub struct FileRangeChunksOwned {
    store: Arc<HashtreeStore>,
    metadata: FileChunkMetadata,
    start: u64,
    end: u64,
    current_chunk_idx: usize,
    current_offset: u64,
}

impl Iterator for FileRangeChunksOwned {
    type Item = Result<Vec<u8>>;

    fn next(&mut self) -> Option<Self::Item> {
        if !self.metadata.is_chunked || self.current_chunk_idx >= self.metadata.chunk_hashes.len() {
            return None;
        }

        if self.current_offset > self.end {
            return None;
        }

        let chunk_hash = &self.metadata.chunk_hashes[self.current_chunk_idx];
        let chunk_size = self.metadata.chunk_sizes[self.current_chunk_idx];
        let chunk_end = self.current_offset + chunk_size - 1;

        self.current_chunk_idx += 1;

        // Skip chunks that lie entirely outside the requested range.
        if chunk_end < self.start || self.current_offset > self.end {
            self.current_offset += chunk_size;
            return self.next();
        }

        let chunk_content = match self.store.get_chunk(chunk_hash) {
            Ok(Some(content)) => content,
            Ok(None) => {
                return Some(Err(anyhow::anyhow!("Chunk {} not found", to_hex(chunk_hash))));
            }
            Err(e) => {
                return Some(Err(e));
            }
        };

        // Trim the chunk to its overlap with [start, end].
        let chunk_read_start = if self.current_offset >= self.start {
            0
        } else {
            (self.start - self.current_offset) as usize
        };

        let chunk_read_end = if chunk_end <= self.end {
            chunk_size as usize - 1
        } else {
            (self.end - self.current_offset) as usize
        };

        let result = chunk_content[chunk_read_start..=chunk_read_end].to_vec();
        self.current_offset += chunk_size;

        Some(Ok(result))
    }
}

#[derive(Debug)]
pub struct GcStats {
    pub deleted_dags: usize,
    pub freed_bytes: u64,
}

#[derive(Debug, Clone)]
pub struct DirEntry {
    pub name: String,
    pub cid: String,
    pub is_directory: bool,
    pub size: u64,
}

#[derive(Debug, Clone)]
pub struct DirectoryListing {
    pub dir_name: String,
    pub entries: Vec<DirEntry>,
}

#[derive(Debug, Clone)]
pub struct PinnedItem {
    pub cid: String,
    pub name: String,
    pub is_directory: bool,
}

#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct BlobMetadata {
    pub sha256: String,
    pub size: u64,
    pub mime_type: String,
    pub uploaded: u64,
}

impl crate::webrtc::ContentStore for HashtreeStore {
    fn get(&self, hash_hex: &str) -> Result<Option<Vec<u8>>> {
        let hash = from_hex(hash_hex)
            .map_err(|e| anyhow::anyhow!("Invalid hash: {}", e))?;
        self.get_chunk(&hash)
    }
}