1#![allow(clippy::type_complexity)]
8pub mod branch;
113pub mod commit;
114pub mod hybridstore;
115pub mod memoryrepo;
116pub mod objectstore;
117pub mod pile;
118
/// Storage backends that hold resources which must be released explicitly.
pub trait StorageClose {
    /// Error produced when releasing the underlying resources fails.
    type Error: std::error::Error;

    /// Consume the storage and release any held resources.
    fn close(self) -> Result<(), Self::Error>;
}
131
impl<Storage> Repository<Storage>
where
    Storage: BlobStore<Blake3> + BranchStore<Blake3> + StorageClose,
{
    /// Close the repository by closing its underlying storage backend.
    ///
    /// Consumes the repository; any error comes from the storage's `close`.
    pub fn close(self) -> Result<(), <Storage as StorageClose>::Error> {
        self.storage.close()
    }
}
147
148use crate::macros::pattern;
149use std::collections::{HashSet, VecDeque};
150use std::convert::Infallible;
151use std::error::Error;
152use std::fmt::Debug;
153use std::fmt::{self};
154
155use commit::commit_metadata;
156use hifitime::Epoch;
157use itertools::Itertools;
158
159use crate::blob::schemas::simplearchive::UnarchiveError;
160use crate::blob::schemas::UnknownBlob;
161use crate::blob::Blob;
162use crate::blob::BlobSchema;
163use crate::blob::MemoryBlobStore;
164use crate::blob::ToBlob;
165use crate::blob::TryFromBlob;
166use crate::find;
167use crate::id::ufoid;
168use crate::id::Id;
169use crate::metadata;
170use crate::patch::Entry;
171use crate::patch::IdentitySchema;
172use crate::patch::PATCH;
173use crate::prelude::valueschemas::GenId;
174use crate::repo::branch::branch_metadata;
175use crate::trible::TribleSet;
176use crate::value::schemas::hash::Handle;
177use crate::value::schemas::hash::HashProtocol;
178use crate::value::Value;
179use crate::value::ValueSchema;
180use crate::value::VALUE_LEN;
181use ed25519_dalek::SigningKey;
182
183use crate::blob::schemas::longstring::LongString;
184use crate::blob::schemas::simplearchive::SimpleArchive;
185use crate::prelude::*;
186use crate::value::schemas::ed25519 as ed;
187use crate::value::schemas::hash::Blake3;
188use crate::value::schemas::shortstring::ShortString;
189use crate::value::schemas::time::NsTAIInterval;
190
// Repository metadata attributes. Each entry binds a fixed 128-bit attribute
// id to a typed, named accessor used by the `pattern!`/`find!` queries in this
// module (commit content/parents/message, branch head, signatures, timestamps).
attributes! {
    "4DD4DDD05CC31734B03ABB4E43188B1F" as pub content: Handle<Blake3, SimpleArchive>;
    "317044B612C690000D798CA660ECFD2A" as pub parent: Handle<Blake3, SimpleArchive>;
    "B59D147839100B6ED4B165DF76EDF3BB" as pub message: Handle<Blake3, LongString>;
    "12290C0BE0E9207E324F24DDE0D89300" as pub short_message: ShortString;
    "272FBC56108F336C4D2E17289468C35F" as pub head: Handle<Blake3, SimpleArchive>;
    "8694CC73AF96A5E1C7635C677D1B928A" as pub branch: GenId;
    "71FF566AB4E3119FC2C5E66A18979586" as pub timestamp: NsTAIInterval;
    "ADB4FFAD247C886848161297EFF5A05B" as pub signed_by: ed::ED25519PublicKey;
    "9DF34F84959928F93A3C40AEB6E9E499" as pub signature_r: ed::ED25519RComponent;
    "1ACE03BF70242B289FDF00E4327C3BC6" as pub signature_s: ed::ED25519SComponent;
}
213
/// Enumerate every blob held by a blob store.
pub trait BlobStoreList<H: HashProtocol> {
    /// Iterator over the handles of all stored blobs.
    type Iter<'a>: Iterator<Item = Result<Value<Handle<H, UnknownBlob>>, Self::Err>>
    where
        Self: 'a;
    /// Error produced while enumerating blobs.
    type Err: Error + Debug + Send + Sync + 'static;

    /// Iterate over the handles of all blobs in the store.
    fn blobs<'a>(&'a self) -> Self::Iter<'a>;
}
224
/// Bookkeeping information a store tracks per blob.
#[derive(Debug, Clone)]
pub struct BlobMetadata {
    // Insertion time of the blob; unit/epoch is store-defined — TODO confirm.
    pub timestamp: u64,
    // Size of the stored blob in bytes.
    pub length: u64,
}
233
/// Query per-blob metadata from a blob store.
pub trait BlobStoreMeta<H: HashProtocol> {
    /// Error produced by metadata lookups.
    type MetaError: std::error::Error + Send + Sync + 'static;

    /// Look up metadata for `handle`; `Ok(None)` means the blob is unknown.
    fn metadata<S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<Option<BlobMetadata>, Self::MetaError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
247
/// Remove individual blobs from a blob store.
pub trait BlobStoreForget<H: HashProtocol> {
    /// Error produced when forgetting a blob fails.
    type ForgetError: std::error::Error + Send + Sync + 'static;

    /// Drop the blob identified by `handle` from the store.
    fn forget<S>(&mut self, handle: Value<Handle<H, S>>) -> Result<(), Self::ForgetError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
260
/// Read blobs out of a blob store.
pub trait BlobStoreGet<H: HashProtocol> {
    /// Lookup error, generic over the deserialization error of the requested
    /// target type `T`.
    type GetError<E: std::error::Error>: Error;

    /// Fetch the blob behind `handle` and convert it into a `T`.
    fn get<T, S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<T, Self::GetError<<T as TryFromBlob<S>>::Error>>
    where
        S: BlobSchema + 'static,
        T: TryFromBlob<S>,
        Handle<H, S>: ValueSchema;
}
282
/// Write blobs into a blob store.
pub trait BlobStorePut<H: HashProtocol> {
    /// Error produced when storing a blob fails.
    type PutError: Error + Debug + Send + Sync + 'static;

    /// Serialize `item` under schema `S`, store it, and return its handle.
    fn put<S, T>(&mut self, item: T) -> Result<Value<Handle<H, S>>, Self::PutError>
    where
        S: BlobSchema + 'static,
        T: ToBlob<S>,
        Handle<H, S>: ValueSchema;
}
293
/// A writable blob store that can hand out cloneable read handles.
pub trait BlobStore<H: HashProtocol>: BlobStorePut<H> {
    /// Read handle over the store's contents; cheap to clone and `'static`.
    type Reader: BlobStoreGet<H> + BlobStoreList<H> + Clone + Send + PartialEq + Eq + 'static;
    /// Error produced when opening a reader fails.
    type ReaderError: Error + Debug + Send + Sync + 'static;
    /// Open a reader over the store's current contents.
    fn reader(&mut self) -> Result<Self::Reader, Self::ReaderError>;
}
299
/// Retention support: mark a set of blobs as live.
pub trait BlobStoreKeep<H: HashProtocol> {
    /// Mark every handle in `handles` as kept; blobs outside the set may be
    /// reclaimed by the store.
    fn keep<I>(&mut self, handles: I)
    where
        I: IntoIterator<Item = Value<Handle<H, UnknownBlob>>>;
}
307
/// Outcome of a compare-and-swap branch update.
#[derive(Debug)]
pub enum PushResult<H>
where
    H: HashProtocol,
{
    /// The update was applied.
    Success(),
    /// The expected old head did not match; carries the current head, if any.
    Conflict(Option<Value<Handle<H, SimpleArchive>>>),
}
316
/// Storage for branch heads: named mutable pointers into the blob store.
pub trait BranchStore<H: HashProtocol> {
    /// Error produced while listing branches.
    type BranchesError: Error + Debug + Send + Sync + 'static;
    /// Error produced while reading a branch head.
    type HeadError: Error + Debug + Send + Sync + 'static;
    /// Error produced while updating a branch head.
    type UpdateError: Error + Debug + Send + Sync + 'static;

    /// Iterator over all branch ids.
    type ListIter<'a>: Iterator<Item = Result<Id, Self::BranchesError>>
    where
        Self: 'a;

    /// List the ids of all branches in the store.
    fn branches<'a>(&'a mut self) -> Result<Self::ListIter<'a>, Self::BranchesError>;

    /// Read the current head of branch `id`; `Ok(None)` if the branch is unknown.
    fn head(&mut self, id: Id) -> Result<Option<Value<Handle<H, SimpleArchive>>>, Self::HeadError>;

    /// Atomically move branch `id` from `old` to `new` (compare-and-swap).
    ///
    /// Implementations report `PushResult::Conflict` when the stored head
    /// differs from `old` rather than overwriting it.
    fn update(
        &mut self,
        id: Id,
        old: Option<Value<Handle<H, SimpleArchive>>>,
        new: Value<Handle<H, SimpleArchive>>,
    ) -> Result<PushResult<H>, Self::UpdateError>;
}
364
/// Error raised by [`transfer`], tagged with the phase that failed.
#[derive(Debug)]
pub enum TransferError<ListErr, LoadErr, StoreErr> {
    /// Enumerating blobs in the source failed.
    List(ListErr),
    /// Loading a blob from the source failed.
    Load(LoadErr),
    /// Storing a blob into the target failed.
    Store(StoreErr),
}
371
372impl<ListErr, LoadErr, StoreErr> fmt::Display for TransferError<ListErr, LoadErr, StoreErr> {
373 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
374 write!(f, "failed to transfer blob")
375 }
376}
377
impl<ListErr, LoadErr, StoreErr> Error for TransferError<ListErr, LoadErr, StoreErr>
where
    ListErr: Debug + Error + 'static,
    LoadErr: Debug + Error + 'static,
    StoreErr: Debug + Error + 'static,
{
    /// Expose the phase-specific inner error as this error's cause.
    // The arms cannot be merged into an or-pattern because each variant wraps
    // a different error type.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match self {
            Self::List(e) => Some(e),
            Self::Load(e) => Some(e),
            Self::Store(e) => Some(e),
        }
    }
}
392
/// Copy the blobs named by `handles` from `source` into `target`.
///
/// Returns a lazy iterator of `(source_handle, target_handle)` pairs, one per
/// blob; nothing is copied until the iterator is driven. The target handle may
/// differ from the source handle when the stores use different hash protocols.
pub fn transfer<'a, BS, BT, HS, HT, Handles>(
    source: &'a BS,
    target: &'a mut BT,
    handles: Handles,
) -> impl Iterator<
    Item = Result<
        (
            Value<Handle<HS, UnknownBlob>>,
            Value<Handle<HT, UnknownBlob>>,
        ),
        TransferError<
            Infallible,
            <BS as BlobStoreGet<HS>>::GetError<Infallible>,
            <BT as BlobStorePut<HT>>::PutError,
        >,
    >,
> + 'a
where
    BS: BlobStoreGet<HS> + 'a,
    BT: BlobStorePut<HT> + 'a,
    HS: 'static + HashProtocol,
    HT: 'static + HashProtocol,
    Handles: IntoIterator<Item = Value<Handle<HS, UnknownBlob>>> + 'a,
    Handles::IntoIter: 'a,
{
    handles.into_iter().map(move |source_handle| {
        // Blobs are moved as raw bytes; the target re-hashes them on `put`.
        let blob: Blob<UnknownBlob> = source.get(source_handle).map_err(TransferError::Load)?;
        let target_handle = target.put(blob).map_err(TransferError::Store)?;
        Ok((source_handle, target_handle))
    })
}
425
/// Iterator that walks blobs transitively reachable from a set of roots.
///
/// References are discovered heuristically by scanning blob contents for
/// 32-byte words that resolve to blobs present in the source store.
pub struct ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    // Store being traversed.
    source: &'a BS,
    // Frontier of handles still to visit.
    queue: VecDeque<Value<Handle<H, UnknownBlob>>>,
    // Raw handle values already processed, to break cycles.
    visited: HashSet<[u8; VALUE_LEN]>,
}
436
437impl<'a, BS, H> ReachableHandles<'a, BS, H>
438where
439 BS: BlobStoreGet<H>,
440 H: 'static + HashProtocol,
441{
442 fn new(source: &'a BS, roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>) -> Self {
443 let mut queue = VecDeque::new();
444 for handle in roots {
445 queue.push_back(handle);
446 }
447
448 Self {
449 source,
450 queue,
451 visited: HashSet::new(),
452 }
453 }
454
455 fn enqueue_from_blob(&mut self, blob: &Blob<UnknownBlob>) {
456 let bytes = blob.bytes.as_ref();
457 let mut offset = 0usize;
458
459 while offset + VALUE_LEN <= bytes.len() {
460 let mut raw = [0u8; VALUE_LEN];
461 raw.copy_from_slice(&bytes[offset..offset + VALUE_LEN]);
462
463 if !self.visited.contains(&raw) {
464 let candidate = Value::<Handle<H, UnknownBlob>>::new(raw);
465 if self
466 .source
467 .get::<anybytes::Bytes, UnknownBlob>(candidate)
468 .is_ok()
469 {
470 self.queue.push_back(candidate);
471 }
472 }
473
474 offset += VALUE_LEN;
475 }
476 }
477}
478
impl<'a, BS, H> Iterator for ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    type Item = Value<Handle<H, UnknownBlob>>;

    /// Yield the next reachable handle, expanding its blob's references first.
    fn next(&mut self) -> Option<Self::Item> {
        while let Some(handle) = self.queue.pop_front() {
            let raw = handle.raw;

            // `insert` returns false when the handle was already visited; the
            // queue may contain duplicates, so deduplicate here.
            if !self.visited.insert(raw) {
                continue;
            }

            // An unreadable blob is still yielded — it just contributes no
            // further references.
            if let Ok(blob) = self.source.get(handle) {
                self.enqueue_from_blob(&blob);
            }

            return Some(handle);
        }

        None
    }
}
504
/// Walk all blobs transitively reachable from `roots` in `source`.
///
/// Returns a lazy iterator; see [`ReachableHandles`] for how references are
/// discovered.
pub fn reachable<'a, BS, H>(
    source: &'a BS,
    roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>,
) -> ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    ReachableHandles::new(source, roots)
}
516
517pub fn potential_handles<'a, H>(
524 set: &'a TribleSet,
525) -> impl Iterator<Item = Value<Handle<H, UnknownBlob>>> + 'a
526where
527 H: HashProtocol,
528{
529 set.vae.iter().map(|raw| {
530 let mut value = [0u8; VALUE_LEN];
531 value.copy_from_slice(&raw[0..VALUE_LEN]);
532 Value::<Handle<H, UnknownBlob>>::new(value)
533 })
534}
535
/// Error raised while creating a commit.
#[derive(Debug)]
pub enum CreateCommitError<BlobErr: Error + Debug + Send + Sync + 'static> {
    /// Storing the commit's content blob failed.
    ContentStorageError(BlobErr),
    /// Storing the commit metadata blob failed.
    CommitStorageError(BlobErr),
}
545
impl<BlobErr: Error + Debug + Send + Sync + 'static> fmt::Display for CreateCommitError<BlobErr> {
    /// Human-readable message that includes the underlying storage error.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            CreateCommitError::ContentStorageError(e) => write!(f, "Content storage failed: {e}"),
            CreateCommitError::CommitStorageError(e) => {
                write!(f, "Commit metadata storage failed: {e}")
            }
        }
    }
}
556
557impl<BlobErr: Error + Debug + Send + Sync + 'static> Error for CreateCommitError<BlobErr> {
558 fn source(&self) -> Option<&(dyn Error + 'static)> {
559 match self {
560 CreateCommitError::ContentStorageError(e) => Some(e),
561 CreateCommitError::CommitStorageError(e) => Some(e),
562 }
563 }
564}
565
/// Error raised when two workspaces cannot be merged.
#[derive(Debug)]
pub enum MergeError {
    /// The workspaces do not share a common repository.
    DifferentRepos(),
}
571
/// Error raised while pushing a workspace to the repository.
#[derive(Debug)]
pub enum PushError<Storage: BranchStore<Blake3> + BlobStore<Blake3>> {
    /// Listing branches failed.
    StorageBranches(Storage::BranchesError),
    /// Opening a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// Updating the branch head failed.
    BranchUpdate(Storage::UpdateError),
    /// The stored branch metadata was malformed.
    BadBranchMetadata(),
    /// Merging a conflicting workspace failed.
    MergeError(MergeError),
}
591
592impl<Storage> From<MergeError> for PushError<Storage>
597where
598 Storage: BranchStore<Blake3> + BlobStore<Blake3>,
599{
600 fn from(e: MergeError) -> Self {
601 PushError::MergeError(e)
602 }
603}
604
/// Error raised while creating a branch.
#[derive(Debug)]
pub enum BranchError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Opening a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// Reading a branch head failed.
    BranchHead(Storage::HeadError),
    /// Updating a branch head failed.
    BranchUpdate(Storage::UpdateError),
    /// A branch with the generated id already exists.
    AlreadyExists(),
    /// No branch with the given id was found.
    BranchNotFound(Id),
}
633
/// Error raised while looking up a branch (e.g. by name).
#[derive(Debug)]
pub enum LookupError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Listing branches failed.
    StorageBranches(Storage::BranchesError),
    /// Reading a branch head failed.
    BranchHead(Storage::HeadError),
    /// Opening a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Multiple branches share the requested name; carries all matching ids.
    NameConflict(Vec<Id>),
    /// The stored branch metadata was malformed.
    BadBranchMetadata(),
}
649
/// A trible repository: a blob store plus branch heads, with a default key
/// for signing commits and branch metadata.
pub struct Repository<Storage: BlobStore<Blake3> + BranchStore<Blake3>> {
    // Combined blob + branch storage backend.
    storage: Storage,
    // Default key used to sign commits and branch metadata.
    signing_key: SigningKey,
}
660
/// Error raised while pulling a branch into a workspace.
pub enum PullError<BranchStorageErr, BlobReaderErr, BlobStorageErr>
where
    BranchStorageErr: Error,
    BlobReaderErr: Error,
    BlobStorageErr: Error,
{
    /// The requested branch id does not exist.
    BranchNotFound(Id),
    /// Reading the branch head failed.
    BranchStorage(BranchStorageErr),
    /// Opening a blob reader failed.
    BlobReader(BlobReaderErr),
    /// Reading the branch metadata blob failed.
    BlobStorage(BlobStorageErr),
    /// The stored branch metadata was malformed.
    BadBranchMetadata(),
}
678
impl<B, R, C> fmt::Debug for PullError<B, R, C>
where
    B: Error + fmt::Debug,
    R: Error + fmt::Debug,
    C: Error + fmt::Debug,
{
    /// Manual `Debug` mirroring what a derive would produce, keeping the
    /// bounds explicit on the impl instead of the enum definition.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            PullError::BranchNotFound(id) => f.debug_tuple("BranchNotFound").field(id).finish(),
            PullError::BranchStorage(e) => f.debug_tuple("BranchStorage").field(e).finish(),
            PullError::BlobReader(e) => f.debug_tuple("BlobReader").field(e).finish(),
            PullError::BlobStorage(e) => f.debug_tuple("BlobStorage").field(e).finish(),
            PullError::BadBranchMetadata() => f.debug_tuple("BadBranchMetadata").finish(),
        }
    }
}
695
696impl<Storage> Repository<Storage>
697where
698 Storage: BlobStore<Blake3> + BranchStore<Blake3>,
699{
700 pub fn new(storage: Storage, signing_key: SigningKey) -> Self {
711 Self {
712 storage,
713 signing_key,
714 }
715 }
716
717 pub fn into_storage(self) -> Storage {
723 self.storage
724 }
725
726 pub fn set_signing_key(&mut self, signing_key: SigningKey) {
728 self.signing_key = signing_key;
729 }
730
731 pub fn create_branch(
745 &mut self,
746 branch_name: &str,
747 commit: Option<CommitHandle>,
748 ) -> Result<ExclusiveId, BranchError<Storage>> {
749 self.create_branch_with_key(branch_name, commit, self.signing_key.clone())
750 }
751
752 pub fn create_branch_with_key(
754 &mut self,
755 branch_name: &str,
756 commit: Option<CommitHandle>,
757 signing_key: SigningKey,
758 ) -> Result<ExclusiveId, BranchError<Storage>> {
759 let branch_id = ufoid();
760
761 let branch_set = if let Some(commit) = commit {
762 let reader = self
763 .storage
764 .reader()
765 .map_err(|e| BranchError::StorageReader(e))?;
766 let set: TribleSet = reader.get(commit).map_err(|e| BranchError::StorageGet(e))?;
767
768 branch::branch_metadata(&signing_key, *branch_id, branch_name, Some(set.to_blob()))
769 } else {
770 branch::branch_unsigned(*branch_id, branch_name, None)
771 };
772
773 let branch_blob = branch_set.to_blob();
774 let branch_handle = self
775 .storage
776 .put(branch_blob)
777 .map_err(|e| BranchError::StoragePut(e))?;
778
779 let push_result = self
780 .storage
781 .update(*branch_id, None, branch_handle)
782 .map_err(|e| BranchError::BranchUpdate(e))?;
783
784 match push_result {
785 PushResult::Success() => Ok(branch_id),
786 PushResult::Conflict(_) => Err(BranchError::AlreadyExists()),
787 }
788 }
789
790 pub fn pull(
792 &mut self,
793 branch_id: Id,
794 ) -> Result<
795 Workspace<Storage>,
796 PullError<
797 Storage::HeadError,
798 Storage::ReaderError,
799 <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
800 >,
801 > {
802 self.pull_with_key(branch_id, self.signing_key.clone())
803 }
804
805 pub fn pull_with_key(
807 &mut self,
808 branch_id: Id,
809 signing_key: SigningKey,
810 ) -> Result<
811 Workspace<Storage>,
812 PullError<
813 Storage::HeadError,
814 Storage::ReaderError,
815 <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
816 >,
817 > {
818 let base_branch_meta_handle = match self.storage.head(branch_id) {
820 Ok(Some(handle)) => handle,
821 Ok(None) => return Err(PullError::BranchNotFound(branch_id)),
822 Err(e) => return Err(PullError::BranchStorage(e)),
823 };
824 let reader = self.storage.reader().map_err(PullError::BlobReader)?;
826 let base_branch_meta: TribleSet = match reader.get(base_branch_meta_handle) {
827 Ok(metadata) => metadata,
828 Err(e) => return Err(PullError::BlobStorage(e)),
829 };
830
831 let head_ = match find!(
832 (head_: Value<_>),
833 pattern!(&base_branch_meta, [{ head: ?head_ }])
834 )
835 .at_most_one()
836 {
837 Ok(Some((h,))) => Some(h),
838 Ok(None) => None,
839 Err(_) => return Err(PullError::BadBranchMetadata()),
840 };
841 let base_blobs = self.storage.reader().map_err(PullError::BlobReader)?;
843 Ok(Workspace {
844 base_blobs,
845 local_blobs: MemoryBlobStore::new(),
846 head: head_,
847 base_head: head_,
848 base_branch_id: branch_id,
849 base_branch_meta: base_branch_meta_handle,
850 signing_key,
851 })
852 }
853
854 pub fn push(&mut self, workspace: &mut Workspace<Storage>) -> Result<(), PushError<Storage>> {
858 while let Some(mut conflict_ws) = self.try_push(workspace)? {
863 conflict_ws.merge(workspace)?;
867
868 *workspace = conflict_ws;
873 }
874
875 Ok(())
876 }
877
878 pub fn try_push(
882 &mut self,
883 workspace: &mut Workspace<Storage>,
884 ) -> Result<Option<Workspace<Storage>>, PushError<Storage>> {
885 let workspace_reader = workspace.local_blobs.reader().unwrap();
887 for handle in workspace_reader.blobs() {
888 let handle = handle.expect("infallible blob enumeration");
889 let blob: Blob<UnknownBlob> =
890 workspace_reader.get(handle).expect("infallible blob read");
891 self.storage.put(blob).map_err(PushError::StoragePut)?;
892 }
893
894 if workspace.base_head == workspace.head {
899 return Ok(None);
900 }
901
902 let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
904 let base_branch_meta: TribleSet = repo_reader
905 .get(workspace.base_branch_meta)
906 .map_err(PushError::StorageGet)?;
907
908 let Ok((branch_name,)) = find!((name: Value<_>),
909 pattern!(base_branch_meta, [{ metadata::shortname: ?name }])
910 )
911 .exactly_one() else {
912 return Err(PushError::BadBranchMetadata());
913 };
914
915 let head_handle = workspace.head.ok_or(PushError::BadBranchMetadata())?;
916 let head_: TribleSet = repo_reader
917 .get(head_handle)
918 .map_err(PushError::StorageGet)?;
919
920 let branch_meta = branch_metadata(
921 &workspace.signing_key,
922 workspace.base_branch_id,
923 branch_name.from_value(),
924 Some(head_.to_blob()),
925 );
926
927 let branch_meta_handle = self
928 .storage
929 .put(branch_meta)
930 .map_err(PushError::StoragePut)?;
931
932 let result = self
934 .storage
935 .update(
936 workspace.base_branch_id,
937 Some(workspace.base_branch_meta),
938 branch_meta_handle,
939 )
940 .map_err(PushError::BranchUpdate)?;
941
942 match result {
943 PushResult::Success() => {
944 workspace.base_branch_meta = branch_meta_handle;
947 workspace.base_head = workspace.head;
948 workspace.base_blobs = self.storage.reader().map_err(PushError::StorageReader)?;
951 workspace.local_blobs = MemoryBlobStore::new();
955 Ok(None)
956 }
957 PushResult::Conflict(conflicting_meta) => {
958 let conflicting_meta = conflicting_meta.ok_or(PushError::BadBranchMetadata())?;
959
960 let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
961 let branch_meta: TribleSet = repo_reader
962 .get(conflicting_meta)
963 .map_err(PushError::StorageGet)?;
964
965 let head_ = match find!((head_: Value<_>),
966 pattern!(&branch_meta, [{ head: ?head_ }])
967 )
968 .at_most_one()
969 {
970 Ok(Some((h,))) => Some(h),
971 Ok(None) => None,
972 Err(_) => return Err(PushError::BadBranchMetadata()),
973 };
974
975 let conflict_ws = Workspace {
976 base_blobs: self.storage.reader().map_err(PushError::StorageReader)?,
977 local_blobs: MemoryBlobStore::new(),
978 head: head_,
979 base_head: head_,
980 base_branch_id: workspace.base_branch_id,
981 base_branch_meta: conflicting_meta,
982 signing_key: workspace.signing_key.clone(),
983 };
984
985 Ok(Some(conflict_ws))
986 }
987 }
988 }
989}
990
/// Content-addressed handle to a serialized commit record.
type CommitHandle = Value<Handle<Blake3, SimpleArchive>>;
/// Set of commit handles keyed by their raw 32-byte values.
type CommitSet = PATCH<VALUE_LEN, IdentitySchema, ()>;
/// Content-addressed handle to a serialized branch metadata record.
type BranchMetaHandle = Value<Handle<Blake3, SimpleArchive>>;
994
/// A mutable working copy of a branch.
///
/// Newly created blobs are buffered in an in-memory store until a push
/// uploads them and advances the branch head.
pub struct Workspace<Blobs: BlobStore<Blake3>> {
    // Blobs created locally and not yet pushed.
    local_blobs: MemoryBlobStore<Blake3>,
    // Read snapshot of the repository the workspace was pulled from.
    base_blobs: Blobs::Reader,
    // Id of the branch this workspace tracks.
    base_branch_id: Id,
    // Branch metadata record the workspace is based on (CAS expectation on push).
    base_branch_meta: BranchMetaHandle,
    // Current commit head of the workspace, if any.
    head: Option<CommitHandle>,
    // Commit head at pull/last-push time; differs from `head` when there are
    // unpushed commits.
    base_head: Option<CommitHandle>,
    // Key used to sign commits and branch metadata created by this workspace.
    signing_key: SigningKey,
}
1018
impl<Blobs> fmt::Debug for Workspace<Blobs>
where
    Blobs: BlobStore<Blake3>,
    Blobs::Reader: fmt::Debug,
{
    /// Manual `Debug` that deliberately omits the signing key so secret
    /// material never ends up in debug output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Workspace")
            .field("local_blobs", &self.local_blobs)
            .field("base_blobs", &self.base_blobs)
            .field("base_branch_id", &self.base_branch_id)
            .field("base_branch_meta", &self.base_branch_meta)
            .field("base_head", &self.base_head)
            .field("head", &self.head)
            .finish()
    }
}
1035
/// Strategy for choosing a set of commits from a workspace's history.
///
/// Implemented for single handles, collections, ranges, and the combinators
/// in this module (`union`, `intersect`, `difference`, `filter`, ...).
pub trait CommitSelector<Blobs: BlobStore<Blake3>> {
    /// Resolve the selector against `ws` to a concrete set of commit handles.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >;
}
1046
/// Selector: a commit together with all of its ancestors.
pub struct Ancestors(pub CommitHandle);

/// Select `commit` and every commit reachable from it via parent links.
pub fn ancestors(commit: CommitHandle) -> Ancestors {
    Ancestors(commit)
}
1054
/// Selector: the commit `n` steps up the first-parent chain.
pub struct NthAncestor(pub CommitHandle, pub usize);

/// Select the `n`-th ancestor of `commit`, following first parents.
pub fn nth_ancestor(commit: CommitHandle, n: usize) -> NthAncestor {
    NthAncestor(commit, n)
}
1062
/// Selector: the direct parents of a commit.
pub struct Parents(pub CommitHandle);

/// Select the direct parents of `commit`.
pub fn parents(commit: CommitHandle) -> Parents {
    Parents(commit)
}
1070
/// Selector: commits reachable from exactly one of two commits.
pub struct SymmetricDiff(pub CommitHandle, pub CommitHandle);

/// Select commits reachable from `a` or `b` but not from both.
pub fn symmetric_diff(a: CommitHandle, b: CommitHandle) -> SymmetricDiff {
    SymmetricDiff(a, b)
}
1079
/// Combinator: the union of two selectors' results.
pub struct Union<A, B> {
    left: A,
    right: B,
}

/// Select every commit chosen by either `left` or `right`.
pub fn union<A, B>(left: A, right: B) -> Union<A, B> {
    Union { left, right }
}
1090
/// Combinator: the intersection of two selectors' results.
pub struct Intersect<A, B> {
    left: A,
    right: B,
}

/// Select only commits chosen by both `left` and `right`.
pub fn intersect<A, B>(left: A, right: B) -> Intersect<A, B> {
    Intersect { left, right }
}
1101
/// Combinator: the set difference of two selectors' results.
pub struct Difference<A, B> {
    left: A,
    right: B,
}

/// Select commits chosen by `left` but not by `right`.
pub fn difference<A, B>(left: A, right: B) -> Difference<A, B> {
    Difference { left, right }
}
1113
/// Selector: commits whose timestamp falls within an epoch interval.
pub struct TimeRange(pub Epoch, pub Epoch);

/// Select commits timestamped between `start` and `end`.
pub fn time_range(start: Epoch, end: Epoch) -> TimeRange {
    TimeRange(start, end)
}
1121
/// Combinator: keep only commits whose metadata/payload pass a predicate.
pub struct Filter<S, F> {
    selector: S,
    filter: F,
}

/// Select commits chosen by `selector` for which `filter(meta, payload)` is true.
pub fn filter<S, F>(selector: S, filter: F) -> Filter<S, F> {
    Filter { selector, filter }
}
1132
impl<Blobs> CommitSelector<Blobs> for CommitHandle
where
    Blobs: BlobStore<Blake3>,
{
    /// A single handle selects exactly itself.
    fn select(
        self,
        _ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let mut patch = CommitSet::new();
        patch.insert(&Entry::new(&self.raw));
        Ok(patch)
    }
}
1149
1150impl<Blobs> CommitSelector<Blobs> for Vec<CommitHandle>
1151where
1152 Blobs: BlobStore<Blake3>,
1153{
1154 fn select(
1155 self,
1156 _ws: &mut Workspace<Blobs>,
1157 ) -> Result<
1158 CommitSet,
1159 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1160 > {
1161 let mut patch = CommitSet::new();
1162 for handle in self {
1163 patch.insert(&Entry::new(&handle.raw));
1164 }
1165 Ok(patch)
1166 }
1167}
1168
impl<Blobs> CommitSelector<Blobs> for &[CommitHandle]
where
    Blobs: BlobStore<Blake3>,
{
    /// Select exactly the commits listed in this slice.
    fn select(
        self,
        _ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let mut patch = CommitSet::new();
        for handle in self {
            patch.insert(&Entry::new(&handle.raw));
        }
        Ok(patch)
    }
}
1187
impl<Blobs> CommitSelector<Blobs> for Option<CommitHandle>
where
    Blobs: BlobStore<Blake3>,
{
    /// `Some(handle)` selects that commit; `None` selects the empty set.
    fn select(
        self,
        _ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let mut patch = CommitSet::new();
        if let Some(handle) = self {
            patch.insert(&Entry::new(&handle.raw));
        }
        Ok(patch)
    }
}
1206
impl<Blobs> CommitSelector<Blobs> for Ancestors
where
    Blobs: BlobStore<Blake3>,
{
    /// Select the starting commit plus everything reachable via parent links.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        collect_reachable(ws, self.0)
    }
}
1221
1222impl<Blobs> CommitSelector<Blobs> for NthAncestor
1223where
1224 Blobs: BlobStore<Blake3>,
1225{
1226 fn select(
1227 self,
1228 ws: &mut Workspace<Blobs>,
1229 ) -> Result<
1230 CommitSet,
1231 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1232 > {
1233 let mut current = self.0;
1234 let mut remaining = self.1;
1235
1236 while remaining > 0 {
1237 let meta: TribleSet = ws.get(current).map_err(WorkspaceCheckoutError::Storage)?;
1238 let mut parents = find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }]));
1239 let Some((p,)) = parents.next() else {
1240 return Ok(CommitSet::new());
1241 };
1242 current = p;
1243 remaining -= 1;
1244 }
1245
1246 let mut patch = CommitSet::new();
1247 patch.insert(&Entry::new(¤t.raw));
1248 Ok(patch)
1249 }
1250}
1251
impl<Blobs> CommitSelector<Blobs> for Parents
where
    Blobs: BlobStore<Blake3>,
{
    /// Select the direct parents of the commit (empty for a root commit).
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let meta: TribleSet = ws.get(self.0).map_err(WorkspaceCheckoutError::Storage)?;
        let mut result = CommitSet::new();
        for (p,) in find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }])) {
            result.insert(&Entry::new(&p.raw));
        }
        Ok(result)
    }
}
1271
impl<Blobs> CommitSelector<Blobs> for SymmetricDiff
where
    Blobs: BlobStore<Blake3>,
{
    /// Compute the symmetric difference of the two reachable sets as
    /// (a ∪ b) ∖ (a ∩ b).
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let a = collect_reachable(ws, self.0)?;
        let b = collect_reachable(ws, self.1)?;
        let inter = a.intersect(&b);
        let mut union = a;
        union.union(b);
        Ok(union.difference(&inter))
    }
}
1291
impl<A, B, Blobs> CommitSelector<Blobs> for Union<A, B>
where
    A: CommitSelector<Blobs>,
    B: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    /// Union of both sub-selections; merged in place into the left result.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let mut left = self.left.select(ws)?;
        let right = self.right.select(ws)?;
        left.union(right);
        Ok(left)
    }
}
1311
impl<A, B, Blobs> CommitSelector<Blobs> for Intersect<A, B>
where
    A: CommitSelector<Blobs>,
    B: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    /// Intersection of both sub-selections.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let left = self.left.select(ws)?;
        let right = self.right.select(ws)?;
        Ok(left.intersect(&right))
    }
}
1330
impl<A, B, Blobs> CommitSelector<Blobs> for Difference<A, B>
where
    A: CommitSelector<Blobs>,
    B: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    /// Commits selected by `left` but not by `right`.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let left = self.left.select(ws)?;
        let right = self.right.select(ws)?;
        Ok(left.difference(&right))
    }
}
1349
impl<S, F, Blobs> CommitSelector<Blobs> for Filter<S, F>
where
    Blobs: BlobStore<Blake3>,
    S: CommitSelector<Blobs>,
    F: for<'x, 'y> Fn(&'x TribleSet, &'y TribleSet) -> bool,
{
    /// Run the inner selector, then keep only the commits whose metadata and
    /// content payload satisfy the predicate.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let patch = self.selector.select(ws)?;
        let mut result = CommitSet::new();
        let filter = self.filter;
        for raw in patch.iter() {
            let handle = Value::new(*raw);
            let meta: TribleSet = ws.get(handle).map_err(WorkspaceCheckoutError::Storage)?;

            // Each commit must reference exactly one content blob; anything
            // else is malformed metadata.
            let Ok((content_handle,)) = find!(
                (c: Value<_>),
                pattern!(&meta, [{ content: ?c }])
            )
            .exactly_one() else {
                return Err(WorkspaceCheckoutError::BadCommitMetadata());
            };

            let payload: TribleSet = ws
                .get(content_handle)
                .map_err(WorkspaceCheckoutError::Storage)?;

            if filter(&meta, &payload) {
                result.insert(&Entry::new(raw));
            }
        }
        Ok(result)
    }
}
1389
/// Selector: all head-ancestor commits whose payload touches a given entity.
pub struct HistoryOf(pub Id);

/// Select every commit in the current head's history that mentions `entity`.
pub fn history_of(entity: Id) -> HistoryOf {
    HistoryOf(entity)
}
1397
impl<Blobs> CommitSelector<Blobs> for HistoryOf
where
    Blobs: BlobStore<Blake3>,
{
    /// Filter the head's ancestors down to commits whose content payload
    /// contains at least one trible with the target entity id.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let head_ = ws.head.ok_or(WorkspaceCheckoutError::NoHead)?;
        let entity = self.0;
        filter(
            ancestors(head_),
            move |_: &TribleSet, payload: &TribleSet| payload.iter().any(|t| t.e() == &entity),
        )
        .select(ws)
    }
}
1418
/// Union of the reachable sets of every commit in `patch`.
fn collect_reachable_from_patch<Blobs: BlobStore<Blake3>>(
    ws: &mut Workspace<Blobs>,
    patch: CommitSet,
) -> Result<
    CommitSet,
    WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
> {
    let mut result = CommitSet::new();
    for raw in patch.iter() {
        let handle = Value::new(*raw);
        let reach = collect_reachable(ws, handle)?;
        result.union(reach);
    }
    Ok(result)
}
1441
1442fn collect_reachable_from_patch_until<Blobs: BlobStore<Blake3>>(
1443 ws: &mut Workspace<Blobs>,
1444 seeds: CommitSet,
1445 stop: &CommitSet,
1446) -> Result<
1447 CommitSet,
1448 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1449> {
1450 let mut visited = HashSet::new();
1451 let mut stack: Vec<CommitHandle> = seeds.iter().map(|raw| Value::new(*raw)).collect();
1452 let mut result = CommitSet::new();
1453
1454 while let Some(commit) = stack.pop() {
1455 if !visited.insert(commit) {
1456 continue;
1457 }
1458
1459 if stop.get(&commit.raw).is_some() {
1460 continue;
1461 }
1462
1463 result.insert(&Entry::new(&commit.raw));
1464
1465 let meta: TribleSet = ws
1466 .local_blobs
1467 .reader()
1468 .unwrap()
1469 .get(commit)
1470 .or_else(|_| ws.base_blobs.get(commit))
1471 .map_err(WorkspaceCheckoutError::Storage)?;
1472
1473 for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
1474 stack.push(p);
1475 }
1476 }
1477
1478 Ok(result)
1479}
1480
impl<T, Blobs> CommitSelector<Blobs> for std::ops::Range<T>
where
    T: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    /// `start..end`: commits reachable from `end`'s selection, stopping at
    /// (and excluding) commits in `start`'s selection.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let end_patch = self.end.select(ws)?;
        let start_patch = self.start.select(ws)?;

        collect_reachable_from_patch_until(ws, end_patch, &start_patch)
    }
}
1499
1500impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeFrom<T>
1501where
1502 T: CommitSelector<Blobs>,
1503 Blobs: BlobStore<Blake3>,
1504{
1505 fn select(
1506 self,
1507 ws: &mut Workspace<Blobs>,
1508 ) -> Result<
1509 CommitSet,
1510 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1511 > {
1512 let head_ = ws.head.ok_or(WorkspaceCheckoutError::NoHead)?;
1513 let exclude_patch = self.start.select(ws)?;
1514
1515 let mut head_patch = CommitSet::new();
1516 head_patch.insert(&Entry::new(&head_.raw));
1517
1518 collect_reachable_from_patch_until(ws, head_patch, &exclude_patch)
1519 }
1520}
1521
1522impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeTo<T>
1523where
1524 T: CommitSelector<Blobs>,
1525 Blobs: BlobStore<Blake3>,
1526{
1527 fn select(
1528 self,
1529 ws: &mut Workspace<Blobs>,
1530 ) -> Result<
1531 CommitSet,
1532 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1533 > {
1534 let end_patch = self.end.select(ws)?;
1535 collect_reachable_from_patch(ws, end_patch)
1536 }
1537}
1538
1539impl<Blobs> CommitSelector<Blobs> for std::ops::RangeFull
1540where
1541 Blobs: BlobStore<Blake3>,
1542{
1543 fn select(
1544 self,
1545 ws: &mut Workspace<Blobs>,
1546 ) -> Result<
1547 CommitSet,
1548 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1549 > {
1550 let head_ = ws.head.ok_or(WorkspaceCheckoutError::NoHead)?;
1551 collect_reachable(ws, head_)
1552 }
1553}
1554
/// Selects ancestors of HEAD whose commit timestamp interval overlaps the
/// requested `(self.0, self.1)` range.
impl<Blobs> CommitSelector<Blobs> for TimeRange
where
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // Walking starts at the current HEAD; an empty workspace has no
        // history to filter.
        let head_ = ws.head.ok_or(WorkspaceCheckoutError::NoHead)?;
        let start = self.0;
        let end = self.1;
        filter(
            ancestors(head_),
            move |meta: &TribleSet, _payload: &TribleSet| {
                // A commit is only considered when it carries exactly one
                // `timestamp` attribute; zero or multiple timestamps exclude
                // it from the selection.
                if let Ok(Some((ts,))) =
                    find!((t: Value<_>), pattern!(meta, [{ timestamp: ?t }])).at_most_one()
                {
                    // The stored value decodes to an (Epoch, Epoch) interval
                    // (see the NsTAIInterval schema import); keep the commit
                    // when that interval overlaps [start, end].
                    let (ts_start, ts_end): (Epoch, Epoch) =
                        crate::value::FromValue::from_value(&ts);
                    ts_start <= end && ts_end >= start
                } else {
                    false
                }
            },
        )
        .select(ws)
    }
}
1586
impl<Blobs: BlobStore<Blake3>> Workspace<Blobs> {
    /// Returns the id of the branch this workspace was opened from.
    pub fn branch_id(&self) -> Id {
        self.base_branch_id
    }

    /// Returns the current HEAD commit, or `None` if the workspace has no
    /// commits yet.
    pub fn head(&self) -> Option<CommitHandle> {
        self.head
    }

    /// Stores `item` as a blob in the workspace-local store and returns its
    /// content-addressed handle.
    ///
    /// The local store's put is treated as infallible; a failure here is a
    /// broken invariant, hence the `expect`.
    pub fn put<S, T>(&mut self, item: T) -> Value<Handle<Blake3, S>>
    where
        S: BlobSchema + 'static,
        T: ToBlob<S>,
        Handle<Blake3, S>: ValueSchema,
    {
        self.local_blobs.put(item).expect("infallible blob put")
    }

    /// Loads the blob behind `handle` and converts it to `T`.
    ///
    /// The local store is consulted first; on any local failure the base
    /// store is tried, so the error returned is the base store's.
    pub fn get<T, S>(
        &mut self,
        handle: Value<Handle<Blake3, S>>,
    ) -> Result<T, <Blobs::Reader as BlobStoreGet<Blake3>>::GetError<<T as TryFromBlob<S>>::Error>>
    where
        S: BlobSchema + 'static,
        T: TryFromBlob<S>,
        Handle<Blake3, S>: ValueSchema,
    {
        // NOTE(review): local reader creation is assumed infallible here —
        // confirm against the reader's error type.
        self.local_blobs
            .reader()
            .unwrap()
            .get(handle)
            .or_else(|_| self.base_blobs.get(handle))
    }

    /// Records a new commit with `content_` as payload and an optional
    /// `message_`, advancing HEAD to the new commit. The current HEAD (if
    /// any) becomes the sole parent.
    pub fn commit(&mut self, content_: TribleSet, message_: Option<&str>) {
        let content_blob = content_.to_blob();
        // The message, when present, is stored as its own LongString blob and
        // referenced by handle from the commit metadata.
        let message_handle = message_.map(|m| self.put::<LongString, String>(m.to_string()));
        let parents = self.head.iter().copied();

        let commit_set = crate::repo::commit::commit_metadata(
            &self.signing_key,
            parents,
            message_handle,
            Some(content_blob.clone()),
        );
        // The content blob is persisted before the commit blob so the commit
        // never references content that was not stored. Its handle is
        // discarded: the commit metadata already embeds it.
        let _ = self
            .local_blobs
            .put(content_blob)
            .expect("failed to put content blob");
        let commit_handle = self
            .local_blobs
            .put(commit_set)
            .expect("failed to put commit blob");
        self.head = Some(commit_handle);
    }

    /// Merges `other` into this workspace: copies all of `other`'s local
    /// blobs, then creates a merge commit whose parents are both HEADs
    /// (self first, then other). Returns the new merge commit's handle.
    pub fn merge(&mut self, other: &mut Workspace<Blobs>) -> Result<CommitHandle, MergeError> {
        // Copy every local blob from `other` so the merged history is fully
        // materialized in this workspace's local store.
        let other_local = other.local_blobs.reader().unwrap();
        for r in other_local.blobs() {
            let handle = r.expect("infallible blob enumeration");
            let blob: Blob<UnknownBlob> = other_local.get(handle).expect("infallible blob read");

            self.local_blobs.put(blob).expect("infallible blob put");
        }
        let parents = self.head.iter().copied().chain(other.head.iter().copied());
        // Merge commits carry neither a message nor content of their own.
        let merge_commit = commit_metadata(
            &self.signing_key,
            parents,
            None, None, );
        let commit_handle = self
            .local_blobs
            .put(merge_commit)
            .expect("failed to put merge commit blob");
        self.head = Some(commit_handle);

        Ok(commit_handle)
    }

    /// Creates a merge commit with the current HEAD (if any) and the given
    /// commit handle as parents, advancing HEAD to it.
    ///
    /// Unlike [`Workspace::merge`], no blobs are copied — the caller is
    /// responsible for `other`'s history being resolvable.
    pub fn merge_commit(
        &mut self,
        other: Value<Handle<Blake3, SimpleArchive>>,
    ) -> Result<CommitHandle, MergeError> {
        let parents = self.head.iter().copied().chain(Some(other));
        // Message and content are both absent for merge commits.
        let merge_commit = commit_metadata(&self.signing_key, parents, None, None);
        let commit_handle = self
            .local_blobs
            .put(merge_commit)
            .expect("failed to put merge commit blob");
        self.head = Some(commit_handle);
        Ok(commit_handle)
    }

    /// Unions the content payloads of the given commits into one
    /// [`TribleSet`]. Commits without a `content` attribute contribute
    /// nothing; a commit with more than one `content` handle is rejected.
    fn checkout_commits<I>(
        &mut self,
        commits: I,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        I: IntoIterator<Item = CommitHandle>,
    {
        // One reader for the whole loop; blobs are only read here.
        let local = self.local_blobs.reader().unwrap();
        let mut result = TribleSet::new();
        for commit in commits {
            // Metadata lookup falls back from the local to the base store.
            let meta: TribleSet = local
                .get(commit)
                .or_else(|_| self.base_blobs.get(commit))
                .map_err(WorkspaceCheckoutError::Storage)?;

            // At most one content handle is allowed per commit; more than one
            // is malformed metadata.
            let content_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };

            if let Some(c) = content_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                result.union(set);
            } else {
                // Content-less commits (e.g. merges) are skipped.
                continue;
            }
        }
        Ok(result)
    }

    /// Materializes the combined content of all commits matched by `spec`
    /// (any [`CommitSelector`], e.g. a range or time filter).
    pub fn checkout<R>(
        &mut self,
        spec: R,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        R: CommitSelector<Blobs>,
    {
        let patch = spec.select(self)?;
        let commits = patch.iter().map(|raw| Value::new(*raw));
        self.checkout_commits(commits)
    }
}
1791
/// Errors surfaced while selecting or materializing commits from a workspace.
#[derive(Debug)]
pub enum WorkspaceCheckoutError<GetErr: Error> {
    /// The underlying blob store failed to produce a requested blob.
    Storage(GetErr),
    /// Commit metadata did not contain exactly one `content` handle.
    BadCommitMetadata(),
    /// The workspace has no HEAD commit to walk from.
    NoHead,
}
1801
1802impl<E: Error + fmt::Debug> fmt::Display for WorkspaceCheckoutError<E> {
1803 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1804 match self {
1805 WorkspaceCheckoutError::Storage(e) => write!(f, "storage error: {e}"),
1806 WorkspaceCheckoutError::BadCommitMetadata() => {
1807 write!(f, "commit metadata missing content field")
1808 }
1809 WorkspaceCheckoutError::NoHead => write!(f, "workspace has no commits"),
1810 }
1811 }
1812}
1813
// Marker impl: `Display`/`Debug` supply the messages; the default `source`
// (None) is kept.
impl<E: Error + fmt::Debug> Error for WorkspaceCheckoutError<E> {}
1815
1816fn collect_reachable<Blobs: BlobStore<Blake3>>(
1817 ws: &mut Workspace<Blobs>,
1818 from: CommitHandle,
1819) -> Result<
1820 CommitSet,
1821 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1822> {
1823 let mut visited = HashSet::new();
1824 let mut stack = vec![from];
1825 let mut result = CommitSet::new();
1826
1827 while let Some(commit) = stack.pop() {
1828 if !visited.insert(commit) {
1829 continue;
1830 }
1831 result.insert(&Entry::new(&commit.raw));
1832
1833 let meta: TribleSet = ws
1834 .local_blobs
1835 .reader()
1836 .unwrap()
1837 .get(commit)
1838 .or_else(|_| ws.base_blobs.get(commit))
1839 .map_err(WorkspaceCheckoutError::Storage)?;
1840
1841 for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
1842 stack.push(p);
1843 }
1844 }
1845
1846 Ok(result)
1847}