1#![allow(clippy::type_complexity)]
8pub mod branch;
113pub mod commit;
114pub mod hybridstore;
115pub mod memoryrepo;
116pub mod objectstore;
117pub mod pile;
118
/// Storage backends that must be shut down explicitly (flushing buffers,
/// releasing file locks, etc.) implement this to surface close failures.
pub trait StorageClose {
    /// Error produced when the backend fails to shut down cleanly.
    type Error: std::error::Error;

    /// Consumes the storage and releases its underlying resources.
    fn close(self) -> Result<(), Self::Error>;
}
131
impl<Storage> Repository<Storage>
where
    Storage: BlobStore<Blake3> + BranchStore<Blake3> + StorageClose,
{
    /// Consumes the repository and closes its storage backend, propagating
    /// any shutdown error from the backend.
    pub fn close(self) -> Result<(), <Storage as StorageClose>::Error> {
        self.storage.close()
    }
}
147
148use crate::macros::pattern;
149use std::collections::{HashSet, VecDeque};
150use std::convert::Infallible;
151use std::error::Error;
152use std::fmt::Debug;
153use std::fmt::{self};
154
155use commit::commit_metadata;
156use hifitime::Epoch;
157use itertools::Itertools;
158
159use crate::blob::schemas::simplearchive::UnarchiveError;
160use crate::blob::schemas::UnknownBlob;
161use crate::blob::Blob;
162use crate::blob::BlobSchema;
163use crate::blob::MemoryBlobStore;
164use crate::blob::ToBlob;
165use crate::blob::TryFromBlob;
166use crate::find;
167use crate::id::genid;
168use crate::id::Id;
169use crate::patch::Entry;
170use crate::patch::IdentitySchema;
171use crate::patch::PATCH;
172use crate::prelude::valueschemas::GenId;
173use crate::repo::branch::branch_metadata;
174use crate::trible::TribleSet;
175use crate::value::schemas::hash::Handle;
176use crate::value::schemas::hash::HashProtocol;
177use crate::value::Value;
178use crate::value::ValueSchema;
179use crate::value::VALUE_LEN;
180use ed25519_dalek::SigningKey;
181
182use crate::blob::schemas::longstring::LongString;
183use crate::blob::schemas::simplearchive::SimpleArchive;
184use crate::prelude::*;
185use crate::value::schemas::ed25519 as ed;
186use crate::value::schemas::hash::Blake3;
187use crate::value::schemas::shortstring::ShortString;
188use crate::value::schemas::time::NsTAIInterval;
189
// Well-known attribute ids used by commit and branch metadata archives.
// Plain `//` comments only: doc comments would become tokens passed to the macro.
attributes! {
    // Handle of the commit's content archive.
    "4DD4DDD05CC31734B03ABB4E43188B1F" as pub content: Handle<Blake3, SimpleArchive>;
    // Handle of an auxiliary metadata archive attached to a commit.
    "88B59BD497540AC5AECDB7518E737C87" as pub metadata: Handle<Blake3, SimpleArchive>;
    // Handle of a parent commit; commits may carry several of these.
    "317044B612C690000D798CA660ECFD2A" as pub parent: Handle<Blake3, SimpleArchive>;
    // Long-form commit message stored as a separate blob.
    "B59D147839100B6ED4B165DF76EDF3BB" as pub message: Handle<Blake3, LongString>;
    // Short inline commit message.
    "12290C0BE0E9207E324F24DDE0D89300" as pub short_message: ShortString;
    // Branch head: handle of the commit the branch currently points at.
    "272FBC56108F336C4D2E17289468C35F" as pub head: Handle<Blake3, SimpleArchive>;
    // Identity of the branch an entity belongs to.
    "8694CC73AF96A5E1C7635C677D1B928A" as pub branch: GenId;
    // Creation time recorded as a TAI nanosecond interval.
    "71FF566AB4E3119FC2C5E66A18979586" as pub timestamp: NsTAIInterval;
    // Ed25519 public key of the signer.
    "ADB4FFAD247C886848161297EFF5A05B" as pub signed_by: ed::ED25519PublicKey;
    // Ed25519 signature, split into its R and S components.
    "9DF34F84959928F93A3C40AEB6E9E499" as pub signature_r: ed::ED25519RComponent;
    "1ACE03BF70242B289FDF00E4327C3BC6" as pub signature_s: ed::ED25519SComponent;
}
214
/// Enumeration of every blob handle a store currently holds.
pub trait BlobStoreList<H: HashProtocol> {
    /// Iterator over handles; each item may independently fail with `Err`.
    type Iter<'a>: Iterator<Item = Result<Value<Handle<H, UnknownBlob>>, Self::Err>>
    where
        Self: 'a;
    /// Error produced while enumerating blobs.
    type Err: Error + Debug + Send + Sync + 'static;

    /// Returns an iterator over all blob handles in the store.
    fn blobs<'a>(&'a self) -> Self::Iter<'a>;
}
225
/// Store-level bookkeeping for a single blob.
#[derive(Debug, Clone)]
pub struct BlobMetadata {
    // When the blob was recorded; unit/epoch is backend-defined — TODO confirm.
    pub timestamp: u64,
    // Size of the blob in bytes.
    pub length: u64,
}
234
/// Optional capability: query per-blob metadata from a store.
pub trait BlobStoreMeta<H: HashProtocol> {
    /// Error produced by a metadata lookup.
    type MetaError: std::error::Error + Send + Sync + 'static;

    /// Returns metadata for `handle`, or `Ok(None)` if the blob is unknown.
    fn metadata<S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<Option<BlobMetadata>, Self::MetaError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
248
/// Optional capability: drop a single blob from a store.
pub trait BlobStoreForget<H: HashProtocol> {
    /// Error produced while removing a blob.
    type ForgetError: std::error::Error + Send + Sync + 'static;

    /// Removes the blob identified by `handle` from the store.
    fn forget<S>(&mut self, handle: Value<Handle<H, S>>) -> Result<(), Self::ForgetError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
261
/// Read access to a blob store: fetch a blob by handle and decode it.
pub trait BlobStoreGet<H: HashProtocol> {
    /// Error type, parameterized over the decode error of the target type.
    type GetError<E: std::error::Error>: Error;

    /// Fetches the blob for `handle` and converts it to `T` via `TryFromBlob`.
    fn get<T, S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<T, Self::GetError<<T as TryFromBlob<S>>::Error>>
    where
        S: BlobSchema + 'static,
        T: TryFromBlob<S>,
        Handle<H, S>: ValueSchema;
}
283
/// Write access to a blob store: insert a value and get back its handle.
pub trait BlobStorePut<H: HashProtocol> {
    /// Error produced while storing a blob.
    type PutError: Error + Debug + Send + Sync + 'static;

    /// Encodes `item` via `ToBlob` and stores it, returning its content handle.
    fn put<S, T>(&mut self, item: T) -> Result<Value<Handle<H, S>>, Self::PutError>
    where
        S: BlobSchema + 'static,
        T: ToBlob<S>,
        Handle<H, S>: ValueSchema;
}
294
/// A full blob store: writable, and able to hand out snapshot readers.
pub trait BlobStore<H: HashProtocol>: BlobStorePut<H> {
    /// Cheaply cloneable read handle over the store's current contents.
    type Reader: BlobStoreGet<H> + BlobStoreList<H> + Clone + Send + PartialEq + Eq + 'static;
    /// Error produced while creating a reader.
    type ReaderError: Error + Debug + Send + Sync + 'static;
    /// Returns a reader over the store's current contents.
    fn reader(&mut self) -> Result<Self::Reader, Self::ReaderError>;
}
300
/// Optional capability: mark a set of handles as live (e.g. for GC retention).
pub trait BlobStoreKeep<H: HashProtocol> {
    /// Marks every handle in `handles` as one the store must retain.
    fn keep<I>(&mut self, handles: I)
    where
        I: IntoIterator<Item = Value<Handle<H, UnknownBlob>>>;
}
308
/// Outcome of a compare-and-swap branch update.
#[derive(Debug)]
pub enum PushResult<H>
where
    H: HashProtocol,
{
    /// The expected old head matched; the update was applied.
    Success(),
    /// The head had moved; carries the current head (if any) for retry.
    Conflict(Option<Value<Handle<H, SimpleArchive>>>),
}
317
/// Storage of branch heads: named pointers to branch-metadata archives,
/// updated with compare-and-swap semantics.
pub trait BranchStore<H: HashProtocol> {
    /// Error while listing branches.
    type BranchesError: Error + Debug + Send + Sync + 'static;
    /// Error while reading a branch head.
    type HeadError: Error + Debug + Send + Sync + 'static;
    /// Error while updating a branch head.
    type UpdateError: Error + Debug + Send + Sync + 'static;

    /// Iterator over branch ids; each item may independently fail.
    type ListIter<'a>: Iterator<Item = Result<Id, Self::BranchesError>>
    where
        Self: 'a;

    /// Lists the ids of all branches known to the store.
    fn branches<'a>(&'a mut self) -> Result<Self::ListIter<'a>, Self::BranchesError>;

    /// Returns the current head of branch `id`, or `Ok(None)` if it doesn't exist.
    fn head(&mut self, id: Id) -> Result<Option<Value<Handle<H, SimpleArchive>>>, Self::HeadError>;

    /// Atomically moves branch `id` from `old` to `new`; a mismatch between
    /// `old` and the stored head yields `PushResult::Conflict`.
    fn update(
        &mut self,
        id: Id,
        old: Option<Value<Handle<H, SimpleArchive>>>,
        new: Option<Value<Handle<H, SimpleArchive>>>,
    ) -> Result<PushResult<H>, Self::UpdateError>;
}
365
/// Failure while copying blobs between two stores; one variant per phase.
#[derive(Debug)]
pub enum TransferError<ListErr, LoadErr, StoreErr> {
    /// Enumerating the source store failed.
    List(ListErr),
    /// Reading a blob from the source failed.
    Load(LoadErr),
    /// Writing a blob into the target failed.
    Store(StoreErr),
}
372
373impl<ListErr, LoadErr, StoreErr> fmt::Display for TransferError<ListErr, LoadErr, StoreErr> {
374 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
375 write!(f, "failed to transfer blob")
376 }
377}
378
379impl<ListErr, LoadErr, StoreErr> Error for TransferError<ListErr, LoadErr, StoreErr>
380where
381 ListErr: Debug + Error + 'static,
382 LoadErr: Debug + Error + 'static,
383 StoreErr: Debug + Error + 'static,
384{
385 fn source(&self) -> Option<&(dyn Error + 'static)> {
386 match self {
387 Self::List(e) => Some(e),
388 Self::Load(e) => Some(e),
389 Self::Store(e) => Some(e),
390 }
391 }
392}
393
394pub fn transfer<'a, BS, BT, HS, HT, Handles>(
396 source: &'a BS,
397 target: &'a mut BT,
398 handles: Handles,
399) -> impl Iterator<
400 Item = Result<
401 (
402 Value<Handle<HS, UnknownBlob>>,
403 Value<Handle<HT, UnknownBlob>>,
404 ),
405 TransferError<
406 Infallible,
407 <BS as BlobStoreGet<HS>>::GetError<Infallible>,
408 <BT as BlobStorePut<HT>>::PutError,
409 >,
410 >,
411> + 'a
412where
413 BS: BlobStoreGet<HS> + 'a,
414 BT: BlobStorePut<HT> + 'a,
415 HS: 'static + HashProtocol,
416 HT: 'static + HashProtocol,
417 Handles: IntoIterator<Item = Value<Handle<HS, UnknownBlob>>> + 'a,
418 Handles::IntoIter: 'a,
419{
420 handles.into_iter().map(move |source_handle| {
421 let blob: Blob<UnknownBlob> = source.get(source_handle).map_err(TransferError::Load)?;
422
423 Ok((
424 source_handle,
425 (target.put(blob).map_err(TransferError::Store)?),
426 ))
427 })
428}
429
/// Breadth-first traversal state over handles reachable from a set of roots.
/// Candidate handles are discovered by scanning blob bytes (see the impl).
pub struct ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    // Store used to resolve handles and probe candidate byte windows.
    source: &'a BS,
    // Frontier of handles still to visit.
    queue: VecDeque<Value<Handle<H, UnknownBlob>>>,
    // Raw handle bytes already yielded (or queued and probed), to avoid cycles.
    visited: HashSet<[u8; VALUE_LEN]>,
}
440
441impl<'a, BS, H> ReachableHandles<'a, BS, H>
442where
443 BS: BlobStoreGet<H>,
444 H: 'static + HashProtocol,
445{
446 fn new(source: &'a BS, roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>) -> Self {
447 let mut queue = VecDeque::new();
448 for handle in roots {
449 queue.push_back(handle);
450 }
451
452 Self {
453 source,
454 queue,
455 visited: HashSet::new(),
456 }
457 }
458
459 fn enqueue_from_blob(&mut self, blob: &Blob<UnknownBlob>) {
460 let bytes = blob.bytes.as_ref();
461 let mut offset = 0usize;
462
463 while offset + VALUE_LEN <= bytes.len() {
464 let mut raw = [0u8; VALUE_LEN];
465 raw.copy_from_slice(&bytes[offset..offset + VALUE_LEN]);
466
467 if !self.visited.contains(&raw) {
468 let candidate = Value::<Handle<H, UnknownBlob>>::new(raw);
469 if self
470 .source
471 .get::<anybytes::Bytes, UnknownBlob>(candidate)
472 .is_ok()
473 {
474 self.queue.push_back(candidate);
475 }
476 }
477
478 offset += VALUE_LEN;
479 }
480 }
481}
482
483impl<'a, BS, H> Iterator for ReachableHandles<'a, BS, H>
484where
485 BS: BlobStoreGet<H>,
486 H: 'static + HashProtocol,
487{
488 type Item = Value<Handle<H, UnknownBlob>>;
489
490 fn next(&mut self) -> Option<Self::Item> {
491 while let Some(handle) = self.queue.pop_front() {
492 let raw = handle.raw;
493
494 if !self.visited.insert(raw) {
495 continue;
496 }
497
498 if let Ok(blob) = self.source.get(handle) {
499 self.enqueue_from_blob(&blob);
500 }
501
502 return Some(handle);
503 }
504
505 None
506 }
507}
508
/// Returns an iterator over every handle transitively reachable from `roots`
/// in `source` (see [`ReachableHandles`] for the traversal semantics).
pub fn reachable<'a, BS, H>(
    source: &'a BS,
    roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>,
) -> ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    ReachableHandles::new(source, roots)
}
520
521pub fn potential_handles<'a, H>(
528 set: &'a TribleSet,
529) -> impl Iterator<Item = Value<Handle<H, UnknownBlob>>> + 'a
530where
531 H: HashProtocol,
532{
533 set.vae.iter().map(|raw| {
534 let mut value = [0u8; VALUE_LEN];
535 value.copy_from_slice(&raw[0..VALUE_LEN]);
536 Value::<Handle<H, UnknownBlob>>::new(value)
537 })
538}
539
/// Failure while creating a commit: either storing the content payload or
/// storing the commit metadata blob failed.
#[derive(Debug)]
pub enum CreateCommitError<BlobErr: Error + Debug + Send + Sync + 'static> {
    /// Storing the commit's content blob failed.
    ContentStorageError(BlobErr),
    /// Storing the commit's metadata blob failed.
    CommitStorageError(BlobErr),
}
549
550impl<BlobErr: Error + Debug + Send + Sync + 'static> fmt::Display for CreateCommitError<BlobErr> {
551 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
552 match self {
553 CreateCommitError::ContentStorageError(e) => write!(f, "Content storage failed: {e}"),
554 CreateCommitError::CommitStorageError(e) => {
555 write!(f, "Commit metadata storage failed: {e}")
556 }
557 }
558 }
559}
560
561impl<BlobErr: Error + Debug + Send + Sync + 'static> Error for CreateCommitError<BlobErr> {
562 fn source(&self) -> Option<&(dyn Error + 'static)> {
563 match self {
564 CreateCommitError::ContentStorageError(e) => Some(e),
565 CreateCommitError::CommitStorageError(e) => Some(e),
566 }
567 }
568}
569
/// Failure while merging two workspaces.
#[derive(Debug)]
pub enum MergeError {
    /// The workspaces track different branches/repositories and can't be merged.
    DifferentRepos(),
}
575
/// Failure while pushing a workspace's changes back to the repository.
#[derive(Debug)]
pub enum PushError<Storage: BranchStore<Blake3> + BlobStore<Blake3>> {
    /// Listing branches in the backing store failed.
    StorageBranches(Storage::BranchesError),
    /// Obtaining a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading (and unarchiving) a metadata blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// The compare-and-swap branch update failed at the store level.
    BranchUpdate(Storage::UpdateError),
    /// The branch metadata archive was missing required attributes.
    BadBranchMetadata(),
    /// Merging a conflicting workspace failed.
    MergeError(MergeError),
}
595
596impl<Storage> From<MergeError> for PushError<Storage>
601where
602 Storage: BranchStore<Blake3> + BlobStore<Blake3>,
603{
604 fn from(e: MergeError) -> Self {
605 PushError::MergeError(e)
606 }
607}
608
/// Failure while creating or manipulating a branch.
#[derive(Debug)]
pub enum BranchError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Obtaining a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading (and unarchiving) a blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// Reading a branch head failed.
    BranchHead(Storage::HeadError),
    /// Updating a branch head failed.
    BranchUpdate(Storage::UpdateError),
    /// A branch with this id already exists (CAS from `None` conflicted).
    AlreadyExists(),
    /// No branch with the given id was found.
    BranchNotFound(Id),
}
637
/// Failure while resolving a branch (e.g. by name) in the repository.
#[derive(Debug)]
pub enum LookupError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Listing branches failed.
    StorageBranches(Storage::BranchesError),
    /// Reading a branch head failed.
    BranchHead(Storage::HeadError),
    /// Obtaining a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading (and unarchiving) a metadata blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Several branches matched the same name; carries all matching ids.
    NameConflict(Vec<Id>),
    /// The branch metadata archive was missing required attributes.
    BadBranchMetadata(),
}
653
/// A content-addressed repository: blob + branch storage plus the signing
/// identity and default commit metadata used for new commits.
pub struct Repository<Storage: BlobStore<Blake3> + BranchStore<Blake3>> {
    // Combined blob/branch backend.
    storage: Storage,
    // Default key used to sign commits and branch metadata.
    signing_key: SigningKey,
    // Handle of the metadata archive attached to commits created here.
    commit_metadata: MetadataHandle,
}
665
/// Failure while pulling a branch into a new workspace.
pub enum PullError<BranchStorageErr, BlobReaderErr, BlobStorageErr>
where
    BranchStorageErr: Error,
    BlobReaderErr: Error,
    BlobStorageErr: Error,
{
    /// No branch with the given id exists.
    BranchNotFound(Id),
    /// Reading the branch head failed.
    BranchStorage(BranchStorageErr),
    /// Obtaining a blob reader failed.
    BlobReader(BlobReaderErr),
    /// Reading the branch-metadata blob failed.
    BlobStorage(BlobStorageErr),
    /// The branch metadata archive was malformed (e.g. multiple heads).
    BadBranchMetadata(),
}
683
// Manual Debug impl; output mirrors what `#[derive(Debug)]` would produce.
// NOTE(review): presumably kept manual to control the trait bounds — confirm.
impl<B, R, C> fmt::Debug for PullError<B, R, C>
where
    B: Error + fmt::Debug,
    R: Error + fmt::Debug,
    C: Error + fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            PullError::BranchNotFound(id) => f.debug_tuple("BranchNotFound").field(id).finish(),
            PullError::BranchStorage(e) => f.debug_tuple("BranchStorage").field(e).finish(),
            PullError::BlobReader(e) => f.debug_tuple("BlobReader").field(e).finish(),
            PullError::BlobStorage(e) => f.debug_tuple("BlobStorage").field(e).finish(),
            PullError::BadBranchMetadata() => f.debug_tuple("BadBranchMetadata").finish(),
        }
    }
}
700
/// Core repository operations: construction, branch creation, and the
/// pull / push workspace cycle.
impl<Storage> Repository<Storage>
where
    Storage: BlobStore<Blake3> + BranchStore<Blake3>,
{
    /// Creates a repository over `storage`, storing `commit_metadata` as a
    /// blob so it can be attached (by handle) to every commit made here.
    pub fn new(
        mut storage: Storage,
        signing_key: SigningKey,
        commit_metadata: TribleSet,
    ) -> Result<Self, <Storage as BlobStorePut<Blake3>>::PutError> {
        let commit_metadata = storage.put(commit_metadata)?;
        Ok(Self {
            storage,
            signing_key,
            commit_metadata,
        })
    }

    /// Consumes the repository, returning the underlying storage.
    pub fn into_storage(self) -> Storage {
        self.storage
    }

    /// Shared access to the underlying storage.
    pub fn storage(&self) -> &Storage {
        &self.storage
    }

    /// Mutable access to the underlying storage.
    pub fn storage_mut(&mut self) -> &mut Storage {
        &mut self.storage
    }

    /// Replaces the default signing key used for new commits and branches.
    pub fn set_signing_key(&mut self, signing_key: SigningKey) {
        self.signing_key = signing_key;
    }

    /// Handle of the metadata archive attached to commits created here.
    pub fn commit_metadata(&self) -> MetadataHandle {
        self.commit_metadata
    }

    /// Creates a branch named `branch_name` (optionally pointing at `commit`)
    /// signed with the repository's default key.
    pub fn create_branch(
        &mut self,
        branch_name: &str,
        commit: Option<CommitHandle>,
    ) -> Result<ExclusiveId, BranchError<Storage>> {
        self.create_branch_with_key(branch_name, commit, self.signing_key.clone())
    }

    /// Creates a branch signed with an explicit `signing_key`.
    ///
    /// A fresh branch id is generated; the branch name is stored as a blob,
    /// branch metadata is built (signed when a head commit is given, unsigned
    /// otherwise) and installed via compare-and-swap from "no branch".
    pub fn create_branch_with_key(
        &mut self,
        branch_name: &str,
        commit: Option<CommitHandle>,
        signing_key: SigningKey,
    ) -> Result<ExclusiveId, BranchError<Storage>> {
        let branch_id = genid();
        let name_blob = branch_name.to_owned().to_blob();
        let name_handle = name_blob.get_handle::<Blake3>();
        self.storage
            .put(name_blob)
            .map_err(|e| BranchError::StoragePut(e))?;

        let branch_set = if let Some(commit) = commit {
            // Resolve the head commit's metadata so the signed branch record
            // can embed it.
            let reader = self
                .storage
                .reader()
                .map_err(|e| BranchError::StorageReader(e))?;
            let set: TribleSet = reader.get(commit).map_err(|e| BranchError::StorageGet(e))?;

            branch::branch_metadata(&signing_key, *branch_id, name_handle, Some(set.to_blob()))
        } else {
            branch::branch_unsigned(*branch_id, name_handle, None)
        };

        let branch_blob = branch_set.to_blob();
        let branch_handle = self
            .storage
            .put(branch_blob)
            .map_err(|e| BranchError::StoragePut(e))?;
        // CAS from `None`: only succeeds if no branch with this id exists yet.
        let push_result = self
            .storage
            .update(*branch_id, None, Some(branch_handle))
            .map_err(|e| BranchError::BranchUpdate(e))?;

        match push_result {
            PushResult::Success() => Ok(branch_id),
            PushResult::Conflict(_) => Err(BranchError::AlreadyExists()),
        }
    }

    /// Checks out branch `branch_id` into a new [`Workspace`], signing future
    /// commits with the repository's default key.
    pub fn pull(
        &mut self,
        branch_id: Id,
    ) -> Result<
        Workspace<Storage>,
        PullError<
            Storage::HeadError,
            Storage::ReaderError,
            <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
        >,
    > {
        self.pull_with_key(branch_id, self.signing_key.clone())
    }

    /// Checks out branch `branch_id` into a new [`Workspace`] that signs with
    /// an explicit `signing_key`.
    pub fn pull_with_key(
        &mut self,
        branch_id: Id,
        signing_key: SigningKey,
    ) -> Result<
        Workspace<Storage>,
        PullError<
            Storage::HeadError,
            Storage::ReaderError,
            <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
        >,
    > {
        let base_branch_meta_handle = match self.storage.head(branch_id) {
            Ok(Some(handle)) => handle,
            Ok(None) => return Err(PullError::BranchNotFound(branch_id)),
            Err(e) => return Err(PullError::BranchStorage(e)),
        };
        let reader = self.storage.reader().map_err(PullError::BlobReader)?;
        let base_branch_meta: TribleSet = match reader.get(base_branch_meta_handle) {
            Ok(meta_set) => meta_set,
            Err(e) => return Err(PullError::BlobStorage(e)),
        };

        // Extract the (optional) head commit; more than one head attribute
        // means the metadata archive is malformed.
        let head_ = match find!(
            (head_: Value<_>),
            pattern!(&base_branch_meta, [{ head: ?head_ }])
        )
        .at_most_one()
        {
            Ok(Some((h,))) => Some(h),
            Ok(None) => None,
            Err(_) => return Err(PullError::BadBranchMetadata()),
        };
        let base_blobs = self.storage.reader().map_err(PullError::BlobReader)?;
        Ok(Workspace {
            base_blobs,
            local_blobs: MemoryBlobStore::new(),
            head: head_,
            base_head: head_,
            base_branch_id: branch_id,
            base_branch_meta: base_branch_meta_handle,
            signing_key,
            commit_metadata: self.commit_metadata,
        })
    }

    /// Pushes the workspace, retrying on conflicts: each conflict yields a
    /// fresh workspace at the current head which the local changes are merged
    /// into, until a push succeeds.
    pub fn push(&mut self, workspace: &mut Workspace<Storage>) -> Result<(), PushError<Storage>> {
        while let Some(mut conflict_ws) = self.try_push(workspace)? {
            conflict_ws.merge(workspace)?;

            *workspace = conflict_ws;
        }

        Ok(())
    }

    /// Attempts a single push of the workspace.
    ///
    /// Returns `Ok(None)` on success (or when there is nothing to push), or
    /// `Ok(Some(ws))` with a conflict workspace based at the branch's current
    /// head when the compare-and-swap update lost a race.
    pub fn try_push(
        &mut self,
        workspace: &mut Workspace<Storage>,
    ) -> Result<Option<Workspace<Storage>>, PushError<Storage>> {
        // Copy all locally created blobs into repository storage first, so
        // the new head is fully resolvable once published.
        let workspace_reader = workspace.local_blobs.reader().unwrap();
        for handle in workspace_reader.blobs() {
            let handle = handle.expect("infallible blob enumeration");
            let blob: Blob<UnknownBlob> =
                workspace_reader.get(handle).expect("infallible blob read");
            self.storage.put(blob).map_err(PushError::StoragePut)?;
        }

        // No head movement since the last sync: nothing to publish.
        if workspace.base_head == workspace.head {
            return Ok(None);
        }

        let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
        let base_branch_meta: TribleSet = repo_reader
            .get(workspace.base_branch_meta)
            .map_err(PushError::StorageGet)?;

        // The branch name is carried forward into the new metadata record.
        let Ok((branch_name,)) = find!(
            (name: Value<Handle<Blake3, LongString>>),
            pattern!(base_branch_meta, [{ crate::metadata::name: ?name }])
        )
        .exactly_one() else {
            return Err(PushError::BadBranchMetadata());
        };

        let head_handle = workspace.head.ok_or(PushError::BadBranchMetadata())?;
        let head_: TribleSet = repo_reader
            .get(head_handle)
            .map_err(PushError::StorageGet)?;

        let branch_meta = branch_metadata(
            &workspace.signing_key,
            workspace.base_branch_id,
            branch_name,
            Some(head_.to_blob()),
        );

        let branch_meta_handle = self
            .storage
            .put(branch_meta)
            .map_err(PushError::StoragePut)?;

        // CAS: only move the branch if it still points at the metadata this
        // workspace was based on.
        let result = self
            .storage
            .update(
                workspace.base_branch_id,
                Some(workspace.base_branch_meta),
                Some(branch_meta_handle),
            )
            .map_err(PushError::BranchUpdate)?;

        match result {
            PushResult::Success() => {
                // Re-base the workspace on the freshly published state and
                // clear its (now persisted) local blobs.
                workspace.base_branch_meta = branch_meta_handle;
                workspace.base_head = workspace.head;
                workspace.base_blobs = self.storage.reader().map_err(PushError::StorageReader)?;
                workspace.local_blobs = MemoryBlobStore::new();
                Ok(None)
            }
            PushResult::Conflict(conflicting_meta) => {
                let conflicting_meta = conflicting_meta.ok_or(PushError::BadBranchMetadata())?;

                let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
                let branch_meta: TribleSet = repo_reader
                    .get(conflicting_meta)
                    .map_err(PushError::StorageGet)?;

                let head_ = match find!((head_: Value<_>),
                    pattern!(&branch_meta, [{ head: ?head_ }])
                )
                .at_most_one()
                {
                    Ok(Some((h,))) => Some(h),
                    Ok(None) => None,
                    Err(_) => return Err(PushError::BadBranchMetadata()),
                };

                // Hand back a workspace based at the winning head so the
                // caller can merge and retry.
                let conflict_ws = Workspace {
                    base_blobs: self.storage.reader().map_err(PushError::StorageReader)?,
                    local_blobs: MemoryBlobStore::new(),
                    head: head_,
                    base_head: head_,
                    base_branch_id: workspace.base_branch_id,
                    base_branch_meta: conflicting_meta,
                    signing_key: workspace.signing_key.clone(),
                    commit_metadata: workspace.commit_metadata,
                };

                Ok(Some(conflict_ws))
            }
        }
    }
}
1018
/// Handle of a commit-metadata archive.
type CommitHandle = Value<Handle<Blake3, SimpleArchive>>;
/// Handle of a repository-level metadata archive.
type MetadataHandle = Value<Handle<Blake3, SimpleArchive>>;
/// Set of commit handles, keyed by their raw value bytes.
type CommitSet = PATCH<VALUE_LEN, IdentitySchema, ()>;
/// Handle of a branch-metadata archive.
type BranchMetaHandle = Value<Handle<Blake3, SimpleArchive>>;
1023
/// A mutable checkout of a branch: local, not-yet-pushed blobs layered over
/// a snapshot of the repository's blob store.
pub struct Workspace<Blobs: BlobStore<Blake3>> {
    // Blobs created locally since the last successful push.
    local_blobs: MemoryBlobStore<Blake3>,
    // Snapshot of the repository's blobs the workspace was based on.
    base_blobs: Blobs::Reader,
    // Branch this workspace tracks.
    base_branch_id: Id,
    // Branch-metadata handle at pull time; used as the CAS expectation on push.
    base_branch_meta: BranchMetaHandle,
    // Current (possibly advanced) head commit; `None` for an empty branch.
    head: Option<CommitHandle>,
    // Head commit as of the last sync with the repository.
    base_head: Option<CommitHandle>,
    // Key used to sign commits and branch metadata made from this workspace.
    signing_key: SigningKey,
    // Metadata archive attached to commits created from this workspace.
    commit_metadata: MetadataHandle,
}
1049
// Manual Debug impl; note that `signing_key` is deliberately not printed
// (keeps key material out of debug output).
impl<Blobs> fmt::Debug for Workspace<Blobs>
where
    Blobs: BlobStore<Blake3>,
    Blobs::Reader: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Workspace")
            .field("local_blobs", &self.local_blobs)
            .field("base_blobs", &self.base_blobs)
            .field("base_branch_id", &self.base_branch_id)
            .field("base_branch_meta", &self.base_branch_meta)
            .field("base_head", &self.base_head)
            .field("head", &self.head)
            .field("commit_metadata", &self.commit_metadata)
            .finish()
    }
}
1067
/// A query over commit history: given a workspace, resolves to the set of
/// selected commit handles. Implemented by handles, ranges, and combinators
/// such as [`Union`], [`Intersect`], [`Difference`] and [`Filter`].
pub trait CommitSelector<Blobs: BlobStore<Blake3>> {
    /// Evaluates the selector against `ws`, returning the selected commits.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >;
}
1078
/// Selector: the given commit and everything reachable through its parents.
pub struct Ancestors(pub CommitHandle);

/// Selects `commit` and all of its ancestors.
pub fn ancestors(commit: CommitHandle) -> Ancestors {
    Ancestors(commit)
}
1086
/// Selector: the commit `n` steps up the first-parent chain from a commit.
pub struct NthAncestor(pub CommitHandle, pub usize);

/// Selects the `n`-th first-parent ancestor of `commit`.
pub fn nth_ancestor(commit: CommitHandle, n: usize) -> NthAncestor {
    NthAncestor(commit, n)
}
1094
/// Selector: the direct parents of a commit.
pub struct Parents(pub CommitHandle);

/// Selects the direct parents of `commit`.
pub fn parents(commit: CommitHandle) -> Parents {
    Parents(commit)
}
1102
/// Selector: commits reachable from exactly one of two commits (ancestry
/// symmetric difference).
pub struct SymmetricDiff(pub CommitHandle, pub CommitHandle);

/// Selects commits reachable from `a` or `b` but not both.
pub fn symmetric_diff(a: CommitHandle, b: CommitHandle) -> SymmetricDiff {
    SymmetricDiff(a, b)
}
1111
/// Combinator: commits selected by either operand.
pub struct Union<A, B> {
    left: A,
    right: B,
}

/// Selects the union of `left` and `right`.
pub fn union<A, B>(left: A, right: B) -> Union<A, B> {
    Union { left, right }
}
1122
/// Combinator: commits selected by both operands.
pub struct Intersect<A, B> {
    left: A,
    right: B,
}

/// Selects the intersection of `left` and `right`.
pub fn intersect<A, B>(left: A, right: B) -> Intersect<A, B> {
    Intersect { left, right }
}
1133
/// Combinator: commits selected by `left` but not by `right`.
pub struct Difference<A, B> {
    left: A,
    right: B,
}

/// Selects commits in `left` that are not in `right`.
pub fn difference<A, B>(left: A, right: B) -> Difference<A, B> {
    Difference { left, right }
}
1145
/// Selector: commits whose timestamp falls between two epochs.
pub struct TimeRange(pub Epoch, pub Epoch);

/// Selects commits timestamped between `start` and `end`.
pub fn time_range(start: Epoch, end: Epoch) -> TimeRange {
    TimeRange(start, end)
}
1153
/// Combinator: commits from `selector` that satisfy `filter` (a predicate
/// over commit metadata and content).
pub struct Filter<S, F> {
    selector: S,
    filter: F,
}

/// Selects commits from `selector` for which `filter` returns true.
pub fn filter<S, F>(selector: S, filter: F) -> Filter<S, F> {
    Filter { selector, filter }
}
1164
1165impl<Blobs> CommitSelector<Blobs> for CommitHandle
1166where
1167 Blobs: BlobStore<Blake3>,
1168{
1169 fn select(
1170 self,
1171 _ws: &mut Workspace<Blobs>,
1172 ) -> Result<
1173 CommitSet,
1174 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1175 > {
1176 let mut patch = CommitSet::new();
1177 patch.insert(&Entry::new(&self.raw));
1178 Ok(patch)
1179 }
1180}
1181
1182impl<Blobs> CommitSelector<Blobs> for Vec<CommitHandle>
1183where
1184 Blobs: BlobStore<Blake3>,
1185{
1186 fn select(
1187 self,
1188 _ws: &mut Workspace<Blobs>,
1189 ) -> Result<
1190 CommitSet,
1191 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1192 > {
1193 let mut patch = CommitSet::new();
1194 for handle in self {
1195 patch.insert(&Entry::new(&handle.raw));
1196 }
1197 Ok(patch)
1198 }
1199}
1200
1201impl<Blobs> CommitSelector<Blobs> for &[CommitHandle]
1202where
1203 Blobs: BlobStore<Blake3>,
1204{
1205 fn select(
1206 self,
1207 _ws: &mut Workspace<Blobs>,
1208 ) -> Result<
1209 CommitSet,
1210 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1211 > {
1212 let mut patch = CommitSet::new();
1213 for handle in self {
1214 patch.insert(&Entry::new(&handle.raw));
1215 }
1216 Ok(patch)
1217 }
1218}
1219
1220impl<Blobs> CommitSelector<Blobs> for Option<CommitHandle>
1221where
1222 Blobs: BlobStore<Blake3>,
1223{
1224 fn select(
1225 self,
1226 _ws: &mut Workspace<Blobs>,
1227 ) -> Result<
1228 CommitSet,
1229 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1230 > {
1231 let mut patch = CommitSet::new();
1232 if let Some(handle) = self {
1233 patch.insert(&Entry::new(&handle.raw));
1234 }
1235 Ok(patch)
1236 }
1237}
1238
impl<Blobs> CommitSelector<Blobs> for Ancestors
where
    Blobs: BlobStore<Blake3>,
{
    /// Selects the commit and its full ancestry via graph traversal.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        collect_reachable(ws, self.0)
    }
}
1253
1254impl<Blobs> CommitSelector<Blobs> for NthAncestor
1255where
1256 Blobs: BlobStore<Blake3>,
1257{
1258 fn select(
1259 self,
1260 ws: &mut Workspace<Blobs>,
1261 ) -> Result<
1262 CommitSet,
1263 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1264 > {
1265 let mut current = self.0;
1266 let mut remaining = self.1;
1267
1268 while remaining > 0 {
1269 let meta: TribleSet = ws.get(current).map_err(WorkspaceCheckoutError::Storage)?;
1270 let mut parents = find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }]));
1271 let Some((p,)) = parents.next() else {
1272 return Ok(CommitSet::new());
1273 };
1274 current = p;
1275 remaining -= 1;
1276 }
1277
1278 let mut patch = CommitSet::new();
1279 patch.insert(&Entry::new(¤t.raw));
1280 Ok(patch)
1281 }
1282}
1283
impl<Blobs> CommitSelector<Blobs> for Parents
where
    Blobs: BlobStore<Blake3>,
{
    /// Selects every direct parent recorded in the commit's metadata.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let meta: TribleSet = ws.get(self.0).map_err(WorkspaceCheckoutError::Storage)?;
        let mut result = CommitSet::new();
        for (p,) in find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }])) {
            result.insert(&Entry::new(&p.raw));
        }
        Ok(result)
    }
}
1303
1304impl<Blobs> CommitSelector<Blobs> for SymmetricDiff
1305where
1306 Blobs: BlobStore<Blake3>,
1307{
1308 fn select(
1309 self,
1310 ws: &mut Workspace<Blobs>,
1311 ) -> Result<
1312 CommitSet,
1313 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1314 > {
1315 let a = collect_reachable(ws, self.0)?;
1316 let b = collect_reachable(ws, self.1)?;
1317 let inter = a.intersect(&b);
1318 let mut union = a;
1319 union.union(b);
1320 Ok(union.difference(&inter))
1321 }
1322}
1323
1324impl<A, B, Blobs> CommitSelector<Blobs> for Union<A, B>
1325where
1326 A: CommitSelector<Blobs>,
1327 B: CommitSelector<Blobs>,
1328 Blobs: BlobStore<Blake3>,
1329{
1330 fn select(
1331 self,
1332 ws: &mut Workspace<Blobs>,
1333 ) -> Result<
1334 CommitSet,
1335 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1336 > {
1337 let mut left = self.left.select(ws)?;
1338 let right = self.right.select(ws)?;
1339 left.union(right);
1340 Ok(left)
1341 }
1342}
1343
1344impl<A, B, Blobs> CommitSelector<Blobs> for Intersect<A, B>
1345where
1346 A: CommitSelector<Blobs>,
1347 B: CommitSelector<Blobs>,
1348 Blobs: BlobStore<Blake3>,
1349{
1350 fn select(
1351 self,
1352 ws: &mut Workspace<Blobs>,
1353 ) -> Result<
1354 CommitSet,
1355 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1356 > {
1357 let left = self.left.select(ws)?;
1358 let right = self.right.select(ws)?;
1359 Ok(left.intersect(&right))
1360 }
1361}
1362
1363impl<A, B, Blobs> CommitSelector<Blobs> for Difference<A, B>
1364where
1365 A: CommitSelector<Blobs>,
1366 B: CommitSelector<Blobs>,
1367 Blobs: BlobStore<Blake3>,
1368{
1369 fn select(
1370 self,
1371 ws: &mut Workspace<Blobs>,
1372 ) -> Result<
1373 CommitSet,
1374 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1375 > {
1376 let left = self.left.select(ws)?;
1377 let right = self.right.select(ws)?;
1378 Ok(left.difference(&right))
1379 }
1380}
1381
impl<S, F, Blobs> CommitSelector<Blobs> for Filter<S, F>
where
    Blobs: BlobStore<Blake3>,
    S: CommitSelector<Blobs>,
    F: for<'x, 'y> Fn(&'x TribleSet, &'y TribleSet) -> bool,
{
    /// Evaluates the inner selector, then keeps only commits for which
    /// `filter(metadata, content)` returns true. Loads each candidate's
    /// metadata and content payload to do so.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let patch = self.selector.select(ws)?;
        let mut result = CommitSet::new();
        let filter = self.filter;
        for raw in patch.iter() {
            let handle = Value::new(*raw);
            let meta: TribleSet = ws.get(handle).map_err(WorkspaceCheckoutError::Storage)?;

            // A commit must reference exactly one content archive; anything
            // else is malformed metadata.
            let Ok((content_handle,)) = find!(
                (c: Value<_>),
                pattern!(&meta, [{ content: ?c }])
            )
            .exactly_one() else {
                return Err(WorkspaceCheckoutError::BadCommitMetadata());
            };

            let payload: TribleSet = ws
                .get(content_handle)
                .map_err(WorkspaceCheckoutError::Storage)?;

            if filter(&meta, &payload) {
                result.insert(&Entry::new(raw));
            }
        }
        Ok(result)
    }
}
1421
/// Selector: all commits in the current head's ancestry whose content
/// touches a given entity.
pub struct HistoryOf(pub Id);

/// Selects the commits that mention `entity` in their content.
pub fn history_of(entity: Id) -> HistoryOf {
    HistoryOf(entity)
}
1429
impl<Blobs> CommitSelector<Blobs> for HistoryOf
where
    Blobs: BlobStore<Blake3>,
{
    /// Filters the head's ancestry down to commits whose content payload
    /// contains at least one trible about the target entity. An empty
    /// branch (no head) selects nothing.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let Some(head_) = ws.head else {
            return Ok(CommitSet::new());
        };
        let entity = self.0;
        filter(
            ancestors(head_),
            move |_: &TribleSet, payload: &TribleSet| payload.iter().any(|t| t.e() == &entity),
        )
        .select(ws)
    }
}
1452
1453fn collect_reachable_from_patch<Blobs: BlobStore<Blake3>>(
1461 ws: &mut Workspace<Blobs>,
1462 patch: CommitSet,
1463) -> Result<
1464 CommitSet,
1465 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1466> {
1467 let mut result = CommitSet::new();
1468 for raw in patch.iter() {
1469 let handle = Value::new(*raw);
1470 let reach = collect_reachable(ws, handle)?;
1471 result.union(reach);
1472 }
1473 Ok(result)
1474}
1475
/// Depth-first walk from every commit in `seeds`, collecting reachable
/// commits but pruning the walk at any commit contained in `stop` (the
/// stop commits themselves are excluded from the result).
fn collect_reachable_from_patch_until<Blobs: BlobStore<Blake3>>(
    ws: &mut Workspace<Blobs>,
    seeds: CommitSet,
    stop: &CommitSet,
) -> Result<
    CommitSet,
    WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
> {
    let mut visited = HashSet::new();
    let mut stack: Vec<CommitHandle> = seeds.iter().map(|raw| Value::new(*raw)).collect();
    let mut result = CommitSet::new();

    while let Some(commit) = stack.pop() {
        // Skip commits we've already expanded.
        if !visited.insert(commit) {
            continue;
        }

        // Prune at the stop set without including or expanding the commit.
        if stop.get(&commit.raw).is_some() {
            continue;
        }

        result.insert(&Entry::new(&commit.raw));

        // Resolve metadata from local (unpushed) blobs first, then fall
        // back to the base snapshot — order matters for fresh commits.
        let meta: TribleSet = ws
            .local_blobs
            .reader()
            .unwrap()
            .get(commit)
            .or_else(|_| ws.base_blobs.get(commit))
            .map_err(WorkspaceCheckoutError::Storage)?;

        for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
            stack.push(p);
        }
    }

    Ok(result)
}
1514
1515impl<T, Blobs> CommitSelector<Blobs> for std::ops::Range<T>
1516where
1517 T: CommitSelector<Blobs>,
1518 Blobs: BlobStore<Blake3>,
1519{
1520 fn select(
1521 self,
1522 ws: &mut Workspace<Blobs>,
1523 ) -> Result<
1524 CommitSet,
1525 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1526 > {
1527 let end_patch = self.end.select(ws)?;
1528 let start_patch = self.start.select(ws)?;
1529
1530 collect_reachable_from_patch_until(ws, end_patch, &start_patch)
1531 }
1532}
1533
1534impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeFrom<T>
1535where
1536 T: CommitSelector<Blobs>,
1537 Blobs: BlobStore<Blake3>,
1538{
1539 fn select(
1540 self,
1541 ws: &mut Workspace<Blobs>,
1542 ) -> Result<
1543 CommitSet,
1544 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1545 > {
1546 let Some(head_) = ws.head else {
1547 return Ok(CommitSet::new());
1548 };
1549 let exclude_patch = self.start.select(ws)?;
1550
1551 let mut head_patch = CommitSet::new();
1552 head_patch.insert(&Entry::new(&head_.raw));
1553
1554 collect_reachable_from_patch_until(ws, head_patch, &exclude_patch)
1555 }
1556}
1557
1558impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeTo<T>
1559where
1560 T: CommitSelector<Blobs>,
1561 Blobs: BlobStore<Blake3>,
1562{
1563 fn select(
1564 self,
1565 ws: &mut Workspace<Blobs>,
1566 ) -> Result<
1567 CommitSet,
1568 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1569 > {
1570 let end_patch = self.end.select(ws)?;
1571 collect_reachable_from_patch(ws, end_patch)
1572 }
1573}
1574
1575impl<Blobs> CommitSelector<Blobs> for std::ops::RangeFull
1576where
1577 Blobs: BlobStore<Blake3>,
1578{
1579 fn select(
1580 self,
1581 ws: &mut Workspace<Blobs>,
1582 ) -> Result<
1583 CommitSet,
1584 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1585 > {
1586 let Some(head_) = ws.head else {
1587 return Ok(CommitSet::new());
1588 };
1589 collect_reachable(ws, head_)
1590 }
1591}
1592
1593impl<Blobs> CommitSelector<Blobs> for TimeRange
1594where
1595 Blobs: BlobStore<Blake3>,
1596{
1597 fn select(
1598 self,
1599 ws: &mut Workspace<Blobs>,
1600 ) -> Result<
1601 CommitSet,
1602 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1603 > {
1604 let Some(head_) = ws.head else {
1605 return Ok(CommitSet::new());
1606 };
1607 let start = self.0;
1608 let end = self.1;
1609 filter(
1610 ancestors(head_),
1611 move |meta: &TribleSet, _payload: &TribleSet| {
1612 if let Ok(Some((ts,))) =
1613 find!((t: Value<_>), pattern!(meta, [{ timestamp: ?t }])).at_most_one()
1614 {
1615 let (ts_start, ts_end): (Epoch, Epoch) =
1616 crate::value::FromValue::from_value(&ts);
1617 ts_start <= end && ts_end >= start
1618 } else {
1619 false
1620 }
1621 },
1622 )
1623 .select(ws)
1624 }
1625}
1626
1627impl<Blobs: BlobStore<Blake3>> Workspace<Blobs> {
1628 pub fn branch_id(&self) -> Id {
1630 self.base_branch_id
1631 }
1632
1633 pub fn head(&self) -> Option<CommitHandle> {
1635 self.head
1636 }
1637
1638 pub fn metadata(&self) -> MetadataHandle {
1640 self.commit_metadata
1641 }
1642
1643 pub fn put<S, T>(&mut self, item: T) -> Value<Handle<Blake3, S>>
1646 where
1647 S: BlobSchema + 'static,
1648 T: ToBlob<S>,
1649 Handle<Blake3, S>: ValueSchema,
1650 {
1651 self.local_blobs.put(item).expect("infallible blob put")
1652 }
1653
1654 pub fn get<T, S>(
1659 &mut self,
1660 handle: Value<Handle<Blake3, S>>,
1661 ) -> Result<T, <Blobs::Reader as BlobStoreGet<Blake3>>::GetError<<T as TryFromBlob<S>>::Error>>
1662 where
1663 S: BlobSchema + 'static,
1664 T: TryFromBlob<S>,
1665 Handle<Blake3, S>: ValueSchema,
1666 {
1667 self.local_blobs
1668 .reader()
1669 .unwrap()
1670 .get(handle)
1671 .or_else(|_| self.base_blobs.get(handle))
1672 }
1673
1674 pub fn commit(
1678 &mut self,
1679 content_: impl Into<TribleSet>,
1680 message_: &str,
1681 ) {
1682 let content_ = content_.into();
1683 self.commit_internal(content_, Some(self.commit_metadata), Some(message_));
1684 }
1685
1686 pub fn commit_with_metadata(
1689 &mut self,
1690 content_: impl Into<TribleSet>,
1691 metadata_: MetadataHandle,
1692 message_: &str,
1693 ) {
1694 let content_ = content_.into();
1695 self.commit_internal(content_, Some(metadata_), Some(message_));
1696 }
1697
1698 fn commit_internal(
1699 &mut self,
1700 content_: TribleSet,
1701 metadata_handle: Option<MetadataHandle>,
1702 message_: Option<&str>,
1703 ) {
1704 let content_blob = content_.to_blob();
1706 let message_handle = message_.map(|m| self.put(m.to_string()));
1708 let parents = self.head.iter().copied();
1709
1710 let commit_set = crate::repo::commit::commit_metadata(
1711 &self.signing_key,
1712 parents,
1713 message_handle,
1714 Some(content_blob.clone()),
1715 metadata_handle,
1716 );
1717 let _ = self
1719 .local_blobs
1720 .put(content_blob)
1721 .expect("failed to put content blob");
1722 let commit_handle = self
1723 .local_blobs
1724 .put(commit_set)
1725 .expect("failed to put commit blob");
1726 self.head = Some(commit_handle);
1728 }
1729
1730 pub fn merge(&mut self, other: &mut Workspace<Blobs>) -> Result<CommitHandle, MergeError> {
1744 let other_local = other.local_blobs.reader().unwrap();
1746 for r in other_local.blobs() {
1747 let handle = r.expect("infallible blob enumeration");
1748 let blob: Blob<UnknownBlob> = other_local.get(handle).expect("infallible blob read");
1749
1750 self.local_blobs.put(blob).expect("infallible blob put");
1752 }
1753 let parents = self.head.iter().copied().chain(other.head.iter().copied());
1755 let merge_commit = commit_metadata(
1756 &self.signing_key,
1757 parents,
1758 None, None, None, );
1762 let commit_handle = self
1764 .local_blobs
1765 .put(merge_commit)
1766 .expect("failed to put merge commit blob");
1767 self.head = Some(commit_handle);
1768
1769 Ok(commit_handle)
1770 }
1771
1772 pub fn merge_commit(
1778 &mut self,
1779 other: Value<Handle<Blake3, SimpleArchive>>,
1780 ) -> Result<CommitHandle, MergeError> {
1781 let parents = self.head.iter().copied().chain(Some(other));
1788 let merge_commit = commit_metadata(&self.signing_key, parents, None, None, None);
1789 let commit_handle = self
1790 .local_blobs
1791 .put(merge_commit)
1792 .expect("failed to put merge commit blob");
1793 self.head = Some(commit_handle);
1794 Ok(commit_handle)
1795 }
1796
1797 fn checkout_commits<I>(
1804 &mut self,
1805 commits: I,
1806 ) -> Result<
1807 TribleSet,
1808 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1809 >
1810 where
1811 I: IntoIterator<Item = CommitHandle>,
1812 {
1813 let local = self.local_blobs.reader().unwrap();
1814 let mut result = TribleSet::new();
1815 for commit in commits {
1816 let meta: TribleSet = local
1817 .get(commit)
1818 .or_else(|_| self.base_blobs.get(commit))
1819 .map_err(WorkspaceCheckoutError::Storage)?;
1820
1821 let content_opt =
1826 match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
1827 Ok(Some((c,))) => Some(c),
1828 Ok(None) => None,
1829 Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
1830 };
1831
1832 if let Some(c) = content_opt {
1833 let set: TribleSet = local
1834 .get(c)
1835 .or_else(|_| self.base_blobs.get(c))
1836 .map_err(WorkspaceCheckoutError::Storage)?;
1837 result += set;
1838 } else {
1839 continue;
1841 }
1842 }
1843 Ok(result)
1844 }
1845
1846 fn checkout_commits_metadata<I>(
1847 &mut self,
1848 commits: I,
1849 ) -> Result<
1850 TribleSet,
1851 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1852 >
1853 where
1854 I: IntoIterator<Item = CommitHandle>,
1855 {
1856 let local = self.local_blobs.reader().unwrap();
1857 let mut result = TribleSet::new();
1858 for commit in commits {
1859 let meta: TribleSet = local
1860 .get(commit)
1861 .or_else(|_| self.base_blobs.get(commit))
1862 .map_err(WorkspaceCheckoutError::Storage)?;
1863
1864 let metadata_opt =
1865 match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
1866 Ok(Some((c,))) => Some(c),
1867 Ok(None) => None,
1868 Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
1869 };
1870
1871 if let Some(c) = metadata_opt {
1872 let set: TribleSet = local
1873 .get(c)
1874 .or_else(|_| self.base_blobs.get(c))
1875 .map_err(WorkspaceCheckoutError::Storage)?;
1876 result += set;
1877 }
1878 }
1879 Ok(result)
1880 }
1881
1882 fn checkout_commits_with_metadata<I>(
1883 &mut self,
1884 commits: I,
1885 ) -> Result<
1886 (TribleSet, TribleSet),
1887 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1888 >
1889 where
1890 I: IntoIterator<Item = CommitHandle>,
1891 {
1892 let local = self.local_blobs.reader().unwrap();
1893 let mut data = TribleSet::new();
1894 let mut metadata_set = TribleSet::new();
1895 for commit in commits {
1896 let meta: TribleSet = local
1897 .get(commit)
1898 .or_else(|_| self.base_blobs.get(commit))
1899 .map_err(WorkspaceCheckoutError::Storage)?;
1900
1901 let content_opt =
1902 match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
1903 Ok(Some((c,))) => Some(c),
1904 Ok(None) => None,
1905 Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
1906 };
1907
1908 if let Some(c) = content_opt {
1909 let set: TribleSet = local
1910 .get(c)
1911 .or_else(|_| self.base_blobs.get(c))
1912 .map_err(WorkspaceCheckoutError::Storage)?;
1913 data += set;
1914 }
1915
1916 let metadata_opt =
1917 match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
1918 Ok(Some((c,))) => Some(c),
1919 Ok(None) => None,
1920 Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
1921 };
1922
1923 if let Some(c) = metadata_opt {
1924 let set: TribleSet = local
1925 .get(c)
1926 .or_else(|_| self.base_blobs.get(c))
1927 .map_err(WorkspaceCheckoutError::Storage)?;
1928 metadata_set += set;
1929 }
1930 }
1931 Ok((data, metadata_set))
1932 }
1933
1934 pub fn checkout<R>(
1938 &mut self,
1939 spec: R,
1940 ) -> Result<
1941 TribleSet,
1942 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1943 >
1944 where
1945 R: CommitSelector<Blobs>,
1946 {
1947 let patch = spec.select(self)?;
1948 let commits = patch.iter().map(|raw| Value::new(*raw));
1949 self.checkout_commits(commits)
1950 }
1951
1952 pub fn checkout_metadata<R>(
1955 &mut self,
1956 spec: R,
1957 ) -> Result<
1958 TribleSet,
1959 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1960 >
1961 where
1962 R: CommitSelector<Blobs>,
1963 {
1964 let patch = spec.select(self)?;
1965 let commits = patch.iter().map(|raw| Value::new(*raw));
1966 self.checkout_commits_metadata(commits)
1967 }
1968
1969 pub fn checkout_with_metadata<R>(
1972 &mut self,
1973 spec: R,
1974 ) -> Result<
1975 (TribleSet, TribleSet),
1976 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1977 >
1978 where
1979 R: CommitSelector<Blobs>,
1980 {
1981 let patch = spec.select(self)?;
1982 let commits = patch.iter().map(|raw| Value::new(*raw));
1983 self.checkout_commits_with_metadata(commits)
1984 }
1985}
1986
/// Errors produced while materializing commits during a workspace checkout.
#[derive(Debug)]
pub enum WorkspaceCheckoutError<GetErr: Error> {
    /// The underlying blob store failed to load a commit or payload blob.
    Storage(GetErr),
    /// A commit's metadata did not have the expected shape (e.g. more than
    /// one `content`/`metadata` attribute on a single commit).
    BadCommitMetadata(),
}
1994
1995impl<E: Error + fmt::Debug> fmt::Display for WorkspaceCheckoutError<E> {
1996 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1997 match self {
1998 WorkspaceCheckoutError::Storage(e) => write!(f, "storage error: {e}"),
1999 WorkspaceCheckoutError::BadCommitMetadata() => {
2000 write!(f, "commit metadata malformed")
2001 }
2002 }
2003 }
2004}
2005
// Marker impl: relies on the `Display`/`Debug` impls; no `source()` override.
impl<E: Error + fmt::Debug> Error for WorkspaceCheckoutError<E> {}
2007
2008fn collect_reachable<Blobs: BlobStore<Blake3>>(
2009 ws: &mut Workspace<Blobs>,
2010 from: CommitHandle,
2011) -> Result<
2012 CommitSet,
2013 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
2014> {
2015 let mut visited = HashSet::new();
2016 let mut stack = vec![from];
2017 let mut result = CommitSet::new();
2018
2019 while let Some(commit) = stack.pop() {
2020 if !visited.insert(commit) {
2021 continue;
2022 }
2023 result.insert(&Entry::new(&commit.raw));
2024
2025 let meta: TribleSet = ws
2026 .local_blobs
2027 .reader()
2028 .unwrap()
2029 .get(commit)
2030 .or_else(|_| ws.base_blobs.get(commit))
2031 .map_err(WorkspaceCheckoutError::Storage)?;
2032
2033 for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
2034 stack.push(p);
2035 }
2036 }
2037
2038 Ok(result)
2039}