1#![allow(clippy::type_complexity)]
8pub mod branch;
113pub mod commit;
114pub mod hybridstore;
115pub mod memoryrepo;
116pub mod objectstore;
117pub mod pile;
118
/// Storage backends that must be explicitly shut down (e.g. to flush
/// buffers or release locks) implement this trait.
pub trait StorageClose {
    /// Error returned when the storage fails to shut down cleanly.
    type Error: std::error::Error;

    /// Consume the storage and release whatever resources it holds.
    fn close(self) -> Result<(), Self::Error>;
}
131
impl<Storage> Repository<Storage>
where
    Storage: BlobStore<Blake3> + BranchStore<Blake3> + StorageClose,
{
    /// Close the repository's underlying storage, consuming the repository.
    pub fn close(self) -> Result<(), <Storage as StorageClose>::Error> {
        self.storage.close()
    }
}
147
148use crate::macros::pattern;
149use std::collections::{HashSet, VecDeque};
150use std::convert::Infallible;
151use std::error::Error;
152use std::fmt::Debug;
153use std::fmt::{self};
154
155use commit::commit_metadata;
156use hifitime::Epoch;
157use itertools::Itertools;
158
159use crate::blob::schemas::simplearchive::UnarchiveError;
160use crate::blob::schemas::UnknownBlob;
161use crate::blob::Blob;
162use crate::blob::BlobSchema;
163use crate::blob::MemoryBlobStore;
164use crate::blob::ToBlob;
165use crate::blob::TryFromBlob;
166use crate::find;
167use crate::id::genid;
168use crate::id::Id;
169use crate::patch::Entry;
170use crate::patch::IdentitySchema;
171use crate::patch::PATCH;
172use crate::prelude::valueschemas::GenId;
173use crate::repo::branch::branch_metadata;
174use crate::trible::TribleSet;
175use crate::value::schemas::hash::Handle;
176use crate::value::schemas::hash::HashProtocol;
177use crate::value::Value;
178use crate::value::ValueSchema;
179use crate::value::VALUE_LEN;
180use ed25519_dalek::SigningKey;
181
182use crate::blob::schemas::longstring::LongString;
183use crate::blob::schemas::simplearchive::SimpleArchive;
184use crate::prelude::*;
185use crate::value::schemas::ed25519 as ed;
186use crate::value::schemas::hash::Blake3;
187use crate::value::schemas::shortstring::ShortString;
188use crate::value::schemas::time::NsTAIInterval;
189
attributes! {
    // Commit attributes: content payload, attached metadata, parent links,
    // and long/short commit messages.
    "4DD4DDD05CC31734B03ABB4E43188B1F" as pub content: Handle<Blake3, SimpleArchive>;
    "88B59BD497540AC5AECDB7518E737C87" as pub metadata: Handle<Blake3, SimpleArchive>;
    "317044B612C690000D798CA660ECFD2A" as pub parent: Handle<Blake3, SimpleArchive>;
    "B59D147839100B6ED4B165DF76EDF3BB" as pub message: Handle<Blake3, LongString>;
    "12290C0BE0E9207E324F24DDE0D89300" as pub short_message: ShortString;
    // Branch attributes: current head commit and branch identity.
    "272FBC56108F336C4D2E17289468C35F" as pub head: Handle<Blake3, SimpleArchive>;
    "8694CC73AF96A5E1C7635C677D1B928A" as pub branch: GenId;
    // Commit timestamp as a TAI nanosecond interval.
    "71FF566AB4E3119FC2C5E66A18979586" as pub timestamp: NsTAIInterval;
    // Ed25519 signature: signer public key plus R and S components.
    "ADB4FFAD247C886848161297EFF5A05B" as pub signed_by: ed::ED25519PublicKey;
    "9DF34F84959928F93A3C40AEB6E9E499" as pub signature_r: ed::ED25519RComponent;
    "1ACE03BF70242B289FDF00E4327C3BC6" as pub signature_s: ed::ED25519SComponent;
}
214
/// Read-only enumeration of every blob handle a store currently holds.
pub trait BlobStoreList<H: HashProtocol> {
    /// Iterator over all stored handles; items are fallible because the
    /// listing may hit storage errors mid-stream.
    type Iter<'a>: Iterator<Item = Result<Value<Handle<H, UnknownBlob>>, Self::Err>>
    where
        Self: 'a;
    /// Error produced while enumerating blobs.
    type Err: Error + Debug + Send + Sync + 'static;

    /// Iterate over the handles of all blobs in the store.
    fn blobs<'a>(&'a self) -> Self::Iter<'a>;
}
225
/// Bookkeeping information a store keeps for an individual blob.
#[derive(Debug, Clone)]
pub struct BlobMetadata {
    // Insertion/creation time; the unit is store-defined — TODO confirm
    // against the concrete store implementations.
    pub timestamp: u64,
    // Size of the stored blob in bytes.
    pub length: u64,
}
234
/// Query per-blob metadata (timestamp, length) from a store.
pub trait BlobStoreMeta<H: HashProtocol> {
    /// Error produced while looking up metadata.
    type MetaError: std::error::Error + Send + Sync + 'static;

    /// Metadata for the blob identified by `handle`, or `Ok(None)` when the
    /// store does not hold that blob.
    fn metadata<S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<Option<BlobMetadata>, Self::MetaError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
248
/// Explicit removal of individual blobs from a store.
pub trait BlobStoreForget<H: HashProtocol> {
    /// Error produced while forgetting a blob.
    type ForgetError: std::error::Error + Send + Sync + 'static;

    /// Drop the blob identified by `handle` from the store.
    fn forget<S>(&mut self, handle: Value<Handle<H, S>>) -> Result<(), Self::ForgetError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
261
/// Retrieval of typed values from a blob store.
pub trait BlobStoreGet<H: HashProtocol> {
    /// Error produced by `get`; generic over the conversion error of the
    /// requested target type.
    type GetError<E: std::error::Error>: Error;

    /// Load the blob identified by `handle` and convert it into a `T`.
    fn get<T, S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<T, Self::GetError<<T as TryFromBlob<S>>::Error>>
    where
        S: BlobSchema + 'static,
        T: TryFromBlob<S>,
        Handle<H, S>: ValueSchema;
}
283
/// Insertion of typed values into a blob store.
pub trait BlobStorePut<H: HashProtocol> {
    /// Error produced while storing a blob.
    type PutError: Error + Debug + Send + Sync + 'static;

    /// Convert `item` into a blob, store it, and return its handle.
    fn put<S, T>(&mut self, item: T) -> Result<Value<Handle<H, S>>, Self::PutError>
    where
        S: BlobSchema + 'static,
        T: ToBlob<S>,
        Handle<H, S>: ValueSchema;
}
294
/// A writable blob store that can also hand out read handles.
pub trait BlobStore<H: HashProtocol>: BlobStorePut<H> {
    /// Cheaply cloneable read handle over the store's contents.
    type Reader: BlobStoreGet<H> + BlobStoreList<H> + Clone + Send + PartialEq + Eq + 'static;
    /// Error produced while creating a reader.
    type ReaderError: Error + Debug + Send + Sync + 'static;
    /// Obtain a reader over the store's contents.
    fn reader(&mut self) -> Result<Self::Reader, Self::ReaderError>;
}
300
/// Retention support: mark blobs that must be kept (e.g. during GC).
pub trait BlobStoreKeep<H: HashProtocol> {
    /// Mark every handle in `handles` as live so the store retains them.
    fn keep<I>(&mut self, handles: I)
    where
        I: IntoIterator<Item = Value<Handle<H, UnknownBlob>>>;
}
308
/// Outcome of a compare-and-swap branch head update.
#[derive(Debug)]
pub enum PushResult<H>
where
    H: HashProtocol,
{
    /// The head was updated as requested.
    Success(),
    /// Another writer changed the head first; carries the head value
    /// actually observed (`None` if the branch had no head).
    Conflict(Option<Value<Handle<H, SimpleArchive>>>),
}
317
/// Storage of branch heads: mutable, id-addressed pointers into blob storage.
pub trait BranchStore<H: HashProtocol> {
    /// Error produced while listing branches.
    type BranchesError: Error + Debug + Send + Sync + 'static;
    /// Error produced while reading a branch head.
    type HeadError: Error + Debug + Send + Sync + 'static;
    /// Error produced while updating a branch head.
    type UpdateError: Error + Debug + Send + Sync + 'static;

    /// Iterator over all branch ids; items are fallible because the listing
    /// may hit storage errors mid-stream.
    type ListIter<'a>: Iterator<Item = Result<Id, Self::BranchesError>>
    where
        Self: 'a;

    /// Enumerate the ids of all branches in the store.
    fn branches<'a>(&'a mut self) -> Result<Self::ListIter<'a>, Self::BranchesError>;

    /// Current head of branch `id`, or `Ok(None)` if the branch is unknown.
    fn head(&mut self, id: Id) -> Result<Option<Value<Handle<H, SimpleArchive>>>, Self::HeadError>;

    /// Compare-and-swap the head of branch `id` from `old` to `new`;
    /// yields [`PushResult::Conflict`] when the observed head was not `old`.
    fn update(
        &mut self,
        id: Id,
        old: Option<Value<Handle<H, SimpleArchive>>>,
        new: Option<Value<Handle<H, SimpleArchive>>>,
    ) -> Result<PushResult<H>, Self::UpdateError>;
}
365
/// Error raised while copying blobs between two blob stores.
#[derive(Debug)]
pub enum TransferError<ListErr, LoadErr, StoreErr> {
    /// Enumerating the source store's blobs failed.
    List(ListErr),
    /// Reading a blob from the source store failed.
    Load(LoadErr),
    /// Writing a blob into the target store failed.
    Store(StoreErr),
}

impl<ListErr, LoadErr, StoreErr> fmt::Display for TransferError<ListErr, LoadErr, StoreErr> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Name the failing phase so the message is actionable on its own;
        // the underlying cause remains available via `Error::source`.
        match self {
            Self::List(_) => write!(f, "failed to transfer blob: could not list blobs in source"),
            Self::Load(_) => write!(f, "failed to transfer blob: could not load blob from source"),
            Self::Store(_) => write!(f, "failed to transfer blob: could not store blob in target"),
        }
    }
}

impl<ListErr, LoadErr, StoreErr> Error for TransferError<ListErr, LoadErr, StoreErr>
where
    ListErr: Debug + Error + 'static,
    LoadErr: Debug + Error + 'static,
    StoreErr: Debug + Error + 'static,
{
    /// Expose the wrapped storage error as the cause.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match self {
            Self::List(e) => Some(e),
            Self::Load(e) => Some(e),
            Self::Store(e) => Some(e),
        }
    }
}
393
394pub fn transfer<'a, BS, BT, HS, HT, Handles>(
396 source: &'a BS,
397 target: &'a mut BT,
398 handles: Handles,
399) -> impl Iterator<
400 Item = Result<
401 (
402 Value<Handle<HS, UnknownBlob>>,
403 Value<Handle<HT, UnknownBlob>>,
404 ),
405 TransferError<
406 Infallible,
407 <BS as BlobStoreGet<HS>>::GetError<Infallible>,
408 <BT as BlobStorePut<HT>>::PutError,
409 >,
410 >,
411> + 'a
412where
413 BS: BlobStoreGet<HS> + 'a,
414 BT: BlobStorePut<HT> + 'a,
415 HS: 'static + HashProtocol,
416 HT: 'static + HashProtocol,
417 Handles: IntoIterator<Item = Value<Handle<HS, UnknownBlob>>> + 'a,
418 Handles::IntoIter: 'a,
419{
420 handles.into_iter().map(move |source_handle| {
421 let blob: Blob<UnknownBlob> = source.get(source_handle).map_err(TransferError::Load)?;
422
423 Ok((
424 source_handle,
425 (target.put(blob).map_err(TransferError::Store)?),
426 ))
427 })
428}
429
/// Iterator that walks every handle transitively reachable from a set of
/// roots by scanning blob payloads for embedded handle-sized values.
pub struct ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    // Store used to resolve candidate handles into blobs.
    source: &'a BS,
    // Frontier of handles still to visit (breadth-first order).
    queue: VecDeque<Value<Handle<H, UnknownBlob>>>,
    // Raw values already yielded; breaks cycles and suppresses duplicates.
    visited: HashSet<[u8; VALUE_LEN]>,
}
440
441impl<'a, BS, H> ReachableHandles<'a, BS, H>
442where
443 BS: BlobStoreGet<H>,
444 H: 'static + HashProtocol,
445{
446 fn new(source: &'a BS, roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>) -> Self {
447 let mut queue = VecDeque::new();
448 for handle in roots {
449 queue.push_back(handle);
450 }
451
452 Self {
453 source,
454 queue,
455 visited: HashSet::new(),
456 }
457 }
458
459 fn enqueue_from_blob(&mut self, blob: &Blob<UnknownBlob>) {
460 let bytes = blob.bytes.as_ref();
461 let mut offset = 0usize;
462
463 while offset + VALUE_LEN <= bytes.len() {
464 let mut raw = [0u8; VALUE_LEN];
465 raw.copy_from_slice(&bytes[offset..offset + VALUE_LEN]);
466
467 if !self.visited.contains(&raw) {
468 let candidate = Value::<Handle<H, UnknownBlob>>::new(raw);
469 if self
470 .source
471 .get::<anybytes::Bytes, UnknownBlob>(candidate)
472 .is_ok()
473 {
474 self.queue.push_back(candidate);
475 }
476 }
477
478 offset += VALUE_LEN;
479 }
480 }
481}
482
483impl<'a, BS, H> Iterator for ReachableHandles<'a, BS, H>
484where
485 BS: BlobStoreGet<H>,
486 H: 'static + HashProtocol,
487{
488 type Item = Value<Handle<H, UnknownBlob>>;
489
490 fn next(&mut self) -> Option<Self::Item> {
491 while let Some(handle) = self.queue.pop_front() {
492 let raw = handle.raw;
493
494 if !self.visited.insert(raw) {
495 continue;
496 }
497
498 if let Ok(blob) = self.source.get(handle) {
499 self.enqueue_from_blob(&blob);
500 }
501
502 return Some(handle);
503 }
504
505 None
506 }
507}
508
/// Iterate over every handle transitively reachable from `roots` in `source`.
///
/// Candidates are discovered heuristically by probing every aligned
/// `VALUE_LEN`-byte window of each blob against the store, so any stored
/// value that happens to equal a blob hash is also traversed.
pub fn reachable<'a, BS, H>(
    source: &'a BS,
    roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>,
) -> ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    ReachableHandles::new(source, roots)
}
520
521pub fn potential_handles<'a, H>(
528 set: &'a TribleSet,
529) -> impl Iterator<Item = Value<Handle<H, UnknownBlob>>> + 'a
530where
531 H: HashProtocol,
532{
533 set.vae.iter().map(|raw| {
534 let mut value = [0u8; VALUE_LEN];
535 value.copy_from_slice(&raw[0..VALUE_LEN]);
536 Value::<Handle<H, UnknownBlob>>::new(value)
537 })
538}
539
/// Errors that can occur while creating a commit: either storing the
/// commit's content blob or the commit metadata blob failed.
#[derive(Debug)]
pub enum CreateCommitError<BlobErr: Error + Debug + Send + Sync + 'static> {
    /// Storing the commit's content blob failed.
    ContentStorageError(BlobErr),
    /// Storing the commit's metadata blob failed.
    CommitStorageError(BlobErr),
}

impl<BlobErr: Error + Debug + Send + Sync + 'static> fmt::Display for CreateCommitError<BlobErr> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::ContentStorageError(e) => write!(f, "Content storage failed: {e}"),
            Self::CommitStorageError(e) => write!(f, "Commit metadata storage failed: {e}"),
        }
    }
}

impl<BlobErr: Error + Debug + Send + Sync + 'static> Error for CreateCommitError<BlobErr> {
    /// Both variants wrap the same underlying storage error type.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        let (Self::ContentStorageError(e) | Self::CommitStorageError(e)) = self;
        Some(e)
    }
}
569
/// Errors that can occur while merging two workspaces.
#[derive(Debug)]
pub enum MergeError {
    /// The workspaces are not based on the same repository state.
    DifferentRepos(),
}
575
/// Errors that can occur while pushing a workspace back to its branch.
#[derive(Debug)]
pub enum PushError<Storage: BranchStore<Blake3> + BlobStore<Blake3>> {
    /// Listing branches failed.
    StorageBranches(Storage::BranchesError),
    /// Creating a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// The compare-and-swap branch update failed.
    BranchUpdate(Storage::UpdateError),
    /// The branch metadata lacked expected attributes.
    BadBranchMetadata(),
    /// Merging the conflicting heads failed.
    MergeError(MergeError),
}
595
// Lets `?` lift merge failures into push failures inside `Repository::push`.
impl<Storage> From<MergeError> for PushError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    fn from(e: MergeError) -> Self {
        PushError::MergeError(e)
    }
}
608
/// Errors that can occur while creating a branch.
#[derive(Debug)]
pub enum BranchError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Creating a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// Reading a branch head failed.
    BranchHead(Storage::HeadError),
    /// Updating a branch head failed.
    BranchUpdate(Storage::UpdateError),
    /// A branch with this id already exists.
    AlreadyExists(),
    /// No branch with the given id was found.
    BranchNotFound(Id),
}
637
/// Errors that can occur while looking up a branch by name.
#[derive(Debug)]
pub enum LookupError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Listing branches failed.
    StorageBranches(Storage::BranchesError),
    /// Reading a branch head failed.
    BranchHead(Storage::HeadError),
    /// Creating a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Multiple branches share the requested name.
    NameConflict(Vec<Id>),
    /// The branch metadata lacked expected attributes.
    BadBranchMetadata(),
}
653
/// Errors that can occur in [`Repository::ensure_branch`].
#[derive(Debug)]
pub enum EnsureBranchError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Looking up the branch by name failed.
    Lookup(LookupError<Storage>),
    /// Creating the missing branch failed.
    Create(BranchError<Storage>),
}
662
/// A trible repository: blob storage plus named branch heads, with a
/// signing key used to authenticate branch and commit metadata.
pub struct Repository<Storage: BlobStore<Blake3> + BranchStore<Blake3>> {
    storage: Storage,
    // Default key used to sign branches and commits created via this repo.
    signing_key: SigningKey,
    // Handle of the metadata blob attached to commits made via this repo.
    commit_metadata: MetadataHandle,
}
674
/// Errors that can occur while pulling a branch into a workspace.
pub enum PullError<BranchStorageErr, BlobReaderErr, BlobStorageErr>
where
    BranchStorageErr: Error,
    BlobReaderErr: Error,
    BlobStorageErr: Error,
{
    /// The requested branch does not exist.
    BranchNotFound(Id),
    /// Reading the branch head failed.
    BranchStorage(BranchStorageErr),
    /// Creating a blob reader failed.
    BlobReader(BlobReaderErr),
    /// Reading a blob failed.
    BlobStorage(BlobStorageErr),
    /// The branch metadata lacked expected attributes.
    BadBranchMetadata(),
}
692
// Hand-written `Debug` printing the conventional `Variant(field)` tuple form.
// NOTE(review): `#[derive(Debug)]` would likely produce equivalent output —
// confirm whether the manual impl is deliberate before replacing it.
impl<B, R, C> fmt::Debug for PullError<B, R, C>
where
    B: Error + fmt::Debug,
    R: Error + fmt::Debug,
    C: Error + fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            PullError::BranchNotFound(id) => f.debug_tuple("BranchNotFound").field(id).finish(),
            PullError::BranchStorage(e) => f.debug_tuple("BranchStorage").field(e).finish(),
            PullError::BlobReader(e) => f.debug_tuple("BlobReader").field(e).finish(),
            PullError::BlobStorage(e) => f.debug_tuple("BlobStorage").field(e).finish(),
            PullError::BadBranchMetadata() => f.debug_tuple("BadBranchMetadata").finish(),
        }
    }
}
709
710impl<Storage> Repository<Storage>
711where
712 Storage: BlobStore<Blake3> + BranchStore<Blake3>,
713{
714 pub fn new(
719 mut storage: Storage,
720 signing_key: SigningKey,
721 commit_metadata: TribleSet,
722 ) -> Result<Self, <Storage as BlobStorePut<Blake3>>::PutError> {
723 let commit_metadata = storage.put(commit_metadata)?;
724 Ok(Self {
725 storage,
726 signing_key,
727 commit_metadata,
728 })
729 }
730
731 pub fn into_storage(self) -> Storage {
737 self.storage
738 }
739
740 pub fn storage(&self) -> &Storage {
742 &self.storage
743 }
744
745 pub fn storage_mut(&mut self) -> &mut Storage {
747 &mut self.storage
748 }
749
750 pub fn set_signing_key(&mut self, signing_key: SigningKey) {
752 self.signing_key = signing_key;
753 }
754
755 pub fn commit_metadata(&self) -> MetadataHandle {
757 self.commit_metadata
758 }
759
760 pub fn create_branch(
774 &mut self,
775 branch_name: &str,
776 commit: Option<CommitHandle>,
777 ) -> Result<ExclusiveId, BranchError<Storage>> {
778 self.create_branch_with_key(branch_name, commit, self.signing_key.clone())
779 }
780
781 pub fn create_branch_with_key(
783 &mut self,
784 branch_name: &str,
785 commit: Option<CommitHandle>,
786 signing_key: SigningKey,
787 ) -> Result<ExclusiveId, BranchError<Storage>> {
788 let branch_id = genid();
789 let name_blob = branch_name.to_owned().to_blob();
790 let name_handle = name_blob.get_handle::<Blake3>();
791 self.storage
792 .put(name_blob)
793 .map_err(|e| BranchError::StoragePut(e))?;
794
795 let branch_set = if let Some(commit) = commit {
796 let reader = self
797 .storage
798 .reader()
799 .map_err(|e| BranchError::StorageReader(e))?;
800 let set: TribleSet = reader.get(commit).map_err(|e| BranchError::StorageGet(e))?;
801
802 branch::branch_metadata(&signing_key, *branch_id, name_handle, Some(set.to_blob()))
803 } else {
804 branch::branch_unsigned(*branch_id, name_handle, None)
805 };
806
807 let branch_blob = branch_set.to_blob();
808 let branch_handle = self
809 .storage
810 .put(branch_blob)
811 .map_err(|e| BranchError::StoragePut(e))?;
812 let push_result = self
813 .storage
814 .update(*branch_id, None, Some(branch_handle))
815 .map_err(|e| BranchError::BranchUpdate(e))?;
816
817 match push_result {
818 PushResult::Success() => Ok(branch_id),
819 PushResult::Conflict(_) => Err(BranchError::AlreadyExists()),
820 }
821 }
822
823 pub fn lookup_branch(
829 &mut self,
830 name: &str,
831 ) -> Result<Option<Id>, LookupError<Storage>> {
832 let branch_ids: Vec<Id> = self
833 .storage
834 .branches()
835 .map_err(LookupError::StorageBranches)?
836 .collect::<Result<Vec<_>, _>>()
837 .map_err(LookupError::StorageBranches)?;
838
839 let mut matches = Vec::new();
840
841 for branch_id in branch_ids {
842 let Some(meta_handle) = self
843 .storage
844 .head(branch_id)
845 .map_err(LookupError::BranchHead)?
846 else {
847 continue;
848 };
849
850 let reader = self
851 .storage
852 .reader()
853 .map_err(LookupError::StorageReader)?;
854 let meta_set: TribleSet =
855 reader.get(meta_handle).map_err(LookupError::StorageGet)?;
856
857 let Ok((name_handle,)) = find!(
858 (n: Value<Handle<Blake3, LongString>>),
859 pattern!(&meta_set, [{ crate::metadata::name: ?n }])
860 )
861 .exactly_one()
862 else {
863 continue;
864 };
865
866 let Ok(branch_name): Result<anybytes::View<str>, _> = reader.get(name_handle) else {
867 continue;
868 };
869
870 if branch_name.as_ref() == name {
871 matches.push(branch_id);
872 }
873 }
874
875 match matches.len() {
876 0 => Ok(None),
877 1 => Ok(Some(matches[0])),
878 _ => Err(LookupError::NameConflict(matches)),
879 }
880 }
881
882 pub fn ensure_branch(
890 &mut self,
891 name: &str,
892 commit: Option<CommitHandle>,
893 ) -> Result<Id, EnsureBranchError<Storage>> {
894 match self
895 .lookup_branch(name)
896 .map_err(EnsureBranchError::Lookup)?
897 {
898 Some(id) => Ok(id),
899 None => {
900 let id = self
901 .create_branch(name, commit)
902 .map_err(EnsureBranchError::Create)?;
903 Ok(*id)
904 }
905 }
906 }
907
908 pub fn pull(
911 &mut self,
912 branch_id: Id,
913 ) -> Result<
914 Workspace<Storage>,
915 PullError<
916 Storage::HeadError,
917 Storage::ReaderError,
918 <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
919 >,
920 > {
921 self.pull_with_key(branch_id, self.signing_key.clone())
922 }
923
924 pub fn pull_with_key(
926 &mut self,
927 branch_id: Id,
928 signing_key: SigningKey,
929 ) -> Result<
930 Workspace<Storage>,
931 PullError<
932 Storage::HeadError,
933 Storage::ReaderError,
934 <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
935 >,
936 > {
937 let base_branch_meta_handle = match self.storage.head(branch_id) {
939 Ok(Some(handle)) => handle,
940 Ok(None) => return Err(PullError::BranchNotFound(branch_id)),
941 Err(e) => return Err(PullError::BranchStorage(e)),
942 };
943 let reader = self.storage.reader().map_err(PullError::BlobReader)?;
945 let base_branch_meta: TribleSet = match reader.get(base_branch_meta_handle) {
946 Ok(meta_set) => meta_set,
947 Err(e) => return Err(PullError::BlobStorage(e)),
948 };
949
950 let head_ = match find!(
951 (head_: Value<_>),
952 pattern!(&base_branch_meta, [{ head: ?head_ }])
953 )
954 .at_most_one()
955 {
956 Ok(Some((h,))) => Some(h),
957 Ok(None) => None,
958 Err(_) => return Err(PullError::BadBranchMetadata()),
959 };
960 let base_blobs = self.storage.reader().map_err(PullError::BlobReader)?;
962 Ok(Workspace {
963 base_blobs,
964 local_blobs: MemoryBlobStore::new(),
965 head: head_,
966 base_head: head_,
967 base_branch_id: branch_id,
968 base_branch_meta: base_branch_meta_handle,
969 signing_key,
970 commit_metadata: self.commit_metadata,
971 })
972 }
973
974 pub fn push(&mut self, workspace: &mut Workspace<Storage>) -> Result<(), PushError<Storage>> {
978 while let Some(mut conflict_ws) = self.try_push(workspace)? {
983 conflict_ws.merge(workspace)?;
987
988 *workspace = conflict_ws;
993 }
994
995 Ok(())
996 }
997
998 pub fn try_push(
1002 &mut self,
1003 workspace: &mut Workspace<Storage>,
1004 ) -> Result<Option<Workspace<Storage>>, PushError<Storage>> {
1005 let workspace_reader = workspace.local_blobs.reader().unwrap();
1007 for handle in workspace_reader.blobs() {
1008 let handle = handle.expect("infallible blob enumeration");
1009 let blob: Blob<UnknownBlob> =
1010 workspace_reader.get(handle).expect("infallible blob read");
1011 self.storage.put(blob).map_err(PushError::StoragePut)?;
1012 }
1013
1014 if workspace.base_head == workspace.head {
1019 return Ok(None);
1020 }
1021
1022 let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
1024 let base_branch_meta: TribleSet = repo_reader
1025 .get(workspace.base_branch_meta)
1026 .map_err(PushError::StorageGet)?;
1027
1028 let Ok((branch_name,)) = find!(
1029 (name: Value<Handle<Blake3, LongString>>),
1030 pattern!(base_branch_meta, [{ crate::metadata::name: ?name }])
1031 )
1032 .exactly_one() else {
1033 return Err(PushError::BadBranchMetadata());
1034 };
1035
1036 let head_handle = workspace.head.ok_or(PushError::BadBranchMetadata())?;
1037 let head_: TribleSet = repo_reader
1038 .get(head_handle)
1039 .map_err(PushError::StorageGet)?;
1040
1041 let branch_meta = branch_metadata(
1042 &workspace.signing_key,
1043 workspace.base_branch_id,
1044 branch_name,
1045 Some(head_.to_blob()),
1046 );
1047
1048 let branch_meta_handle = self
1049 .storage
1050 .put(branch_meta)
1051 .map_err(PushError::StoragePut)?;
1052
1053 let result = self
1055 .storage
1056 .update(
1057 workspace.base_branch_id,
1058 Some(workspace.base_branch_meta),
1059 Some(branch_meta_handle),
1060 )
1061 .map_err(PushError::BranchUpdate)?;
1062
1063 match result {
1064 PushResult::Success() => {
1065 workspace.base_branch_meta = branch_meta_handle;
1068 workspace.base_head = workspace.head;
1069 workspace.base_blobs = self.storage.reader().map_err(PushError::StorageReader)?;
1072 workspace.local_blobs = MemoryBlobStore::new();
1076 Ok(None)
1077 }
1078 PushResult::Conflict(conflicting_meta) => {
1079 let conflicting_meta = conflicting_meta.ok_or(PushError::BadBranchMetadata())?;
1080
1081 let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
1082 let branch_meta: TribleSet = repo_reader
1083 .get(conflicting_meta)
1084 .map_err(PushError::StorageGet)?;
1085
1086 let head_ = match find!((head_: Value<_>),
1087 pattern!(&branch_meta, [{ head: ?head_ }])
1088 )
1089 .at_most_one()
1090 {
1091 Ok(Some((h,))) => Some(h),
1092 Ok(None) => None,
1093 Err(_) => return Err(PushError::BadBranchMetadata()),
1094 };
1095
1096 let conflict_ws = Workspace {
1097 base_blobs: self.storage.reader().map_err(PushError::StorageReader)?,
1098 local_blobs: MemoryBlobStore::new(),
1099 head: head_,
1100 base_head: head_,
1101 base_branch_id: workspace.base_branch_id,
1102 base_branch_meta: conflicting_meta,
1103 signing_key: workspace.signing_key.clone(),
1104 commit_metadata: workspace.commit_metadata,
1105 };
1106
1107 Ok(Some(conflict_ws))
1108 }
1109 }
1110 }
1111}
1112
// Handle of a commit-metadata blob.
type CommitHandle = Value<Handle<Blake3, SimpleArchive>>;
// Handle of the repository-level commit-metadata blob.
type MetadataHandle = Value<Handle<Blake3, SimpleArchive>>;
// Set of commits, keyed by the raw bytes of their handles.
type CommitSet = PATCH<VALUE_LEN, IdentitySchema, ()>;
// Handle of a branch-metadata blob.
type BranchMetaHandle = Value<Handle<Blake3, SimpleArchive>>;
1117
/// A mutable working view of one branch: new blobs are buffered in a local
/// in-memory store until [`Repository::push`] publishes them.
pub struct Workspace<Blobs: BlobStore<Blake3>> {
    // Blobs created locally and not yet pushed to the repository.
    local_blobs: MemoryBlobStore<Blake3>,
    // Snapshot of the repository's blobs taken at pull/push time.
    base_blobs: Blobs::Reader,
    // Branch this workspace tracks.
    base_branch_id: Id,
    // Branch metadata the next push will compare-and-swap against.
    base_branch_meta: BranchMetaHandle,
    // Current local head commit (None on an empty branch).
    head: Option<CommitHandle>,
    // Head as of the last pull/push; differing from `head` means there is
    // something to push.
    base_head: Option<CommitHandle>,
    // Key used to sign branch updates made from this workspace.
    signing_key: SigningKey,
    // Metadata blob attached to commits created here.
    commit_metadata: MetadataHandle,
}
1143
1144impl<Blobs> fmt::Debug for Workspace<Blobs>
1145where
1146 Blobs: BlobStore<Blake3>,
1147 Blobs::Reader: fmt::Debug,
1148{
1149 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1150 f.debug_struct("Workspace")
1151 .field("local_blobs", &self.local_blobs)
1152 .field("base_blobs", &self.base_blobs)
1153 .field("base_branch_id", &self.base_branch_id)
1154 .field("base_branch_meta", &self.base_branch_meta)
1155 .field("base_head", &self.base_head)
1156 .field("head", &self.head)
1157 .field("commit_metadata", &self.commit_metadata)
1158 .finish()
1159 }
1160}
1161
/// Strategy for choosing a set of commits from a workspace's history
/// (single handles, ancestor walks, set algebra, filters, ...).
pub trait CommitSelector<Blobs: BlobStore<Blake3>> {
    /// Evaluate the selector against `ws`, returning the selected commits.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >;
}
1172
/// Selector: everything reachable from the given commit via parent links.
pub struct Ancestors(pub CommitHandle);

/// Convenience constructor for [`Ancestors`].
pub fn ancestors(commit: CommitHandle) -> Ancestors {
    Ancestors(commit)
}
1180
/// Selector: the commit `n` steps up the first-parent chain.
pub struct NthAncestor(pub CommitHandle, pub usize);

/// Convenience constructor for [`NthAncestor`].
pub fn nth_ancestor(commit: CommitHandle, n: usize) -> NthAncestor {
    NthAncestor(commit, n)
}
1188
/// Selector: the direct parents of the given commit.
pub struct Parents(pub CommitHandle);

/// Convenience constructor for [`Parents`].
pub fn parents(commit: CommitHandle) -> Parents {
    Parents(commit)
}
1196
/// Selector: commits reachable from exactly one of the two given heads.
pub struct SymmetricDiff(pub CommitHandle, pub CommitHandle);

/// Convenience constructor for [`SymmetricDiff`].
pub fn symmetric_diff(a: CommitHandle, b: CommitHandle) -> SymmetricDiff {
    SymmetricDiff(a, b)
}
1205
/// Selector combinator: union of two selections.
pub struct Union<A, B> {
    left: A,
    right: B,
}

/// Convenience constructor for [`Union`].
pub fn union<A, B>(left: A, right: B) -> Union<A, B> {
    Union { left, right }
}
1216
/// Selector combinator: intersection of two selections.
pub struct Intersect<A, B> {
    left: A,
    right: B,
}

/// Convenience constructor for [`Intersect`].
pub fn intersect<A, B>(left: A, right: B) -> Intersect<A, B> {
    Intersect { left, right }
}
1227
/// Selector combinator: commits in `left` but not in `right`.
pub struct Difference<A, B> {
    left: A,
    right: B,
}

/// Convenience constructor for [`Difference`].
pub fn difference<A, B>(left: A, right: B) -> Difference<A, B> {
    Difference { left, right }
}
1239
/// Selector: commits whose timestamp lies within a time window — its
/// `CommitSelector` impl is outside this chunk; confirm bound inclusivity
/// there.
pub struct TimeRange(pub Epoch, pub Epoch);

/// Convenience constructor for [`TimeRange`].
pub fn time_range(start: Epoch, end: Epoch) -> TimeRange {
    TimeRange(start, end)
}
1247
/// Selector combinator: keeps the commits of `selector` for which `filter`
/// (given commit metadata and commit content) returns true.
pub struct Filter<S, F> {
    selector: S,
    filter: F,
}

/// Convenience constructor for [`Filter`].
pub fn filter<S, F>(selector: S, filter: F) -> Filter<S, F> {
    Filter { selector, filter }
}
1258
1259impl<Blobs> CommitSelector<Blobs> for CommitHandle
1260where
1261 Blobs: BlobStore<Blake3>,
1262{
1263 fn select(
1264 self,
1265 _ws: &mut Workspace<Blobs>,
1266 ) -> Result<
1267 CommitSet,
1268 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1269 > {
1270 let mut patch = CommitSet::new();
1271 patch.insert(&Entry::new(&self.raw));
1272 Ok(patch)
1273 }
1274}
1275
1276impl<Blobs> CommitSelector<Blobs> for Vec<CommitHandle>
1277where
1278 Blobs: BlobStore<Blake3>,
1279{
1280 fn select(
1281 self,
1282 _ws: &mut Workspace<Blobs>,
1283 ) -> Result<
1284 CommitSet,
1285 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1286 > {
1287 let mut patch = CommitSet::new();
1288 for handle in self {
1289 patch.insert(&Entry::new(&handle.raw));
1290 }
1291 Ok(patch)
1292 }
1293}
1294
1295impl<Blobs> CommitSelector<Blobs> for &[CommitHandle]
1296where
1297 Blobs: BlobStore<Blake3>,
1298{
1299 fn select(
1300 self,
1301 _ws: &mut Workspace<Blobs>,
1302 ) -> Result<
1303 CommitSet,
1304 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1305 > {
1306 let mut patch = CommitSet::new();
1307 for handle in self {
1308 patch.insert(&Entry::new(&handle.raw));
1309 }
1310 Ok(patch)
1311 }
1312}
1313
1314impl<Blobs> CommitSelector<Blobs> for Option<CommitHandle>
1315where
1316 Blobs: BlobStore<Blake3>,
1317{
1318 fn select(
1319 self,
1320 _ws: &mut Workspace<Blobs>,
1321 ) -> Result<
1322 CommitSet,
1323 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1324 > {
1325 let mut patch = CommitSet::new();
1326 if let Some(handle) = self {
1327 patch.insert(&Entry::new(&handle.raw));
1328 }
1329 Ok(patch)
1330 }
1331}
1332
impl<Blobs> CommitSelector<Blobs> for Ancestors
where
    Blobs: BlobStore<Blake3>,
{
    /// Collect everything reachable from the given commit via
    /// `collect_reachable` (defined elsewhere in this module).
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        collect_reachable(ws, self.0)
    }
}
1347
1348impl<Blobs> CommitSelector<Blobs> for NthAncestor
1349where
1350 Blobs: BlobStore<Blake3>,
1351{
1352 fn select(
1353 self,
1354 ws: &mut Workspace<Blobs>,
1355 ) -> Result<
1356 CommitSet,
1357 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1358 > {
1359 let mut current = self.0;
1360 let mut remaining = self.1;
1361
1362 while remaining > 0 {
1363 let meta: TribleSet = ws.get(current).map_err(WorkspaceCheckoutError::Storage)?;
1364 let mut parents = find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }]));
1365 let Some((p,)) = parents.next() else {
1366 return Ok(CommitSet::new());
1367 };
1368 current = p;
1369 remaining -= 1;
1370 }
1371
1372 let mut patch = CommitSet::new();
1373 patch.insert(&Entry::new(¤t.raw));
1374 Ok(patch)
1375 }
1376}
1377
impl<Blobs> CommitSelector<Blobs> for Parents
where
    Blobs: BlobStore<Blake3>,
{
    /// Read the commit's metadata and select every `parent` it records.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let meta: TribleSet = ws.get(self.0).map_err(WorkspaceCheckoutError::Storage)?;
        let mut result = CommitSet::new();
        for (p,) in find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }])) {
            result.insert(&Entry::new(&p.raw));
        }
        Ok(result)
    }
}
1397
1398impl<Blobs> CommitSelector<Blobs> for SymmetricDiff
1399where
1400 Blobs: BlobStore<Blake3>,
1401{
1402 fn select(
1403 self,
1404 ws: &mut Workspace<Blobs>,
1405 ) -> Result<
1406 CommitSet,
1407 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1408 > {
1409 let a = collect_reachable(ws, self.0)?;
1410 let b = collect_reachable(ws, self.1)?;
1411 let inter = a.intersect(&b);
1412 let mut union = a;
1413 union.union(b);
1414 Ok(union.difference(&inter))
1415 }
1416}
1417
1418impl<A, B, Blobs> CommitSelector<Blobs> for Union<A, B>
1419where
1420 A: CommitSelector<Blobs>,
1421 B: CommitSelector<Blobs>,
1422 Blobs: BlobStore<Blake3>,
1423{
1424 fn select(
1425 self,
1426 ws: &mut Workspace<Blobs>,
1427 ) -> Result<
1428 CommitSet,
1429 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1430 > {
1431 let mut left = self.left.select(ws)?;
1432 let right = self.right.select(ws)?;
1433 left.union(right);
1434 Ok(left)
1435 }
1436}
1437
1438impl<A, B, Blobs> CommitSelector<Blobs> for Intersect<A, B>
1439where
1440 A: CommitSelector<Blobs>,
1441 B: CommitSelector<Blobs>,
1442 Blobs: BlobStore<Blake3>,
1443{
1444 fn select(
1445 self,
1446 ws: &mut Workspace<Blobs>,
1447 ) -> Result<
1448 CommitSet,
1449 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1450 > {
1451 let left = self.left.select(ws)?;
1452 let right = self.right.select(ws)?;
1453 Ok(left.intersect(&right))
1454 }
1455}
1456
1457impl<A, B, Blobs> CommitSelector<Blobs> for Difference<A, B>
1458where
1459 A: CommitSelector<Blobs>,
1460 B: CommitSelector<Blobs>,
1461 Blobs: BlobStore<Blake3>,
1462{
1463 fn select(
1464 self,
1465 ws: &mut Workspace<Blobs>,
1466 ) -> Result<
1467 CommitSet,
1468 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1469 > {
1470 let left = self.left.select(ws)?;
1471 let right = self.right.select(ws)?;
1472 Ok(left.difference(&right))
1473 }
1474}
1475
impl<S, F, Blobs> CommitSelector<Blobs> for Filter<S, F>
where
    Blobs: BlobStore<Blake3>,
    S: CommitSelector<Blobs>,
    F: for<'x, 'y> Fn(&'x TribleSet, &'y TribleSet) -> bool,
{
    /// Evaluate the inner selector, then keep only the commits for which
    /// `filter(metadata, content)` returns true.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let patch = self.selector.select(ws)?;
        let mut result = CommitSet::new();
        let filter = self.filter;
        for raw in patch.iter() {
            let handle = Value::new(*raw);
            let meta: TribleSet = ws.get(handle).map_err(WorkspaceCheckoutError::Storage)?;

            // Every well-formed commit records exactly one content handle.
            let Ok((content_handle,)) = find!(
                (c: Value<_>),
                pattern!(&meta, [{ content: ?c }])
            )
            .exactly_one() else {
                return Err(WorkspaceCheckoutError::BadCommitMetadata());
            };

            let payload: TribleSet = ws
                .get(content_handle)
                .map_err(WorkspaceCheckoutError::Storage)?;

            if filter(&meta, &payload) {
                result.insert(&Entry::new(raw));
            }
        }
        Ok(result)
    }
}
1515
/// Commit selector that matches every ancestor commit whose payload contains
/// at least one trible with the given entity id (see the `CommitSelector`
/// implementation below).
pub struct HistoryOf(pub Id);

/// Convenience constructor for [`HistoryOf`].
pub fn history_of(entity: Id) -> HistoryOf {
    HistoryOf(entity)
}
1523
1524impl<Blobs> CommitSelector<Blobs> for HistoryOf
1525where
1526 Blobs: BlobStore<Blake3>,
1527{
1528 fn select(
1529 self,
1530 ws: &mut Workspace<Blobs>,
1531 ) -> Result<
1532 CommitSet,
1533 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1534 > {
1535 let Some(head_) = ws.head else {
1536 return Ok(CommitSet::new());
1537 };
1538 let entity = self.0;
1539 filter(
1540 ancestors(head_),
1541 move |_: &TribleSet, payload: &TribleSet| payload.iter().any(|t| t.e() == &entity),
1542 )
1543 .select(ws)
1544 }
1545}
1546
1547fn collect_reachable_from_patch<Blobs: BlobStore<Blake3>>(
1555 ws: &mut Workspace<Blobs>,
1556 patch: CommitSet,
1557) -> Result<
1558 CommitSet,
1559 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1560> {
1561 let mut result = CommitSet::new();
1562 for raw in patch.iter() {
1563 let handle = Value::new(*raw);
1564 let reach = collect_reachable(ws, handle)?;
1565 result.union(reach);
1566 }
1567 Ok(result)
1568}
1569
1570fn collect_reachable_from_patch_until<Blobs: BlobStore<Blake3>>(
1571 ws: &mut Workspace<Blobs>,
1572 seeds: CommitSet,
1573 stop: &CommitSet,
1574) -> Result<
1575 CommitSet,
1576 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1577> {
1578 let mut visited = HashSet::new();
1579 let mut stack: Vec<CommitHandle> = seeds.iter().map(|raw| Value::new(*raw)).collect();
1580 let mut result = CommitSet::new();
1581
1582 while let Some(commit) = stack.pop() {
1583 if !visited.insert(commit) {
1584 continue;
1585 }
1586
1587 if stop.get(&commit.raw).is_some() {
1588 continue;
1589 }
1590
1591 result.insert(&Entry::new(&commit.raw));
1592
1593 let meta: TribleSet = ws
1594 .local_blobs
1595 .reader()
1596 .unwrap()
1597 .get(commit)
1598 .or_else(|_| ws.base_blobs.get(commit))
1599 .map_err(WorkspaceCheckoutError::Storage)?;
1600
1601 for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
1602 stack.push(p);
1603 }
1604 }
1605
1606 Ok(result)
1607}
1608
1609impl<T, Blobs> CommitSelector<Blobs> for std::ops::Range<T>
1610where
1611 T: CommitSelector<Blobs>,
1612 Blobs: BlobStore<Blake3>,
1613{
1614 fn select(
1615 self,
1616 ws: &mut Workspace<Blobs>,
1617 ) -> Result<
1618 CommitSet,
1619 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1620 > {
1621 let end_patch = self.end.select(ws)?;
1622 let start_patch = self.start.select(ws)?;
1623
1624 collect_reachable_from_patch_until(ws, end_patch, &start_patch)
1625 }
1626}
1627
1628impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeFrom<T>
1629where
1630 T: CommitSelector<Blobs>,
1631 Blobs: BlobStore<Blake3>,
1632{
1633 fn select(
1634 self,
1635 ws: &mut Workspace<Blobs>,
1636 ) -> Result<
1637 CommitSet,
1638 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1639 > {
1640 let Some(head_) = ws.head else {
1641 return Ok(CommitSet::new());
1642 };
1643 let exclude_patch = self.start.select(ws)?;
1644
1645 let mut head_patch = CommitSet::new();
1646 head_patch.insert(&Entry::new(&head_.raw));
1647
1648 collect_reachable_from_patch_until(ws, head_patch, &exclude_patch)
1649 }
1650}
1651
1652impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeTo<T>
1653where
1654 T: CommitSelector<Blobs>,
1655 Blobs: BlobStore<Blake3>,
1656{
1657 fn select(
1658 self,
1659 ws: &mut Workspace<Blobs>,
1660 ) -> Result<
1661 CommitSet,
1662 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1663 > {
1664 let end_patch = self.end.select(ws)?;
1665 collect_reachable_from_patch(ws, end_patch)
1666 }
1667}
1668
1669impl<Blobs> CommitSelector<Blobs> for std::ops::RangeFull
1670where
1671 Blobs: BlobStore<Blake3>,
1672{
1673 fn select(
1674 self,
1675 ws: &mut Workspace<Blobs>,
1676 ) -> Result<
1677 CommitSet,
1678 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1679 > {
1680 let Some(head_) = ws.head else {
1681 return Ok(CommitSet::new());
1682 };
1683 collect_reachable(ws, head_)
1684 }
1685}
1686
impl<Blobs> CommitSelector<Blobs> for TimeRange
where
    Blobs: BlobStore<Blake3>,
{
    /// Selects ancestor commits whose timestamp interval overlaps the
    /// `[self.0, self.1]` epoch range. Commits without a (single) timestamp
    /// attribute are excluded.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // No head means no history to scan.
        let Some(head_) = ws.head else {
            return Ok(CommitSet::new());
        };
        let start = self.0;
        let end = self.1;
        filter(
            ancestors(head_),
            move |meta: &TribleSet, _payload: &TribleSet| {
                // `at_most_one` makes duplicate timestamps fail the filter
                // (Err branch) rather than picking one arbitrarily.
                if let Ok(Some((ts,))) =
                    find!((t: Value<_>), pattern!(meta, [{ timestamp: ?t }])).at_most_one()
                {
                    // The stored value is an interval; keep the commit when
                    // the interval overlaps the queried range.
                    let (ts_start, ts_end): (Epoch, Epoch) = ts.from_value();
                    ts_start <= end && ts_end >= start
                } else {
                    false
                }
            },
        )
        .select(ws)
    }
}
1719
1720impl<Blobs: BlobStore<Blake3>> Workspace<Blobs> {
1721 pub fn branch_id(&self) -> Id {
1723 self.base_branch_id
1724 }
1725
1726 pub fn head(&self) -> Option<CommitHandle> {
1728 self.head
1729 }
1730
1731 pub fn metadata(&self) -> MetadataHandle {
1733 self.commit_metadata
1734 }
1735
1736 pub fn put<S, T>(&mut self, item: T) -> Value<Handle<Blake3, S>>
1739 where
1740 S: BlobSchema + 'static,
1741 T: ToBlob<S>,
1742 Handle<Blake3, S>: ValueSchema,
1743 {
1744 self.local_blobs.put(item).expect("infallible blob put")
1745 }
1746
1747 pub fn get<T, S>(
1752 &mut self,
1753 handle: Value<Handle<Blake3, S>>,
1754 ) -> Result<T, <Blobs::Reader as BlobStoreGet<Blake3>>::GetError<<T as TryFromBlob<S>>::Error>>
1755 where
1756 S: BlobSchema + 'static,
1757 T: TryFromBlob<S>,
1758 Handle<Blake3, S>: ValueSchema,
1759 {
1760 self.local_blobs
1761 .reader()
1762 .unwrap()
1763 .get(handle)
1764 .or_else(|_| self.base_blobs.get(handle))
1765 }
1766
1767 pub fn commit(
1771 &mut self,
1772 content_: impl Into<TribleSet>,
1773 message_: &str,
1774 ) {
1775 let content_ = content_.into();
1776 self.commit_internal(content_, Some(self.commit_metadata), Some(message_));
1777 }
1778
1779 pub fn commit_with_metadata(
1782 &mut self,
1783 content_: impl Into<TribleSet>,
1784 metadata_: MetadataHandle,
1785 message_: &str,
1786 ) {
1787 let content_ = content_.into();
1788 self.commit_internal(content_, Some(metadata_), Some(message_));
1789 }
1790
1791 fn commit_internal(
1792 &mut self,
1793 content_: TribleSet,
1794 metadata_handle: Option<MetadataHandle>,
1795 message_: Option<&str>,
1796 ) {
1797 let content_blob = content_.to_blob();
1799 let message_handle = message_.map(|m| self.put(m.to_string()));
1801 let parents = self.head.iter().copied();
1802
1803 let commit_set = crate::repo::commit::commit_metadata(
1804 &self.signing_key,
1805 parents,
1806 message_handle,
1807 Some(content_blob.clone()),
1808 metadata_handle,
1809 );
1810 let _ = self
1812 .local_blobs
1813 .put(content_blob)
1814 .expect("failed to put content blob");
1815 let commit_handle = self
1816 .local_blobs
1817 .put(commit_set)
1818 .expect("failed to put commit blob");
1819 self.head = Some(commit_handle);
1821 }
1822
1823 pub fn merge(&mut self, other: &mut Workspace<Blobs>) -> Result<CommitHandle, MergeError> {
1837 let other_local = other.local_blobs.reader().unwrap();
1839 for r in other_local.blobs() {
1840 let handle = r.expect("infallible blob enumeration");
1841 let blob: Blob<UnknownBlob> = other_local.get(handle).expect("infallible blob read");
1842
1843 self.local_blobs.put(blob).expect("infallible blob put");
1845 }
1846 let parents = self.head.iter().copied().chain(other.head.iter().copied());
1848 let merge_commit = commit_metadata(
1849 &self.signing_key,
1850 parents,
1851 None, None, None, );
1855 let commit_handle = self
1857 .local_blobs
1858 .put(merge_commit)
1859 .expect("failed to put merge commit blob");
1860 self.head = Some(commit_handle);
1861
1862 Ok(commit_handle)
1863 }
1864
1865 pub fn merge_commit(
1871 &mut self,
1872 other: Value<Handle<Blake3, SimpleArchive>>,
1873 ) -> Result<CommitHandle, MergeError> {
1874 let parents = self.head.iter().copied().chain(Some(other));
1881 let merge_commit = commit_metadata(&self.signing_key, parents, None, None, None);
1882 let commit_handle = self
1883 .local_blobs
1884 .put(merge_commit)
1885 .expect("failed to put merge commit blob");
1886 self.head = Some(commit_handle);
1887 Ok(commit_handle)
1888 }
1889
1890 fn checkout_commits<I>(
1897 &mut self,
1898 commits: I,
1899 ) -> Result<
1900 TribleSet,
1901 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1902 >
1903 where
1904 I: IntoIterator<Item = CommitHandle>,
1905 {
1906 let local = self.local_blobs.reader().unwrap();
1907 let mut result = TribleSet::new();
1908 for commit in commits {
1909 let meta: TribleSet = local
1910 .get(commit)
1911 .or_else(|_| self.base_blobs.get(commit))
1912 .map_err(WorkspaceCheckoutError::Storage)?;
1913
1914 let content_opt =
1919 match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
1920 Ok(Some((c,))) => Some(c),
1921 Ok(None) => None,
1922 Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
1923 };
1924
1925 if let Some(c) = content_opt {
1926 let set: TribleSet = local
1927 .get(c)
1928 .or_else(|_| self.base_blobs.get(c))
1929 .map_err(WorkspaceCheckoutError::Storage)?;
1930 result += set;
1931 } else {
1932 continue;
1934 }
1935 }
1936 Ok(result)
1937 }
1938
1939 fn checkout_commits_metadata<I>(
1940 &mut self,
1941 commits: I,
1942 ) -> Result<
1943 TribleSet,
1944 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1945 >
1946 where
1947 I: IntoIterator<Item = CommitHandle>,
1948 {
1949 let local = self.local_blobs.reader().unwrap();
1950 let mut result = TribleSet::new();
1951 for commit in commits {
1952 let meta: TribleSet = local
1953 .get(commit)
1954 .or_else(|_| self.base_blobs.get(commit))
1955 .map_err(WorkspaceCheckoutError::Storage)?;
1956
1957 let metadata_opt =
1958 match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
1959 Ok(Some((c,))) => Some(c),
1960 Ok(None) => None,
1961 Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
1962 };
1963
1964 if let Some(c) = metadata_opt {
1965 let set: TribleSet = local
1966 .get(c)
1967 .or_else(|_| self.base_blobs.get(c))
1968 .map_err(WorkspaceCheckoutError::Storage)?;
1969 result += set;
1970 }
1971 }
1972 Ok(result)
1973 }
1974
1975 fn checkout_commits_with_metadata<I>(
1976 &mut self,
1977 commits: I,
1978 ) -> Result<
1979 (TribleSet, TribleSet),
1980 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1981 >
1982 where
1983 I: IntoIterator<Item = CommitHandle>,
1984 {
1985 let local = self.local_blobs.reader().unwrap();
1986 let mut data = TribleSet::new();
1987 let mut metadata_set = TribleSet::new();
1988 for commit in commits {
1989 let meta: TribleSet = local
1990 .get(commit)
1991 .or_else(|_| self.base_blobs.get(commit))
1992 .map_err(WorkspaceCheckoutError::Storage)?;
1993
1994 let content_opt =
1995 match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
1996 Ok(Some((c,))) => Some(c),
1997 Ok(None) => None,
1998 Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
1999 };
2000
2001 if let Some(c) = content_opt {
2002 let set: TribleSet = local
2003 .get(c)
2004 .or_else(|_| self.base_blobs.get(c))
2005 .map_err(WorkspaceCheckoutError::Storage)?;
2006 data += set;
2007 }
2008
2009 let metadata_opt =
2010 match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
2011 Ok(Some((c,))) => Some(c),
2012 Ok(None) => None,
2013 Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
2014 };
2015
2016 if let Some(c) = metadata_opt {
2017 let set: TribleSet = local
2018 .get(c)
2019 .or_else(|_| self.base_blobs.get(c))
2020 .map_err(WorkspaceCheckoutError::Storage)?;
2021 metadata_set += set;
2022 }
2023 }
2024 Ok((data, metadata_set))
2025 }
2026
2027 pub fn checkout<R>(
2031 &mut self,
2032 spec: R,
2033 ) -> Result<
2034 TribleSet,
2035 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
2036 >
2037 where
2038 R: CommitSelector<Blobs>,
2039 {
2040 let patch = spec.select(self)?;
2041 let commits = patch.iter().map(|raw| Value::new(*raw));
2042 self.checkout_commits(commits)
2043 }
2044
2045 pub fn checkout_metadata<R>(
2048 &mut self,
2049 spec: R,
2050 ) -> Result<
2051 TribleSet,
2052 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
2053 >
2054 where
2055 R: CommitSelector<Blobs>,
2056 {
2057 let patch = spec.select(self)?;
2058 let commits = patch.iter().map(|raw| Value::new(*raw));
2059 self.checkout_commits_metadata(commits)
2060 }
2061
2062 pub fn checkout_with_metadata<R>(
2065 &mut self,
2066 spec: R,
2067 ) -> Result<
2068 (TribleSet, TribleSet),
2069 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
2070 >
2071 where
2072 R: CommitSelector<Blobs>,
2073 {
2074 let patch = spec.select(self)?;
2075 let commits = patch.iter().map(|raw| Value::new(*raw));
2076 self.checkout_commits_with_metadata(commits)
2077 }
2078}
2079
/// Errors produced while resolving commits during a workspace checkout.
#[derive(Debug)]
pub enum WorkspaceCheckoutError<GetErr: Error> {
    /// The underlying blob store failed to fetch or decode a blob.
    Storage(GetErr),
    /// Commit metadata did not have the expected shape (e.g. more than one
    /// `content` or `metadata` handle on a single commit).
    BadCommitMetadata(),
}
2087
2088impl<E: Error + fmt::Debug> fmt::Display for WorkspaceCheckoutError<E> {
2089 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2090 match self {
2091 WorkspaceCheckoutError::Storage(e) => write!(f, "storage error: {e}"),
2092 WorkspaceCheckoutError::BadCommitMetadata() => {
2093 write!(f, "commit metadata malformed")
2094 }
2095 }
2096 }
2097}
2098
// Marker impl: `Display` and `Debug` already satisfy the `Error` contract,
// so no method overrides are needed.
impl<E: Error + fmt::Debug> Error for WorkspaceCheckoutError<E> {}
2100
2101fn collect_reachable<Blobs: BlobStore<Blake3>>(
2102 ws: &mut Workspace<Blobs>,
2103 from: CommitHandle,
2104) -> Result<
2105 CommitSet,
2106 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
2107> {
2108 let mut visited = HashSet::new();
2109 let mut stack = vec![from];
2110 let mut result = CommitSet::new();
2111
2112 while let Some(commit) = stack.pop() {
2113 if !visited.insert(commit) {
2114 continue;
2115 }
2116 result.insert(&Entry::new(&commit.raw));
2117
2118 let meta: TribleSet = ws
2119 .local_blobs
2120 .reader()
2121 .unwrap()
2122 .get(commit)
2123 .or_else(|_| ws.base_blobs.get(commit))
2124 .map_err(WorkspaceCheckoutError::Storage)?;
2125
2126 for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
2127 stack.push(p);
2128 }
2129 }
2130
2131 Ok(result)
2132}