1#![allow(clippy::type_complexity)]
8pub mod branch;
114pub mod commit;
115pub mod hybridstore;
116pub mod memoryrepo;
117pub mod objectstore;
118pub mod pile;
119
/// Implemented by storage backends that need an explicit shutdown step
/// (flushing buffers, releasing locks, …) rather than relying on `Drop`.
pub trait StorageClose {
    /// Error produced when the backend fails to shut down cleanly.
    type Error: std::error::Error;

    /// Consumes the storage and releases its underlying resources.
    fn close(self) -> Result<(), Self::Error>;
}
132
// Closing is only offered when the backend itself supports explicit shutdown.
impl<Storage> Repository<Storage>
where
    Storage: BlobStore<Blake3> + BranchStore<Blake3> + StorageClose,
{
    /// Shuts down the underlying storage, consuming the repository.
    pub fn close(self) -> Result<(), <Storage as StorageClose>::Error> {
        self.storage.close()
    }
}
148
149use crate::macros::pattern;
150use std::collections::{HashSet, VecDeque};
151use std::convert::Infallible;
152use std::error::Error;
153use std::fmt::Debug;
154use std::fmt::{self};
155
156use commit::commit_metadata;
157use hifitime::Epoch;
158use itertools::Itertools;
159
160use crate::blob::schemas::simplearchive::UnarchiveError;
161use crate::blob::schemas::UnknownBlob;
162use crate::blob::Blob;
163use crate::blob::BlobSchema;
164use crate::blob::MemoryBlobStore;
165use crate::blob::ToBlob;
166use crate::blob::TryFromBlob;
167use crate::find;
168use crate::id::ufoid;
169use crate::id::Id;
170use crate::patch::Entry;
171use crate::patch::IdentitySchema;
172use crate::patch::PATCH;
173use crate::prelude::valueschemas::GenId;
174use crate::repo::branch::branch_metadata;
175use crate::trible::TribleSet;
176use crate::value::schemas::hash::Handle;
177use crate::value::schemas::hash::HashProtocol;
178use crate::value::Value;
179use crate::value::ValueSchema;
180use crate::value::VALUE_LEN;
181use ed25519_dalek::SigningKey;
182
183use crate::blob::schemas::longstring::LongString;
184use crate::blob::schemas::simplearchive::SimpleArchive;
185use crate::prelude::*;
186use crate::value::schemas::ed25519 as ed;
187use crate::value::schemas::hash::Blake3;
188use crate::value::schemas::shortstring::ShortString;
189use crate::value::schemas::time::NsTAIInterval;
190
// Well-known attribute ids used in commit and branch metadata records.
// The hex literal is the attribute's stable 128-bit id; the value schema
// describes how the attribute's value is encoded.
attributes! {
    // Handle of a commit's content payload (the tribles it introduces).
    "4DD4DDD05CC31734B03ABB4E43188B1F" as pub content: Handle<Blake3, SimpleArchive>;
    // Handle of an auxiliary metadata record attached to a commit.
    "88B59BD497540AC5AECDB7518E737C87" as pub metadata: Handle<Blake3, SimpleArchive>;
    // Parent commit link.
    "317044B612C690000D798CA660ECFD2A" as pub parent: Handle<Blake3, SimpleArchive>;
    // Long-form message, stored as a separate blob.
    "B59D147839100B6ED4B165DF76EDF3BB" as pub message: Handle<Blake3, LongString>;
    // Short inline message.
    "12290C0BE0E9207E324F24DDE0D89300" as pub short_message: ShortString;
    // Branch head: the commit a branch currently points at.
    "272FBC56108F336C4D2E17289468C35F" as pub head: Handle<Blake3, SimpleArchive>;
    // Id of the branch an entity refers to.
    "8694CC73AF96A5E1C7635C677D1B928A" as pub branch: GenId;
    // Creation time as a TAI nanosecond interval.
    "71FF566AB4E3119FC2C5E66A18979586" as pub timestamp: NsTAIInterval;
    // Public key that signed the record, plus the Ed25519 signature split
    // into its R and S components.
    "ADB4FFAD247C886848161297EFF5A05B" as pub signed_by: ed::ED25519PublicKey;
    "9DF34F84959928F93A3C40AEB6E9E499" as pub signature_r: ed::ED25519RComponent;
    "1ACE03BF70242B289FDF00E4327C3BC6" as pub signature_s: ed::ED25519SComponent;
}
215
/// Enumeration of every blob handle a store currently holds.
pub trait BlobStoreList<H: HashProtocol> {
    /// Iterator over handles; each item may individually fail with `Err`.
    type Iter<'a>: Iterator<Item = Result<Value<Handle<H, UnknownBlob>>, Self::Err>>
    where
        Self: 'a;
    /// Error produced while enumerating.
    type Err: Error + Debug + Send + Sync + 'static;

    /// Iterates over all stored blob handles, type-erased to `UnknownBlob`.
    fn blobs<'a>(&'a self) -> Self::Iter<'a>;
}
226
/// Bookkeeping information a store keeps about a single blob.
#[derive(Debug, Clone)]
pub struct BlobMetadata {
    // When the blob was stored. NOTE(review): unit and epoch are
    // backend-defined; confirm against the store implementations.
    pub timestamp: u64,
    // Size of the blob — presumably in bytes; confirm with implementations.
    pub length: u64,
}
235
/// Optional per-blob metadata queries.
pub trait BlobStoreMeta<H: HashProtocol> {
    /// Error produced by a metadata lookup.
    type MetaError: std::error::Error + Send + Sync + 'static;

    /// Returns the stored metadata for `handle`, or `Ok(None)` when the
    /// store has no record for that handle.
    fn metadata<S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<Option<BlobMetadata>, Self::MetaError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
249
/// Explicit removal of individual blobs.
pub trait BlobStoreForget<H: HashProtocol> {
    /// Error produced when removal fails.
    type ForgetError: std::error::Error + Send + Sync + 'static;

    /// Asks the store to drop the blob behind `handle`.
    fn forget<S>(&mut self, handle: Value<Handle<H, S>>) -> Result<(), Self::ForgetError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
262
/// Read access to blobs by handle.
pub trait BlobStoreGet<H: HashProtocol> {
    /// Error for a failed fetch; generic over the decode error `E` raised
    /// when converting the raw blob into the requested type.
    type GetError<E: std::error::Error>: Error;

    /// Fetches the blob behind `handle` and converts it into `T`.
    fn get<T, S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<T, Self::GetError<<T as TryFromBlob<S>>::Error>>
    where
        S: BlobSchema + 'static,
        T: TryFromBlob<S>,
        Handle<H, S>: ValueSchema;
}
284
/// Write access: content-addressed insertion of blobs.
pub trait BlobStorePut<H: HashProtocol> {
    /// Error produced when an insert fails.
    type PutError: Error + Debug + Send + Sync + 'static;

    /// Converts `item` into a blob of schema `S`, stores it, and returns its
    /// content-addressed handle.
    fn put<S, T>(&mut self, item: T) -> Result<Value<Handle<H, S>>, Self::PutError>
    where
        S: BlobSchema + 'static,
        T: ToBlob<S>,
        Handle<H, S>: ValueSchema;
}
295
/// A writable blob store that can hand out read handles.
pub trait BlobStore<H: HashProtocol>: BlobStorePut<H> {
    // Cheaply clonable read handle. NOTE(review): whether a reader is a
    // stable snapshot of the store is implementation-defined — confirm.
    type Reader: BlobStoreGet<H> + BlobStoreList<H> + Clone + Send + PartialEq + Eq + 'static;
    /// Error produced when a reader cannot be created.
    type ReaderError: Error + Debug + Send + Sync + 'static;
    /// Creates a reader over the store's current contents.
    fn reader(&mut self) -> Result<Self::Reader, Self::ReaderError>;
}
301
// NOTE(review): semantics inferred from the name — presumably marks the given
// blobs as live so maintenance/GC does not collect them; confirm against the
// implementing stores.
pub trait BlobStoreKeep<H: HashProtocol> {
    /// Declares every handle yielded by `handles` as one to retain.
    fn keep<I>(&mut self, handles: I)
    where
        I: IntoIterator<Item = Value<Handle<H, UnknownBlob>>>;
}
309
/// Outcome of a compare-and-swap update of a branch head.
#[derive(Debug)]
pub enum PushResult<H>
where
    H: HashProtocol,
{
    /// The head was updated.
    Success(),
    /// The expected old value did not match; carries the head that was
    /// actually observed (`None` if the branch currently has no head record).
    Conflict(Option<Value<Handle<H, SimpleArchive>>>),
}
318
/// Stores branch heads: a mapping from branch id to the handle of the
/// branch's current metadata record.
pub trait BranchStore<H: HashProtocol> {
    /// Error while listing branch ids.
    type BranchesError: Error + Debug + Send + Sync + 'static;
    /// Error while reading a single head.
    type HeadError: Error + Debug + Send + Sync + 'static;
    /// Error while updating a head.
    type UpdateError: Error + Debug + Send + Sync + 'static;

    /// Iterator over branch ids; items may fail individually.
    type ListIter<'a>: Iterator<Item = Result<Id, Self::BranchesError>>
    where
        Self: 'a;

    /// Lists all known branch ids.
    fn branches<'a>(&'a mut self) -> Result<Self::ListIter<'a>, Self::BranchesError>;

    /// Returns the current head of `id`, or `None` if the branch is unknown.
    fn head(&mut self, id: Id) -> Result<Option<Value<Handle<H, SimpleArchive>>>, Self::HeadError>;

    /// Atomically replaces the head of `id` if it still equals `old`
    /// (compare-and-swap); otherwise reports [`PushResult::Conflict`].
    fn update(
        &mut self,
        id: Id,
        old: Option<Value<Handle<H, SimpleArchive>>>,
        new: Value<Handle<H, SimpleArchive>>,
    ) -> Result<PushResult<H>, Self::UpdateError>;
}
366
/// Error raised while copying blobs between stores, tagged by phase.
#[derive(Debug)]
pub enum TransferError<ListErr, LoadErr, StoreErr> {
    /// Enumerating the source store failed.
    List(ListErr),
    /// Reading a blob from the source failed.
    Load(LoadErr),
    /// Writing a blob to the target failed.
    Store(StoreErr),
}
373
374impl<ListErr, LoadErr, StoreErr> fmt::Display for TransferError<ListErr, LoadErr, StoreErr> {
375 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
376 write!(f, "failed to transfer blob")
377 }
378}
379
impl<ListErr, LoadErr, StoreErr> Error for TransferError<ListErr, LoadErr, StoreErr>
where
    ListErr: Debug + Error + 'static,
    LoadErr: Debug + Error + 'static,
    StoreErr: Debug + Error + 'static,
{
    // Each variant wraps exactly one underlying error; expose it so error
    // chains can reach the root cause. (An or-pattern is not possible here:
    // the three variants carry different types.)
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match self {
            Self::List(e) => Some(e),
            Self::Load(e) => Some(e),
            Self::Store(e) => Some(e),
        }
    }
}
394
/// Lazily copies the blobs named by `handles` from `source` to `target`.
///
/// Each item of the returned iterator transfers one blob and yields its
/// `(source_handle, target_handle)` pair. The two stores may use different
/// hash protocols: re-inserting the raw bytes recomputes the handle under
/// the target's protocol.
pub fn transfer<'a, BS, BT, HS, HT, Handles>(
    source: &'a BS,
    target: &'a mut BT,
    handles: Handles,
) -> impl Iterator<
    Item = Result<
        (
            Value<Handle<HS, UnknownBlob>>,
            Value<Handle<HT, UnknownBlob>>,
        ),
        TransferError<
            Infallible,
            <BS as BlobStoreGet<HS>>::GetError<Infallible>,
            <BT as BlobStorePut<HT>>::PutError,
        >,
    >,
> + 'a
where
    BS: BlobStoreGet<HS> + 'a,
    BT: BlobStorePut<HT> + 'a,
    HS: 'static + HashProtocol,
    HT: 'static + HashProtocol,
    Handles: IntoIterator<Item = Value<Handle<HS, UnknownBlob>>> + 'a,
    Handles::IntoIter: 'a,
{
    handles.into_iter().map(move |source_handle| {
        // Load type-erased so any blob can be moved without decoding it.
        let blob: Blob<UnknownBlob> = source.get(source_handle).map_err(TransferError::Load)?;
        let target_handle = target.put(blob).map_err(TransferError::Store)?;
        Ok((source_handle, target_handle))
    })
}
427
/// Breadth-first traversal over the handles reachable from a set of roots.
///
/// Reachability is conservative: blob contents are scanned for aligned
/// `VALUE_LEN`-byte windows that resolve to an existing blob, so any embedded
/// bytes that happen to equal a valid handle count as an edge.
pub struct ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    // Store that candidate handles are resolved against.
    source: &'a BS,
    // Handles discovered but not yet yielded.
    queue: VecDeque<Value<Handle<H, UnknownBlob>>>,
    // Raw handle values already processed, to break cycles.
    visited: HashSet<[u8; VALUE_LEN]>,
}
438
439impl<'a, BS, H> ReachableHandles<'a, BS, H>
440where
441 BS: BlobStoreGet<H>,
442 H: 'static + HashProtocol,
443{
444 fn new(source: &'a BS, roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>) -> Self {
445 let mut queue = VecDeque::new();
446 for handle in roots {
447 queue.push_back(handle);
448 }
449
450 Self {
451 source,
452 queue,
453 visited: HashSet::new(),
454 }
455 }
456
457 fn enqueue_from_blob(&mut self, blob: &Blob<UnknownBlob>) {
458 let bytes = blob.bytes.as_ref();
459 let mut offset = 0usize;
460
461 while offset + VALUE_LEN <= bytes.len() {
462 let mut raw = [0u8; VALUE_LEN];
463 raw.copy_from_slice(&bytes[offset..offset + VALUE_LEN]);
464
465 if !self.visited.contains(&raw) {
466 let candidate = Value::<Handle<H, UnknownBlob>>::new(raw);
467 if self
468 .source
469 .get::<anybytes::Bytes, UnknownBlob>(candidate)
470 .is_ok()
471 {
472 self.queue.push_back(candidate);
473 }
474 }
475
476 offset += VALUE_LEN;
477 }
478 }
479}
480
impl<'a, BS, H> Iterator for ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    type Item = Value<Handle<H, UnknownBlob>>;

    /// Yields the next reachable handle in breadth-first order.
    fn next(&mut self) -> Option<Self::Item> {
        while let Some(handle) = self.queue.pop_front() {
            let raw = handle.raw;

            // `insert` returning false means this handle was already emitted.
            if !self.visited.insert(raw) {
                continue;
            }

            // A handle whose blob cannot be loaded is still yielded; it just
            // contributes no further edges.
            if let Ok(blob) = self.source.get(handle) {
                self.enqueue_from_blob(&blob);
            }

            return Some(handle);
        }

        None
    }
}
506
/// Returns an iterator over every handle transitively reachable from `roots`,
/// discovered by conservatively scanning blob contents for embedded handles.
pub fn reachable<'a, BS, H>(
    source: &'a BS,
    roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>,
) -> ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    ReachableHandles::new(source, roots)
}
518
519pub fn potential_handles<'a, H>(
526 set: &'a TribleSet,
527) -> impl Iterator<Item = Value<Handle<H, UnknownBlob>>> + 'a
528where
529 H: HashProtocol,
530{
531 set.vae.iter().map(|raw| {
532 let mut value = [0u8; VALUE_LEN];
533 value.copy_from_slice(&raw[0..VALUE_LEN]);
534 Value::<Handle<H, UnknownBlob>>::new(value)
535 })
536}
537
/// Error raised while persisting a new commit.
#[derive(Debug)]
pub enum CreateCommitError<BlobErr: Error + Debug + Send + Sync + 'static> {
    /// Storing the commit's content payload failed.
    ContentStorageError(BlobErr),
    /// Storing the commit metadata record failed.
    CommitStorageError(BlobErr),
}
547
548impl<BlobErr: Error + Debug + Send + Sync + 'static> fmt::Display for CreateCommitError<BlobErr> {
549 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
550 match self {
551 CreateCommitError::ContentStorageError(e) => write!(f, "Content storage failed: {e}"),
552 CreateCommitError::CommitStorageError(e) => {
553 write!(f, "Commit metadata storage failed: {e}")
554 }
555 }
556 }
557}
558
559impl<BlobErr: Error + Debug + Send + Sync + 'static> Error for CreateCommitError<BlobErr> {
560 fn source(&self) -> Option<&(dyn Error + 'static)> {
561 match self {
562 CreateCommitError::ContentStorageError(e) => Some(e),
563 CreateCommitError::CommitStorageError(e) => Some(e),
564 }
565 }
566}
567
/// Error raised when two workspaces cannot be merged.
#[derive(Debug)]
pub enum MergeError {
    // NOTE(review): presumably the workspaces track different branches or
    // repositories; the check lives in `Workspace::merge`, outside this view.
    DifferentRepos(),
}
573
/// Error raised while pushing a workspace back to the repository.
#[derive(Debug)]
pub enum PushError<Storage: BranchStore<Blake3> + BlobStore<Blake3>> {
    /// Listing branches failed.
    StorageBranches(Storage::BranchesError),
    /// Creating a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Loading a metadata blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// The compare-and-swap on the branch head failed at the storage level.
    BranchUpdate(Storage::UpdateError),
    /// The branch metadata record was malformed (e.g. missing name or head).
    BadBranchMetadata(),
    /// Merging the conflicting state failed.
    MergeError(MergeError),
}
593
// Lets `?` lift merge failures into push errors inside `Repository::push`.
impl<Storage> From<MergeError> for PushError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    fn from(e: MergeError) -> Self {
        PushError::MergeError(e)
    }
}
606
/// Error raised while creating or updating a branch.
#[derive(Debug)]
pub enum BranchError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Creating a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Loading a referenced blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing the branch metadata blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// Reading the branch head failed.
    BranchHead(Storage::HeadError),
    /// Updating the branch head failed at the storage level.
    BranchUpdate(Storage::UpdateError),
    /// A branch with the freshly generated id already exists.
    AlreadyExists(),
    /// No branch with the given id was found.
    BranchNotFound(Id),
}
635
/// Error raised while resolving a branch (e.g. by name).
#[derive(Debug)]
pub enum LookupError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Listing branches failed.
    StorageBranches(Storage::BranchesError),
    /// Reading a branch head failed.
    BranchHead(Storage::HeadError),
    /// Creating a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Loading branch metadata failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Several branches matched; carries all candidate ids.
    NameConflict(Vec<Id>),
    /// A branch metadata record was malformed.
    BadBranchMetadata(),
}
651
/// A content-addressed repository: blob storage plus signed branch heads.
pub struct Repository<Storage: BlobStore<Blake3> + BranchStore<Blake3>> {
    // Backing store for both blobs and branch heads.
    storage: Storage,
    // Default key used to sign commits and branch metadata.
    signing_key: SigningKey,
    // Metadata blob handed to every workspace pulled from here, if set.
    default_metadata: Option<MetadataHandle>,
}
663
/// Error raised while checking a branch out into a workspace.
pub enum PullError<BranchStorageErr, BlobReaderErr, BlobStorageErr>
where
    BranchStorageErr: Error,
    BlobReaderErr: Error,
    BlobStorageErr: Error,
{
    /// The requested branch id has no head record.
    BranchNotFound(Id),
    /// Reading the branch head failed.
    BranchStorage(BranchStorageErr),
    /// Creating a blob reader failed.
    BlobReader(BlobReaderErr),
    /// Loading the branch metadata blob failed.
    BlobStorage(BlobStorageErr),
    /// The branch metadata record was malformed (e.g. multiple heads).
    BadBranchMetadata(),
}
681
// Hand-written Debug mirroring what a derive would produce.
// NOTE(review): with these bounds a `#[derive(Debug)]` would likely be
// equivalent — kept manual as found.
impl<B, R, C> fmt::Debug for PullError<B, R, C>
where
    B: Error + fmt::Debug,
    R: Error + fmt::Debug,
    C: Error + fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            PullError::BranchNotFound(id) => f.debug_tuple("BranchNotFound").field(id).finish(),
            PullError::BranchStorage(e) => f.debug_tuple("BranchStorage").field(e).finish(),
            PullError::BlobReader(e) => f.debug_tuple("BlobReader").field(e).finish(),
            PullError::BlobStorage(e) => f.debug_tuple("BlobStorage").field(e).finish(),
            PullError::BadBranchMetadata() => f.debug_tuple("BadBranchMetadata").finish(),
        }
    }
}
698
699impl<Storage> Repository<Storage>
700where
701 Storage: BlobStore<Blake3> + BranchStore<Blake3>,
702{
703 pub fn new(storage: Storage, signing_key: SigningKey) -> Self {
714 Self {
715 storage,
716 signing_key,
717 default_metadata: None,
718 }
719 }
720
721 pub fn into_storage(self) -> Storage {
727 self.storage
728 }
729
730 pub fn storage(&self) -> &Storage {
732 &self.storage
733 }
734
735 pub fn storage_mut(&mut self) -> &mut Storage {
737 &mut self.storage
738 }
739
740 pub fn set_signing_key(&mut self, signing_key: SigningKey) {
742 self.signing_key = signing_key;
743 }
744
745 pub fn set_default_metadata(
748 &mut self,
749 metadata_set: TribleSet,
750 ) -> Result<MetadataHandle, <Storage as BlobStorePut<Blake3>>::PutError> {
751 let handle = self.storage.put(metadata_set)?;
752 self.default_metadata = Some(handle);
753 Ok(handle)
754 }
755
756 pub fn clear_default_metadata(&mut self) {
758 self.default_metadata = None;
759 }
760
761 pub fn default_metadata(&self) -> Option<MetadataHandle> {
763 self.default_metadata
764 }
765
766 pub fn create_branch(
780 &mut self,
781 branch_name: &str,
782 commit: Option<CommitHandle>,
783 ) -> Result<ExclusiveId, BranchError<Storage>> {
784 self.create_branch_with_key(branch_name, commit, self.signing_key.clone())
785 }
786
787 pub fn create_branch_with_key(
789 &mut self,
790 branch_name: &str,
791 commit: Option<CommitHandle>,
792 signing_key: SigningKey,
793 ) -> Result<ExclusiveId, BranchError<Storage>> {
794 let branch_id = ufoid();
795
796 let branch_set = if let Some(commit) = commit {
797 let reader = self
798 .storage
799 .reader()
800 .map_err(|e| BranchError::StorageReader(e))?;
801 let set: TribleSet = reader.get(commit).map_err(|e| BranchError::StorageGet(e))?;
802
803 branch::branch_metadata(&signing_key, *branch_id, branch_name, Some(set.to_blob()))
804 } else {
805 branch::branch_unsigned(*branch_id, branch_name, None)
806 };
807
808 let branch_blob = branch_set.to_blob();
809 let branch_handle = self
810 .storage
811 .put(branch_blob)
812 .map_err(|e| BranchError::StoragePut(e))?;
813
814 let push_result = self
815 .storage
816 .update(*branch_id, None, branch_handle)
817 .map_err(|e| BranchError::BranchUpdate(e))?;
818
819 match push_result {
820 PushResult::Success() => Ok(branch_id),
821 PushResult::Conflict(_) => Err(BranchError::AlreadyExists()),
822 }
823 }
824
825 pub fn pull(
828 &mut self,
829 branch_id: Id,
830 ) -> Result<
831 Workspace<Storage>,
832 PullError<
833 Storage::HeadError,
834 Storage::ReaderError,
835 <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
836 >,
837 > {
838 self.pull_with_key(branch_id, self.signing_key.clone())
839 }
840
841 pub fn pull_with_key(
843 &mut self,
844 branch_id: Id,
845 signing_key: SigningKey,
846 ) -> Result<
847 Workspace<Storage>,
848 PullError<
849 Storage::HeadError,
850 Storage::ReaderError,
851 <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
852 >,
853 > {
854 let base_branch_meta_handle = match self.storage.head(branch_id) {
856 Ok(Some(handle)) => handle,
857 Ok(None) => return Err(PullError::BranchNotFound(branch_id)),
858 Err(e) => return Err(PullError::BranchStorage(e)),
859 };
860 let reader = self.storage.reader().map_err(PullError::BlobReader)?;
862 let base_branch_meta: TribleSet = match reader.get(base_branch_meta_handle) {
863 Ok(meta_set) => meta_set,
864 Err(e) => return Err(PullError::BlobStorage(e)),
865 };
866
867 let head_ = match find!(
868 (head_: Value<_>),
869 pattern!(&base_branch_meta, [{ head: ?head_ }])
870 )
871 .at_most_one()
872 {
873 Ok(Some((h,))) => Some(h),
874 Ok(None) => None,
875 Err(_) => return Err(PullError::BadBranchMetadata()),
876 };
877 let base_blobs = self.storage.reader().map_err(PullError::BlobReader)?;
879 Ok(Workspace {
880 base_blobs,
881 local_blobs: MemoryBlobStore::new(),
882 head: head_,
883 base_head: head_,
884 base_branch_id: branch_id,
885 base_branch_meta: base_branch_meta_handle,
886 signing_key,
887 default_metadata: self.default_metadata,
888 })
889 }
890
891 pub fn pull_with_metadata(
893 &mut self,
894 branch_id: Id,
895 metadata_set: TribleSet,
896 ) -> Result<
897 Workspace<Storage>,
898 PullError<
899 Storage::HeadError,
900 Storage::ReaderError,
901 <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
902 >,
903 > {
904 let mut workspace = self.pull_with_key(branch_id, self.signing_key.clone())?;
905 workspace.set_default_metadata(metadata_set);
906 Ok(workspace)
907 }
908
909 pub fn push(&mut self, workspace: &mut Workspace<Storage>) -> Result<(), PushError<Storage>> {
913 while let Some(mut conflict_ws) = self.try_push(workspace)? {
918 conflict_ws.merge(workspace)?;
922
923 *workspace = conflict_ws;
928 }
929
930 Ok(())
931 }
932
933 pub fn try_push(
937 &mut self,
938 workspace: &mut Workspace<Storage>,
939 ) -> Result<Option<Workspace<Storage>>, PushError<Storage>> {
940 let workspace_reader = workspace.local_blobs.reader().unwrap();
942 for handle in workspace_reader.blobs() {
943 let handle = handle.expect("infallible blob enumeration");
944 let blob: Blob<UnknownBlob> =
945 workspace_reader.get(handle).expect("infallible blob read");
946 self.storage.put(blob).map_err(PushError::StoragePut)?;
947 }
948
949 if workspace.base_head == workspace.head {
954 return Ok(None);
955 }
956
957 let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
959 let base_branch_meta: TribleSet = repo_reader
960 .get(workspace.base_branch_meta)
961 .map_err(PushError::StorageGet)?;
962
963 let Ok((branch_name,)) = find!((name: Value<_>),
964 pattern!(base_branch_meta, [{ crate::metadata::shortname: ?name }])
965 )
966 .exactly_one() else {
967 return Err(PushError::BadBranchMetadata());
968 };
969
970 let head_handle = workspace.head.ok_or(PushError::BadBranchMetadata())?;
971 let head_: TribleSet = repo_reader
972 .get(head_handle)
973 .map_err(PushError::StorageGet)?;
974
975 let branch_meta = branch_metadata(
976 &workspace.signing_key,
977 workspace.base_branch_id,
978 branch_name.from_value(),
979 Some(head_.to_blob()),
980 );
981
982 let branch_meta_handle = self
983 .storage
984 .put(branch_meta)
985 .map_err(PushError::StoragePut)?;
986
987 let result = self
989 .storage
990 .update(
991 workspace.base_branch_id,
992 Some(workspace.base_branch_meta),
993 branch_meta_handle,
994 )
995 .map_err(PushError::BranchUpdate)?;
996
997 match result {
998 PushResult::Success() => {
999 workspace.base_branch_meta = branch_meta_handle;
1002 workspace.base_head = workspace.head;
1003 workspace.base_blobs = self.storage.reader().map_err(PushError::StorageReader)?;
1006 workspace.local_blobs = MemoryBlobStore::new();
1010 Ok(None)
1011 }
1012 PushResult::Conflict(conflicting_meta) => {
1013 let conflicting_meta = conflicting_meta.ok_or(PushError::BadBranchMetadata())?;
1014
1015 let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
1016 let branch_meta: TribleSet = repo_reader
1017 .get(conflicting_meta)
1018 .map_err(PushError::StorageGet)?;
1019
1020 let head_ = match find!((head_: Value<_>),
1021 pattern!(&branch_meta, [{ head: ?head_ }])
1022 )
1023 .at_most_one()
1024 {
1025 Ok(Some((h,))) => Some(h),
1026 Ok(None) => None,
1027 Err(_) => return Err(PushError::BadBranchMetadata()),
1028 };
1029
1030 let conflict_ws = Workspace {
1031 base_blobs: self.storage.reader().map_err(PushError::StorageReader)?,
1032 local_blobs: MemoryBlobStore::new(),
1033 head: head_,
1034 base_head: head_,
1035 base_branch_id: workspace.base_branch_id,
1036 base_branch_meta: conflicting_meta,
1037 signing_key: workspace.signing_key.clone(),
1038 default_metadata: workspace.default_metadata,
1039 };
1040
1041 Ok(Some(conflict_ws))
1042 }
1043 }
1044 }
1045}
1046
// Handle to a commit metadata record (a `SimpleArchive` blob).
type CommitHandle = Value<Handle<Blake3, SimpleArchive>>;
// Handle to an auxiliary metadata record.
type MetadataHandle = Value<Handle<Blake3, SimpleArchive>>;
// Set of commit handles, keyed by the raw handle value.
type CommitSet = PATCH<VALUE_LEN, IdentitySchema, ()>;
// Handle to a branch metadata record.
type BranchMetaHandle = Value<Handle<Blake3, SimpleArchive>>;
1051
/// A mutable checkout of a branch: locally created (unpushed) blobs layered
/// over a read handle onto the repository's blobs.
pub struct Workspace<Blobs: BlobStore<Blake3>> {
    // Blobs created locally and not yet pushed.
    local_blobs: MemoryBlobStore<Blake3>,
    // Reader over the repository's blobs as of the last pull/push.
    base_blobs: Blobs::Reader,
    // Branch this workspace was pulled from.
    base_branch_id: Id,
    // Branch metadata record the next push will compare-and-swap against.
    base_branch_meta: BranchMetaHandle,
    // Current local head commit (`None`: branch has no commits yet).
    head: Option<CommitHandle>,
    // Head as of the last pull/push; equal to `head` means nothing to push.
    base_head: Option<CommitHandle>,
    // Key used to sign commits and branch updates made from this workspace.
    signing_key: SigningKey,
    // Default metadata attached to new commits, if any.
    default_metadata: Option<MetadataHandle>,
}
1077
impl<Blobs> fmt::Debug for Workspace<Blobs>
where
    Blobs: BlobStore<Blake3>,
    Blobs::Reader: fmt::Debug,
{
    // Hand-written so `signing_key` never appears in the output — presumably
    // to keep the secret key out of logs and error messages.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Workspace")
            .field("local_blobs", &self.local_blobs)
            .field("base_blobs", &self.base_blobs)
            .field("base_branch_id", &self.base_branch_id)
            .field("base_branch_meta", &self.base_branch_meta)
            .field("base_head", &self.base_head)
            .field("head", &self.head)
            .field("default_metadata", &self.default_metadata)
            .finish()
    }
}
1095
/// A query over commit history: evaluated against a workspace, it produces
/// the set of selected commit handles.
pub trait CommitSelector<Blobs: BlobStore<Blake3>> {
    /// Evaluates the selector, reading commit metadata through `ws`.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >;
}
1106
/// Selects a commit and all of its ancestors.
pub struct Ancestors(pub CommitHandle);

/// Convenience constructor for [`Ancestors`].
pub fn ancestors(commit: CommitHandle) -> Ancestors {
    Ancestors(commit)
}
1114
/// Selects the commit `n` steps up the parent chain of a commit.
pub struct NthAncestor(pub CommitHandle, pub usize);

/// Convenience constructor for [`NthAncestor`].
pub fn nth_ancestor(commit: CommitHandle, n: usize) -> NthAncestor {
    NthAncestor(commit, n)
}
1122
/// Selects the direct parents of a commit.
pub struct Parents(pub CommitHandle);

/// Convenience constructor for [`Parents`].
pub fn parents(commit: CommitHandle) -> Parents {
    Parents(commit)
}
1130
/// Selects commits reachable from exactly one of two commits.
pub struct SymmetricDiff(pub CommitHandle, pub CommitHandle);

/// Convenience constructor for [`SymmetricDiff`].
pub fn symmetric_diff(a: CommitHandle, b: CommitHandle) -> SymmetricDiff {
    SymmetricDiff(a, b)
}
1139
/// Combinator: commits selected by either operand.
pub struct Union<A, B> {
    left: A,
    right: B,
}

/// Convenience constructor for [`Union`].
pub fn union<A, B>(left: A, right: B) -> Union<A, B> {
    Union { left, right }
}
1150
/// Combinator: commits selected by both operands.
pub struct Intersect<A, B> {
    left: A,
    right: B,
}

/// Convenience constructor for [`Intersect`].
pub fn intersect<A, B>(left: A, right: B) -> Intersect<A, B> {
    Intersect { left, right }
}
1161
/// Combinator: commits selected by `left` but not by `right`.
pub struct Difference<A, B> {
    left: A,
    right: B,
}

/// Convenience constructor for [`Difference`].
pub fn difference<A, B>(left: A, right: B) -> Difference<A, B> {
    Difference { left, right }
}
1173
/// Selects commits by timestamp window (start, end epochs).
/// NOTE(review): the `CommitSelector` impl for this type is not in this
/// chunk — confirm inclusivity of the bounds there.
pub struct TimeRange(pub Epoch, pub Epoch);

/// Convenience constructor for [`TimeRange`].
pub fn time_range(start: Epoch, end: Epoch) -> TimeRange {
    TimeRange(start, end)
}
1181
/// Combinator: keeps only commits of `selector` whose (metadata, payload)
/// pair satisfies `filter`.
pub struct Filter<S, F> {
    selector: S,
    filter: F,
}

/// Convenience constructor for [`Filter`].
pub fn filter<S, F>(selector: S, filter: F) -> Filter<S, F> {
    Filter { selector, filter }
}
1192
1193impl<Blobs> CommitSelector<Blobs> for CommitHandle
1194where
1195 Blobs: BlobStore<Blake3>,
1196{
1197 fn select(
1198 self,
1199 _ws: &mut Workspace<Blobs>,
1200 ) -> Result<
1201 CommitSet,
1202 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1203 > {
1204 let mut patch = CommitSet::new();
1205 patch.insert(&Entry::new(&self.raw));
1206 Ok(patch)
1207 }
1208}
1209
1210impl<Blobs> CommitSelector<Blobs> for Vec<CommitHandle>
1211where
1212 Blobs: BlobStore<Blake3>,
1213{
1214 fn select(
1215 self,
1216 _ws: &mut Workspace<Blobs>,
1217 ) -> Result<
1218 CommitSet,
1219 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1220 > {
1221 let mut patch = CommitSet::new();
1222 for handle in self {
1223 patch.insert(&Entry::new(&handle.raw));
1224 }
1225 Ok(patch)
1226 }
1227}
1228
1229impl<Blobs> CommitSelector<Blobs> for &[CommitHandle]
1230where
1231 Blobs: BlobStore<Blake3>,
1232{
1233 fn select(
1234 self,
1235 _ws: &mut Workspace<Blobs>,
1236 ) -> Result<
1237 CommitSet,
1238 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1239 > {
1240 let mut patch = CommitSet::new();
1241 for handle in self {
1242 patch.insert(&Entry::new(&handle.raw));
1243 }
1244 Ok(patch)
1245 }
1246}
1247
1248impl<Blobs> CommitSelector<Blobs> for Option<CommitHandle>
1249where
1250 Blobs: BlobStore<Blake3>,
1251{
1252 fn select(
1253 self,
1254 _ws: &mut Workspace<Blobs>,
1255 ) -> Result<
1256 CommitSet,
1257 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1258 > {
1259 let mut patch = CommitSet::new();
1260 if let Some(handle) = self {
1261 patch.insert(&Entry::new(&handle.raw));
1262 }
1263 Ok(patch)
1264 }
1265}
1266
impl<Blobs> CommitSelector<Blobs> for Ancestors
where
    Blobs: BlobStore<Blake3>,
{
    /// Selects the commit plus everything reachable via parent links.
    /// Delegates to `collect_reachable` (defined elsewhere in this module).
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        collect_reachable(ws, self.0)
    }
}
1281
impl<Blobs> CommitSelector<Blobs> for NthAncestor
where
    Blobs: BlobStore<Blake3>,
{
    /// Walks `n` steps along the parent chain starting at the commit.
    ///
    /// Returns an empty selection when the chain is shorter than `n`. For a
    /// merge commit, whichever parent the query yields first is followed —
    /// the choice is not guaranteed stable by this code.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let mut current = self.0;
        let mut remaining = self.1;

        while remaining > 0 {
            let meta: TribleSet = ws.get(current).map_err(WorkspaceCheckoutError::Storage)?;
            let mut parents = find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }]));
            // A root commit (no parent) ends the walk: nothing to select.
            let Some((p,)) = parents.next() else {
                return Ok(CommitSet::new());
            };
            current = p;
            remaining -= 1;
        }

        let mut patch = CommitSet::new();
        patch.insert(&Entry::new(&current.raw));
        Ok(patch)
    }
}
1311
impl<Blobs> CommitSelector<Blobs> for Parents
where
    Blobs: BlobStore<Blake3>,
{
    /// Selects every direct parent of the commit (empty for root commits).
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let meta: TribleSet = ws.get(self.0).map_err(WorkspaceCheckoutError::Storage)?;
        let mut result = CommitSet::new();
        for (p,) in find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }])) {
            result.insert(&Entry::new(&p.raw));
        }
        Ok(result)
    }
}
1331
1332impl<Blobs> CommitSelector<Blobs> for SymmetricDiff
1333where
1334 Blobs: BlobStore<Blake3>,
1335{
1336 fn select(
1337 self,
1338 ws: &mut Workspace<Blobs>,
1339 ) -> Result<
1340 CommitSet,
1341 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1342 > {
1343 let a = collect_reachable(ws, self.0)?;
1344 let b = collect_reachable(ws, self.1)?;
1345 let inter = a.intersect(&b);
1346 let mut union = a;
1347 union.union(b);
1348 Ok(union.difference(&inter))
1349 }
1350}
1351
1352impl<A, B, Blobs> CommitSelector<Blobs> for Union<A, B>
1353where
1354 A: CommitSelector<Blobs>,
1355 B: CommitSelector<Blobs>,
1356 Blobs: BlobStore<Blake3>,
1357{
1358 fn select(
1359 self,
1360 ws: &mut Workspace<Blobs>,
1361 ) -> Result<
1362 CommitSet,
1363 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1364 > {
1365 let mut left = self.left.select(ws)?;
1366 let right = self.right.select(ws)?;
1367 left.union(right);
1368 Ok(left)
1369 }
1370}
1371
1372impl<A, B, Blobs> CommitSelector<Blobs> for Intersect<A, B>
1373where
1374 A: CommitSelector<Blobs>,
1375 B: CommitSelector<Blobs>,
1376 Blobs: BlobStore<Blake3>,
1377{
1378 fn select(
1379 self,
1380 ws: &mut Workspace<Blobs>,
1381 ) -> Result<
1382 CommitSet,
1383 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1384 > {
1385 let left = self.left.select(ws)?;
1386 let right = self.right.select(ws)?;
1387 Ok(left.intersect(&right))
1388 }
1389}
1390
1391impl<A, B, Blobs> CommitSelector<Blobs> for Difference<A, B>
1392where
1393 A: CommitSelector<Blobs>,
1394 B: CommitSelector<Blobs>,
1395 Blobs: BlobStore<Blake3>,
1396{
1397 fn select(
1398 self,
1399 ws: &mut Workspace<Blobs>,
1400 ) -> Result<
1401 CommitSet,
1402 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1403 > {
1404 let left = self.left.select(ws)?;
1405 let right = self.right.select(ws)?;
1406 Ok(left.difference(&right))
1407 }
1408}
1409
impl<S, F, Blobs> CommitSelector<Blobs> for Filter<S, F>
where
    Blobs: BlobStore<Blake3>,
    S: CommitSelector<Blobs>,
    F: for<'x, 'y> Fn(&'x TribleSet, &'y TribleSet) -> bool,
{
    /// Keeps only the commits of the inner selection whose metadata and
    /// content payload satisfy the predicate.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let patch = self.selector.select(ws)?;
        let mut result = CommitSet::new();
        let filter = self.filter;
        for raw in patch.iter() {
            let handle = Value::new(*raw);
            // Commit metadata record (parents, message, content handle, …).
            let meta: TribleSet = ws.get(handle).map_err(WorkspaceCheckoutError::Storage)?;

            // A well-formed commit references exactly one content blob.
            let Ok((content_handle,)) = find!(
                (c: Value<_>),
                pattern!(&meta, [{ content: ?c }])
            )
            .exactly_one() else {
                return Err(WorkspaceCheckoutError::BadCommitMetadata());
            };

            // The tribles actually introduced by this commit.
            let payload: TribleSet = ws
                .get(content_handle)
                .map_err(WorkspaceCheckoutError::Storage)?;

            if filter(&meta, &payload) {
                result.insert(&Entry::new(raw));
            }
        }
        Ok(result)
    }
}
1449
/// Selects every commit in the current head's ancestry that touches an entity.
pub struct HistoryOf(pub Id);

/// Convenience constructor for [`HistoryOf`].
pub fn history_of(entity: Id) -> HistoryOf {
    HistoryOf(entity)
}
1457
impl<Blobs> CommitSelector<Blobs> for HistoryOf
where
    Blobs: BlobStore<Blake3>,
{
    /// Walks the ancestry of the workspace head and keeps commits whose
    /// payload contains at least one trible about the entity. Empty when the
    /// workspace has no head yet.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let Some(head_) = ws.head else {
            return Ok(CommitSet::new());
        };
        let entity = self.0;
        filter(
            ancestors(head_),
            move |_: &TribleSet, payload: &TribleSet| payload.iter().any(|t| t.e() == &entity),
        )
        .select(ws)
    }
}
1480
/// Unions the ancestor closures of every commit in `patch`, delegating to
/// `collect_reachable` (defined elsewhere in this module) per seed commit.
fn collect_reachable_from_patch<Blobs: BlobStore<Blake3>>(
    ws: &mut Workspace<Blobs>,
    patch: CommitSet,
) -> Result<
    CommitSet,
    WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
> {
    let mut result = CommitSet::new();
    for raw in patch.iter() {
        let handle = Value::new(*raw);
        let reach = collect_reachable(ws, handle)?;
        result.union(reach);
    }
    Ok(result)
}
1503
1504fn collect_reachable_from_patch_until<Blobs: BlobStore<Blake3>>(
1505 ws: &mut Workspace<Blobs>,
1506 seeds: CommitSet,
1507 stop: &CommitSet,
1508) -> Result<
1509 CommitSet,
1510 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1511> {
1512 let mut visited = HashSet::new();
1513 let mut stack: Vec<CommitHandle> = seeds.iter().map(|raw| Value::new(*raw)).collect();
1514 let mut result = CommitSet::new();
1515
1516 while let Some(commit) = stack.pop() {
1517 if !visited.insert(commit) {
1518 continue;
1519 }
1520
1521 if stop.get(&commit.raw).is_some() {
1522 continue;
1523 }
1524
1525 result.insert(&Entry::new(&commit.raw));
1526
1527 let meta: TribleSet = ws
1528 .local_blobs
1529 .reader()
1530 .unwrap()
1531 .get(commit)
1532 .or_else(|_| ws.base_blobs.get(commit))
1533 .map_err(WorkspaceCheckoutError::Storage)?;
1534
1535 for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
1536 stack.push(p);
1537 }
1538 }
1539
1540 Ok(result)
1541}
1542
1543impl<T, Blobs> CommitSelector<Blobs> for std::ops::Range<T>
1544where
1545 T: CommitSelector<Blobs>,
1546 Blobs: BlobStore<Blake3>,
1547{
1548 fn select(
1549 self,
1550 ws: &mut Workspace<Blobs>,
1551 ) -> Result<
1552 CommitSet,
1553 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1554 > {
1555 let end_patch = self.end.select(ws)?;
1556 let start_patch = self.start.select(ws)?;
1557
1558 collect_reachable_from_patch_until(ws, end_patch, &start_patch)
1559 }
1560}
1561
1562impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeFrom<T>
1563where
1564 T: CommitSelector<Blobs>,
1565 Blobs: BlobStore<Blake3>,
1566{
1567 fn select(
1568 self,
1569 ws: &mut Workspace<Blobs>,
1570 ) -> Result<
1571 CommitSet,
1572 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1573 > {
1574 let Some(head_) = ws.head else {
1575 return Ok(CommitSet::new());
1576 };
1577 let exclude_patch = self.start.select(ws)?;
1578
1579 let mut head_patch = CommitSet::new();
1580 head_patch.insert(&Entry::new(&head_.raw));
1581
1582 collect_reachable_from_patch_until(ws, head_patch, &exclude_patch)
1583 }
1584}
1585
1586impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeTo<T>
1587where
1588 T: CommitSelector<Blobs>,
1589 Blobs: BlobStore<Blake3>,
1590{
1591 fn select(
1592 self,
1593 ws: &mut Workspace<Blobs>,
1594 ) -> Result<
1595 CommitSet,
1596 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1597 > {
1598 let end_patch = self.end.select(ws)?;
1599 collect_reachable_from_patch(ws, end_patch)
1600 }
1601}
1602
1603impl<Blobs> CommitSelector<Blobs> for std::ops::RangeFull
1604where
1605 Blobs: BlobStore<Blake3>,
1606{
1607 fn select(
1608 self,
1609 ws: &mut Workspace<Blobs>,
1610 ) -> Result<
1611 CommitSet,
1612 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1613 > {
1614 let Some(head_) = ws.head else {
1615 return Ok(CommitSet::new());
1616 };
1617 collect_reachable(ws, head_)
1618 }
1619}
1620
impl<Blobs> CommitSelector<Blobs> for TimeRange
where
    Blobs: BlobStore<Blake3>,
{
    /// Selects ancestors of the current head whose commit timestamp interval
    /// overlaps the queried `(self.0, self.1)` range.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // No head means there are no commits to inspect.
        let Some(head_) = ws.head else {
            return Ok(CommitSet::new());
        };
        let start = self.0;
        let end = self.1;
        filter(
            ancestors(head_),
            move |meta: &TribleSet, _payload: &TribleSet| {
                // Keep the commit only when it carries exactly one timestamp;
                // zero or duplicate timestamp attributes exclude it.
                if let Ok(Some((ts,))) =
                    find!((t: Value<_>), pattern!(meta, [{ timestamp: ?t }])).at_most_one()
                {
                    // The stored value decodes to an (Epoch, Epoch) interval;
                    // standard interval-overlap test against [start, end].
                    let (ts_start, ts_end): (Epoch, Epoch) =
                        crate::value::FromValue::from_value(&ts);
                    ts_start <= end && ts_end >= start
                } else {
                    false
                }
            },
        )
        .select(ws)
    }
}
1654
impl<Blobs: BlobStore<Blake3>> Workspace<Blobs> {
    /// Returns the id of the branch this workspace was checked out from.
    pub fn branch_id(&self) -> Id {
        self.base_branch_id
    }

    /// Returns the commit the workspace currently points at, or `None`
    /// if no commit has been made or checked out yet.
    pub fn head(&self) -> Option<CommitHandle> {
        self.head
    }

    /// Stores `metadata_set` as a local blob and remembers its handle as the
    /// default metadata attached to subsequent commits (until cleared).
    /// Returns the handle of the stored metadata blob.
    pub fn set_default_metadata(&mut self, metadata_set: TribleSet) -> MetadataHandle {
        let handle = self
            .local_blobs
            .put(metadata_set)
            .expect("infallible metadata blob put");
        self.default_metadata = Some(handle);
        handle
    }

    /// Clears the default metadata; later commits carry no metadata unless
    /// one is passed explicitly to [`Workspace::commit`].
    pub fn clear_default_metadata(&mut self) {
        self.default_metadata = None;
    }

    /// Returns the currently configured default metadata handle, if any.
    pub fn default_metadata(&self) -> Option<MetadataHandle> {
        self.default_metadata
    }

    /// Stores `item` in the workspace-local blob store and returns its handle.
    ///
    /// Local puts are treated as infallible (hence the `expect`); failures
    /// here indicate a broken local store, not a recoverable condition.
    pub fn put<S, T>(&mut self, item: T) -> Value<Handle<Blake3, S>>
    where
        S: BlobSchema + 'static,
        T: ToBlob<S>,
        Handle<Blake3, S>: ValueSchema,
    {
        self.local_blobs.put(item).expect("infallible blob put")
    }

    /// Fetches and decodes the blob behind `handle`, trying the local blob
    /// store first and falling back to the base store.
    ///
    /// # Errors
    /// Returns the base store's get/decode error when the blob is in neither
    /// store or fails to convert into `T`.
    pub fn get<T, S>(
        &mut self,
        handle: Value<Handle<Blake3, S>>,
    ) -> Result<T, <Blobs::Reader as BlobStoreGet<Blake3>>::GetError<<T as TryFromBlob<S>>::Error>>
    where
        S: BlobSchema + 'static,
        T: TryFromBlob<S>,
        Handle<Blake3, S>: ValueSchema,
    {
        self.local_blobs
            .reader()
            .unwrap()
            .get(handle)
            .or_else(|_| self.base_blobs.get(handle))
    }

    /// Records a new commit with `content_` on top of the current head.
    ///
    /// When `metadata_` is `Some`, it is stored as a blob and attached to the
    /// commit; when `None`, the workspace's default metadata (if configured)
    /// is used instead. `message_` is an optional human-readable message.
    pub fn commit(
        &mut self,
        content_: TribleSet,
        metadata_: Option<TribleSet>,
        message_: Option<&str>,
    ) {
        let metadata_handle = match metadata_ {
            Some(metadata_set) => Some(
                self.local_blobs
                    .put(metadata_set)
                    .expect("infallible metadata blob put"),
            ),
            None => self.default_metadata,
        };
        self.commit_internal(content_, metadata_handle, message_);
    }

    /// Shared commit path: builds signed commit metadata, stores the content
    /// and commit blobs locally, and advances `self.head` to the new commit.
    fn commit_internal(
        &mut self,
        content_: TribleSet,
        metadata_handle: Option<MetadataHandle>,
        message_: Option<&str>,
    ) {
        let content_blob = content_.to_blob();
        // The message is stored as a LongString blob, referenced by handle.
        let message_handle = message_.map(|m| self.put::<LongString, String>(m.to_string()));
        // The current head (if any) becomes the sole parent.
        let parents = self.head.iter().copied();

        let commit_set = crate::repo::commit::commit_metadata(
            &self.signing_key,
            parents,
            message_handle,
            Some(content_blob.clone()),
            metadata_handle,
        );
        let _ = self
            .local_blobs
            .put(content_blob)
            .expect("failed to put content blob");
        let commit_handle = self
            .local_blobs
            .put(commit_set)
            .expect("failed to put commit blob");
        self.head = Some(commit_handle);
    }

    /// Merges `other` into this workspace: copies every blob from the other
    /// workspace's local store, then creates a merge commit whose parents are
    /// the two heads. Advances `self.head` to the merge commit and returns it.
    pub fn merge(&mut self, other: &mut Workspace<Blobs>) -> Result<CommitHandle, MergeError> {
        let other_local = other.local_blobs.reader().unwrap();
        for r in other_local.blobs() {
            let handle = r.expect("infallible blob enumeration");
            let blob: Blob<UnknownBlob> = other_local.get(handle).expect("infallible blob read");

            self.local_blobs.put(blob).expect("infallible blob put");
        }
        // A missing head on either side simply contributes no parent.
        let parents = self.head.iter().copied().chain(other.head.iter().copied());
        let merge_commit = commit_metadata(
            &self.signing_key,
            parents,
            None,
            None,
            None,
        );
        let commit_handle = self
            .local_blobs
            .put(merge_commit)
            .expect("failed to put merge commit blob");
        self.head = Some(commit_handle);

        Ok(commit_handle)
    }

    /// Creates a merge commit whose parents are the current head (if any)
    /// and the commit `other`, advancing `self.head` to the new commit.
    ///
    /// Unlike [`Workspace::merge`], no blobs are copied; `other` is assumed
    /// to already be resolvable from this workspace's stores.
    pub fn merge_commit(
        &mut self,
        other: Value<Handle<Blake3, SimpleArchive>>,
    ) -> Result<CommitHandle, MergeError> {
        let parents = self.head.iter().copied().chain(Some(other));
        let merge_commit = commit_metadata(&self.signing_key, parents, None, None, None);
        let commit_handle = self
            .local_blobs
            .put(merge_commit)
            .expect("failed to put merge commit blob");
        self.head = Some(commit_handle);
        Ok(commit_handle)
    }

    /// Unions the content `TribleSet`s of the given commits.
    ///
    /// A commit without a `content` attribute contributes nothing; a commit
    /// with more than one `content` attribute is considered malformed.
    fn checkout_commits<I>(
        &mut self,
        commits: I,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        I: IntoIterator<Item = CommitHandle>,
    {
        let local = self.local_blobs.reader().unwrap();
        let mut result = TribleSet::new();
        for commit in commits {
            // Commit metadata may live locally or in the base store.
            let meta: TribleSet = local
                .get(commit)
                .or_else(|_| self.base_blobs.get(commit))
                .map_err(WorkspaceCheckoutError::Storage)?;

            let content_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };

            if let Some(c) = content_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                result.union(set);
            } else {
                // No content attribute: nothing to merge for this commit.
                continue;
            }
        }
        Ok(result)
    }

    /// Unions the metadata `TribleSet`s of the given commits; commits
    /// without a `metadata` attribute contribute nothing.
    fn checkout_commits_metadata<I>(
        &mut self,
        commits: I,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        I: IntoIterator<Item = CommitHandle>,
    {
        let local = self.local_blobs.reader().unwrap();
        let mut result = TribleSet::new();
        for commit in commits {
            let meta: TribleSet = local
                .get(commit)
                .or_else(|_| self.base_blobs.get(commit))
                .map_err(WorkspaceCheckoutError::Storage)?;

            let metadata_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };

            if let Some(c) = metadata_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                result.union(set);
            }
        }
        Ok(result)
    }

    /// Unions both the content and metadata `TribleSet`s of the given
    /// commits, returned as `(content, metadata)`. Missing attributes
    /// contribute nothing; duplicates are malformed.
    fn checkout_commits_with_metadata<I>(
        &mut self,
        commits: I,
    ) -> Result<
        (TribleSet, TribleSet),
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        I: IntoIterator<Item = CommitHandle>,
    {
        let local = self.local_blobs.reader().unwrap();
        let mut data = TribleSet::new();
        let mut metadata_set = TribleSet::new();
        for commit in commits {
            let meta: TribleSet = local
                .get(commit)
                .or_else(|_| self.base_blobs.get(commit))
                .map_err(WorkspaceCheckoutError::Storage)?;

            let content_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };

            if let Some(c) = content_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                data.union(set);
            }

            let metadata_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };

            if let Some(c) = metadata_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                metadata_set.union(set);
            }
        }
        Ok((data, metadata_set))
    }

    /// Resolves `spec` to a set of commits and returns the union of their
    /// content `TribleSet`s.
    pub fn checkout<R>(
        &mut self,
        spec: R,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        R: CommitSelector<Blobs>,
    {
        let patch = spec.select(self)?;
        let commits = patch.iter().map(|raw| Value::new(*raw));
        self.checkout_commits(commits)
    }

    /// Resolves `spec` to a set of commits and returns the union of their
    /// metadata `TribleSet`s.
    pub fn checkout_metadata<R>(
        &mut self,
        spec: R,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        R: CommitSelector<Blobs>,
    {
        let patch = spec.select(self)?;
        let commits = patch.iter().map(|raw| Value::new(*raw));
        self.checkout_commits_metadata(commits)
    }

    /// Resolves `spec` to a set of commits and returns both the unioned
    /// content and the unioned metadata as `(content, metadata)`.
    pub fn checkout_with_metadata<R>(
        &mut self,
        spec: R,
    ) -> Result<
        (TribleSet, TribleSet),
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        R: CommitSelector<Blobs>,
    {
        let patch = spec.select(self)?;
        let commits = patch.iter().map(|raw| Value::new(*raw));
        self.checkout_commits_with_metadata(commits)
    }
}
2028
/// Errors surfaced while materializing commits during a workspace checkout.
#[derive(Debug)]
pub enum WorkspaceCheckoutError<GetErr: Error> {
    /// The underlying blob store failed to produce a requested blob.
    Storage(GetErr),
    /// A commit's metadata did not have the expected shape
    /// (e.g. duplicate `content` or `metadata` attributes).
    BadCommitMetadata(),
}
2036
2037impl<E: Error + fmt::Debug> fmt::Display for WorkspaceCheckoutError<E> {
2038 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2039 match self {
2040 WorkspaceCheckoutError::Storage(e) => write!(f, "storage error: {e}"),
2041 WorkspaceCheckoutError::BadCommitMetadata() => {
2042 write!(f, "commit metadata malformed")
2043 }
2044 }
2045 }
2046}
2047
2048impl<E: Error + fmt::Debug> Error for WorkspaceCheckoutError<E> {}
2049
2050fn collect_reachable<Blobs: BlobStore<Blake3>>(
2051 ws: &mut Workspace<Blobs>,
2052 from: CommitHandle,
2053) -> Result<
2054 CommitSet,
2055 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
2056> {
2057 let mut visited = HashSet::new();
2058 let mut stack = vec![from];
2059 let mut result = CommitSet::new();
2060
2061 while let Some(commit) = stack.pop() {
2062 if !visited.insert(commit) {
2063 continue;
2064 }
2065 result.insert(&Entry::new(&commit.raw));
2066
2067 let meta: TribleSet = ws
2068 .local_blobs
2069 .reader()
2070 .unwrap()
2071 .get(commit)
2072 .or_else(|_| ws.base_blobs.get(commit))
2073 .map_err(WorkspaceCheckoutError::Storage)?;
2074
2075 for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
2076 stack.push(p);
2077 }
2078 }
2079
2080 Ok(result)
2081}