1#![allow(clippy::type_complexity)]
8pub mod branch;
114pub mod commit;
116pub mod hybridstore;
118pub mod memoryrepo;
120#[cfg(feature = "object-store")]
121pub mod objectstore;
123pub mod pile;
125
/// Storage backends that need an explicit, fallible shutdown implement
/// this; `Repository::close` forwards to it.
pub trait StorageClose {
    /// Error reported when shutting the store down fails.
    type Error: std::error::Error;

    /// Consumes the store and releases its underlying resources.
    fn close(self) -> Result<(), Self::Error>;
}
138
139impl<Storage> Repository<Storage>
141where
142 Storage: BlobStore<Blake3> + BranchStore<Blake3> + StorageClose,
143{
144 pub fn close(self) -> Result<(), <Storage as StorageClose>::Error> {
151 self.storage.close()
152 }
153}
154
155use crate::macros::pattern;
156use std::collections::{HashSet, VecDeque};
157use std::convert::Infallible;
158use std::error::Error;
159use std::fmt::Debug;
160use std::fmt::{self};
161
162use commit::commit_metadata;
163use hifitime::Epoch;
164use itertools::Itertools;
165
166use crate::blob::schemas::simplearchive::UnarchiveError;
167use crate::blob::schemas::UnknownBlob;
168use crate::blob::Blob;
169use crate::blob::BlobSchema;
170use crate::blob::MemoryBlobStore;
171use crate::blob::ToBlob;
172use crate::blob::TryFromBlob;
173use crate::find;
174use crate::id::genid;
175use crate::id::Id;
176use crate::patch::Entry;
177use crate::patch::IdentitySchema;
178use crate::patch::PATCH;
179use crate::prelude::valueschemas::GenId;
180use crate::repo::branch::branch_metadata;
181use crate::trible::TribleSet;
182use crate::value::schemas::hash::Handle;
183use crate::value::schemas::hash::HashProtocol;
184use crate::value::Value;
185use crate::value::ValueSchema;
186use crate::value::VALUE_LEN;
187use ed25519_dalek::SigningKey;
188
189use crate::blob::schemas::longstring::LongString;
190use crate::blob::schemas::simplearchive::SimpleArchive;
191use crate::prelude::*;
192use crate::value::schemas::ed25519 as ed;
193use crate::value::schemas::hash::Blake3;
194use crate::value::schemas::shortstring::ShortString;
195use crate::value::schemas::time::NsTAIInterval;
196
// Repository vocabulary: stable 128-bit attribute ids used by commit and
// branch metadata tribles. The ids are persisted data — never change them.
attributes! {
    "4DD4DDD05CC31734B03ABB4E43188B1F" as pub content: Handle<Blake3, SimpleArchive>;
    "88B59BD497540AC5AECDB7518E737C87" as pub metadata: Handle<Blake3, SimpleArchive>;
    "317044B612C690000D798CA660ECFD2A" as pub parent: Handle<Blake3, SimpleArchive>;
    "B59D147839100B6ED4B165DF76EDF3BB" as pub message: Handle<Blake3, LongString>;
    "12290C0BE0E9207E324F24DDE0D89300" as pub short_message: ShortString;
    "272FBC56108F336C4D2E17289468C35F" as pub head: Handle<Blake3, SimpleArchive>;
    "8694CC73AF96A5E1C7635C677D1B928A" as pub branch: GenId;
    "71FF566AB4E3119FC2C5E66A18979586" as pub timestamp: NsTAIInterval;
    "ADB4FFAD247C886848161297EFF5A05B" as pub signed_by: ed::ED25519PublicKey;
    "9DF34F84959928F93A3C40AEB6E9E499" as pub signature_r: ed::ED25519RComponent;
    "1ACE03BF70242B289FDF00E4327C3BC6" as pub signature_s: ed::ED25519SComponent;
}
221
/// Enumeration of every blob handle a store currently holds.
pub trait BlobStoreList<H: HashProtocol> {
    /// Streaming iterator over handles; items are individually fallible.
    type Iter<'a>: Iterator<Item = Result<Value<Handle<H, UnknownBlob>>, Self::Err>>
    where
        Self: 'a;
    /// Error produced while enumerating blobs.
    type Err: Error + Debug + Send + Sync + 'static;

    /// Returns an iterator over all stored blob handles.
    fn blobs<'a>(&'a self) -> Self::Iter<'a>;
}
234
/// Per-blob bookkeeping reported by [`BlobStoreMeta`].
#[derive(Debug, Clone)]
pub struct BlobMetadata {
    /// Insertion time; unit and epoch are backend-defined — TODO confirm.
    pub timestamp: u64,
    /// Size of the stored blob in bytes.
    pub length: u64,
}
243
/// Optional capability: query bookkeeping metadata for a stored blob.
pub trait BlobStoreMeta<H: HashProtocol> {
    /// Error produced by a metadata lookup.
    type MetaError: std::error::Error + Send + Sync + 'static;

    /// Returns metadata for `handle`, or `Ok(None)` when the blob is unknown.
    fn metadata<S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<Option<BlobMetadata>, Self::MetaError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
259
/// Optional capability: drop a blob from the store.
pub trait BlobStoreForget<H: HashProtocol> {
    /// Error produced when removal fails.
    type ForgetError: std::error::Error + Send + Sync + 'static;

    /// Removes the blob identified by `handle` from the store.
    fn forget<S>(&mut self, handle: Value<Handle<H, S>>) -> Result<(), Self::ForgetError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
274
/// Read side of a blob store: fetch and decode a blob by handle.
pub trait BlobStoreGet<H: HashProtocol> {
    /// Lookup error, generic over the caller's decode error `E`
    /// (from [`TryFromBlob`]); lets stores wrap both failure kinds.
    type GetError<E: std::error::Error>: Error;

    /// Fetches the blob for `handle` and converts it to `T`.
    fn get<T, S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<T, Self::GetError<<T as TryFromBlob<S>>::Error>>
    where
        S: BlobSchema + 'static,
        T: TryFromBlob<S>,
        Handle<H, S>: ValueSchema;
}
297
/// Write side of a blob store: insert an item and obtain its handle.
pub trait BlobStorePut<H: HashProtocol> {
    /// Error produced when storing fails.
    type PutError: Error + Debug + Send + Sync + 'static;

    /// Serializes `item` to a blob, stores it, and returns its content handle.
    fn put<S, T>(&mut self, item: T) -> Result<Value<Handle<H, S>>, Self::PutError>
    where
        S: BlobSchema + 'static,
        T: ToBlob<S>,
        Handle<H, S>: ValueSchema;
}
310
/// Full blob store: writes plus the ability to open read snapshots.
pub trait BlobStore<H: HashProtocol>: BlobStorePut<H> {
    /// Cheap, cloneable read handle over the store's current contents.
    type Reader: BlobStoreGet<H> + BlobStoreList<H> + Clone + Send + PartialEq + Eq + 'static;
    /// Error produced when a reader cannot be opened.
    type ReaderError: Error + Debug + Send + Sync + 'static;
    /// Opens a reader over the store's current contents.
    fn reader(&mut self) -> Result<Self::Reader, Self::ReaderError>;
}
323
/// Optional capability: mark a set of handles as live.
/// Presumably a garbage-collection primitive (unkept blobs may be
/// reclaimed) — confirm against implementors.
pub trait BlobStoreKeep<H: HashProtocol> {
    fn keep<I>(&mut self, handles: I)
    where
        I: IntoIterator<Item = Value<Handle<H, UnknownBlob>>>;
}
331
/// Outcome of a compare-and-swap branch update ([`BranchStore::update`]).
#[derive(Debug)]
pub enum PushResult<H>
where
    H: HashProtocol,
{
    /// The expected `old` value matched and the head was replaced.
    Success(),
    /// The stored head differed from `old`; carries the head observed
    /// at the time of the failed update (`None` if the branch was unset).
    Conflict(Option<Value<Handle<H, SimpleArchive>>>),
}
344
/// Mutable mapping from branch ids to branch-metadata blob handles,
/// updated via compare-and-swap.
pub trait BranchStore<H: HashProtocol> {
    /// Error while listing branches.
    type BranchesError: Error + Debug + Send + Sync + 'static;
    /// Error while reading a branch head.
    type HeadError: Error + Debug + Send + Sync + 'static;
    /// Error while updating a branch head.
    type UpdateError: Error + Debug + Send + Sync + 'static;

    /// Iterator over branch ids; items are individually fallible.
    type ListIter<'a>: Iterator<Item = Result<Id, Self::BranchesError>>
    where
        Self: 'a;

    /// Lists all branch ids known to the store.
    fn branches<'a>(&'a mut self) -> Result<Self::ListIter<'a>, Self::BranchesError>;

    /// Returns the branch's current metadata handle, or `None` if unset.
    fn head(&mut self, id: Id) -> Result<Option<Value<Handle<H, SimpleArchive>>>, Self::HeadError>;

    /// Atomically replaces the head: succeeds only when the stored value
    /// still equals `old` (use `old = None` to create a branch).
    fn update(
        &mut self,
        id: Id,
        old: Option<Value<Handle<H, SimpleArchive>>>,
        new: Option<Value<Handle<H, SimpleArchive>>>,
    ) -> Result<PushResult<H>, Self::UpdateError>;
}
402
/// Failure during [`transfer`], tagged by the stage that failed.
#[derive(Debug)]
pub enum TransferError<ListErr, LoadErr, StoreErr> {
    /// Enumerating the source's blobs failed.
    List(ListErr),
    /// Loading a blob from the source failed.
    Load(LoadErr),
    /// Storing a blob into the target failed.
    Store(StoreErr),
}
413
414impl<ListErr, LoadErr, StoreErr> fmt::Display for TransferError<ListErr, LoadErr, StoreErr> {
415 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
416 write!(f, "failed to transfer blob")
417 }
418}
419
420impl<ListErr, LoadErr, StoreErr> Error for TransferError<ListErr, LoadErr, StoreErr>
421where
422 ListErr: Debug + Error + 'static,
423 LoadErr: Debug + Error + 'static,
424 StoreErr: Debug + Error + 'static,
425{
426 fn source(&self) -> Option<&(dyn Error + 'static)> {
427 match self {
428 Self::List(e) => Some(e),
429 Self::Load(e) => Some(e),
430 Self::Store(e) => Some(e),
431 }
432 }
433}
434
435pub fn transfer<'a, BS, BT, HS, HT, Handles>(
437 source: &'a BS,
438 target: &'a mut BT,
439 handles: Handles,
440) -> impl Iterator<
441 Item = Result<
442 (
443 Value<Handle<HS, UnknownBlob>>,
444 Value<Handle<HT, UnknownBlob>>,
445 ),
446 TransferError<
447 Infallible,
448 <BS as BlobStoreGet<HS>>::GetError<Infallible>,
449 <BT as BlobStorePut<HT>>::PutError,
450 >,
451 >,
452> + 'a
453where
454 BS: BlobStoreGet<HS> + 'a,
455 BT: BlobStorePut<HT> + 'a,
456 HS: 'static + HashProtocol,
457 HT: 'static + HashProtocol,
458 Handles: IntoIterator<Item = Value<Handle<HS, UnknownBlob>>> + 'a,
459 Handles::IntoIter: 'a,
460{
461 handles.into_iter().map(move |source_handle| {
462 let blob: Blob<UnknownBlob> = source.get(source_handle).map_err(TransferError::Load)?;
463
464 Ok((
465 source_handle,
466 (target.put(blob).map_err(TransferError::Store)?),
467 ))
468 })
469}
470
/// Breadth-first traversal state over blobs reachable from a set of roots.
/// Candidate handles are discovered by scanning blob bytes (see
/// `enqueue_from_blob`), so it may visit false positives that merely
/// look like handles — probing the store filters most of them.
pub struct ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    // Store used to resolve candidate handles.
    source: &'a BS,
    // Frontier of handles still to visit.
    queue: VecDeque<Value<Handle<H, UnknownBlob>>>,
    // Raw handle bytes already yielded (or skipped), to avoid revisits.
    visited: HashSet<[u8; VALUE_LEN]>,
}
481
482impl<'a, BS, H> ReachableHandles<'a, BS, H>
483where
484 BS: BlobStoreGet<H>,
485 H: 'static + HashProtocol,
486{
487 fn new(source: &'a BS, roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>) -> Self {
488 let mut queue = VecDeque::new();
489 for handle in roots {
490 queue.push_back(handle);
491 }
492
493 Self {
494 source,
495 queue,
496 visited: HashSet::new(),
497 }
498 }
499
500 fn enqueue_from_blob(&mut self, blob: &Blob<UnknownBlob>) {
501 let bytes = blob.bytes.as_ref();
502 let mut offset = 0usize;
503
504 while offset + VALUE_LEN <= bytes.len() {
505 let mut raw = [0u8; VALUE_LEN];
506 raw.copy_from_slice(&bytes[offset..offset + VALUE_LEN]);
507
508 if !self.visited.contains(&raw) {
509 let candidate = Value::<Handle<H, UnknownBlob>>::new(raw);
510 if self
511 .source
512 .get::<anybytes::Bytes, UnknownBlob>(candidate)
513 .is_ok()
514 {
515 self.queue.push_back(candidate);
516 }
517 }
518
519 offset += VALUE_LEN;
520 }
521 }
522}
523
524impl<'a, BS, H> Iterator for ReachableHandles<'a, BS, H>
525where
526 BS: BlobStoreGet<H>,
527 H: 'static + HashProtocol,
528{
529 type Item = Value<Handle<H, UnknownBlob>>;
530
531 fn next(&mut self) -> Option<Self::Item> {
532 while let Some(handle) = self.queue.pop_front() {
533 let raw = handle.raw;
534
535 if !self.visited.insert(raw) {
536 continue;
537 }
538
539 if let Ok(blob) = self.source.get(handle) {
540 self.enqueue_from_blob(&blob);
541 }
542
543 return Some(handle);
544 }
545
546 None
547 }
548}
549
/// Returns an iterator over every blob handle reachable from `roots`
/// in `source`, including the roots themselves (see [`ReachableHandles`]).
pub fn reachable<'a, BS, H>(
    source: &'a BS,
    roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>,
) -> ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    ReachableHandles::new(source, roots)
}
561
562pub fn potential_handles<'a, H>(
569 set: &'a TribleSet,
570) -> impl Iterator<Item = Value<Handle<H, UnknownBlob>>> + 'a
571where
572 H: HashProtocol,
573{
574 set.vae.iter().map(|raw| {
575 let mut value = [0u8; VALUE_LEN];
576 value.copy_from_slice(&raw[0..VALUE_LEN]);
577 Value::<Handle<H, UnknownBlob>>::new(value)
578 })
579}
580
/// Failure while creating a commit, tagged by which blob write failed.
#[derive(Debug)]
pub enum CreateCommitError<BlobErr: Error + Debug + Send + Sync + 'static> {
    /// Storing the commit's content blob failed.
    ContentStorageError(BlobErr),
    /// Storing the commit metadata blob failed.
    CommitStorageError(BlobErr),
}
590
591impl<BlobErr: Error + Debug + Send + Sync + 'static> fmt::Display for CreateCommitError<BlobErr> {
592 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
593 match self {
594 CreateCommitError::ContentStorageError(e) => write!(f, "Content storage failed: {e}"),
595 CreateCommitError::CommitStorageError(e) => {
596 write!(f, "Commit metadata storage failed: {e}")
597 }
598 }
599 }
600}
601
602impl<BlobErr: Error + Debug + Send + Sync + 'static> Error for CreateCommitError<BlobErr> {
603 fn source(&self) -> Option<&(dyn Error + 'static)> {
604 match self {
605 CreateCommitError::ContentStorageError(e) => Some(e),
606 CreateCommitError::CommitStorageError(e) => Some(e),
607 }
608 }
609}
610
/// Failure while merging two workspaces.
#[derive(Debug)]
pub enum MergeError {
    /// The workspaces do not share the same base branch/repository.
    DifferentRepos(),
}
617
/// Failure while pushing a workspace back to its branch.
#[derive(Debug)]
pub enum PushError<Storage: BranchStore<Blake3> + BlobStore<Blake3>> {
    /// Listing branches failed.
    StorageBranches(Storage::BranchesError),
    /// Opening a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading/decoding a metadata blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// The compare-and-swap branch update failed.
    BranchUpdate(Storage::UpdateError),
    /// Branch metadata was missing required entries or ambiguous.
    BadBranchMetadata(),
    /// Merging with a conflicting workspace failed.
    MergeError(MergeError),
}
638
639impl<Storage> From<MergeError> for PushError<Storage>
644where
645 Storage: BranchStore<Blake3> + BlobStore<Blake3>,
646{
647 fn from(e: MergeError) -> Self {
648 PushError::MergeError(e)
649 }
650}
651
/// Failure while creating or updating a branch.
#[derive(Debug)]
pub enum BranchError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Opening a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading/decoding a metadata blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// Reading a branch head failed.
    BranchHead(Storage::HeadError),
    /// The compare-and-swap branch update failed.
    BranchUpdate(Storage::UpdateError),
    /// A branch with this id already exists.
    AlreadyExists(),
    /// No branch with the given id was found.
    BranchNotFound(Id),
}
681
/// Failure while resolving a branch by name.
#[derive(Debug)]
pub enum LookupError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Listing branches failed.
    StorageBranches(Storage::BranchesError),
    /// Reading a branch head failed.
    BranchHead(Storage::HeadError),
    /// Opening a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading/decoding a metadata blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Several branches share the requested name; all matches are listed.
    NameConflict(Vec<Id>),
    /// Branch metadata was missing required entries or ambiguous.
    BadBranchMetadata(),
}
703
/// Failure from [`Repository::ensure_branch`], split by phase.
#[derive(Debug)]
pub enum EnsureBranchError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// The name lookup failed.
    Lookup(LookupError<Storage>),
    /// Creating the missing branch failed.
    Create(BranchError<Storage>),
}
715
/// A repository: blob + branch storage, a default signing key for
/// commits and branch metadata, and a commit-metadata template blob.
pub struct Repository<Storage: BlobStore<Blake3> + BranchStore<Blake3>> {
    // Backing store for both blobs and branch heads.
    storage: Storage,
    // Default key used to sign commits/branches unless overridden.
    signing_key: SigningKey,
    // Handle of the template metadata attached to every commit.
    commit_metadata: MetadataHandle,
}
727
/// Failure while pulling a branch into a workspace.
pub enum PullError<BranchStorageErr, BlobReaderErr, BlobStorageErr>
where
    BranchStorageErr: Error,
    BlobReaderErr: Error,
    BlobStorageErr: Error,
{
    /// The branch id has no head in the branch store.
    BranchNotFound(Id),
    /// Reading the branch head failed.
    BranchStorage(BranchStorageErr),
    /// Opening a blob reader failed.
    BlobReader(BlobReaderErr),
    /// Reading/decoding the branch metadata blob failed.
    BlobStorage(BlobStorageErr),
    /// Branch metadata carried more than one `head` entry.
    BadBranchMetadata(),
}
746
747impl<B, R, C> fmt::Debug for PullError<B, R, C>
748where
749 B: Error + fmt::Debug,
750 R: Error + fmt::Debug,
751 C: Error + fmt::Debug,
752{
753 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
754 match self {
755 PullError::BranchNotFound(id) => f.debug_tuple("BranchNotFound").field(id).finish(),
756 PullError::BranchStorage(e) => f.debug_tuple("BranchStorage").field(e).finish(),
757 PullError::BlobReader(e) => f.debug_tuple("BlobReader").field(e).finish(),
758 PullError::BlobStorage(e) => f.debug_tuple("BlobStorage").field(e).finish(),
759 PullError::BadBranchMetadata() => f.debug_tuple("BadBranchMetadata").finish(),
760 }
761 }
762}
763
764impl<Storage> Repository<Storage>
765where
766 Storage: BlobStore<Blake3> + BranchStore<Blake3>,
767{
768 pub fn new(
773 mut storage: Storage,
774 signing_key: SigningKey,
775 commit_metadata: TribleSet,
776 ) -> Result<Self, <Storage as BlobStorePut<Blake3>>::PutError> {
777 let commit_metadata = storage.put(commit_metadata)?;
778 Ok(Self {
779 storage,
780 signing_key,
781 commit_metadata,
782 })
783 }
784
    /// Consumes the repository and returns the underlying storage.
    pub fn into_storage(self) -> Storage {
        self.storage
    }
793
    /// Shared access to the underlying storage.
    pub fn storage(&self) -> &Storage {
        &self.storage
    }
798
    /// Exclusive access to the underlying storage.
    pub fn storage_mut(&mut self) -> &mut Storage {
        &mut self.storage
    }
803
    /// Replaces the default signing key used by subsequent operations.
    pub fn set_signing_key(&mut self, signing_key: SigningKey) {
        self.signing_key = signing_key;
    }
808
    /// Handle of the commit-metadata template blob stored at construction.
    pub fn commit_metadata(&self) -> MetadataHandle {
        self.commit_metadata
    }
813
    /// Creates a new branch named `branch_name`, optionally pointing at an
    /// initial `commit`, signed with the repository's default key.
    pub fn create_branch(
        &mut self,
        branch_name: &str,
        commit: Option<CommitHandle>,
    ) -> Result<ExclusiveId, BranchError<Storage>> {
        self.create_branch_with_key(branch_name, commit, self.signing_key.clone())
    }
834
835 pub fn create_branch_with_key(
837 &mut self,
838 branch_name: &str,
839 commit: Option<CommitHandle>,
840 signing_key: SigningKey,
841 ) -> Result<ExclusiveId, BranchError<Storage>> {
842 let branch_id = genid();
843 let name_blob = branch_name.to_owned().to_blob();
844 let name_handle = name_blob.get_handle::<Blake3>();
845 self.storage
846 .put(name_blob)
847 .map_err(|e| BranchError::StoragePut(e))?;
848
849 let branch_set = if let Some(commit) = commit {
850 let reader = self
851 .storage
852 .reader()
853 .map_err(|e| BranchError::StorageReader(e))?;
854 let set: TribleSet = reader.get(commit).map_err(|e| BranchError::StorageGet(e))?;
855
856 branch::branch_metadata(&signing_key, *branch_id, name_handle, Some(set.to_blob()))
857 } else {
858 branch::branch_unsigned(*branch_id, name_handle, None)
859 };
860
861 let branch_blob = branch_set.to_blob();
862 let branch_handle = self
863 .storage
864 .put(branch_blob)
865 .map_err(|e| BranchError::StoragePut(e))?;
866 let push_result = self
867 .storage
868 .update(*branch_id, None, Some(branch_handle))
869 .map_err(|e| BranchError::BranchUpdate(e))?;
870
871 match push_result {
872 PushResult::Success() => Ok(branch_id),
873 PushResult::Conflict(_) => Err(BranchError::AlreadyExists()),
874 }
875 }
876
877 pub fn lookup_branch(
883 &mut self,
884 name: &str,
885 ) -> Result<Option<Id>, LookupError<Storage>> {
886 let branch_ids: Vec<Id> = self
887 .storage
888 .branches()
889 .map_err(LookupError::StorageBranches)?
890 .collect::<Result<Vec<_>, _>>()
891 .map_err(LookupError::StorageBranches)?;
892
893 let mut matches = Vec::new();
894
895 for branch_id in branch_ids {
896 let Some(meta_handle) = self
897 .storage
898 .head(branch_id)
899 .map_err(LookupError::BranchHead)?
900 else {
901 continue;
902 };
903
904 let reader = self
905 .storage
906 .reader()
907 .map_err(LookupError::StorageReader)?;
908 let meta_set: TribleSet =
909 reader.get(meta_handle).map_err(LookupError::StorageGet)?;
910
911 let Ok((name_handle,)) = find!(
912 (n: Value<Handle<Blake3, LongString>>),
913 pattern!(&meta_set, [{ crate::metadata::name: ?n }])
914 )
915 .exactly_one()
916 else {
917 continue;
918 };
919
920 let Ok(branch_name): Result<anybytes::View<str>, _> = reader.get(name_handle) else {
921 continue;
922 };
923
924 if branch_name.as_ref() == name {
925 matches.push(branch_id);
926 }
927 }
928
929 match matches.len() {
930 0 => Ok(None),
931 1 => Ok(Some(matches[0])),
932 _ => Err(LookupError::NameConflict(matches)),
933 }
934 }
935
936 pub fn ensure_branch(
944 &mut self,
945 name: &str,
946 commit: Option<CommitHandle>,
947 ) -> Result<Id, EnsureBranchError<Storage>> {
948 match self
949 .lookup_branch(name)
950 .map_err(EnsureBranchError::Lookup)?
951 {
952 Some(id) => Ok(id),
953 None => {
954 let id = self
955 .create_branch(name, commit)
956 .map_err(EnsureBranchError::Create)?;
957 Ok(*id)
958 }
959 }
960 }
961
    /// Opens a workspace on `branch_id`, signing future commits with the
    /// repository's default key. See [`Repository::pull_with_key`].
    pub fn pull(
        &mut self,
        branch_id: Id,
    ) -> Result<
        Workspace<Storage>,
        PullError<
            Storage::HeadError,
            Storage::ReaderError,
            <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
        >,
    > {
        self.pull_with_key(branch_id, self.signing_key.clone())
    }
977
978 pub fn pull_with_key(
980 &mut self,
981 branch_id: Id,
982 signing_key: SigningKey,
983 ) -> Result<
984 Workspace<Storage>,
985 PullError<
986 Storage::HeadError,
987 Storage::ReaderError,
988 <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
989 >,
990 > {
991 let base_branch_meta_handle = match self.storage.head(branch_id) {
993 Ok(Some(handle)) => handle,
994 Ok(None) => return Err(PullError::BranchNotFound(branch_id)),
995 Err(e) => return Err(PullError::BranchStorage(e)),
996 };
997 let reader = self.storage.reader().map_err(PullError::BlobReader)?;
999 let base_branch_meta: TribleSet = match reader.get(base_branch_meta_handle) {
1000 Ok(meta_set) => meta_set,
1001 Err(e) => return Err(PullError::BlobStorage(e)),
1002 };
1003
1004 let head_ = match find!(
1005 (head_: Value<_>),
1006 pattern!(&base_branch_meta, [{ head: ?head_ }])
1007 )
1008 .at_most_one()
1009 {
1010 Ok(Some((h,))) => Some(h),
1011 Ok(None) => None,
1012 Err(_) => return Err(PullError::BadBranchMetadata()),
1013 };
1014 let base_blobs = self.storage.reader().map_err(PullError::BlobReader)?;
1016 Ok(Workspace {
1017 base_blobs,
1018 local_blobs: MemoryBlobStore::new(),
1019 head: head_,
1020 base_head: head_,
1021 base_branch_id: branch_id,
1022 base_branch_meta: base_branch_meta_handle,
1023 signing_key,
1024 commit_metadata: self.commit_metadata,
1025 })
1026 }
1027
    /// Pushes the workspace's head to its base branch, retrying on contention.
    ///
    /// Each failed compare-and-swap yields a conflict workspace based on
    /// the competing head; our state is merged into it (via `merge`,
    /// defined elsewhere) and the push is retried until it lands.
    pub fn push(&mut self, workspace: &mut Workspace<Storage>) -> Result<(), PushError<Storage>> {
        while let Some(mut conflict_ws) = self.try_push(workspace)? {
            conflict_ws.merge(workspace)?;

            // Continue from the merged view of both histories.
            *workspace = conflict_ws;
        }

        Ok(())
    }
1051
    /// Attempts a single push of the workspace's head to its base branch.
    ///
    /// Returns `Ok(None)` on success (or when there is nothing to push),
    /// and `Ok(Some(conflict_ws))` when another writer moved the branch:
    /// the returned workspace is based on the competing head so the caller
    /// can merge and retry.
    pub fn try_push(
        &mut self,
        workspace: &mut Workspace<Storage>,
    ) -> Result<Option<Workspace<Storage>>, PushError<Storage>> {
        // First upload every locally staged blob so the new head is fully
        // resolvable in repository storage. MemoryBlobStore enumeration and
        // reads are treated as infallible here.
        let workspace_reader = workspace.local_blobs.reader().unwrap();
        for handle in workspace_reader.blobs() {
            let handle = handle.expect("infallible blob enumeration");
            let blob: Blob<UnknownBlob> =
                workspace_reader.get(handle).expect("infallible blob read");
            self.storage.put(blob).map_err(PushError::StoragePut)?;
        }

        // Head unchanged since pull/last push: nothing to update.
        if workspace.base_head == workspace.head {
            return Ok(None);
        }

        let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
        let base_branch_meta: TribleSet = repo_reader
            .get(workspace.base_branch_meta)
            .map_err(PushError::StorageGet)?;

        // The branch must carry exactly one name to build new metadata.
        let Ok((branch_name,)) = find!(
            (name: Value<Handle<Blake3, LongString>>),
            pattern!(base_branch_meta, [{ crate::metadata::name: ?name }])
        )
        .exactly_one() else {
            return Err(PushError::BadBranchMetadata());
        };

        // A push with a cleared head is not representable.
        let head_handle = workspace.head.ok_or(PushError::BadBranchMetadata())?;
        let head_: TribleSet = repo_reader
            .get(head_handle)
            .map_err(PushError::StorageGet)?;

        // Sign fresh branch metadata over the new head commit.
        let branch_meta = branch_metadata(
            &workspace.signing_key,
            workspace.base_branch_id,
            branch_name,
            Some(head_.to_blob()),
        );

        let branch_meta_handle = self
            .storage
            .put(branch_meta)
            .map_err(PushError::StoragePut)?;

        // Compare-and-swap: only succeeds if the branch still points at the
        // metadata we pulled from.
        let result = self
            .storage
            .update(
                workspace.base_branch_id,
                Some(workspace.base_branch_meta),
                Some(branch_meta_handle),
            )
            .map_err(PushError::BranchUpdate)?;

        match result {
            PushResult::Success() => {
                // Rebase the workspace onto the newly installed head and
                // drop the staged blobs (they now live in the repository).
                workspace.base_branch_meta = branch_meta_handle;
                workspace.base_head = workspace.head;
                workspace.base_blobs = self.storage.reader().map_err(PushError::StorageReader)?;
                workspace.local_blobs = MemoryBlobStore::new();
                Ok(None)
            }
            PushResult::Conflict(conflicting_meta) => {
                // Someone else won the race; build a workspace on their head.
                let conflicting_meta = conflicting_meta.ok_or(PushError::BadBranchMetadata())?;

                let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
                let branch_meta: TribleSet = repo_reader
                    .get(conflicting_meta)
                    .map_err(PushError::StorageGet)?;

                let head_ = match find!((head_: Value<_>),
                    pattern!(&branch_meta, [{ head: ?head_ }])
                )
                .at_most_one()
                {
                    Ok(Some((h,))) => Some(h),
                    Ok(None) => None,
                    Err(_) => return Err(PushError::BadBranchMetadata()),
                };

                let conflict_ws = Workspace {
                    base_blobs: self.storage.reader().map_err(PushError::StorageReader)?,
                    local_blobs: MemoryBlobStore::new(),
                    head: head_,
                    base_head: head_,
                    base_branch_id: workspace.base_branch_id,
                    base_branch_meta: conflicting_meta,
                    signing_key: workspace.signing_key.clone(),
                    commit_metadata: workspace.commit_metadata,
                };

                Ok(Some(conflict_ws))
            }
        }
    }
1165}
1166
/// Handle of a commit-metadata blob (a `SimpleArchive` of tribles).
pub type CommitHandle = Value<Handle<Blake3, SimpleArchive>>;
/// Handle of the repository-wide commit-metadata template blob.
type MetadataHandle = Value<Handle<Blake3, SimpleArchive>>;
/// Set of commits, keyed by the raw bytes of their handles.
pub type CommitSet = PATCH<VALUE_LEN, IdentitySchema, ()>;
/// Handle of a branch-metadata blob.
type BranchMetaHandle = Value<Handle<Blake3, SimpleArchive>>;
1173
/// Result of checking out a set of commits: the union of their facts
/// together with the set of commits that contributed them.
#[derive(Debug, Clone)]
pub struct Checkout {
    // Union of the selected commits' content tribles.
    facts: TribleSet,
    // The commits that were selected.
    commits: CommitSet,
}
1203
1204impl PartialEq<TribleSet> for Checkout {
1205 fn eq(&self, other: &TribleSet) -> bool {
1206 self.facts == *other
1207 }
1208}
1209
1210impl PartialEq<Checkout> for TribleSet {
1211 fn eq(&self, other: &Checkout) -> bool {
1212 *self == other.facts
1213 }
1214}
1215
impl Checkout {
    /// The accumulated facts of the checked-out commits.
    pub fn facts(&self) -> &TribleSet {
        &self.facts
    }

    /// A copy of the set of commits that produced these facts.
    pub fn commits(&self) -> CommitSet {
        self.commits.clone()
    }

    /// Consumes the checkout, keeping only the facts.
    pub fn into_facts(self) -> TribleSet {
        self.facts
    }
}
1234
// A checkout can be used wherever a `TribleSet` is expected (read-only).
impl std::ops::Deref for Checkout {
    type Target = TribleSet;
    fn deref(&self) -> &TribleSet {
        &self.facts
    }
}
1241
impl std::ops::AddAssign<&Checkout> for Checkout {
    // Accumulates another checkout: facts are added into `self.facts`
    // and the commit set absorbs the other side's commits.
    fn add_assign(&mut self, rhs: &Checkout) {
        self.facts += rhs.facts.clone();
        self.commits.union(rhs.commits.clone());
    }
}
1248
/// A mutable working copy of a branch: staged blobs plus a movable head.
pub struct Workspace<Blobs: BlobStore<Blake3>> {
    // Blobs created locally and not yet pushed to the repository.
    local_blobs: MemoryBlobStore<Blake3>,
    // Snapshot of repository blobs at pull/last successful push.
    base_blobs: Blobs::Reader,
    // Branch this workspace was pulled from.
    base_branch_id: Id,
    // Branch metadata handle the next push will compare-and-swap against.
    base_branch_meta: BranchMetaHandle,
    // Current (possibly moved) head commit; `None` when headless.
    head: Option<CommitHandle>,
    // Head observed at pull/last push; equality with `head` means clean.
    base_head: Option<CommitHandle>,
    // Key used to sign commits and pushed branch metadata.
    signing_key: SigningKey,
    // Template metadata attached to commits created in this workspace.
    commit_metadata: MetadataHandle,
}
1274
// Manual Debug: the signing key is deliberately omitted from the output.
impl<Blobs> fmt::Debug for Workspace<Blobs>
where
    Blobs: BlobStore<Blake3>,
    Blobs::Reader: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Workspace")
            .field("local_blobs", &self.local_blobs)
            .field("base_blobs", &self.base_blobs)
            .field("base_branch_id", &self.base_branch_id)
            .field("base_branch_meta", &self.base_branch_meta)
            .field("base_head", &self.base_head)
            .field("head", &self.head)
            .field("commit_metadata", &self.commit_metadata)
            .finish()
    }
}
1292
/// Strategy that resolves to a set of commits within a workspace
/// (single handles, ancestries, set algebra, filters, ...).
pub trait CommitSelector<Blobs: BlobStore<Blake3>> {
    /// Evaluates the selector against `ws`, returning the selected commits.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >;
}
1303
/// Selector: the commit and everything reachable through parent links.
pub struct Ancestors(pub CommitHandle);

/// Selects `commit` and all of its ancestors.
pub fn ancestors(commit: CommitHandle) -> Ancestors {
    Ancestors(commit)
}
1311
/// Selector: the commit reached after `n` first-parent steps.
pub struct NthAncestor(pub CommitHandle, pub usize);

/// Selects the `n`-th ancestor of `commit` (0 selects `commit` itself).
pub fn nth_ancestor(commit: CommitHandle, n: usize) -> NthAncestor {
    NthAncestor(commit, n)
}
1319
/// Selector: the direct parents of a commit.
pub struct Parents(pub CommitHandle);

/// Selects the immediate parents of `commit`.
pub fn parents(commit: CommitHandle) -> Parents {
    Parents(commit)
}
1327
/// Selector: commits reachable from exactly one of two heads.
pub struct SymmetricDiff(pub CommitHandle, pub CommitHandle);

/// Selects the symmetric difference of the ancestries of `a` and `b`.
pub fn symmetric_diff(a: CommitHandle, b: CommitHandle) -> SymmetricDiff {
    SymmetricDiff(a, b)
}
1336
/// Selector combinator: commits selected by either operand.
pub struct Union<A, B> {
    left: A,
    right: B,
}

/// Selects the union of two selectors.
pub fn union<A, B>(left: A, right: B) -> Union<A, B> {
    Union { left, right }
}
1347
/// Selector combinator: commits selected by both operands.
pub struct Intersect<A, B> {
    left: A,
    right: B,
}

/// Selects the intersection of two selectors.
pub fn intersect<A, B>(left: A, right: B) -> Intersect<A, B> {
    Intersect { left, right }
}
1358
/// Selector combinator: commits selected by `left` but not by `right`.
pub struct Difference<A, B> {
    left: A,
    right: B,
}

/// Selects the difference of two selectors (`left` minus `right`).
pub fn difference<A, B>(left: A, right: B) -> Difference<A, B> {
    Difference { left, right }
}
1370
/// Selector: commits whose timestamp falls within `[start, end]`.
pub struct TimeRange(pub Epoch, pub Epoch);

/// Selects commits timestamped between `start` and `end`.
pub fn time_range(start: Epoch, end: Epoch) -> TimeRange {
    TimeRange(start, end)
}
1378
/// Selector combinator: keeps only the commits of `selector` for which
/// `filter(metadata, content)` returns true.
pub struct Filter<S, F> {
    selector: S,
    filter: F,
}

/// Wraps `selector` with a commit-level predicate.
pub fn filter<S, F>(selector: S, filter: F) -> Filter<S, F> {
    Filter { selector, filter }
}
1389
1390impl<Blobs> CommitSelector<Blobs> for CommitHandle
1391where
1392 Blobs: BlobStore<Blake3>,
1393{
1394 fn select(
1395 self,
1396 _ws: &mut Workspace<Blobs>,
1397 ) -> Result<
1398 CommitSet,
1399 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1400 > {
1401 let mut patch = CommitSet::new();
1402 patch.insert(&Entry::new(&self.raw));
1403 Ok(patch)
1404 }
1405}
1406
impl<Blobs> CommitSelector<Blobs> for CommitSet
where
    Blobs: BlobStore<Blake3>,
{
    /// A commit set selects itself verbatim.
    fn select(
        self,
        _ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        Ok(self)
    }
}
1421
1422impl<Blobs> CommitSelector<Blobs> for Vec<CommitHandle>
1423where
1424 Blobs: BlobStore<Blake3>,
1425{
1426 fn select(
1427 self,
1428 _ws: &mut Workspace<Blobs>,
1429 ) -> Result<
1430 CommitSet,
1431 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1432 > {
1433 let mut patch = CommitSet::new();
1434 for handle in self {
1435 patch.insert(&Entry::new(&handle.raw));
1436 }
1437 Ok(patch)
1438 }
1439}
1440
1441impl<Blobs> CommitSelector<Blobs> for &[CommitHandle]
1442where
1443 Blobs: BlobStore<Blake3>,
1444{
1445 fn select(
1446 self,
1447 _ws: &mut Workspace<Blobs>,
1448 ) -> Result<
1449 CommitSet,
1450 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1451 > {
1452 let mut patch = CommitSet::new();
1453 for handle in self {
1454 patch.insert(&Entry::new(&handle.raw));
1455 }
1456 Ok(patch)
1457 }
1458}
1459
1460impl<Blobs> CommitSelector<Blobs> for Option<CommitHandle>
1461where
1462 Blobs: BlobStore<Blake3>,
1463{
1464 fn select(
1465 self,
1466 _ws: &mut Workspace<Blobs>,
1467 ) -> Result<
1468 CommitSet,
1469 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1470 > {
1471 let mut patch = CommitSet::new();
1472 if let Some(handle) = self {
1473 patch.insert(&Entry::new(&handle.raw));
1474 }
1475 Ok(patch)
1476 }
1477}
1478
impl<Blobs> CommitSelector<Blobs> for Ancestors
where
    Blobs: BlobStore<Blake3>,
{
    /// Selects the starting commit plus everything reachable via parent
    /// links (delegates to `collect_reachable`, defined elsewhere).
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        collect_reachable(ws, self.0)
    }
}
1493
1494impl<Blobs> CommitSelector<Blobs> for NthAncestor
1495where
1496 Blobs: BlobStore<Blake3>,
1497{
1498 fn select(
1499 self,
1500 ws: &mut Workspace<Blobs>,
1501 ) -> Result<
1502 CommitSet,
1503 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1504 > {
1505 let mut current = self.0;
1506 let mut remaining = self.1;
1507
1508 while remaining > 0 {
1509 let meta: TribleSet = ws.get(current).map_err(WorkspaceCheckoutError::Storage)?;
1510 let mut parents = find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }]));
1511 let Some((p,)) = parents.next() else {
1512 return Ok(CommitSet::new());
1513 };
1514 current = p;
1515 remaining -= 1;
1516 }
1517
1518 let mut patch = CommitSet::new();
1519 patch.insert(&Entry::new(¤t.raw));
1520 Ok(patch)
1521 }
1522}
1523
1524impl<Blobs> CommitSelector<Blobs> for Parents
1525where
1526 Blobs: BlobStore<Blake3>,
1527{
1528 fn select(
1529 self,
1530 ws: &mut Workspace<Blobs>,
1531 ) -> Result<
1532 CommitSet,
1533 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1534 > {
1535 let meta: TribleSet = ws.get(self.0).map_err(WorkspaceCheckoutError::Storage)?;
1536 let mut result = CommitSet::new();
1537 for (p,) in find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }])) {
1538 result.insert(&Entry::new(&p.raw));
1539 }
1540 Ok(result)
1541 }
1542}
1543
1544impl<Blobs> CommitSelector<Blobs> for SymmetricDiff
1545where
1546 Blobs: BlobStore<Blake3>,
1547{
1548 fn select(
1549 self,
1550 ws: &mut Workspace<Blobs>,
1551 ) -> Result<
1552 CommitSet,
1553 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1554 > {
1555 let a = collect_reachable(ws, self.0)?;
1556 let b = collect_reachable(ws, self.1)?;
1557 let inter = a.intersect(&b);
1558 let mut union = a;
1559 union.union(b);
1560 Ok(union.difference(&inter))
1561 }
1562}
1563
1564impl<A, B, Blobs> CommitSelector<Blobs> for Union<A, B>
1565where
1566 A: CommitSelector<Blobs>,
1567 B: CommitSelector<Blobs>,
1568 Blobs: BlobStore<Blake3>,
1569{
1570 fn select(
1571 self,
1572 ws: &mut Workspace<Blobs>,
1573 ) -> Result<
1574 CommitSet,
1575 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1576 > {
1577 let mut left = self.left.select(ws)?;
1578 let right = self.right.select(ws)?;
1579 left.union(right);
1580 Ok(left)
1581 }
1582}
1583
1584impl<A, B, Blobs> CommitSelector<Blobs> for Intersect<A, B>
1585where
1586 A: CommitSelector<Blobs>,
1587 B: CommitSelector<Blobs>,
1588 Blobs: BlobStore<Blake3>,
1589{
1590 fn select(
1591 self,
1592 ws: &mut Workspace<Blobs>,
1593 ) -> Result<
1594 CommitSet,
1595 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1596 > {
1597 let left = self.left.select(ws)?;
1598 let right = self.right.select(ws)?;
1599 Ok(left.intersect(&right))
1600 }
1601}
1602
1603impl<A, B, Blobs> CommitSelector<Blobs> for Difference<A, B>
1604where
1605 A: CommitSelector<Blobs>,
1606 B: CommitSelector<Blobs>,
1607 Blobs: BlobStore<Blake3>,
1608{
1609 fn select(
1610 self,
1611 ws: &mut Workspace<Blobs>,
1612 ) -> Result<
1613 CommitSet,
1614 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1615 > {
1616 let left = self.left.select(ws)?;
1617 let right = self.right.select(ws)?;
1618 Ok(left.difference(&right))
1619 }
1620}
1621
/// Runs the inner selector, then keeps only the commits for which the user
/// predicate returns `true` when given (commit metadata, commit payload).
impl<S, F, Blobs> CommitSelector<Blobs> for Filter<S, F>
where
    Blobs: BlobStore<Blake3>,
    S: CommitSelector<Blobs>,
    F: for<'x, 'y> Fn(&'x TribleSet, &'y TribleSet) -> bool,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // Candidate commits come from the wrapped selector.
        let patch = self.selector.select(ws)?;
        let mut result = CommitSet::new();
        let filter = self.filter;
        for raw in patch.iter() {
            let handle = Value::new(*raw);
            // Commit metadata blob for this candidate.
            let meta: TribleSet = ws.get(handle).map_err(WorkspaceCheckoutError::Storage)?;

            // The commit must carry exactly one `content` handle; anything
            // else (zero or several) is treated as malformed metadata.
            // NOTE(review): this is stricter than `checkout_commits`, which
            // tolerates commits without content — confirm that is intended.
            let Ok((content_handle,)) = find!(
                (c: Value<_>),
                pattern!(&meta, [{ content: ?c }])
            )
            .exactly_one() else {
                return Err(WorkspaceCheckoutError::BadCommitMetadata());
            };

            // Payload referenced by the content handle.
            let payload: TribleSet = ws
                .get(content_handle)
                .map_err(WorkspaceCheckoutError::Storage)?;

            if filter(&meta, &payload) {
                result.insert(&Entry::new(raw));
            }
        }
        Ok(result)
    }
}
1661
/// Selector that keeps only commits whose payload mentions the given entity.
pub struct HistoryOf(pub Id);

/// Convenience constructor for [`HistoryOf`].
pub fn history_of(entity: Id) -> HistoryOf {
    HistoryOf(entity)
}
1669
/// Walks all ancestors of the current head and keeps commits whose payload
/// contains at least one trible whose entity id equals the queried one.
impl<Blobs> CommitSelector<Blobs> for HistoryOf
where
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // An empty workspace has no history to search.
        let Some(head_) = ws.head else {
            return Ok(CommitSet::new());
        };
        let entity = self.0;
        // Delegate to the generic filter selector over all ancestors; the
        // predicate ignores metadata and scans the payload for `entity`.
        filter(
            ancestors(head_),
            move |_: &TribleSet, payload: &TribleSet| payload.iter().any(|t| t.e() == &entity),
        )
        .select(ws)
    }
}
1692
1693fn collect_reachable_from_patch<Blobs: BlobStore<Blake3>>(
1701 ws: &mut Workspace<Blobs>,
1702 patch: CommitSet,
1703) -> Result<
1704 CommitSet,
1705 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1706> {
1707 let mut result = CommitSet::new();
1708 for raw in patch.iter() {
1709 let handle = Value::new(*raw);
1710 let reach = collect_reachable(ws, handle)?;
1711 result.union(reach);
1712 }
1713 Ok(result)
1714}
1715
1716fn collect_reachable_from_patch_until<Blobs: BlobStore<Blake3>>(
1717 ws: &mut Workspace<Blobs>,
1718 seeds: CommitSet,
1719 stop: &CommitSet,
1720) -> Result<
1721 CommitSet,
1722 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1723> {
1724 let mut visited = HashSet::new();
1725 let mut stack: Vec<CommitHandle> = seeds.iter().map(|raw| Value::new(*raw)).collect();
1726 let mut result = CommitSet::new();
1727
1728 while let Some(commit) = stack.pop() {
1729 if !visited.insert(commit) {
1730 continue;
1731 }
1732
1733 if stop.get(&commit.raw).is_some() {
1734 continue;
1735 }
1736
1737 result.insert(&Entry::new(&commit.raw));
1738
1739 let meta: TribleSet = ws
1740 .local_blobs
1741 .reader()
1742 .unwrap()
1743 .get(commit)
1744 .or_else(|_| ws.base_blobs.get(commit))
1745 .map_err(WorkspaceCheckoutError::Storage)?;
1746
1747 for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
1748 stack.push(p);
1749 }
1750 }
1751
1752 Ok(result)
1753}
1754
1755impl<T, Blobs> CommitSelector<Blobs> for std::ops::Range<T>
1756where
1757 T: CommitSelector<Blobs>,
1758 Blobs: BlobStore<Blake3>,
1759{
1760 fn select(
1761 self,
1762 ws: &mut Workspace<Blobs>,
1763 ) -> Result<
1764 CommitSet,
1765 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1766 > {
1767 let end_patch = self.end.select(ws)?;
1768 let start_patch = self.start.select(ws)?;
1769
1770 collect_reachable_from_patch_until(ws, end_patch, &start_patch)
1771 }
1772}
1773
1774impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeFrom<T>
1775where
1776 T: CommitSelector<Blobs>,
1777 Blobs: BlobStore<Blake3>,
1778{
1779 fn select(
1780 self,
1781 ws: &mut Workspace<Blobs>,
1782 ) -> Result<
1783 CommitSet,
1784 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1785 > {
1786 let Some(head_) = ws.head else {
1787 return Ok(CommitSet::new());
1788 };
1789 let exclude_patch = self.start.select(ws)?;
1790
1791 let mut head_patch = CommitSet::new();
1792 head_patch.insert(&Entry::new(&head_.raw));
1793
1794 collect_reachable_from_patch_until(ws, head_patch, &exclude_patch)
1795 }
1796}
1797
1798impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeTo<T>
1799where
1800 T: CommitSelector<Blobs>,
1801 Blobs: BlobStore<Blake3>,
1802{
1803 fn select(
1804 self,
1805 ws: &mut Workspace<Blobs>,
1806 ) -> Result<
1807 CommitSet,
1808 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1809 > {
1810 let end_patch = self.end.select(ws)?;
1811 collect_reachable_from_patch(ws, end_patch)
1812 }
1813}
1814
1815impl<Blobs> CommitSelector<Blobs> for std::ops::RangeFull
1816where
1817 Blobs: BlobStore<Blake3>,
1818{
1819 fn select(
1820 self,
1821 ws: &mut Workspace<Blobs>,
1822 ) -> Result<
1823 CommitSet,
1824 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1825 > {
1826 let Some(head_) = ws.head else {
1827 return Ok(CommitSet::new());
1828 };
1829 collect_reachable(ws, head_)
1830 }
1831}
1832
/// Selects ancestor commits whose timestamp interval overlaps the queried
/// `[start, end]` range. Commits without exactly one timestamp are dropped.
impl<Blobs> CommitSelector<Blobs> for TimeRange
where
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // No head: nothing to select.
        let Some(head_) = ws.head else {
            return Ok(CommitSet::new());
        };
        let start = self.0;
        let end = self.1;
        filter(
            ancestors(head_),
            move |meta: &TribleSet, _payload: &TribleSet| {
                // A commit timestamp is stored as an interval of two Epochs.
                if let Ok(Some(((ts_start, ts_end),))) =
                    find!((t: (Epoch, Epoch)), pattern!(meta, [{ timestamp: ?t }])).at_most_one()
                {
                    // Standard interval-overlap test against [start, end].
                    ts_start <= end && ts_end >= start
                } else {
                    // Zero or multiple timestamps: exclude the commit.
                    false
                }
            },
        )
        .select(ws)
    }
}
1864
impl<Blobs: BlobStore<Blake3>> Workspace<Blobs> {
    /// Id of the branch this workspace was opened from.
    pub fn branch_id(&self) -> Id {
        self.base_branch_id
    }

    /// Current head commit, or `None` when the workspace has no commits yet.
    pub fn head(&self) -> Option<CommitHandle> {
        self.head
    }

    /// Metadata handle that [`Workspace::commit`] attaches to new commits.
    pub fn metadata(&self) -> MetadataHandle {
        self.commit_metadata
    }

    /// Stores `item` in the workspace-local blob store and returns its
    /// content-addressed handle.
    pub fn put<S, T>(&mut self, item: T) -> Value<Handle<Blake3, S>>
    where
        S: BlobSchema + 'static,
        T: ToBlob<S>,
        Handle<Blake3, S>: ValueSchema,
    {
        // Local puts are treated as infallible; failure would be a bug.
        self.local_blobs.put(item).expect("infallible blob put")
    }

    /// Loads and decodes the blob behind `handle`, preferring the
    /// workspace-local blobs and falling back to the base store on any
    /// local failure.
    pub fn get<T, S>(
        &mut self,
        handle: Value<Handle<Blake3, S>>,
    ) -> Result<T, <Blobs::Reader as BlobStoreGet<Blake3>>::GetError<<T as TryFromBlob<S>>::Error>>
    where
        S: BlobSchema + 'static,
        T: TryFromBlob<S>,
        Handle<Blake3, S>: ValueSchema,
    {
        self.local_blobs
            .reader()
            .unwrap()
            .get(handle)
            .or_else(|_| self.base_blobs.get(handle))
    }

    /// Creates a commit with `content_` and `message_`, using the
    /// workspace's default metadata handle, and advances the head.
    pub fn commit(
        &mut self,
        content_: impl Into<TribleSet>,
        message_: &str,
    ) {
        let content_ = content_.into();
        self.commit_internal(content_, Some(self.commit_metadata), Some(message_));
    }

    /// Like [`Workspace::commit`], but with an explicit metadata handle.
    pub fn commit_with_metadata(
        &mut self,
        content_: impl Into<TribleSet>,
        metadata_: MetadataHandle,
        message_: &str,
    ) {
        let content_ = content_.into();
        self.commit_internal(content_, Some(metadata_), Some(message_));
    }

    /// Shared commit implementation: builds the signed commit metadata,
    /// stores the content and commit blobs locally, and moves the head to
    /// the new commit.
    fn commit_internal(
        &mut self,
        content_: TribleSet,
        metadata_handle: Option<MetadataHandle>,
        message_: Option<&str>,
    ) {
        let content_blob = content_.to_blob();
        // The message (if any) becomes its own blob, referenced by handle.
        let message_handle = message_.map(|m| self.put(m.to_string()));
        // The current head (if any) becomes the sole parent.
        let parents = self.head.iter().copied();

        let commit_set = crate::repo::commit::commit_metadata(
            &self.signing_key,
            parents,
            message_handle,
            Some(content_blob.clone()),
            metadata_handle,
        );
        let _ = self
            .local_blobs
            .put(content_blob)
            .expect("failed to put content blob");
        let commit_handle = self
            .local_blobs
            .put(commit_set)
            .expect("failed to put commit blob");
        self.head = Some(commit_handle);
    }

    /// Merges another workspace into this one: copies all of its local
    /// blobs, then creates a merge commit whose parents are both heads.
    pub fn merge(&mut self, other: &mut Workspace<Blobs>) -> Result<CommitHandle, MergeError> {
        let other_local = other.local_blobs.reader().unwrap();
        // Copy every local blob from `other` so the merge commit's history
        // is fully resolvable from this workspace.
        for r in other_local.blobs() {
            let handle = r.expect("infallible blob enumeration");
            let blob: Blob<UnknownBlob> = other_local.get(handle).expect("infallible blob read");

            self.local_blobs.put(blob).expect("infallible blob put");
        }
        let parents = self.head.iter().copied().chain(other.head.iter().copied());
        // Merge commits carry no message, content, or metadata of their own.
        let merge_commit = commit_metadata(
            &self.signing_key,
            parents,
            None, None, None, );
        let commit_handle = self
            .local_blobs
            .put(merge_commit)
            .expect("failed to put merge commit blob");
        self.head = Some(commit_handle);

        Ok(commit_handle)
    }

    /// Creates a merge commit whose parents are the current head and the
    /// given commit handle. Unlike [`Workspace::merge`], no blobs are
    /// copied — the referenced commit must already be resolvable.
    pub fn merge_commit(
        &mut self,
        other: Value<Handle<Blake3, SimpleArchive>>,
    ) -> Result<CommitHandle, MergeError> {
        let parents = self.head.iter().copied().chain(Some(other));
        let merge_commit = commit_metadata(&self.signing_key, parents, None, None, None);
        let commit_handle = self
            .local_blobs
            .put(merge_commit)
            .expect("failed to put merge commit blob");
        self.head = Some(commit_handle);
        Ok(commit_handle)
    }

    /// Unions the `content` payloads of the given commits into a single
    /// [`TribleSet`]. Commits without a content handle contribute nothing;
    /// commits with more than one are rejected as malformed.
    fn checkout_commits<I>(
        &mut self,
        commits: I,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        I: IntoIterator<Item = CommitHandle>,
    {
        let local = self.local_blobs.reader().unwrap();
        let mut result = TribleSet::new();
        for commit in commits {
            let meta: TribleSet = local
                .get(commit)
                .or_else(|_| self.base_blobs.get(commit))
                .map_err(WorkspaceCheckoutError::Storage)?;

            // Zero or one content handle is acceptable; several is an error.
            let content_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };

            if let Some(c) = content_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                result += set;
            } else {
                // NOTE(review): this `continue` is a no-op — it is the last
                // statement of the loop body.
                continue;
            }
        }
        Ok(result)
    }

    /// Like [`Workspace::checkout_commits`], but unions the `metadata`
    /// payloads of the given commits instead of their contents.
    fn checkout_commits_metadata<I>(
        &mut self,
        commits: I,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        I: IntoIterator<Item = CommitHandle>,
    {
        let local = self.local_blobs.reader().unwrap();
        let mut result = TribleSet::new();
        for commit in commits {
            let meta: TribleSet = local
                .get(commit)
                .or_else(|_| self.base_blobs.get(commit))
                .map_err(WorkspaceCheckoutError::Storage)?;

            // Zero or one metadata handle is acceptable; several is an error.
            let metadata_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };

            if let Some(c) = metadata_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                result += set;
            }
        }
        Ok(result)
    }

    /// Combines [`Workspace::checkout_commits`] and
    /// [`Workspace::checkout_commits_metadata`] in a single pass, returning
    /// `(content union, metadata union)`.
    fn checkout_commits_with_metadata<I>(
        &mut self,
        commits: I,
    ) -> Result<
        (TribleSet, TribleSet),
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        I: IntoIterator<Item = CommitHandle>,
    {
        let local = self.local_blobs.reader().unwrap();
        let mut data = TribleSet::new();
        let mut metadata_set = TribleSet::new();
        for commit in commits {
            let meta: TribleSet = local
                .get(commit)
                .or_else(|_| self.base_blobs.get(commit))
                .map_err(WorkspaceCheckoutError::Storage)?;

            // Content payload (optional; several handles is an error).
            let content_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };

            if let Some(c) = content_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                data += set;
            }

            // Metadata payload (optional; several handles is an error).
            let metadata_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };

            if let Some(c) = metadata_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                metadata_set += set;
            }
        }
        Ok((data, metadata_set))
    }

    /// Runs a commit selector and returns both the selected commit set and
    /// the union of the selected commits' content payloads.
    pub fn checkout<R>(
        &mut self,
        spec: R,
    ) -> Result<
        Checkout,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        R: CommitSelector<Blobs>,
    {
        let commits = spec.select(self)?;
        let facts = self.checkout_commits(commits.iter().map(|raw| Value::new(*raw)))?;
        Ok(Checkout { facts, commits })
    }

    /// Runs a commit selector and returns the union of the selected
    /// commits' metadata payloads.
    pub fn checkout_metadata<R>(
        &mut self,
        spec: R,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        R: CommitSelector<Blobs>,
    {
        let patch = spec.select(self)?;
        let commits = patch.iter().map(|raw| Value::new(*raw));
        self.checkout_commits_metadata(commits)
    }

    /// Runs a commit selector and returns `(content union, metadata union)`
    /// for the selected commits.
    pub fn checkout_with_metadata<R>(
        &mut self,
        spec: R,
    ) -> Result<
        (TribleSet, TribleSet),
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        R: CommitSelector<Blobs>,
    {
        let patch = spec.select(self)?;
        let commits = patch.iter().map(|raw| Value::new(*raw));
        self.checkout_commits_with_metadata(commits)
    }
}
2224
/// Errors surfaced while selecting or materializing commits from a
/// workspace.
#[derive(Debug)]
pub enum WorkspaceCheckoutError<GetErr: Error> {
    /// The underlying blob store failed to produce a requested blob.
    Storage(GetErr),
    /// A commit's metadata did not have the expected shape (e.g. more than
    /// one `content` or `metadata` attribute).
    BadCommitMetadata(),
}
2232
2233impl<E: Error + fmt::Debug> fmt::Display for WorkspaceCheckoutError<E> {
2234 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2235 match self {
2236 WorkspaceCheckoutError::Storage(e) => write!(f, "storage error: {e}"),
2237 WorkspaceCheckoutError::BadCommitMetadata() => {
2238 write!(f, "commit metadata malformed")
2239 }
2240 }
2241 }
2242}
2243
// Marker impl: the `Display`/`Debug` impls above supply the messages.
impl<E: Error + fmt::Debug> Error for WorkspaceCheckoutError<E> {}
2245
2246fn collect_reachable<Blobs: BlobStore<Blake3>>(
2247 ws: &mut Workspace<Blobs>,
2248 from: CommitHandle,
2249) -> Result<
2250 CommitSet,
2251 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
2252> {
2253 let mut visited = HashSet::new();
2254 let mut stack = vec![from];
2255 let mut result = CommitSet::new();
2256
2257 while let Some(commit) = stack.pop() {
2258 if !visited.insert(commit) {
2259 continue;
2260 }
2261 result.insert(&Entry::new(&commit.raw));
2262
2263 let meta: TribleSet = ws
2264 .local_blobs
2265 .reader()
2266 .unwrap()
2267 .get(commit)
2268 .or_else(|_| ws.base_blobs.get(commit))
2269 .map_err(WorkspaceCheckoutError::Storage)?;
2270
2271 for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
2272 stack.push(p);
2273 }
2274 }
2275
2276 Ok(result)
2277}