1#![allow(clippy::type_complexity)]
8pub mod branch;
114pub mod commit;
116pub mod hybridstore;
118pub mod memoryrepo;
120#[cfg(feature = "object-store")]
121pub mod objectstore;
123pub mod pile;
125
/// Implemented by storage backends that require an explicit, fallible
/// shutdown step beyond what `Drop` can provide (e.g. flushing buffers,
/// releasing file locks — backend-specific).
pub trait StorageClose {
    /// Error reported when the shutdown fails.
    type Error: std::error::Error;

    /// Consumes the storage and performs an orderly shutdown.
    fn close(self) -> Result<(), Self::Error>;
}

impl<Storage> Repository<Storage>
where
    Storage: BlobStore<Blake3> + BranchStore<Blake3> + StorageClose,
{
    /// Consumes the repository and closes its underlying storage,
    /// propagating the storage backend's close error.
    pub fn close(self) -> Result<(), <Storage as StorageClose>::Error> {
        self.storage.close()
    }
}
154
155use crate::macros::pattern;
156use std::collections::{HashSet, VecDeque};
157use std::convert::Infallible;
158use std::error::Error;
159use std::fmt::Debug;
160use std::fmt::{self};
161
162use commit::commit_metadata;
163use hifitime::Epoch;
164use itertools::Itertools;
165
166use crate::blob::schemas::simplearchive::UnarchiveError;
167use crate::blob::schemas::UnknownBlob;
168use crate::blob::Blob;
169use crate::blob::BlobSchema;
170use crate::blob::MemoryBlobStore;
171use crate::blob::ToBlob;
172use crate::blob::TryFromBlob;
173use crate::find;
174use crate::id::genid;
175use crate::id::Id;
176use crate::patch::Entry;
177use crate::patch::IdentitySchema;
178use crate::patch::PATCH;
179use crate::prelude::valueschemas::GenId;
180use crate::repo::branch::branch_metadata;
181use crate::trible::TribleSet;
182use crate::value::schemas::hash::Handle;
183use crate::value::schemas::hash::HashProtocol;
184use crate::value::Value;
185use crate::value::ValueSchema;
186use crate::value::VALUE_LEN;
187use ed25519_dalek::SigningKey;
188
189use crate::blob::schemas::longstring::LongString;
190use crate::blob::schemas::simplearchive::SimpleArchive;
191use crate::prelude::*;
192use crate::value::schemas::ed25519 as ed;
193use crate::value::schemas::hash::Blake3;
194use crate::value::schemas::shortstring::ShortString;
195use crate::value::schemas::time::NsTAIInterval;
196
attributes! {
    // Handle of a commit's content payload (an archived TribleSet).
    "4DD4DDD05CC31734B03ABB4E43188B1F" as pub content: Handle<Blake3, SimpleArchive>;
    // Handle of auxiliary metadata attached to a commit.
    "88B59BD497540AC5AECDB7518E737C87" as pub metadata: Handle<Blake3, SimpleArchive>;
    // Handle of a parent commit; repeated for merge commits (see Parents selector).
    "317044B612C690000D798CA660ECFD2A" as pub parent: Handle<Blake3, SimpleArchive>;
    // Long-form commit message stored out-of-line as a blob.
    "B59D147839100B6ED4B165DF76EDF3BB" as pub message: Handle<Blake3, LongString>;
    // Short inline commit message.
    "12290C0BE0E9207E324F24DDE0D89300" as pub short_message: ShortString;
    // Handle of the commit a branch currently points at.
    "272FBC56108F336C4D2E17289468C35F" as pub head: Handle<Blake3, SimpleArchive>;
    // Id of the branch an entity belongs to.
    "8694CC73AF96A5E1C7635C677D1B928A" as pub branch: GenId;
    // Creation time as a TAI nanosecond interval.
    "71FF566AB4E3119FC2C5E66A18979586" as pub timestamp: NsTAIInterval;
    // Ed25519 public key of the signer.
    "ADB4FFAD247C886848161297EFF5A05B" as pub signed_by: ed::ED25519PublicKey;
    // R component of an Ed25519 signature.
    "9DF34F84959928F93A3C40AEB6E9E499" as pub signature_r: ed::ED25519RComponent;
    // S component of an Ed25519 signature.
    "1ACE03BF70242B289FDF00E4327C3BC6" as pub signature_s: ed::ED25519SComponent;
}
221
/// Enumeration of every blob handle a store currently holds.
pub trait BlobStoreList<H: HashProtocol> {
    /// Iterator over stored handles; items are fallible because listing
    /// may hit backend errors mid-stream.
    type Iter<'a>: Iterator<Item = Result<Value<Handle<H, UnknownBlob>>, Self::Err>>
    where
        Self: 'a;
    /// Error raised while enumerating blobs.
    type Err: Error + Debug + Send + Sync + 'static;

    /// Returns an iterator over all blob handles in the store.
    fn blobs<'a>(&'a self) -> Self::Iter<'a>;
}

/// Bookkeeping information a store tracks per blob.
#[derive(Debug, Clone)]
pub struct BlobMetadata {
    // Timestamp of the blob; unit/epoch is backend-defined — confirm per store.
    pub timestamp: u64,
    // Length of the blob in bytes.
    pub length: u64,
}

/// Optional capability: query per-blob metadata.
pub trait BlobStoreMeta<H: HashProtocol> {
    /// Error raised while reading metadata.
    type MetaError: std::error::Error + Send + Sync + 'static;

    /// Returns metadata for `handle`, or `Ok(None)` if the blob is unknown.
    fn metadata<S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<Option<BlobMetadata>, Self::MetaError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}

/// Optional capability: explicitly drop a blob from the store.
pub trait BlobStoreForget<H: HashProtocol> {
    /// Error raised while forgetting a blob.
    type ForgetError: std::error::Error + Send + Sync + 'static;

    /// Removes the blob identified by `handle` from the store.
    fn forget<S>(&mut self, handle: Value<Handle<H, S>>) -> Result<(), Self::ForgetError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}

/// Read access: fetch a blob by handle and convert it to `T`.
pub trait BlobStoreGet<H: HashProtocol> {
    /// Error raised while fetching/converting; generic over the
    /// conversion error of the requested target type.
    type GetError<E: std::error::Error>: Error;

    /// Loads the blob behind `handle` and converts it via [`TryFromBlob`].
    fn get<T, S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<T, Self::GetError<<T as TryFromBlob<S>>::Error>>
    where
        S: BlobSchema + 'static,
        T: TryFromBlob<S>,
        Handle<H, S>: ValueSchema;
}

/// Write access: store an item as a blob and return its content handle.
pub trait BlobStorePut<H: HashProtocol> {
    /// Error raised while storing.
    type PutError: Error + Debug + Send + Sync + 'static;

    /// Serializes `item` via [`ToBlob`] and stores it, returning the handle.
    fn put<S, T>(&mut self, item: T) -> Result<Value<Handle<H, S>>, Self::PutError>
    where
        S: BlobSchema + 'static,
        T: ToBlob<S>,
        Handle<H, S>: ValueSchema;
}

/// A writable blob store that can also hand out read snapshots.
pub trait BlobStore<H: HashProtocol>: BlobStorePut<H> {
    /// Cheap, cloneable read view of the store's contents.
    type Reader: BlobStoreGet<H> + BlobStoreList<H> + Clone + Send + PartialEq + Eq + 'static;
    /// Error raised while creating a reader.
    type ReaderError: Error + Debug + Send + Sync + 'static;
    /// Returns a read snapshot of the store.
    fn reader(&mut self) -> Result<Self::Reader, Self::ReaderError>;
}

/// Optional capability: mark a set of handles as live (e.g. for GC retention).
pub trait BlobStoreKeep<H: HashProtocol> {
    /// Marks every handle yielded by `handles` as one to keep.
    fn keep<I>(&mut self, handles: I)
    where
        I: IntoIterator<Item = Value<Handle<H, UnknownBlob>>>;
}
331
/// Outcome of a compare-and-swap branch update.
#[derive(Debug)]
pub enum PushResult<H>
where
    H: HashProtocol,
{
    /// The update was applied.
    Success(),
    /// The expected `old` value did not match; carries the currently
    /// stored head (if any) so the caller can retry against it.
    Conflict(Option<Value<Handle<H, SimpleArchive>>>),
}

/// Storage of branch heads: maps branch ids to branch-metadata blob
/// handles, with compare-and-swap update semantics.
pub trait BranchStore<H: HashProtocol> {
    /// Error raised while listing branches.
    type BranchesError: Error + Debug + Send + Sync + 'static;
    /// Error raised while reading a branch head.
    type HeadError: Error + Debug + Send + Sync + 'static;
    /// Error raised while updating a branch head.
    type UpdateError: Error + Debug + Send + Sync + 'static;

    /// Iterator over branch ids; items are fallible.
    type ListIter<'a>: Iterator<Item = Result<Id, Self::BranchesError>>
    where
        Self: 'a;

    /// Lists all branch ids known to the store.
    fn branches<'a>(&'a mut self) -> Result<Self::ListIter<'a>, Self::BranchesError>;

    /// Returns the metadata handle the branch currently points at, or
    /// `Ok(None)` if the branch has no head.
    fn head(&mut self, id: Id) -> Result<Option<Value<Handle<H, SimpleArchive>>>, Self::HeadError>;

    /// Atomically replaces the branch head: succeeds only if the stored
    /// head equals `old`, otherwise reports a [`PushResult::Conflict`].
    fn update(
        &mut self,
        id: Id,
        old: Option<Value<Handle<H, SimpleArchive>>>,
        new: Option<Value<Handle<H, SimpleArchive>>>,
    ) -> Result<PushResult<H>, Self::UpdateError>;
}
402
/// Error raised by [`transfer`], tagged with the phase that failed.
#[derive(Debug)]
pub enum TransferError<ListErr, LoadErr, StoreErr> {
    /// Enumerating the source blobs failed.
    List(ListErr),
    /// Loading a blob from the source store failed.
    Load(LoadErr),
    /// Storing a blob into the target store failed.
    Store(StoreErr),
}
413
414impl<ListErr, LoadErr, StoreErr> fmt::Display for TransferError<ListErr, LoadErr, StoreErr> {
415 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
416 write!(f, "failed to transfer blob")
417 }
418}
419
420impl<ListErr, LoadErr, StoreErr> Error for TransferError<ListErr, LoadErr, StoreErr>
421where
422 ListErr: Debug + Error + 'static,
423 LoadErr: Debug + Error + 'static,
424 StoreErr: Debug + Error + 'static,
425{
426 fn source(&self) -> Option<&(dyn Error + 'static)> {
427 match self {
428 Self::List(e) => Some(e),
429 Self::Load(e) => Some(e),
430 Self::Store(e) => Some(e),
431 }
432 }
433}
434
435pub fn transfer<'a, BS, BT, HS, HT, Handles>(
437 source: &'a BS,
438 target: &'a mut BT,
439 handles: Handles,
440) -> impl Iterator<
441 Item = Result<
442 (
443 Value<Handle<HS, UnknownBlob>>,
444 Value<Handle<HT, UnknownBlob>>,
445 ),
446 TransferError<
447 Infallible,
448 <BS as BlobStoreGet<HS>>::GetError<Infallible>,
449 <BT as BlobStorePut<HT>>::PutError,
450 >,
451 >,
452> + 'a
453where
454 BS: BlobStoreGet<HS> + 'a,
455 BT: BlobStorePut<HT> + 'a,
456 HS: 'static + HashProtocol,
457 HT: 'static + HashProtocol,
458 Handles: IntoIterator<Item = Value<Handle<HS, UnknownBlob>>> + 'a,
459 Handles::IntoIter: 'a,
460{
461 handles.into_iter().map(move |source_handle| {
462 let blob: Blob<UnknownBlob> = source.get(source_handle).map_err(TransferError::Load)?;
463
464 Ok((
465 source_handle,
466 (target.put(blob).map_err(TransferError::Store)?),
467 ))
468 })
469}
470
/// Breadth-first traversal over blobs reachable from a set of root
/// handles, treating every `VALUE_LEN`-aligned window of a blob's bytes
/// as a candidate handle (see `enqueue_from_blob`).
pub struct ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    // Store used to resolve candidate handles into blobs.
    source: &'a BS,
    // BFS frontier of handles still to visit.
    queue: VecDeque<Value<Handle<H, UnknownBlob>>>,
    // Raw hashes already yielded (or scheduled), preventing revisits.
    visited: HashSet<[u8; VALUE_LEN]>,
}
481
482impl<'a, BS, H> ReachableHandles<'a, BS, H>
483where
484 BS: BlobStoreGet<H>,
485 H: 'static + HashProtocol,
486{
487 fn new(source: &'a BS, roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>) -> Self {
488 let mut queue = VecDeque::new();
489 for handle in roots {
490 queue.push_back(handle);
491 }
492
493 Self {
494 source,
495 queue,
496 visited: HashSet::new(),
497 }
498 }
499
500 fn enqueue_from_blob(&mut self, blob: &Blob<UnknownBlob>) {
501 let bytes = blob.bytes.as_ref();
502 let mut offset = 0usize;
503
504 while offset + VALUE_LEN <= bytes.len() {
505 let mut raw = [0u8; VALUE_LEN];
506 raw.copy_from_slice(&bytes[offset..offset + VALUE_LEN]);
507
508 if !self.visited.contains(&raw) {
509 let candidate = Value::<Handle<H, UnknownBlob>>::new(raw);
510 if self
511 .source
512 .get::<anybytes::Bytes, UnknownBlob>(candidate)
513 .is_ok()
514 {
515 self.queue.push_back(candidate);
516 }
517 }
518
519 offset += VALUE_LEN;
520 }
521 }
522}
523
524impl<'a, BS, H> Iterator for ReachableHandles<'a, BS, H>
525where
526 BS: BlobStoreGet<H>,
527 H: 'static + HashProtocol,
528{
529 type Item = Value<Handle<H, UnknownBlob>>;
530
531 fn next(&mut self) -> Option<Self::Item> {
532 while let Some(handle) = self.queue.pop_front() {
533 let raw = handle.raw;
534
535 if !self.visited.insert(raw) {
536 continue;
537 }
538
539 if let Ok(blob) = self.source.get(handle) {
540 self.enqueue_from_blob(&blob);
541 }
542
543 return Some(handle);
544 }
545
546 None
547 }
548}
549
/// Returns an iterator over every handle transitively reachable from
/// `roots` in `source`, including the roots themselves (deduplicated).
pub fn reachable<'a, BS, H>(
    source: &'a BS,
    roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>,
) -> ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    ReachableHandles::new(source, roots)
}
561
562pub fn potential_handles<'a, H>(
569 set: &'a TribleSet,
570) -> impl Iterator<Item = Value<Handle<H, UnknownBlob>>> + 'a
571where
572 H: HashProtocol,
573{
574 set.vae.iter().map(|raw| {
575 let mut value = [0u8; VALUE_LEN];
576 value.copy_from_slice(&raw[0..VALUE_LEN]);
577 Value::<Handle<H, UnknownBlob>>::new(value)
578 })
579}
580
/// Error raised while creating a commit, distinguishing which of the two
/// blob writes failed.
#[derive(Debug)]
pub enum CreateCommitError<BlobErr: Error + Debug + Send + Sync + 'static> {
    /// Storing the commit's content blob failed.
    ContentStorageError(BlobErr),
    /// Storing the commit-metadata blob failed.
    CommitStorageError(BlobErr),
}
590
591impl<BlobErr: Error + Debug + Send + Sync + 'static> fmt::Display for CreateCommitError<BlobErr> {
592 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
593 match self {
594 CreateCommitError::ContentStorageError(e) => write!(f, "Content storage failed: {e}"),
595 CreateCommitError::CommitStorageError(e) => {
596 write!(f, "Commit metadata storage failed: {e}")
597 }
598 }
599 }
600}
601
602impl<BlobErr: Error + Debug + Send + Sync + 'static> Error for CreateCommitError<BlobErr> {
603 fn source(&self) -> Option<&(dyn Error + 'static)> {
604 match self {
605 CreateCommitError::ContentStorageError(e) => Some(e),
606 CreateCommitError::CommitStorageError(e) => Some(e),
607 }
608 }
609}
610
/// Errors raised while merging workspaces.
#[derive(Debug)]
pub enum MergeError {
    /// The workspaces do not share a common base and cannot be merged.
    DifferentRepos(),
}

/// Errors raised while pushing a workspace back into the repository.
#[derive(Debug)]
pub enum PushError<Storage: BranchStore<Blake3> + BlobStore<Blake3>> {
    /// Listing branches failed.
    StorageBranches(Storage::BranchesError),
    /// Obtaining a blob-store reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a blob (branch metadata or commit) failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// The compare-and-swap branch update failed at the storage level.
    BranchUpdate(Storage::UpdateError),
    /// Stored branch metadata was malformed (e.g. ambiguous head).
    BadBranchMetadata(),
    /// Merging with the conflicting head failed.
    MergeError(MergeError),
}

// Lets `?` lift MergeError into PushError inside push().
impl<Storage> From<MergeError> for PushError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    fn from(e: MergeError) -> Self {
        PushError::MergeError(e)
    }
}

/// Errors raised while creating or updating a branch.
#[derive(Debug)]
pub enum BranchError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Obtaining a blob-store reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// Reading a branch head failed.
    BranchHead(Storage::HeadError),
    /// Updating a branch head failed.
    BranchUpdate(Storage::UpdateError),
    /// A branch with this id already exists (CAS conflict on create).
    AlreadyExists(),
    /// No branch with the given id was found.
    BranchNotFound(Id),
}

/// Errors raised while looking up a branch by name.
#[derive(Debug)]
pub enum LookupError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Listing branches failed.
    StorageBranches(Storage::BranchesError),
    /// Reading a branch head failed.
    BranchHead(Storage::HeadError),
    /// Obtaining a blob-store reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// More than one branch carries the requested name.
    NameConflict(Vec<Id>),
    /// Stored branch metadata was malformed.
    BadBranchMetadata(),
}

/// Errors raised by [`Repository::ensure_branch`], which first looks a
/// branch up and creates it only when missing.
#[derive(Debug)]
pub enum EnsureBranchError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// The lookup phase failed.
    Lookup(LookupError<Storage>),
    /// The create phase failed.
    Create(BranchError<Storage>),
}

/// A versioned repository: a blob store for content plus a branch store
/// for mutable heads, with commits signed by `signing_key`.
pub struct Repository<Storage: BlobStore<Blake3> + BranchStore<Blake3>> {
    // Backing blob + branch storage.
    storage: Storage,
    // Default key used to sign commits and branch metadata.
    signing_key: SigningKey,
    // Handle of metadata attached to commits created through this repo.
    commit_metadata: MetadataHandle,
}
727
/// Errors raised while pulling a branch into a fresh [`Workspace`].
pub enum PullError<BranchStorageErr, BlobReaderErr, BlobStorageErr>
where
    BranchStorageErr: Error,
    BlobReaderErr: Error,
    BlobStorageErr: Error,
{
    /// No branch with the given id exists.
    BranchNotFound(Id),
    /// The branch store failed while reading the head.
    BranchStorage(BranchStorageErr),
    /// Obtaining a blob-store reader failed.
    BlobReader(BlobReaderErr),
    /// Reading the branch-metadata blob failed.
    BlobStorage(BlobStorageErr),
    /// Stored branch metadata was malformed (e.g. multiple heads).
    BadBranchMetadata(),
}

// Hand-written Debug mirrors what a derive would produce; kept manual so
// the bounds stay explicit on the impl rather than the enum.
impl<B, R, C> fmt::Debug for PullError<B, R, C>
where
    B: Error + fmt::Debug,
    R: Error + fmt::Debug,
    C: Error + fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            PullError::BranchNotFound(id) => f.debug_tuple("BranchNotFound").field(id).finish(),
            PullError::BranchStorage(e) => f.debug_tuple("BranchStorage").field(e).finish(),
            PullError::BlobReader(e) => f.debug_tuple("BlobReader").field(e).finish(),
            PullError::BlobStorage(e) => f.debug_tuple("BlobStorage").field(e).finish(),
            PullError::BadBranchMetadata() => f.debug_tuple("BadBranchMetadata").finish(),
        }
    }
}
763
impl<Storage> Repository<Storage>
where
    Storage: BlobStore<Blake3> + BranchStore<Blake3>,
{
    /// Creates a repository over `storage`, storing `commit_metadata` as a
    /// blob whose handle is attached to commits created through this repo.
    pub fn new(
        mut storage: Storage,
        signing_key: SigningKey,
        commit_metadata: TribleSet,
    ) -> Result<Self, <Storage as BlobStorePut<Blake3>>::PutError> {
        let commit_metadata = storage.put(commit_metadata)?;
        Ok(Self {
            storage,
            signing_key,
            commit_metadata,
        })
    }

    /// Consumes the repository and returns the underlying storage.
    pub fn into_storage(self) -> Storage {
        self.storage
    }

    /// Shared access to the underlying storage.
    pub fn storage(&self) -> &Storage {
        &self.storage
    }

    /// Mutable access to the underlying storage.
    pub fn storage_mut(&mut self) -> &mut Storage {
        &mut self.storage
    }

    /// Replaces the default signing key used for future operations.
    pub fn set_signing_key(&mut self, signing_key: SigningKey) {
        self.signing_key = signing_key;
    }

    /// Handle of the commit metadata stored at construction time.
    pub fn commit_metadata(&self) -> MetadataHandle {
        self.commit_metadata
    }

    /// Creates a new branch signed with the repository's default key.
    /// See [`Self::create_branch_with_key`].
    pub fn create_branch(
        &mut self,
        branch_name: &str,
        commit: Option<CommitHandle>,
    ) -> Result<ExclusiveId, BranchError<Storage>> {
        self.create_branch_with_key(branch_name, commit, self.signing_key.clone())
    }

    /// Creates a new branch with a freshly generated id, optionally
    /// pointing at `commit`, signed with `signing_key`. Fails with
    /// `AlreadyExists` if the CAS create races with another writer.
    pub fn create_branch_with_key(
        &mut self,
        branch_name: &str,
        commit: Option<CommitHandle>,
        signing_key: SigningKey,
    ) -> Result<ExclusiveId, BranchError<Storage>> {
        let branch_id = genid();
        // Store the branch name out-of-line and reference it by handle.
        let name_blob = branch_name.to_owned().to_blob();
        let name_handle = name_blob.get_handle::<Blake3>();
        self.storage
            .put(name_blob)
            .map_err(|e| BranchError::StoragePut(e))?;

        // With an initial commit the metadata is signed over the commit's
        // archived set; without one an unsigned stub is created.
        let branch_set = if let Some(commit) = commit {
            let reader = self
                .storage
                .reader()
                .map_err(|e| BranchError::StorageReader(e))?;
            let set: TribleSet = reader.get(commit).map_err(|e| BranchError::StorageGet(e))?;

            branch::branch_metadata(&signing_key, *branch_id, name_handle, Some(set.to_blob()))
        } else {
            branch::branch_unsigned(*branch_id, name_handle, None)
        };

        let branch_blob = branch_set.to_blob();
        let branch_handle = self
            .storage
            .put(branch_blob)
            .map_err(|e| BranchError::StoragePut(e))?;
        // CAS from None: only succeeds if no branch with this id exists yet.
        let push_result = self
            .storage
            .update(*branch_id, None, Some(branch_handle))
            .map_err(|e| BranchError::BranchUpdate(e))?;

        match push_result {
            PushResult::Success() => Ok(branch_id),
            PushResult::Conflict(_) => Err(BranchError::AlreadyExists()),
        }
    }

    /// Finds the branch whose stored name equals `name`. Returns
    /// `Ok(None)` when absent and `NameConflict` when several branches
    /// share the name. Branches with unreadable or malformed metadata
    /// are silently skipped.
    pub fn lookup_branch(
        &mut self,
        name: &str,
    ) -> Result<Option<Id>, LookupError<Storage>> {
        // Materialize the id list first so the branch-store borrow ends
        // before we start reading heads.
        let branch_ids: Vec<Id> = self
            .storage
            .branches()
            .map_err(LookupError::StorageBranches)?
            .collect::<Result<Vec<_>, _>>()
            .map_err(LookupError::StorageBranches)?;

        let mut matches = Vec::new();

        for branch_id in branch_ids {
            // Headless branches cannot match by name.
            let Some(meta_handle) = self
                .storage
                .head(branch_id)
                .map_err(LookupError::BranchHead)?
            else {
                continue;
            };

            let reader = self
                .storage
                .reader()
                .map_err(LookupError::StorageReader)?;
            let meta_set: TribleSet =
                reader.get(meta_handle).map_err(LookupError::StorageGet)?;

            // The metadata must carry exactly one name; otherwise skip.
            let Ok((name_handle,)) = find!(
                (n: Value<Handle<Blake3, LongString>>),
                pattern!(&meta_set, [{ crate::metadata::name: ?n }])
            )
            .exactly_one()
            else {
                continue;
            };

            let Ok(branch_name): Result<anybytes::View<str>, _> = reader.get(name_handle) else {
                continue;
            };

            if branch_name.as_ref() == name {
                matches.push(branch_id);
            }
        }

        match matches.len() {
            0 => Ok(None),
            1 => Ok(Some(matches[0])),
            _ => Err(LookupError::NameConflict(matches)),
        }
    }

    /// Looks up `name` and creates the branch (pointing at `commit`) only
    /// when it does not exist yet; returns the branch id either way.
    pub fn ensure_branch(
        &mut self,
        name: &str,
        commit: Option<CommitHandle>,
    ) -> Result<Id, EnsureBranchError<Storage>> {
        match self
            .lookup_branch(name)
            .map_err(EnsureBranchError::Lookup)?
        {
            Some(id) => Ok(id),
            None => {
                let id = self
                    .create_branch(name, commit)
                    .map_err(EnsureBranchError::Create)?;
                Ok(*id)
            }
        }
    }

    /// Pulls `branch_id` into a fresh [`Workspace`] signed with the
    /// repository's default key. See [`Self::pull_with_key`].
    pub fn pull(
        &mut self,
        branch_id: Id,
    ) -> Result<
        Workspace<Storage>,
        PullError<
            Storage::HeadError,
            Storage::ReaderError,
            <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
        >,
    > {
        self.pull_with_key(branch_id, self.signing_key.clone())
    }

    /// Pulls `branch_id`: reads its metadata, extracts the (optional)
    /// head commit, and returns a [`Workspace`] based on a fresh blob
    /// reader snapshot, using `signing_key` for future commits.
    pub fn pull_with_key(
        &mut self,
        branch_id: Id,
        signing_key: SigningKey,
    ) -> Result<
        Workspace<Storage>,
        PullError<
            Storage::HeadError,
            Storage::ReaderError,
            <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
        >,
    > {
        let base_branch_meta_handle = match self.storage.head(branch_id) {
            Ok(Some(handle)) => handle,
            Ok(None) => return Err(PullError::BranchNotFound(branch_id)),
            Err(e) => return Err(PullError::BranchStorage(e)),
        };
        let reader = self.storage.reader().map_err(PullError::BlobReader)?;
        let base_branch_meta: TribleSet = match reader.get(base_branch_meta_handle) {
            Ok(meta_set) => meta_set,
            Err(e) => return Err(PullError::BlobStorage(e)),
        };

        // A branch may legitimately have no head yet; more than one head
        // is malformed metadata.
        let head_ = match find!(
            (head_: Value<_>),
            pattern!(&base_branch_meta, [{ head: ?head_ }])
        )
        .at_most_one()
        {
            Ok(Some((h,))) => Some(h),
            Ok(None) => None,
            Err(_) => return Err(PullError::BadBranchMetadata()),
        };
        let base_blobs = self.storage.reader().map_err(PullError::BlobReader)?;
        Ok(Workspace {
            base_blobs,
            local_blobs: MemoryBlobStore::new(),
            head: head_,
            base_head: head_,
            base_branch_id: branch_id,
            base_branch_meta: base_branch_meta_handle,
            signing_key,
            commit_metadata: self.commit_metadata,
        })
    }

    /// Pushes `workspace`, retrying on conflicts: each conflict produces
    /// a workspace at the winning head into which the local changes are
    /// merged before trying again. Loops until a push succeeds.
    pub fn push(&mut self, workspace: &mut Workspace<Storage>) -> Result<(), PushError<Storage>> {
        while let Some(mut conflict_ws) = self.try_push(workspace)? {
            conflict_ws.merge(workspace)?;

            // Continue from the merged state based on the winning head.
            *workspace = conflict_ws;
        }

        Ok(())
    }

    /// Attempts a single push. Local blobs are always uploaded first.
    /// Returns `Ok(None)` on success (or when there is nothing to push),
    /// and `Ok(Some(ws))` with a workspace at the conflicting head when
    /// the compare-and-swap update loses a race.
    pub fn try_push(
        &mut self,
        workspace: &mut Workspace<Storage>,
    ) -> Result<Option<Workspace<Storage>>, PushError<Storage>> {
        // NOTE(review): reader()/blobs()/get() on the in-memory local
        // store are treated as infallible here — confirm MemoryBlobStore
        // guarantees this.
        let workspace_reader = workspace.local_blobs.reader().unwrap();
        for handle in workspace_reader.blobs() {
            let handle = handle.expect("infallible blob enumeration");
            let blob: Blob<UnknownBlob> =
                workspace_reader.get(handle).expect("infallible blob read");
            self.storage.put(blob).map_err(PushError::StoragePut)?;
        }

        // No head movement since the last pull/push: nothing to update.
        if workspace.base_head == workspace.head {
            return Ok(None);
        }

        let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
        let base_branch_meta: TribleSet = repo_reader
            .get(workspace.base_branch_meta)
            .map_err(PushError::StorageGet)?;

        // Carry the existing branch name over into the new metadata.
        let Ok((branch_name,)) = find!(
            (name: Value<Handle<Blake3, LongString>>),
            pattern!(base_branch_meta, [{ crate::metadata::name: ?name }])
        )
        .exactly_one() else {
            return Err(PushError::BadBranchMetadata());
        };

        let head_handle = workspace.head.ok_or(PushError::BadBranchMetadata())?;
        let head_: TribleSet = repo_reader
            .get(head_handle)
            .map_err(PushError::StorageGet)?;

        // Sign fresh branch metadata pointing at the new head.
        let branch_meta = branch_metadata(
            &workspace.signing_key,
            workspace.base_branch_id,
            branch_name,
            Some(head_.to_blob()),
        );

        let branch_meta_handle = self
            .storage
            .put(branch_meta)
            .map_err(PushError::StoragePut)?;

        // CAS against the metadata handle the workspace was based on.
        let result = self
            .storage
            .update(
                workspace.base_branch_id,
                Some(workspace.base_branch_meta),
                Some(branch_meta_handle),
            )
            .map_err(PushError::BranchUpdate)?;

        match result {
            PushResult::Success() => {
                // Rebase the workspace onto its own successful push: its
                // local blobs are now in the repository.
                workspace.base_branch_meta = branch_meta_handle;
                workspace.base_head = workspace.head;
                workspace.base_blobs = self.storage.reader().map_err(PushError::StorageReader)?;
                workspace.local_blobs = MemoryBlobStore::new();
                Ok(None)
            }
            PushResult::Conflict(conflicting_meta) => {
                let conflicting_meta = conflicting_meta.ok_or(PushError::BadBranchMetadata())?;

                let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
                let branch_meta: TribleSet = repo_reader
                    .get(conflicting_meta)
                    .map_err(PushError::StorageGet)?;

                let head_ = match find!((head_: Value<_>),
                    pattern!(&branch_meta, [{ head: ?head_ }])
                )
                .at_most_one()
                {
                    Ok(Some((h,))) => Some(h),
                    Ok(None) => None,
                    Err(_) => return Err(PushError::BadBranchMetadata()),
                };

                // Hand back a clean workspace at the winning head so the
                // caller can merge and retry.
                let conflict_ws = Workspace {
                    base_blobs: self.storage.reader().map_err(PushError::StorageReader)?,
                    local_blobs: MemoryBlobStore::new(),
                    head: head_,
                    base_head: head_,
                    base_branch_id: workspace.base_branch_id,
                    base_branch_meta: conflicting_meta,
                    signing_key: workspace.signing_key.clone(),
                    commit_metadata: workspace.commit_metadata,
                };

                Ok(Some(conflict_ws))
            }
        }
    }
}
1166
/// Handle of a commit-metadata blob.
pub type CommitHandle = Value<Handle<Blake3, SimpleArchive>>;
// Handle of the repository-level commit metadata blob.
type MetadataHandle = Value<Handle<Blake3, SimpleArchive>>;
/// Set of commit handles, keyed by their raw hash values.
pub type CommitSet = PATCH<VALUE_LEN, IdentitySchema, ()>;
// Handle of a branch-metadata blob.
type BranchMetaHandle = Value<Handle<Blake3, SimpleArchive>>;
1173
/// Result of checking out a selection of commits: the union of their
/// facts plus the set of commits that contributed them.
#[derive(Debug, Clone)]
pub struct Checkout {
    // Combined facts from all selected commits.
    facts: TribleSet,
    // The commits that were checked out.
    commits: CommitSet,
}

// Comparisons against plain TribleSets look only at the facts.
impl PartialEq<TribleSet> for Checkout {
    fn eq(&self, other: &TribleSet) -> bool {
        self.facts == *other
    }
}

impl PartialEq<Checkout> for TribleSet {
    fn eq(&self, other: &Checkout) -> bool {
        *self == other.facts
    }
}

impl Checkout {
    /// Borrow the combined facts.
    pub fn facts(&self) -> &TribleSet {
        &self.facts
    }

    /// Clone of the contributing commit set.
    pub fn commits(&self) -> CommitSet {
        self.commits.clone()
    }

    /// Consumes the checkout, keeping only the facts.
    pub fn into_facts(self) -> TribleSet {
        self.facts
    }
}

// Deref to the facts so a Checkout can be queried like a TribleSet.
impl std::ops::Deref for Checkout {
    type Target = TribleSet;
    fn deref(&self) -> &TribleSet {
        &self.facts
    }
}

// Combining checkouts unions both the facts and the commit sets.
impl std::ops::AddAssign<&Checkout> for Checkout {
    fn add_assign(&mut self, rhs: &Checkout) {
        self.facts += rhs.facts.clone();
        self.commits.union(rhs.commits.clone());
    }
}
1248
/// A mutable working copy of a branch: locally created blobs layered over
/// a read snapshot of the repository's blob store, pushed back via
/// [`Repository::push`].
pub struct Workspace<Blobs: BlobStore<Blake3>> {
    // Blobs created locally since the last pull/push; uploaded on push.
    local_blobs: MemoryBlobStore<Blake3>,
    // Snapshot of the repository blob store taken at pull/push time.
    base_blobs: Blobs::Reader,
    // Id of the branch this workspace was pulled from.
    base_branch_id: Id,
    // Branch-metadata handle this workspace is based on; used as the
    // compare-and-swap `old` value when pushing.
    base_branch_meta: BranchMetaHandle,
    // Current local head commit (None for an empty branch).
    head: Option<CommitHandle>,
    // Head as of the last pull/push; differs from `head` iff there are
    // local commits to push.
    base_head: Option<CommitHandle>,
    // Key used to sign commits and branch metadata created here.
    signing_key: SigningKey,
    // Handle of metadata attached to commits (set at Repository::new).
    commit_metadata: MetadataHandle,
}

// Manual Debug: deliberately omits `signing_key` from the output.
impl<Blobs> fmt::Debug for Workspace<Blobs>
where
    Blobs: BlobStore<Blake3>,
    Blobs::Reader: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Workspace")
            .field("local_blobs", &self.local_blobs)
            .field("base_blobs", &self.base_blobs)
            .field("base_branch_id", &self.base_branch_id)
            .field("base_branch_meta", &self.base_branch_meta)
            .field("base_head", &self.base_head)
            .field("head", &self.head)
            .field("commit_metadata", &self.commit_metadata)
            .finish()
    }
}
1292
/// Strategy for selecting a set of commits from a workspace's history
/// (single handles, ancestor walks, set algebra, filters, …).
pub trait CommitSelector<Blobs: BlobStore<Blake3>> {
    /// Resolves the selection against `ws`, which provides blob access
    /// to commit metadata.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >;
}
1303
/// Selects a commit and all of its ancestors.
pub struct Ancestors(pub CommitHandle);

/// Convenience constructor for [`Ancestors`].
pub fn ancestors(commit: CommitHandle) -> Ancestors {
    Ancestors(commit)
}

/// Selects the commits exactly `n` parent-steps away from a base selection.
pub struct NthAncestors<S>(pub S, pub usize);

/// Convenience constructor for [`NthAncestors`].
pub fn nth_ancestors<S>(selector: S, n: usize) -> NthAncestors<S> {
    NthAncestors(selector, n)
}

/// Selects the direct parents of a commit.
pub struct Parents(pub CommitHandle);

/// Convenience constructor for [`Parents`].
pub fn parents(commit: CommitHandle) -> Parents {
    Parents(commit)
}

/// Selects commits reachable from exactly one of the two commits
/// (symmetric difference of their ancestor sets).
pub struct SymmetricDiff(pub CommitHandle, pub CommitHandle);

/// Convenience constructor for [`SymmetricDiff`].
pub fn symmetric_diff(a: CommitHandle, b: CommitHandle) -> SymmetricDiff {
    SymmetricDiff(a, b)
}

/// Union of two selections.
pub struct Union<A, B> {
    left: A,
    right: B,
}

/// Convenience constructor for [`Union`].
pub fn union<A, B>(left: A, right: B) -> Union<A, B> {
    Union { left, right }
}

/// Intersection of two selections.
pub struct Intersect<A, B> {
    left: A,
    right: B,
}

/// Convenience constructor for [`Intersect`].
pub fn intersect<A, B>(left: A, right: B) -> Intersect<A, B> {
    Intersect { left, right }
}

/// Commits in `left` but not in `right`.
pub struct Difference<A, B> {
    left: A,
    right: B,
}

/// Convenience constructor for [`Difference`].
pub fn difference<A, B>(left: A, right: B) -> Difference<A, B> {
    Difference { left, right }
}

/// Selects commits by timestamp range (start, end).
pub struct TimeRange(pub Epoch, pub Epoch);

/// Convenience constructor for [`TimeRange`].
pub fn time_range(start: Epoch, end: Epoch) -> TimeRange {
    TimeRange(start, end)
}

/// Filters a base selection with a predicate over commit metadata and
/// commit content.
pub struct Filter<S, F> {
    selector: S,
    filter: F,
}

/// Convenience constructor for [`Filter`].
pub fn filter<S, F>(selector: S, filter: F) -> Filter<S, F> {
    Filter { selector, filter }
}
1395
1396impl<Blobs> CommitSelector<Blobs> for CommitHandle
1397where
1398 Blobs: BlobStore<Blake3>,
1399{
1400 fn select(
1401 self,
1402 _ws: &mut Workspace<Blobs>,
1403 ) -> Result<
1404 CommitSet,
1405 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1406 > {
1407 let mut patch = CommitSet::new();
1408 patch.insert(&Entry::new(&self.raw));
1409 Ok(patch)
1410 }
1411}
1412
impl<Blobs> CommitSelector<Blobs> for CommitSet
where
    Blobs: BlobStore<Blake3>,
{
    /// An explicit commit set selects exactly itself.
    fn select(
        self,
        _ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        Ok(self)
    }
}
1427
1428impl<Blobs> CommitSelector<Blobs> for Vec<CommitHandle>
1429where
1430 Blobs: BlobStore<Blake3>,
1431{
1432 fn select(
1433 self,
1434 _ws: &mut Workspace<Blobs>,
1435 ) -> Result<
1436 CommitSet,
1437 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1438 > {
1439 let mut patch = CommitSet::new();
1440 for handle in self {
1441 patch.insert(&Entry::new(&handle.raw));
1442 }
1443 Ok(patch)
1444 }
1445}
1446
1447impl<Blobs> CommitSelector<Blobs> for &[CommitHandle]
1448where
1449 Blobs: BlobStore<Blake3>,
1450{
1451 fn select(
1452 self,
1453 _ws: &mut Workspace<Blobs>,
1454 ) -> Result<
1455 CommitSet,
1456 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1457 > {
1458 let mut patch = CommitSet::new();
1459 for handle in self {
1460 patch.insert(&Entry::new(&handle.raw));
1461 }
1462 Ok(patch)
1463 }
1464}
1465
1466impl<Blobs> CommitSelector<Blobs> for Option<CommitHandle>
1467where
1468 Blobs: BlobStore<Blake3>,
1469{
1470 fn select(
1471 self,
1472 _ws: &mut Workspace<Blobs>,
1473 ) -> Result<
1474 CommitSet,
1475 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1476 > {
1477 let mut patch = CommitSet::new();
1478 if let Some(handle) = self {
1479 patch.insert(&Entry::new(&handle.raw));
1480 }
1481 Ok(patch)
1482 }
1483}
1484
impl<Blobs> CommitSelector<Blobs> for Ancestors
where
    Blobs: BlobStore<Blake3>,
{
    /// Selects the commit and everything reachable from it via parent
    /// links (delegates to `collect_reachable`).
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        collect_reachable(ws, self.0)
    }
}
1499
impl<Blobs, S> CommitSelector<Blobs> for NthAncestors<S>
where
    Blobs: BlobStore<Blake3>,
    S: CommitSelector<Blobs>,
{
    /// Walks `self.1` generations of parent links starting from the
    /// commits selected by `self.0`; returns the frontier reached after
    /// exactly that many steps (empty when history is shorter).
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let mut frontier = self.0.select(ws)?;
        let mut remaining = self.1;

        while remaining > 0 && !frontier.is_empty() {
            // Snapshot the frontier's raw keys before rebuilding it.
            let keys: Vec<[u8; VALUE_LEN]> = frontier.iter().copied().collect();
            let mut next_frontier = CommitSet::new();
            for raw in keys {
                let handle = CommitHandle::new(raw);
                let meta: TribleSet =
                    ws.get(handle).map_err(WorkspaceCheckoutError::Storage)?;
                // Every parent of every frontier commit joins the next step.
                for (p,) in find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }])) {
                    next_frontier.insert(&Entry::new(&p.raw));
                }
            }
            frontier = next_frontier;
            remaining -= 1;
        }

        Ok(frontier)
    }
}
1534
impl<Blobs> CommitSelector<Blobs> for Parents
where
    Blobs: BlobStore<Blake3>,
{
    /// Selects the direct parents of the commit (empty for a root commit).
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let meta: TribleSet = ws.get(self.0).map_err(WorkspaceCheckoutError::Storage)?;
        let mut result = CommitSet::new();
        for (p,) in find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }])) {
            result.insert(&Entry::new(&p.raw));
        }
        Ok(result)
    }
}
1554
impl<Blobs> CommitSelector<Blobs> for SymmetricDiff
where
    Blobs: BlobStore<Blake3>,
{
    /// Selects commits reachable from exactly one of the two handles:
    /// (ancestors(a) ∪ ancestors(b)) \ (ancestors(a) ∩ ancestors(b)).
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let a = collect_reachable(ws, self.0)?;
        let b = collect_reachable(ws, self.1)?;
        let inter = a.intersect(&b);
        let mut union = a;
        union.union(b);
        Ok(union.difference(&inter))
    }
}
1574
impl<A, B, Blobs> CommitSelector<Blobs> for Union<A, B>
where
    A: CommitSelector<Blobs>,
    B: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    /// Selects the union of both sub-selections.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let mut left = self.left.select(ws)?;
        let right = self.right.select(ws)?;
        left.union(right);
        Ok(left)
    }
}
1594
impl<A, B, Blobs> CommitSelector<Blobs> for Intersect<A, B>
where
    A: CommitSelector<Blobs>,
    B: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    /// Selects the intersection of both sub-selections.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let left = self.left.select(ws)?;
        let right = self.right.select(ws)?;
        Ok(left.intersect(&right))
    }
}
1613
1614impl<A, B, Blobs> CommitSelector<Blobs> for Difference<A, B>
1615where
1616 A: CommitSelector<Blobs>,
1617 B: CommitSelector<Blobs>,
1618 Blobs: BlobStore<Blake3>,
1619{
1620 fn select(
1621 self,
1622 ws: &mut Workspace<Blobs>,
1623 ) -> Result<
1624 CommitSet,
1625 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1626 > {
1627 let left = self.left.select(ws)?;
1628 let right = self.right.select(ws)?;
1629 Ok(left.difference(&right))
1630 }
1631}
1632
1633impl<S, F, Blobs> CommitSelector<Blobs> for Filter<S, F>
1634where
1635 Blobs: BlobStore<Blake3>,
1636 S: CommitSelector<Blobs>,
1637 F: for<'x, 'y> Fn(&'x TribleSet, &'y TribleSet) -> bool,
1638{
1639 fn select(
1640 self,
1641 ws: &mut Workspace<Blobs>,
1642 ) -> Result<
1643 CommitSet,
1644 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1645 > {
1646 let patch = self.selector.select(ws)?;
1647 let mut result = CommitSet::new();
1648 let filter = self.filter;
1649 for raw in patch.iter() {
1650 let handle = Value::new(*raw);
1651 let meta: TribleSet = ws.get(handle).map_err(WorkspaceCheckoutError::Storage)?;
1652
1653 let Ok((content_handle,)) = find!(
1654 (c: Value<_>),
1655 pattern!(&meta, [{ content: ?c }])
1656 )
1657 .exactly_one() else {
1658 return Err(WorkspaceCheckoutError::BadCommitMetadata());
1659 };
1660
1661 let payload: TribleSet = ws
1662 .get(content_handle)
1663 .map_err(WorkspaceCheckoutError::Storage)?;
1664
1665 if filter(&meta, &payload) {
1666 result.insert(&Entry::new(raw));
1667 }
1668 }
1669 Ok(result)
1670 }
1671}
1672
/// Selector for all ancestor commits whose payload mentions the given entity id.
pub struct HistoryOf(pub Id);
1675
/// Convenience constructor for [`HistoryOf`].
pub fn history_of(entity: Id) -> HistoryOf {
    HistoryOf(entity)
}
1680
impl<Blobs> CommitSelector<Blobs> for HistoryOf
where
    Blobs: BlobStore<Blake3>,
{
    /// Walks every ancestor of the current head and keeps only commits whose
    /// payload contains at least one trible with the target entity id.
    ///
    /// Returns an empty set when the workspace has no head yet.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let Some(head_) = ws.head else {
            return Ok(CommitSet::new());
        };
        let entity = self.0;
        // Delegate to the generic filter/ancestors combinators; the closure
        // inspects only the payload, ignoring commit metadata.
        filter(
            ancestors(head_),
            move |_: &TribleSet, payload: &TribleSet| payload.iter().any(|t| t.e() == &entity),
        )
        .select(ws)
    }
}
1703
1704fn collect_reachable_from_patch<Blobs: BlobStore<Blake3>>(
1712 ws: &mut Workspace<Blobs>,
1713 patch: CommitSet,
1714) -> Result<
1715 CommitSet,
1716 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1717> {
1718 let mut result = CommitSet::new();
1719 for raw in patch.iter() {
1720 let handle = Value::new(*raw);
1721 let reach = collect_reachable(ws, handle)?;
1722 result.union(reach);
1723 }
1724 Ok(result)
1725}
1726
1727fn collect_reachable_from_patch_until<Blobs: BlobStore<Blake3>>(
1728 ws: &mut Workspace<Blobs>,
1729 seeds: CommitSet,
1730 stop: &CommitSet,
1731) -> Result<
1732 CommitSet,
1733 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1734> {
1735 let mut visited = HashSet::new();
1736 let mut stack: Vec<CommitHandle> = seeds.iter().map(|raw| Value::new(*raw)).collect();
1737 let mut result = CommitSet::new();
1738
1739 while let Some(commit) = stack.pop() {
1740 if !visited.insert(commit) {
1741 continue;
1742 }
1743
1744 if stop.get(&commit.raw).is_some() {
1745 continue;
1746 }
1747
1748 result.insert(&Entry::new(&commit.raw));
1749
1750 let meta: TribleSet = ws
1751 .local_blobs
1752 .reader()
1753 .unwrap()
1754 .get(commit)
1755 .or_else(|_| ws.base_blobs.get(commit))
1756 .map_err(WorkspaceCheckoutError::Storage)?;
1757
1758 for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
1759 stack.push(p);
1760 }
1761 }
1762
1763 Ok(result)
1764}
1765
1766impl<T, Blobs> CommitSelector<Blobs> for std::ops::Range<T>
1767where
1768 T: CommitSelector<Blobs>,
1769 Blobs: BlobStore<Blake3>,
1770{
1771 fn select(
1772 self,
1773 ws: &mut Workspace<Blobs>,
1774 ) -> Result<
1775 CommitSet,
1776 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1777 > {
1778 let end_patch = self.end.select(ws)?;
1779 let start_patch = self.start.select(ws)?;
1780
1781 collect_reachable_from_patch_until(ws, end_patch, &start_patch)
1782 }
1783}
1784
1785impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeFrom<T>
1786where
1787 T: CommitSelector<Blobs>,
1788 Blobs: BlobStore<Blake3>,
1789{
1790 fn select(
1791 self,
1792 ws: &mut Workspace<Blobs>,
1793 ) -> Result<
1794 CommitSet,
1795 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1796 > {
1797 let Some(head_) = ws.head else {
1798 return Ok(CommitSet::new());
1799 };
1800 let exclude_patch = self.start.select(ws)?;
1801
1802 let mut head_patch = CommitSet::new();
1803 head_patch.insert(&Entry::new(&head_.raw));
1804
1805 collect_reachable_from_patch_until(ws, head_patch, &exclude_patch)
1806 }
1807}
1808
1809impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeTo<T>
1810where
1811 T: CommitSelector<Blobs>,
1812 Blobs: BlobStore<Blake3>,
1813{
1814 fn select(
1815 self,
1816 ws: &mut Workspace<Blobs>,
1817 ) -> Result<
1818 CommitSet,
1819 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1820 > {
1821 let end_patch = self.end.select(ws)?;
1822 collect_reachable_from_patch(ws, end_patch)
1823 }
1824}
1825
1826impl<Blobs> CommitSelector<Blobs> for std::ops::RangeFull
1827where
1828 Blobs: BlobStore<Blake3>,
1829{
1830 fn select(
1831 self,
1832 ws: &mut Workspace<Blobs>,
1833 ) -> Result<
1834 CommitSet,
1835 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1836 > {
1837 let Some(head_) = ws.head else {
1838 return Ok(CommitSet::new());
1839 };
1840 collect_reachable(ws, head_)
1841 }
1842}
1843
impl<Blobs> CommitSelector<Blobs> for TimeRange
where
    Blobs: BlobStore<Blake3>,
{
    /// Selects ancestor commits whose timestamp interval overlaps the
    /// requested `[start, end]` range.
    ///
    /// Commits without exactly one well-formed `timestamp` attribute are
    /// excluded (the closure returns `false` for them). Returns an empty set
    /// when the workspace has no head.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let Some(head_) = ws.head else {
            return Ok(CommitSet::new());
        };
        let start = self.0;
        let end = self.1;
        filter(
            ancestors(head_),
            move |meta: &TribleSet, _payload: &TribleSet| {
                // A commit's timestamp is stored as an interval of two epochs;
                // keep the commit iff the intervals overlap.
                if let Ok(Some(((ts_start, ts_end),))) =
                    find!((t: (Epoch, Epoch)), pattern!(meta, [{ timestamp: ?t }])).at_most_one()
                {
                    ts_start <= end && ts_end >= start
                } else {
                    false
                }
            },
        )
        .select(ws)
    }
}
1875
impl<Blobs: BlobStore<Blake3>> Workspace<Blobs> {
    /// Returns the id of the branch this workspace is based on.
    pub fn branch_id(&self) -> Id {
        self.base_branch_id
    }

    /// Returns the current head commit, or `None` if the workspace has no
    /// commits yet.
    pub fn head(&self) -> Option<CommitHandle> {
        self.head
    }

    /// Returns the metadata handle attached to commits created via
    /// [`Self::commit`].
    pub fn metadata(&self) -> MetadataHandle {
        self.commit_metadata
    }

    /// Stores `item` as a blob in the workspace-local store and returns its
    /// handle. Local puts are treated as infallible (see the `expect`).
    pub fn put<S, T>(&mut self, item: T) -> Value<Handle<Blake3, S>>
    where
        S: BlobSchema + 'static,
        T: ToBlob<S>,
        Handle<Blake3, S>: ValueSchema,
    {
        self.local_blobs.put(item).expect("infallible blob put")
    }

    /// Fetches and decodes the blob behind `handle`, checking the
    /// workspace-local store first and falling back to the base store.
    ///
    /// # Errors
    /// Returns the base store's error when the blob is missing from both
    /// stores or fails to decode into `T`.
    pub fn get<T, S>(
        &mut self,
        handle: Value<Handle<Blake3, S>>,
    ) -> Result<T, <Blobs::Reader as BlobStoreGet<Blake3>>::GetError<<T as TryFromBlob<S>>::Error>>
    where
        S: BlobSchema + 'static,
        T: TryFromBlob<S>,
        Handle<Blake3, S>: ValueSchema,
    {
        self.local_blobs
            .reader()
            .unwrap()
            .get(handle)
            .or_else(|_| self.base_blobs.get(handle))
    }

    /// Creates a new signed commit with the given content and message,
    /// using the workspace's default commit-metadata handle, and advances
    /// `head` to it.
    pub fn commit(
        &mut self,
        content_: impl Into<TribleSet>,
        message_: &str,
    ) {
        let content_ = content_.into();
        self.commit_internal(content_, Some(self.commit_metadata), Some(message_));
    }

    /// Like [`Self::commit`], but attaches the explicitly provided
    /// `metadata_` handle instead of the workspace default.
    pub fn commit_with_metadata(
        &mut self,
        content_: impl Into<TribleSet>,
        metadata_: MetadataHandle,
        message_: &str,
    ) {
        let content_ = content_.into();
        self.commit_internal(content_, Some(metadata_), Some(message_));
    }

    /// Shared commit implementation: builds and signs the commit metadata,
    /// stores the message, content, and commit blobs locally, and advances
    /// `head` to the new commit.
    fn commit_internal(
        &mut self,
        content_: TribleSet,
        metadata_handle: Option<MetadataHandle>,
        message_: Option<&str>,
    ) {
        let content_blob = content_.to_blob();
        let message_handle = message_.map(|m| self.put(m.to_string()));
        // The current head (if any) becomes the sole parent of the new commit.
        let parents = self.head.iter().copied();

        let commit_set = crate::repo::commit::commit_metadata(
            &self.signing_key,
            parents,
            message_handle,
            Some(content_blob.clone()),
            metadata_handle,
        );
        // Store the content blob itself; its handle is already embedded in
        // the commit metadata above.
        let _ = self
            .local_blobs
            .put(content_blob)
            .expect("failed to put content blob");
        let commit_handle = self
            .local_blobs
            .put(commit_set)
            .expect("failed to put commit blob");
        self.head = Some(commit_handle);
    }

    /// Merges `other` into this workspace: copies all of `other`'s local
    /// blobs over, then creates a merge commit whose parents are both heads.
    /// The merge commit carries no message, content, or extra metadata.
    pub fn merge(&mut self, other: &mut Workspace<Blobs>) -> Result<CommitHandle, MergeError> {
        // Copy every blob from the other workspace's local store into ours so
        // the merged history remains self-contained.
        let other_local = other.local_blobs.reader().unwrap();
        for r in other_local.blobs() {
            let handle = r.expect("infallible blob enumeration");
            let blob: Blob<UnknownBlob> = other_local.get(handle).expect("infallible blob read");

            self.local_blobs.put(blob).expect("infallible blob put");
        }
        let parents = self.head.iter().copied().chain(other.head.iter().copied());
        let merge_commit = commit_metadata(
            &self.signing_key,
            parents,
            None, None, None, );
        let commit_handle = self
            .local_blobs
            .put(merge_commit)
            .expect("failed to put merge commit blob");
        self.head = Some(commit_handle);

        Ok(commit_handle)
    }

    /// Creates a merge commit whose parents are the current head and the
    /// given commit handle, without copying any blobs.
    ///
    /// NOTE(review): unlike [`Self::merge`], blobs reachable from `other`
    /// are presumably already available in this workspace's stores — confirm
    /// callers guarantee this.
    pub fn merge_commit(
        &mut self,
        other: Value<Handle<Blake3, SimpleArchive>>,
    ) -> Result<CommitHandle, MergeError> {
        let parents = self.head.iter().copied().chain(Some(other));
        let merge_commit = commit_metadata(&self.signing_key, parents, None, None, None);
        let commit_handle = self
            .local_blobs
            .put(merge_commit)
            .expect("failed to put merge commit blob");
        self.head = Some(commit_handle);
        Ok(commit_handle)
    }

    /// Unions the content (payload) trible sets of the given commits.
    ///
    /// Commits without a `content` attribute are skipped; more than one
    /// `content` attribute is treated as malformed metadata.
    fn checkout_commits<I>(
        &mut self,
        commits: I,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        I: IntoIterator<Item = CommitHandle>,
    {
        let local = self.local_blobs.reader().unwrap();
        let mut result = TribleSet::new();
        for commit in commits {
            // Prefer local blobs, fall back to the base store.
            let meta: TribleSet = local
                .get(commit)
                .or_else(|_| self.base_blobs.get(commit))
                .map_err(WorkspaceCheckoutError::Storage)?;

            let content_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };

            if let Some(c) = content_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                result += set;
            } else {
                // Commit has no content payload; nothing to accumulate.
                continue;
            }
        }
        Ok(result)
    }

    /// Unions the `metadata` trible sets of the given commits; commits
    /// without a `metadata` attribute contribute nothing.
    fn checkout_commits_metadata<I>(
        &mut self,
        commits: I,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        I: IntoIterator<Item = CommitHandle>,
    {
        let local = self.local_blobs.reader().unwrap();
        let mut result = TribleSet::new();
        for commit in commits {
            let meta: TribleSet = local
                .get(commit)
                .or_else(|_| self.base_blobs.get(commit))
                .map_err(WorkspaceCheckoutError::Storage)?;

            let metadata_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };

            if let Some(c) = metadata_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                result += set;
            }
        }
        Ok(result)
    }

    /// Combines [`Self::checkout_commits`] and
    /// [`Self::checkout_commits_metadata`] in one pass, returning
    /// `(content, metadata)` trible sets for the given commits.
    fn checkout_commits_with_metadata<I>(
        &mut self,
        commits: I,
    ) -> Result<
        (TribleSet, TribleSet),
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        I: IntoIterator<Item = CommitHandle>,
    {
        let local = self.local_blobs.reader().unwrap();
        let mut data = TribleSet::new();
        let mut metadata_set = TribleSet::new();
        for commit in commits {
            let meta: TribleSet = local
                .get(commit)
                .or_else(|_| self.base_blobs.get(commit))
                .map_err(WorkspaceCheckoutError::Storage)?;

            let content_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };

            if let Some(c) = content_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                data += set;
            }

            let metadata_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };

            if let Some(c) = metadata_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                metadata_set += set;
            }
        }
        Ok((data, metadata_set))
    }

    /// Resolves `spec` to a commit set and returns the union of those
    /// commits' content together with the selected commit set.
    pub fn checkout<R>(
        &mut self,
        spec: R,
    ) -> Result<
        Checkout,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        R: CommitSelector<Blobs>,
    {
        let commits = spec.select(self)?;
        let facts = self.checkout_commits(commits.iter().map(|raw| Value::new(*raw)))?;
        Ok(Checkout { facts, commits })
    }

    /// Resolves `spec` to a commit set and returns the union of those
    /// commits' metadata trible sets.
    pub fn checkout_metadata<R>(
        &mut self,
        spec: R,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        R: CommitSelector<Blobs>,
    {
        let patch = spec.select(self)?;
        let commits = patch.iter().map(|raw| Value::new(*raw));
        self.checkout_commits_metadata(commits)
    }

    /// Resolves `spec` to a commit set and returns `(content, metadata)`
    /// trible sets for those commits in a single pass.
    pub fn checkout_with_metadata<R>(
        &mut self,
        spec: R,
    ) -> Result<
        (TribleSet, TribleSet),
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        R: CommitSelector<Blobs>,
    {
        let patch = spec.select(self)?;
        let commits = patch.iter().map(|raw| Value::new(*raw));
        self.checkout_commits_with_metadata(commits)
    }
}
2235
/// Errors produced while materialising commits during a workspace checkout.
#[derive(Debug)]
pub enum WorkspaceCheckoutError<GetErr: Error> {
    /// The underlying blob store failed to return or decode a requested blob.
    Storage(GetErr),
    /// A commit's metadata did not have the expected shape (e.g. more than
    /// one `content` attribute on a single commit).
    BadCommitMetadata(),
}
2243
2244impl<E: Error + fmt::Debug> fmt::Display for WorkspaceCheckoutError<E> {
2245 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2246 match self {
2247 WorkspaceCheckoutError::Storage(e) => write!(f, "storage error: {e}"),
2248 WorkspaceCheckoutError::BadCommitMetadata() => {
2249 write!(f, "commit metadata malformed")
2250 }
2251 }
2252 }
2253}
2254
2255impl<E: Error + fmt::Debug> Error for WorkspaceCheckoutError<E> {}
2256
2257fn collect_reachable<Blobs: BlobStore<Blake3>>(
2258 ws: &mut Workspace<Blobs>,
2259 from: CommitHandle,
2260) -> Result<
2261 CommitSet,
2262 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
2263> {
2264 let mut visited = HashSet::new();
2265 let mut stack = vec![from];
2266 let mut result = CommitSet::new();
2267
2268 while let Some(commit) = stack.pop() {
2269 if !visited.insert(commit) {
2270 continue;
2271 }
2272 result.insert(&Entry::new(&commit.raw));
2273
2274 let meta: TribleSet = ws
2275 .local_blobs
2276 .reader()
2277 .unwrap()
2278 .get(commit)
2279 .or_else(|_| ws.base_blobs.get(commit))
2280 .map_err(WorkspaceCheckoutError::Storage)?;
2281
2282 for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
2283 stack.push(p);
2284 }
2285 }
2286
2287 Ok(result)
2288}