1#![allow(clippy::type_complexity)]
8pub mod branch;
114pub mod commit;
115pub mod hybridstore;
116pub mod memoryrepo;
117pub mod objectstore;
118pub mod pile;
119
/// Storage backends that require an explicit, fallible shutdown (flushing
/// buffers, releasing file locks, …) instead of relying on `Drop`.
pub trait StorageClose {
    /// Error reported when the shutdown itself fails.
    type Error: std::error::Error;

    /// Consumes the storage and performs an orderly shutdown.
    fn close(self) -> Result<(), Self::Error>;
}
132
impl<Storage> Repository<Storage>
where
    Storage: BlobStore<Blake3> + BranchStore<Blake3> + StorageClose,
{
    /// Shuts down the repository by closing its underlying storage.
    ///
    /// Consumes the repository; the error type is whatever the storage's
    /// `StorageClose` implementation reports.
    pub fn close(self) -> Result<(), <Storage as StorageClose>::Error> {
        self.storage.close()
    }
}
148
149use crate::macros::pattern;
150use std::collections::{HashSet, VecDeque};
151use std::convert::Infallible;
152use std::error::Error;
153use std::fmt::Debug;
154use std::fmt::{self};
155
156use commit::commit_metadata;
157use hifitime::Epoch;
158use itertools::Itertools;
159
160use crate::blob::schemas::simplearchive::UnarchiveError;
161use crate::blob::schemas::UnknownBlob;
162use crate::blob::Blob;
163use crate::blob::BlobSchema;
164use crate::blob::MemoryBlobStore;
165use crate::blob::ToBlob;
166use crate::blob::TryFromBlob;
167use crate::find;
168use crate::id::genid;
169use crate::id::Id;
170use crate::patch::Entry;
171use crate::patch::IdentitySchema;
172use crate::patch::PATCH;
173use crate::prelude::valueschemas::GenId;
174use crate::repo::branch::branch_metadata;
175use crate::trible::TribleSet;
176use crate::value::schemas::hash::Handle;
177use crate::value::schemas::hash::HashProtocol;
178use crate::value::Value;
179use crate::value::ValueSchema;
180use crate::value::VALUE_LEN;
181use ed25519_dalek::SigningKey;
182
183use crate::blob::schemas::longstring::LongString;
184use crate::blob::schemas::simplearchive::SimpleArchive;
185use crate::prelude::*;
186use crate::value::schemas::ed25519 as ed;
187use crate::value::schemas::hash::Blake3;
188use crate::value::schemas::shortstring::ShortString;
189use crate::value::schemas::time::NsTAIInterval;
190
// Repository vocabulary: attribute ids used in commit and branch metadata.
// The hex ids are stable wire identifiers — never change them.
attributes! {
    // Handle of the TribleSet payload a commit carries.
    "4DD4DDD05CC31734B03ABB4E43188B1F" as pub content: Handle<Blake3, SimpleArchive>;
    // Handle of an optional metadata TribleSet attached to a commit.
    "88B59BD497540AC5AECDB7518E737C87" as pub metadata: Handle<Blake3, SimpleArchive>;
    // Parent commit link; commits may carry several (merges).
    "317044B612C690000D798CA660ECFD2A" as pub parent: Handle<Blake3, SimpleArchive>;
    // Long-form commit message stored as a separate blob.
    "B59D147839100B6ED4B165DF76EDF3BB" as pub message: Handle<Blake3, LongString>;
    // Inline short commit message.
    "12290C0BE0E9207E324F24DDE0D89300" as pub short_message: ShortString;
    // Branch metadata: handle of the commit the branch currently points at.
    "272FBC56108F336C4D2E17289468C35F" as pub head: Handle<Blake3, SimpleArchive>;
    // Branch metadata: id of the branch entity itself.
    "8694CC73AF96A5E1C7635C677D1B928A" as pub branch: GenId;
    // Creation time as a TAI nanosecond interval.
    "71FF566AB4E3119FC2C5E66A18979586" as pub timestamp: NsTAIInterval;
    // Ed25519 public key of the signer.
    "ADB4FFAD247C886848161297EFF5A05B" as pub signed_by: ed::ED25519PublicKey;
    // Ed25519 signature, split into its R and S components.
    "9DF34F84959928F93A3C40AEB6E9E499" as pub signature_r: ed::ED25519RComponent;
    "1ACE03BF70242B289FDF00E4327C3BC6" as pub signature_s: ed::ED25519SComponent;
}
215
/// Enumeration of every blob handle a store currently holds.
pub trait BlobStoreList<H: HashProtocol> {
    /// Iterator over stored handles; items are fallible so stores backed by
    /// I/O can surface enumeration errors mid-stream.
    type Iter<'a>: Iterator<Item = Result<Value<Handle<H, UnknownBlob>>, Self::Err>>
    where
        Self: 'a;
    type Err: Error + Debug + Send + Sync + 'static;

    /// Returns an iterator over all blob handles in the store.
    fn blobs<'a>(&'a self) -> Self::Iter<'a>;
}
226
/// Bookkeeping facts a store can report about a single blob.
#[derive(Debug, Clone)]
pub struct BlobMetadata {
    // When the blob was stored. NOTE(review): unit/epoch is not shown here —
    // confirm against the store implementations.
    pub timestamp: u64,
    // Size of the blob in bytes.
    pub length: u64,
}
235
/// Optional capability: query per-blob metadata without loading the blob.
pub trait BlobStoreMeta<H: HashProtocol> {
    type MetaError: std::error::Error + Send + Sync + 'static;

    /// Returns metadata for `handle`, or `Ok(None)` if the blob is unknown.
    fn metadata<S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<Option<BlobMetadata>, Self::MetaError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
249
/// Optional capability: explicitly drop a single blob from the store.
pub trait BlobStoreForget<H: HashProtocol> {
    type ForgetError: std::error::Error + Send + Sync + 'static;

    /// Removes the blob addressed by `handle` from the store.
    fn forget<S>(&mut self, handle: Value<Handle<H, S>>) -> Result<(), Self::ForgetError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
262
/// Read access to a blob store.
pub trait BlobStoreGet<H: HashProtocol> {
    /// Fetch/decode failure; generic over the conversion error `E` produced
    /// by the target type's `TryFromBlob` implementation.
    type GetError<E: std::error::Error>: Error;

    /// Loads the blob addressed by `handle` and converts it into `T`.
    fn get<T, S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<T, Self::GetError<<T as TryFromBlob<S>>::Error>>
    where
        S: BlobSchema + 'static,
        T: TryFromBlob<S>,
        Handle<H, S>: ValueSchema;
}
284
/// Write access to a blob store.
pub trait BlobStorePut<H: HashProtocol> {
    type PutError: Error + Debug + Send + Sync + 'static;

    /// Serializes `item` under schema `S` and stores it, returning the
    /// content-addressed handle under hash protocol `H`.
    fn put<S, T>(&mut self, item: T) -> Result<Value<Handle<H, S>>, Self::PutError>
    where
        S: BlobSchema + 'static,
        T: ToBlob<S>,
        Handle<H, S>: ValueSchema;
}
295
/// A writable blob store that can hand out cheap, cloneable read snapshots.
pub trait BlobStore<H: HashProtocol>: BlobStorePut<H> {
    /// Read-only view over the store's contents.
    type Reader: BlobStoreGet<H> + BlobStoreList<H> + Clone + Send + PartialEq + Eq + 'static;
    type ReaderError: Error + Debug + Send + Sync + 'static;
    /// Creates a reader; takes `&mut self` so stores may flush pending
    /// writes before exposing a snapshot.
    fn reader(&mut self) -> Result<Self::Reader, Self::ReaderError>;
}
301
/// Optional capability: garbage-collection hint marking which blobs to retain.
pub trait BlobStoreKeep<H: HashProtocol> {
    /// Marks every handle in `handles` as live/retained.
    fn keep<I>(&mut self, handles: I)
    where
        I: IntoIterator<Item = Value<Handle<H, UnknownBlob>>>;
}
309
/// Outcome of a compare-and-swap style branch-head update.
#[derive(Debug)]
pub enum PushResult<H>
where
    H: HashProtocol,
{
    /// The stored head matched the expected value and was replaced.
    Success(),
    /// The stored head differed; carries the handle actually stored (if any)
    /// so the caller can rebase/merge and retry.
    Conflict(Option<Value<Handle<H, SimpleArchive>>>),
}
318
/// Mutable mapping from branch ids to the handle of their current metadata.
pub trait BranchStore<H: HashProtocol> {
    type BranchesError: Error + Debug + Send + Sync + 'static;
    type HeadError: Error + Debug + Send + Sync + 'static;
    type UpdateError: Error + Debug + Send + Sync + 'static;

    /// Iterator over all branch ids known to the store.
    type ListIter<'a>: Iterator<Item = Result<Id, Self::BranchesError>>
    where
        Self: 'a;

    /// Enumerates every branch id currently present.
    fn branches<'a>(&'a mut self) -> Result<Self::ListIter<'a>, Self::BranchesError>;

    /// Returns the metadata handle `id` currently points at, or `None` if the
    /// branch does not exist.
    fn head(&mut self, id: Id) -> Result<Option<Value<Handle<H, SimpleArchive>>>, Self::HeadError>;

    /// Compare-and-swap of the branch head: implementations must move `id`
    /// from `old` to `new` atomically, reporting a `Conflict` otherwise.
    fn update(
        &mut self,
        id: Id,
        old: Option<Value<Handle<H, SimpleArchive>>>,
        new: Option<Value<Handle<H, SimpleArchive>>>,
    ) -> Result<PushResult<H>, Self::UpdateError>;
}
366
/// Failure while copying blobs between two stores; one variant per phase.
#[derive(Debug)]
pub enum TransferError<ListErr, LoadErr, StoreErr> {
    /// Enumerating the source store failed.
    List(ListErr),
    /// Reading a blob out of the source store failed.
    Load(LoadErr),
    /// Writing a blob into the target store failed.
    Store(StoreErr),
}
373
374impl<ListErr, LoadErr, StoreErr> fmt::Display for TransferError<ListErr, LoadErr, StoreErr> {
375 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
376 write!(f, "failed to transfer blob")
377 }
378}
379
impl<ListErr, LoadErr, StoreErr> Error for TransferError<ListErr, LoadErr, StoreErr>
where
    ListErr: Debug + Error + 'static,
    LoadErr: Debug + Error + 'static,
    StoreErr: Debug + Error + 'static,
{
    /// Exposes the wrapped phase error as the cause chain.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match self {
            Self::List(e) => Some(e),
            Self::Load(e) => Some(e),
            Self::Store(e) => Some(e),
        }
    }
}
394
395pub fn transfer<'a, BS, BT, HS, HT, Handles>(
397 source: &'a BS,
398 target: &'a mut BT,
399 handles: Handles,
400) -> impl Iterator<
401 Item = Result<
402 (
403 Value<Handle<HS, UnknownBlob>>,
404 Value<Handle<HT, UnknownBlob>>,
405 ),
406 TransferError<
407 Infallible,
408 <BS as BlobStoreGet<HS>>::GetError<Infallible>,
409 <BT as BlobStorePut<HT>>::PutError,
410 >,
411 >,
412> + 'a
413where
414 BS: BlobStoreGet<HS> + 'a,
415 BT: BlobStorePut<HT> + 'a,
416 HS: 'static + HashProtocol,
417 HT: 'static + HashProtocol,
418 Handles: IntoIterator<Item = Value<Handle<HS, UnknownBlob>>> + 'a,
419 Handles::IntoIter: 'a,
420{
421 handles.into_iter().map(move |source_handle| {
422 let blob: Blob<UnknownBlob> = source.get(source_handle).map_err(TransferError::Load)?;
423
424 Ok((
425 source_handle,
426 (target.put(blob).map_err(TransferError::Store)?),
427 ))
428 })
429}
430
/// Breadth-first traversal state over blobs reachable from a set of roots.
///
/// Produced by [`reachable`]; yields each reachable handle exactly once.
pub struct ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    // Store used to resolve candidate handles during the walk.
    source: &'a BS,
    // Frontier of handles still to be visited (may contain duplicates;
    // `visited` de-duplicates on pop).
    queue: VecDeque<Value<Handle<H, UnknownBlob>>>,
    // Raw values already yielded.
    visited: HashSet<[u8; VALUE_LEN]>,
}
441
impl<'a, BS, H> ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    /// Seeds the traversal with `roots`; nothing is fetched until iteration.
    fn new(source: &'a BS, roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>) -> Self {
        let mut queue = VecDeque::new();
        for handle in roots {
            queue.push_back(handle);
        }

        Self {
            source,
            queue,
            visited: HashSet::new(),
        }
    }

    /// Scans `blob` for embedded handles and queues the ones that resolve.
    ///
    /// The payload is inspected in consecutive non-overlapping VALUE_LEN-sized
    /// windows, so this assumes referenced handles sit at VALUE_LEN-aligned
    /// offsets — values at other offsets would be missed. NOTE(review):
    /// confirm all referencing blob schemas keep that alignment.
    fn enqueue_from_blob(&mut self, blob: &Blob<UnknownBlob>) {
        let bytes = blob.bytes.as_ref();
        let mut offset = 0usize;

        while offset + VALUE_LEN <= bytes.len() {
            let mut raw = [0u8; VALUE_LEN];
            raw.copy_from_slice(&bytes[offset..offset + VALUE_LEN]);

            // Skip values already yielded. Unvisited candidates may still be
            // enqueued more than once; `next()` de-duplicates again on pop,
            // so that is harmless, just wasteful.
            if !self.visited.contains(&raw) {
                let candidate = Value::<Handle<H, UnknownBlob>>::new(raw);
                // Only treat the window as a reference if the source store
                // can actually resolve it to a blob.
                if self
                    .source
                    .get::<anybytes::Bytes, UnknownBlob>(candidate)
                    .is_ok()
                {
                    self.queue.push_back(candidate);
                }
            }

            offset += VALUE_LEN;
        }
    }
}
483
484impl<'a, BS, H> Iterator for ReachableHandles<'a, BS, H>
485where
486 BS: BlobStoreGet<H>,
487 H: 'static + HashProtocol,
488{
489 type Item = Value<Handle<H, UnknownBlob>>;
490
491 fn next(&mut self) -> Option<Self::Item> {
492 while let Some(handle) = self.queue.pop_front() {
493 let raw = handle.raw;
494
495 if !self.visited.insert(raw) {
496 continue;
497 }
498
499 if let Ok(blob) = self.source.get(handle) {
500 self.enqueue_from_blob(&blob);
501 }
502
503 return Some(handle);
504 }
505
506 None
507 }
508}
509
/// Returns an iterator over every blob handle transitively reachable from
/// `roots` within `source`, each yielded exactly once.
pub fn reachable<'a, BS, H>(
    source: &'a BS,
    roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>,
) -> ReachableHandles<'a, BS, H>
where
    BS: BlobStoreGet<H>,
    H: 'static + HashProtocol,
{
    ReachableHandles::new(source, roots)
}
521
522pub fn potential_handles<'a, H>(
529 set: &'a TribleSet,
530) -> impl Iterator<Item = Value<Handle<H, UnknownBlob>>> + 'a
531where
532 H: HashProtocol,
533{
534 set.vae.iter().map(|raw| {
535 let mut value = [0u8; VALUE_LEN];
536 value.copy_from_slice(&raw[0..VALUE_LEN]);
537 Value::<Handle<H, UnknownBlob>>::new(value)
538 })
539}
540
/// Failure while persisting a new commit; distinguishes which of the two
/// blob writes (content payload vs. commit metadata) went wrong.
#[derive(Debug)]
pub enum CreateCommitError<BlobErr: Error + Debug + Send + Sync + 'static> {
    /// Storing the commit's content payload failed.
    ContentStorageError(BlobErr),
    /// Storing the commit metadata itself failed.
    CommitStorageError(BlobErr),
}
550
551impl<BlobErr: Error + Debug + Send + Sync + 'static> fmt::Display for CreateCommitError<BlobErr> {
552 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
553 match self {
554 CreateCommitError::ContentStorageError(e) => write!(f, "Content storage failed: {e}"),
555 CreateCommitError::CommitStorageError(e) => {
556 write!(f, "Commit metadata storage failed: {e}")
557 }
558 }
559 }
560}
561
562impl<BlobErr: Error + Debug + Send + Sync + 'static> Error for CreateCommitError<BlobErr> {
563 fn source(&self) -> Option<&(dyn Error + 'static)> {
564 match self {
565 CreateCommitError::ContentStorageError(e) => Some(e),
566 CreateCommitError::CommitStorageError(e) => Some(e),
567 }
568 }
569}
570
/// Failure while merging two workspaces.
#[derive(Debug)]
pub enum MergeError {
    /// The workspaces track different repositories/branch stores and cannot
    /// be merged.
    DifferentRepos(),
}
576
/// Failure while pushing a workspace back to the repository.
#[derive(Debug)]
pub enum PushError<Storage: BranchStore<Blake3> + BlobStore<Blake3>> {
    /// Enumerating branches failed.
    StorageBranches(Storage::BranchesError),
    /// Creating a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a metadata blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// The compare-and-swap branch update failed at the store level.
    BranchUpdate(Storage::UpdateError),
    /// Branch metadata was present but did not have the expected shape.
    BadBranchMetadata(),
    /// Merging the conflicting workspace failed.
    MergeError(MergeError),
}
596
597impl<Storage> From<MergeError> for PushError<Storage>
602where
603 Storage: BranchStore<Blake3> + BlobStore<Blake3>,
604{
605 fn from(e: MergeError) -> Self {
606 PushError::MergeError(e)
607 }
608}
609
/// Failure while creating or updating a branch.
#[derive(Debug)]
pub enum BranchError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Creating a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a metadata blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// Reading the branch head failed.
    BranchHead(Storage::HeadError),
    /// The compare-and-swap branch update failed at the store level.
    BranchUpdate(Storage::UpdateError),
    /// A branch with this id already exists.
    AlreadyExists(),
    /// The referenced branch id is unknown.
    BranchNotFound(Id),
}
638
/// Failure while resolving a branch (e.g. by name).
#[derive(Debug)]
pub enum LookupError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Enumerating branches failed.
    StorageBranches(Storage::BranchesError),
    /// Reading a branch head failed.
    BranchHead(Storage::HeadError),
    /// Creating a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a metadata blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// More than one branch matched; carries all matching ids.
    NameConflict(Vec<Id>),
    /// Branch metadata was present but did not have the expected shape.
    BadBranchMetadata(),
}
654
/// A trible repository: blob storage plus branch bookkeeping, with a signing
/// key used to author commits and branch metadata.
pub struct Repository<Storage: BlobStore<Blake3> + BranchStore<Blake3>> {
    // Combined blob + branch backend.
    storage: Storage,
    // Key used to sign newly created commits/branch metadata.
    signing_key: SigningKey,
    // Metadata attached by default to workspaces pulled from this repository.
    default_metadata: Option<MetadataHandle>,
}
666
/// Failure while pulling a workspace from a repository branch.
pub enum PullError<BranchStorageErr, BlobReaderErr, BlobStorageErr>
where
    BranchStorageErr: Error,
    BlobReaderErr: Error,
    BlobStorageErr: Error,
{
    /// The requested branch id does not exist.
    BranchNotFound(Id),
    /// Reading the branch head failed.
    BranchStorage(BranchStorageErr),
    /// Creating a blob reader failed.
    BlobReader(BlobReaderErr),
    /// Reading the branch metadata blob failed.
    BlobStorage(BlobStorageErr),
    /// Branch metadata was present but did not have the expected shape.
    BadBranchMetadata(),
}
684
685impl<B, R, C> fmt::Debug for PullError<B, R, C>
686where
687 B: Error + fmt::Debug,
688 R: Error + fmt::Debug,
689 C: Error + fmt::Debug,
690{
691 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
692 match self {
693 PullError::BranchNotFound(id) => f.debug_tuple("BranchNotFound").field(id).finish(),
694 PullError::BranchStorage(e) => f.debug_tuple("BranchStorage").field(e).finish(),
695 PullError::BlobReader(e) => f.debug_tuple("BlobReader").field(e).finish(),
696 PullError::BlobStorage(e) => f.debug_tuple("BlobStorage").field(e).finish(),
697 PullError::BadBranchMetadata() => f.debug_tuple("BadBranchMetadata").finish(),
698 }
699 }
700}
701
impl<Storage> Repository<Storage>
where
    Storage: BlobStore<Blake3> + BranchStore<Blake3>,
{
    /// Creates a repository over `storage`, signing new commits and branch
    /// metadata with `signing_key` unless a per-call key is supplied.
    pub fn new(storage: Storage, signing_key: SigningKey) -> Self {
        Self {
            storage,
            signing_key,
            default_metadata: None,
        }
    }

    /// Consumes the repository and returns the underlying storage.
    pub fn into_storage(self) -> Storage {
        self.storage
    }

    /// Shared access to the underlying storage.
    pub fn storage(&self) -> &Storage {
        &self.storage
    }

    /// Mutable access to the underlying storage.
    pub fn storage_mut(&mut self) -> &mut Storage {
        &mut self.storage
    }

    /// Replaces the repository's default signing key.
    pub fn set_signing_key(&mut self, signing_key: SigningKey) {
        self.signing_key = signing_key;
    }

    /// Stores `metadata_set` as a blob and installs its handle as the default
    /// metadata for workspaces pulled from this repository.
    pub fn set_default_metadata(
        &mut self,
        metadata_set: TribleSet,
    ) -> Result<MetadataHandle, <Storage as BlobStorePut<Blake3>>::PutError> {
        let handle = self.storage.put(metadata_set)?;
        self.default_metadata = Some(handle);
        Ok(handle)
    }

    /// Removes any configured default metadata.
    pub fn clear_default_metadata(&mut self) {
        self.default_metadata = None;
    }

    /// Handle of the currently configured default metadata, if any.
    pub fn default_metadata(&self) -> Option<MetadataHandle> {
        self.default_metadata
    }

    /// Creates a new branch named `branch_name`, optionally pointing at
    /// `commit`, signed with the repository's default key.
    pub fn create_branch(
        &mut self,
        branch_name: &str,
        commit: Option<CommitHandle>,
    ) -> Result<ExclusiveId, BranchError<Storage>> {
        self.create_branch_with_key(branch_name, commit, self.signing_key.clone())
    }

    /// Creates a new branch signed with an explicit `signing_key`.
    ///
    /// Generates a fresh branch id, stores the name blob and the branch
    /// metadata, then installs the metadata handle via a CAS update expecting
    /// no prior head. A conflict means the freshly generated id already
    /// exists in the store.
    pub fn create_branch_with_key(
        &mut self,
        branch_name: &str,
        commit: Option<CommitHandle>,
        signing_key: SigningKey,
    ) -> Result<ExclusiveId, BranchError<Storage>> {
        let branch_id = genid();
        let name_blob = branch_name.to_owned().to_blob();
        let name_handle = name_blob.get_handle::<Blake3>();
        // Persist the name blob so the metadata's name handle resolves.
        self.storage
            .put(name_blob)
            .map_err(|e| BranchError::StoragePut(e))?;

        let branch_set = if let Some(commit) = commit {
            // Load the commit metadata so the branch can be signed over it.
            let reader = self
                .storage
                .reader()
                .map_err(|e| BranchError::StorageReader(e))?;
            let set: TribleSet = reader.get(commit).map_err(|e| BranchError::StorageGet(e))?;

            branch::branch_metadata(&signing_key, *branch_id, name_handle, Some(set.to_blob()))
        } else {
            // No initial head: record unsigned branch metadata.
            branch::branch_unsigned(*branch_id, name_handle, None)
        };

        let branch_blob = branch_set.to_blob();
        let branch_handle = self
            .storage
            .put(branch_blob)
            .map_err(|e| BranchError::StoragePut(e))?;
        // CAS with `old = None`: the branch must not exist yet.
        let push_result = self
            .storage
            .update(*branch_id, None, Some(branch_handle))
            .map_err(|e| BranchError::BranchUpdate(e))?;

        match push_result {
            PushResult::Success() => Ok(branch_id),
            PushResult::Conflict(_) => Err(BranchError::AlreadyExists()),
        }
    }

    /// Checks out `branch_id` into a new [`Workspace`] using the repository's
    /// default signing key.
    pub fn pull(
        &mut self,
        branch_id: Id,
    ) -> Result<
        Workspace<Storage>,
        PullError<
            Storage::HeadError,
            Storage::ReaderError,
            <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
        >,
    > {
        self.pull_with_key(branch_id, self.signing_key.clone())
    }

    /// Checks out `branch_id` with an explicit signing key for subsequent
    /// commits made in the workspace.
    pub fn pull_with_key(
        &mut self,
        branch_id: Id,
        signing_key: SigningKey,
    ) -> Result<
        Workspace<Storage>,
        PullError<
            Storage::HeadError,
            Storage::ReaderError,
            <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
        >,
    > {
        // Resolve the branch to its current metadata handle.
        let base_branch_meta_handle = match self.storage.head(branch_id) {
            Ok(Some(handle)) => handle,
            Ok(None) => return Err(PullError::BranchNotFound(branch_id)),
            Err(e) => return Err(PullError::BranchStorage(e)),
        };
        let reader = self.storage.reader().map_err(PullError::BlobReader)?;
        let base_branch_meta: TribleSet = match reader.get(base_branch_meta_handle) {
            Ok(meta_set) => meta_set,
            Err(e) => return Err(PullError::BlobStorage(e)),
        };

        // A branch may legitimately have no head yet; more than one head
        // attribute is malformed metadata.
        let head_ = match find!(
            (head_: Value<_>),
            pattern!(&base_branch_meta, [{ head: ?head_ }])
        )
        .at_most_one()
        {
            Ok(Some((h,))) => Some(h),
            Ok(None) => None,
            Err(_) => return Err(PullError::BadBranchMetadata()),
        };
        let base_blobs = self.storage.reader().map_err(PullError::BlobReader)?;
        Ok(Workspace {
            base_blobs,
            local_blobs: MemoryBlobStore::new(),
            head: head_,
            base_head: head_,
            base_branch_id: branch_id,
            base_branch_meta: base_branch_meta_handle,
            signing_key,
            default_metadata: self.default_metadata,
        })
    }

    /// Like [`Repository::pull`], but also installs `metadata_set` as the
    /// workspace's default commit metadata.
    pub fn pull_with_metadata(
        &mut self,
        branch_id: Id,
        metadata_set: TribleSet,
    ) -> Result<
        Workspace<Storage>,
        PullError<
            Storage::HeadError,
            Storage::ReaderError,
            <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
        >,
    > {
        let mut workspace = self.pull_with_key(branch_id, self.signing_key.clone())?;
        workspace.set_default_metadata(metadata_set);
        Ok(workspace)
    }

    /// Pushes `workspace`, automatically merging and retrying on conflicts
    /// until the CAS update succeeds.
    pub fn push(&mut self, workspace: &mut Workspace<Storage>) -> Result<(), PushError<Storage>> {
        // Each conflict yields a fresh workspace at the current branch head;
        // merge our work into it and try again from there.
        while let Some(mut conflict_ws) = self.try_push(workspace)? {
            conflict_ws.merge(workspace)?;

            *workspace = conflict_ws;
        }

        Ok(())
    }

    /// Attempts one push of `workspace`.
    ///
    /// Returns `Ok(None)` on success (or when there is nothing to push) and
    /// `Ok(Some(conflict_ws))` when another writer moved the branch head — the
    /// returned workspace is based on that new head so the caller can merge
    /// and retry.
    pub fn try_push(
        &mut self,
        workspace: &mut Workspace<Storage>,
    ) -> Result<Option<Workspace<Storage>>, PushError<Storage>> {
        // First copy every locally created blob into repository storage so
        // the new head is fully resolvable before the branch moves.
        let workspace_reader = workspace.local_blobs.reader().unwrap();
        for handle in workspace_reader.blobs() {
            let handle = handle.expect("infallible blob enumeration");
            let blob: Blob<UnknownBlob> =
                workspace_reader.get(handle).expect("infallible blob read");
            self.storage.put(blob).map_err(PushError::StoragePut)?;
        }

        // No head movement since the last sync — nothing to push.
        if workspace.base_head == workspace.head {
            return Ok(None);
        }

        let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
        let base_branch_meta: TribleSet = repo_reader
            .get(workspace.base_branch_meta)
            .map_err(PushError::StorageGet)?;

        // Carry the branch name over into the new metadata.
        let Ok((branch_name,)) = find!(
            (name: Value<Handle<Blake3, LongString>>),
            pattern!(base_branch_meta, [{ crate::metadata::name: ?name }])
        )
        .exactly_one() else {
            return Err(PushError::BadBranchMetadata());
        };

        let head_handle = workspace.head.ok_or(PushError::BadBranchMetadata())?;
        let head_: TribleSet = repo_reader
            .get(head_handle)
            .map_err(PushError::StorageGet)?;

        // Sign fresh branch metadata pointing at the workspace head.
        let branch_meta = branch_metadata(
            &workspace.signing_key,
            workspace.base_branch_id,
            branch_name,
            Some(head_.to_blob()),
        );

        let branch_meta_handle = self
            .storage
            .put(branch_meta)
            .map_err(PushError::StoragePut)?;

        // CAS: only move the branch if it still points at the metadata the
        // workspace was based on.
        let result = self
            .storage
            .update(
                workspace.base_branch_id,
                Some(workspace.base_branch_meta),
                Some(branch_meta_handle),
            )
            .map_err(PushError::BranchUpdate)?;

        match result {
            PushResult::Success() => {
                // Re-sync the workspace: its local blobs are now in the
                // repository and its head is the branch head.
                workspace.base_branch_meta = branch_meta_handle;
                workspace.base_head = workspace.head;
                workspace.base_blobs = self.storage.reader().map_err(PushError::StorageReader)?;
                workspace.local_blobs = MemoryBlobStore::new();
                Ok(None)
            }
            PushResult::Conflict(conflicting_meta) => {
                let conflicting_meta = conflicting_meta.ok_or(PushError::BadBranchMetadata())?;

                let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
                let branch_meta: TribleSet = repo_reader
                    .get(conflicting_meta)
                    .map_err(PushError::StorageGet)?;

                let head_ = match find!((head_: Value<_>),
                    pattern!(&branch_meta, [{ head: ?head_ }])
                )
                .at_most_one()
                {
                    Ok(Some((h,))) => Some(h),
                    Ok(None) => None,
                    Err(_) => return Err(PushError::BadBranchMetadata()),
                };

                // Hand back a workspace based on the winning head so the
                // caller can merge and retry.
                let conflict_ws = Workspace {
                    base_blobs: self.storage.reader().map_err(PushError::StorageReader)?,
                    local_blobs: MemoryBlobStore::new(),
                    head: head_,
                    base_head: head_,
                    base_branch_id: workspace.base_branch_id,
                    base_branch_meta: conflicting_meta,
                    signing_key: workspace.signing_key.clone(),
                    default_metadata: workspace.default_metadata,
                };

                Ok(Some(conflict_ws))
            }
        }
    }
}
1054
// Handle of a commit metadata archive.
type CommitHandle = Value<Handle<Blake3, SimpleArchive>>;
// Handle of a metadata TribleSet attached to commits.
type MetadataHandle = Value<Handle<Blake3, SimpleArchive>>;
// Set of commit handles keyed by their raw 32-byte value.
type CommitSet = PATCH<VALUE_LEN, IdentitySchema, ()>;
// Handle of a branch metadata archive.
type BranchMetaHandle = Value<Handle<Blake3, SimpleArchive>>;
1059
/// A mutable working copy of one branch.
///
/// New blobs accumulate in `local_blobs` and are only copied into repository
/// storage when the workspace is pushed.
pub struct Workspace<Blobs: BlobStore<Blake3>> {
    // Blobs created locally since the last successful push.
    local_blobs: MemoryBlobStore<Blake3>,
    // Read-only snapshot of the repository storage this workspace came from.
    base_blobs: Blobs::Reader,
    // Branch this workspace tracks.
    base_branch_id: Id,
    // Branch metadata handle observed at pull/last push; used for CAS pushes.
    base_branch_meta: BranchMetaHandle,
    // Current head commit (None on an empty branch).
    head: Option<CommitHandle>,
    // Head as of the last sync; differing from `head` means unpushed work.
    base_head: Option<CommitHandle>,
    // Key used to sign commits and branch metadata created here.
    signing_key: SigningKey,
    // Metadata attached to new commits by default, if configured.
    default_metadata: Option<MetadataHandle>,
}
1085
impl<Blobs> fmt::Debug for Workspace<Blobs>
where
    Blobs: BlobStore<Blake3>,
    Blobs::Reader: fmt::Debug,
{
    /// Debug view of the workspace state.
    ///
    /// `signing_key` is deliberately omitted — presumably to keep the secret
    /// key out of logs (NOTE(review): confirm intent).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Workspace")
            .field("local_blobs", &self.local_blobs)
            .field("base_blobs", &self.base_blobs)
            .field("base_branch_id", &self.base_branch_id)
            .field("base_branch_meta", &self.base_branch_meta)
            .field("base_head", &self.base_head)
            .field("head", &self.head)
            .field("default_metadata", &self.default_metadata)
            .finish()
    }
}
1103
/// A query that resolves to a set of commits within a workspace.
///
/// Selectors may need to read commit metadata blobs, hence the mutable
/// workspace access and the checkout-style error type.
pub trait CommitSelector<Blobs: BlobStore<Blake3>> {
    /// Evaluates the selector against `ws` and returns the matching commits.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >;
}
1114
/// Selector: a commit together with its entire ancestry.
pub struct Ancestors(pub CommitHandle);

/// Selects `commit` and everything reachable through parent links.
pub fn ancestors(commit: CommitHandle) -> Ancestors {
    Ancestors(commit)
}

/// Selector: the single commit `n` parent steps above the starting commit.
pub struct NthAncestor(pub CommitHandle, pub usize);

/// Selects the commit reached by following `n` first-parent links from
/// `commit` (empty when the chain runs out early).
pub fn nth_ancestor(commit: CommitHandle, n: usize) -> NthAncestor {
    NthAncestor(commit, n)
}
1130
/// Selector: the direct parents of a commit.
pub struct Parents(pub CommitHandle);

/// Selects the immediate parents of `commit` (not the commit itself).
pub fn parents(commit: CommitHandle) -> Parents {
    Parents(commit)
}

/// Selector: commits reachable from exactly one of two commits.
pub struct SymmetricDiff(pub CommitHandle, pub CommitHandle);

/// Selects the symmetric difference of the ancestor closures of `a` and `b`.
pub fn symmetric_diff(a: CommitHandle, b: CommitHandle) -> SymmetricDiff {
    SymmetricDiff(a, b)
}
1147
/// Selector combinator: commits matched by either operand.
pub struct Union<A, B> {
    left: A,
    right: B,
}

/// Combines two selectors into their set union.
pub fn union<A, B>(left: A, right: B) -> Union<A, B> {
    Union { left, right }
}

/// Selector combinator: commits matched by both operands.
pub struct Intersect<A, B> {
    left: A,
    right: B,
}

/// Combines two selectors into their set intersection.
pub fn intersect<A, B>(left: A, right: B) -> Intersect<A, B> {
    Intersect { left, right }
}
1169
/// Selector combinator: commits matched by `left` but not by `right`.
pub struct Difference<A, B> {
    left: A,
    right: B,
}

/// Combines two selectors into their set difference (`left - right`).
pub fn difference<A, B>(left: A, right: B) -> Difference<A, B> {
    Difference { left, right }
}

/// Selector: commits whose timestamp falls inside a time window.
pub struct TimeRange(pub Epoch, pub Epoch);

/// Selects commits timestamped between `start` and `end`.
pub fn time_range(start: Epoch, end: Epoch) -> TimeRange {
    TimeRange(start, end)
}
1189
/// Selector combinator: keeps only commits for which `filter` returns true,
/// given the commit metadata and its content payload.
pub struct Filter<S, F> {
    selector: S,
    filter: F,
}

/// Wraps `selector`, keeping only commits accepted by `filter`.
pub fn filter<S, F>(selector: S, filter: F) -> Filter<S, F> {
    Filter { selector, filter }
}
1200
1201impl<Blobs> CommitSelector<Blobs> for CommitHandle
1202where
1203 Blobs: BlobStore<Blake3>,
1204{
1205 fn select(
1206 self,
1207 _ws: &mut Workspace<Blobs>,
1208 ) -> Result<
1209 CommitSet,
1210 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1211 > {
1212 let mut patch = CommitSet::new();
1213 patch.insert(&Entry::new(&self.raw));
1214 Ok(patch)
1215 }
1216}
1217
1218impl<Blobs> CommitSelector<Blobs> for Vec<CommitHandle>
1219where
1220 Blobs: BlobStore<Blake3>,
1221{
1222 fn select(
1223 self,
1224 _ws: &mut Workspace<Blobs>,
1225 ) -> Result<
1226 CommitSet,
1227 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1228 > {
1229 let mut patch = CommitSet::new();
1230 for handle in self {
1231 patch.insert(&Entry::new(&handle.raw));
1232 }
1233 Ok(patch)
1234 }
1235}
1236
1237impl<Blobs> CommitSelector<Blobs> for &[CommitHandle]
1238where
1239 Blobs: BlobStore<Blake3>,
1240{
1241 fn select(
1242 self,
1243 _ws: &mut Workspace<Blobs>,
1244 ) -> Result<
1245 CommitSet,
1246 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1247 > {
1248 let mut patch = CommitSet::new();
1249 for handle in self {
1250 patch.insert(&Entry::new(&handle.raw));
1251 }
1252 Ok(patch)
1253 }
1254}
1255
1256impl<Blobs> CommitSelector<Blobs> for Option<CommitHandle>
1257where
1258 Blobs: BlobStore<Blake3>,
1259{
1260 fn select(
1261 self,
1262 _ws: &mut Workspace<Blobs>,
1263 ) -> Result<
1264 CommitSet,
1265 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1266 > {
1267 let mut patch = CommitSet::new();
1268 if let Some(handle) = self {
1269 patch.insert(&Entry::new(&handle.raw));
1270 }
1271 Ok(patch)
1272 }
1273}
1274
impl<Blobs> CommitSelector<Blobs> for Ancestors
where
    Blobs: BlobStore<Blake3>,
{
    /// Selects the starting commit plus everything reachable through parent
    /// links, by delegating to the shared reachability walk.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        collect_reachable(ws, self.0)
    }
}
1289
1290impl<Blobs> CommitSelector<Blobs> for NthAncestor
1291where
1292 Blobs: BlobStore<Blake3>,
1293{
1294 fn select(
1295 self,
1296 ws: &mut Workspace<Blobs>,
1297 ) -> Result<
1298 CommitSet,
1299 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1300 > {
1301 let mut current = self.0;
1302 let mut remaining = self.1;
1303
1304 while remaining > 0 {
1305 let meta: TribleSet = ws.get(current).map_err(WorkspaceCheckoutError::Storage)?;
1306 let mut parents = find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }]));
1307 let Some((p,)) = parents.next() else {
1308 return Ok(CommitSet::new());
1309 };
1310 current = p;
1311 remaining -= 1;
1312 }
1313
1314 let mut patch = CommitSet::new();
1315 patch.insert(&Entry::new(¤t.raw));
1316 Ok(patch)
1317 }
1318}
1319
1320impl<Blobs> CommitSelector<Blobs> for Parents
1321where
1322 Blobs: BlobStore<Blake3>,
1323{
1324 fn select(
1325 self,
1326 ws: &mut Workspace<Blobs>,
1327 ) -> Result<
1328 CommitSet,
1329 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1330 > {
1331 let meta: TribleSet = ws.get(self.0).map_err(WorkspaceCheckoutError::Storage)?;
1332 let mut result = CommitSet::new();
1333 for (p,) in find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }])) {
1334 result.insert(&Entry::new(&p.raw));
1335 }
1336 Ok(result)
1337 }
1338}
1339
1340impl<Blobs> CommitSelector<Blobs> for SymmetricDiff
1341where
1342 Blobs: BlobStore<Blake3>,
1343{
1344 fn select(
1345 self,
1346 ws: &mut Workspace<Blobs>,
1347 ) -> Result<
1348 CommitSet,
1349 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1350 > {
1351 let a = collect_reachable(ws, self.0)?;
1352 let b = collect_reachable(ws, self.1)?;
1353 let inter = a.intersect(&b);
1354 let mut union = a;
1355 union.union(b);
1356 Ok(union.difference(&inter))
1357 }
1358}
1359
1360impl<A, B, Blobs> CommitSelector<Blobs> for Union<A, B>
1361where
1362 A: CommitSelector<Blobs>,
1363 B: CommitSelector<Blobs>,
1364 Blobs: BlobStore<Blake3>,
1365{
1366 fn select(
1367 self,
1368 ws: &mut Workspace<Blobs>,
1369 ) -> Result<
1370 CommitSet,
1371 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1372 > {
1373 let mut left = self.left.select(ws)?;
1374 let right = self.right.select(ws)?;
1375 left.union(right);
1376 Ok(left)
1377 }
1378}
1379
1380impl<A, B, Blobs> CommitSelector<Blobs> for Intersect<A, B>
1381where
1382 A: CommitSelector<Blobs>,
1383 B: CommitSelector<Blobs>,
1384 Blobs: BlobStore<Blake3>,
1385{
1386 fn select(
1387 self,
1388 ws: &mut Workspace<Blobs>,
1389 ) -> Result<
1390 CommitSet,
1391 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1392 > {
1393 let left = self.left.select(ws)?;
1394 let right = self.right.select(ws)?;
1395 Ok(left.intersect(&right))
1396 }
1397}
1398
1399impl<A, B, Blobs> CommitSelector<Blobs> for Difference<A, B>
1400where
1401 A: CommitSelector<Blobs>,
1402 B: CommitSelector<Blobs>,
1403 Blobs: BlobStore<Blake3>,
1404{
1405 fn select(
1406 self,
1407 ws: &mut Workspace<Blobs>,
1408 ) -> Result<
1409 CommitSet,
1410 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1411 > {
1412 let left = self.left.select(ws)?;
1413 let right = self.right.select(ws)?;
1414 Ok(left.difference(&right))
1415 }
1416}
1417
impl<S, F, Blobs> CommitSelector<Blobs> for Filter<S, F>
where
    Blobs: BlobStore<Blake3>,
    S: CommitSelector<Blobs>,
    F: for<'x, 'y> Fn(&'x TribleSet, &'y TribleSet) -> bool,
{
    /// Evaluates the inner selector, then keeps only commits for which
    /// `filter(commit_metadata, content_payload)` returns true.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let patch = self.selector.select(ws)?;
        let mut result = CommitSet::new();
        let filter = self.filter;
        for raw in patch.iter() {
            let handle = Value::new(*raw);
            // Load the commit metadata for this candidate.
            let meta: TribleSet = ws.get(handle).map_err(WorkspaceCheckoutError::Storage)?;

            // A commit must reference exactly one content payload; anything
            // else is malformed metadata.
            let Ok((content_handle,)) = find!(
                (c: Value<_>),
                pattern!(&meta, [{ content: ?c }])
            )
            .exactly_one() else {
                return Err(WorkspaceCheckoutError::BadCommitMetadata());
            };

            let payload: TribleSet = ws
                .get(content_handle)
                .map_err(WorkspaceCheckoutError::Storage)?;

            if filter(&meta, &payload) {
                result.insert(&Entry::new(raw));
            }
        }
        Ok(result)
    }
}
1457
/// Selector: commits in the current head's ancestry that touch an entity.
pub struct HistoryOf(pub Id);

/// Selects all commits in the workspace history whose content payload
/// mentions `entity`.
pub fn history_of(entity: Id) -> HistoryOf {
    HistoryOf(entity)
}
1465
impl<Blobs> CommitSelector<Blobs> for HistoryOf
where
    Blobs: BlobStore<Blake3>,
{
    /// Filters the current head's ancestry down to commits whose content
    /// payload contains at least one trible about the entity.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // An empty branch has no history.
        let Some(head_) = ws.head else {
            return Ok(CommitSet::new());
        };
        let entity = self.0;
        filter(
            ancestors(head_),
            move |_: &TribleSet, payload: &TribleSet| payload.iter().any(|t| t.e() == &entity),
        )
        .select(ws)
    }
}
1488
1489fn collect_reachable_from_patch<Blobs: BlobStore<Blake3>>(
1497 ws: &mut Workspace<Blobs>,
1498 patch: CommitSet,
1499) -> Result<
1500 CommitSet,
1501 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1502> {
1503 let mut result = CommitSet::new();
1504 for raw in patch.iter() {
1505 let handle = Value::new(*raw);
1506 let reach = collect_reachable(ws, handle)?;
1507 result.union(reach);
1508 }
1509 Ok(result)
1510}
1511
1512fn collect_reachable_from_patch_until<Blobs: BlobStore<Blake3>>(
1513 ws: &mut Workspace<Blobs>,
1514 seeds: CommitSet,
1515 stop: &CommitSet,
1516) -> Result<
1517 CommitSet,
1518 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1519> {
1520 let mut visited = HashSet::new();
1521 let mut stack: Vec<CommitHandle> = seeds.iter().map(|raw| Value::new(*raw)).collect();
1522 let mut result = CommitSet::new();
1523
1524 while let Some(commit) = stack.pop() {
1525 if !visited.insert(commit) {
1526 continue;
1527 }
1528
1529 if stop.get(&commit.raw).is_some() {
1530 continue;
1531 }
1532
1533 result.insert(&Entry::new(&commit.raw));
1534
1535 let meta: TribleSet = ws
1536 .local_blobs
1537 .reader()
1538 .unwrap()
1539 .get(commit)
1540 .or_else(|_| ws.base_blobs.get(commit))
1541 .map_err(WorkspaceCheckoutError::Storage)?;
1542
1543 for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
1544 stack.push(p);
1545 }
1546 }
1547
1548 Ok(result)
1549}
1550
impl<T, Blobs> CommitSelector<Blobs> for std::ops::Range<T>
where
    T: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    /// `start..end`: selects commits reachable from `end`'s selection,
    /// pruning the walk at commits selected by `start` (which are excluded).
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // Evaluate `end` first, then `start`; both may walk the graph.
        let end_patch = self.end.select(ws)?;
        let start_patch = self.start.select(ws)?;

        collect_reachable_from_patch_until(ws, end_patch, &start_patch)
    }
}
1569
impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeFrom<T>
where
    T: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    /// `start..`: selects commits reachable from the current head, pruning
    /// the walk at commits selected by `start` (which are excluded).
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // An empty workspace selects nothing.
        let Some(head_) = ws.head else {
            return Ok(CommitSet::new());
        };
        let exclude_patch = self.start.select(ws)?;

        // Seed the reachability walk with just the head commit.
        let mut head_patch = CommitSet::new();
        head_patch.insert(&Entry::new(&head_.raw));

        collect_reachable_from_patch_until(ws, head_patch, &exclude_patch)
    }
}
1593
1594impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeTo<T>
1595where
1596 T: CommitSelector<Blobs>,
1597 Blobs: BlobStore<Blake3>,
1598{
1599 fn select(
1600 self,
1601 ws: &mut Workspace<Blobs>,
1602 ) -> Result<
1603 CommitSet,
1604 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1605 > {
1606 let end_patch = self.end.select(ws)?;
1607 collect_reachable_from_patch(ws, end_patch)
1608 }
1609}
1610
1611impl<Blobs> CommitSelector<Blobs> for std::ops::RangeFull
1612where
1613 Blobs: BlobStore<Blake3>,
1614{
1615 fn select(
1616 self,
1617 ws: &mut Workspace<Blobs>,
1618 ) -> Result<
1619 CommitSet,
1620 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1621 > {
1622 let Some(head_) = ws.head else {
1623 return Ok(CommitSet::new());
1624 };
1625 collect_reachable(ws, head_)
1626 }
1627}
1628
impl<Blobs> CommitSelector<Blobs> for TimeRange
where
    Blobs: BlobStore<Blake3>,
{
    /// Selects those ancestors of the head whose `timestamp` interval
    /// overlaps the queried `[start, end]` range.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // No head means an empty workspace: nothing to select.
        let Some(head_) = ws.head else {
            return Ok(CommitSet::new());
        };
        let start = self.0;
        let end = self.1;
        filter(
            ancestors(head_),
            move |meta: &TribleSet, _payload: &TribleSet| {
                // Commits without a timestamp simply don't match.
                // NOTE(review): a *duplicated* timestamp makes `at_most_one`
                // return Err, which is also treated as non-matching rather
                // than reported as malformed — confirm intended.
                if let Ok(Some((ts,))) =
                    find!((t: Value<_>), pattern!(meta, [{ timestamp: ?t }])).at_most_one()
                {
                    // The stored value decodes to an (Epoch, Epoch) interval
                    // (presumably NsTAIInterval); the commit matches when the
                    // intervals intersect.
                    let (ts_start, ts_end): (Epoch, Epoch) =
                        crate::value::FromValue::from_value(&ts);
                    ts_start <= end && ts_end >= start
                } else {
                    false
                }
            },
        )
        .select(ws)
    }
}
1662
1663impl<Blobs: BlobStore<Blake3>> Workspace<Blobs> {
1664 pub fn branch_id(&self) -> Id {
1666 self.base_branch_id
1667 }
1668
1669 pub fn head(&self) -> Option<CommitHandle> {
1671 self.head
1672 }
1673
1674 pub fn set_default_metadata(&mut self, metadata_set: TribleSet) -> MetadataHandle {
1677 let handle = self
1678 .local_blobs
1679 .put(metadata_set)
1680 .expect("infallible metadata blob put");
1681 self.default_metadata = Some(handle);
1682 handle
1683 }
1684
1685 pub fn clear_default_metadata(&mut self) {
1687 self.default_metadata = None;
1688 }
1689
1690 pub fn default_metadata(&self) -> Option<MetadataHandle> {
1692 self.default_metadata
1693 }
1694
1695 pub fn put<S, T>(&mut self, item: T) -> Value<Handle<Blake3, S>>
1698 where
1699 S: BlobSchema + 'static,
1700 T: ToBlob<S>,
1701 Handle<Blake3, S>: ValueSchema,
1702 {
1703 self.local_blobs.put(item).expect("infallible blob put")
1704 }
1705
1706 pub fn get<T, S>(
1711 &mut self,
1712 handle: Value<Handle<Blake3, S>>,
1713 ) -> Result<T, <Blobs::Reader as BlobStoreGet<Blake3>>::GetError<<T as TryFromBlob<S>>::Error>>
1714 where
1715 S: BlobSchema + 'static,
1716 T: TryFromBlob<S>,
1717 Handle<Blake3, S>: ValueSchema,
1718 {
1719 self.local_blobs
1720 .reader()
1721 .unwrap()
1722 .get(handle)
1723 .or_else(|_| self.base_blobs.get(handle))
1724 }
1725
1726 pub fn commit(
1732 &mut self,
1733 content_: TribleSet,
1734 metadata_: Option<TribleSet>,
1735 message_: Option<&str>,
1736 ) {
1737 let metadata_handle = match metadata_ {
1738 Some(metadata_set) => Some(
1739 self.local_blobs
1740 .put(metadata_set)
1741 .expect("infallible metadata blob put"),
1742 ),
1743 None => self.default_metadata,
1744 };
1745 self.commit_internal(content_, metadata_handle, message_);
1746 }
1747
1748 fn commit_internal(
1749 &mut self,
1750 content_: TribleSet,
1751 metadata_handle: Option<MetadataHandle>,
1752 message_: Option<&str>,
1753 ) {
1754 let content_blob = content_.to_blob();
1756 let message_handle = message_.map(|m| self.put(m.to_string()));
1758 let parents = self.head.iter().copied();
1759
1760 let commit_set = crate::repo::commit::commit_metadata(
1761 &self.signing_key,
1762 parents,
1763 message_handle,
1764 Some(content_blob.clone()),
1765 metadata_handle,
1766 );
1767 let _ = self
1769 .local_blobs
1770 .put(content_blob)
1771 .expect("failed to put content blob");
1772 let commit_handle = self
1773 .local_blobs
1774 .put(commit_set)
1775 .expect("failed to put commit blob");
1776 self.head = Some(commit_handle);
1778 }
1779
1780 pub fn merge(&mut self, other: &mut Workspace<Blobs>) -> Result<CommitHandle, MergeError> {
1794 let other_local = other.local_blobs.reader().unwrap();
1796 for r in other_local.blobs() {
1797 let handle = r.expect("infallible blob enumeration");
1798 let blob: Blob<UnknownBlob> = other_local.get(handle).expect("infallible blob read");
1799
1800 self.local_blobs.put(blob).expect("infallible blob put");
1802 }
1803 let parents = self.head.iter().copied().chain(other.head.iter().copied());
1805 let merge_commit = commit_metadata(
1806 &self.signing_key,
1807 parents,
1808 None, None, None, );
1812 let commit_handle = self
1814 .local_blobs
1815 .put(merge_commit)
1816 .expect("failed to put merge commit blob");
1817 self.head = Some(commit_handle);
1818
1819 Ok(commit_handle)
1820 }
1821
1822 pub fn merge_commit(
1828 &mut self,
1829 other: Value<Handle<Blake3, SimpleArchive>>,
1830 ) -> Result<CommitHandle, MergeError> {
1831 let parents = self.head.iter().copied().chain(Some(other));
1838 let merge_commit = commit_metadata(&self.signing_key, parents, None, None, None);
1839 let commit_handle = self
1840 .local_blobs
1841 .put(merge_commit)
1842 .expect("failed to put merge commit blob");
1843 self.head = Some(commit_handle);
1844 Ok(commit_handle)
1845 }
1846
1847 fn checkout_commits<I>(
1854 &mut self,
1855 commits: I,
1856 ) -> Result<
1857 TribleSet,
1858 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1859 >
1860 where
1861 I: IntoIterator<Item = CommitHandle>,
1862 {
1863 let local = self.local_blobs.reader().unwrap();
1864 let mut result = TribleSet::new();
1865 for commit in commits {
1866 let meta: TribleSet = local
1867 .get(commit)
1868 .or_else(|_| self.base_blobs.get(commit))
1869 .map_err(WorkspaceCheckoutError::Storage)?;
1870
1871 let content_opt =
1876 match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
1877 Ok(Some((c,))) => Some(c),
1878 Ok(None) => None,
1879 Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
1880 };
1881
1882 if let Some(c) = content_opt {
1883 let set: TribleSet = local
1884 .get(c)
1885 .or_else(|_| self.base_blobs.get(c))
1886 .map_err(WorkspaceCheckoutError::Storage)?;
1887 result.union(set);
1888 } else {
1889 continue;
1891 }
1892 }
1893 Ok(result)
1894 }
1895
1896 fn checkout_commits_metadata<I>(
1897 &mut self,
1898 commits: I,
1899 ) -> Result<
1900 TribleSet,
1901 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1902 >
1903 where
1904 I: IntoIterator<Item = CommitHandle>,
1905 {
1906 let local = self.local_blobs.reader().unwrap();
1907 let mut result = TribleSet::new();
1908 for commit in commits {
1909 let meta: TribleSet = local
1910 .get(commit)
1911 .or_else(|_| self.base_blobs.get(commit))
1912 .map_err(WorkspaceCheckoutError::Storage)?;
1913
1914 let metadata_opt =
1915 match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
1916 Ok(Some((c,))) => Some(c),
1917 Ok(None) => None,
1918 Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
1919 };
1920
1921 if let Some(c) = metadata_opt {
1922 let set: TribleSet = local
1923 .get(c)
1924 .or_else(|_| self.base_blobs.get(c))
1925 .map_err(WorkspaceCheckoutError::Storage)?;
1926 result.union(set);
1927 }
1928 }
1929 Ok(result)
1930 }
1931
1932 fn checkout_commits_with_metadata<I>(
1933 &mut self,
1934 commits: I,
1935 ) -> Result<
1936 (TribleSet, TribleSet),
1937 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1938 >
1939 where
1940 I: IntoIterator<Item = CommitHandle>,
1941 {
1942 let local = self.local_blobs.reader().unwrap();
1943 let mut data = TribleSet::new();
1944 let mut metadata_set = TribleSet::new();
1945 for commit in commits {
1946 let meta: TribleSet = local
1947 .get(commit)
1948 .or_else(|_| self.base_blobs.get(commit))
1949 .map_err(WorkspaceCheckoutError::Storage)?;
1950
1951 let content_opt =
1952 match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
1953 Ok(Some((c,))) => Some(c),
1954 Ok(None) => None,
1955 Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
1956 };
1957
1958 if let Some(c) = content_opt {
1959 let set: TribleSet = local
1960 .get(c)
1961 .or_else(|_| self.base_blobs.get(c))
1962 .map_err(WorkspaceCheckoutError::Storage)?;
1963 data.union(set);
1964 }
1965
1966 let metadata_opt =
1967 match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
1968 Ok(Some((c,))) => Some(c),
1969 Ok(None) => None,
1970 Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
1971 };
1972
1973 if let Some(c) = metadata_opt {
1974 let set: TribleSet = local
1975 .get(c)
1976 .or_else(|_| self.base_blobs.get(c))
1977 .map_err(WorkspaceCheckoutError::Storage)?;
1978 metadata_set.union(set);
1979 }
1980 }
1981 Ok((data, metadata_set))
1982 }
1983
1984 pub fn checkout<R>(
1988 &mut self,
1989 spec: R,
1990 ) -> Result<
1991 TribleSet,
1992 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
1993 >
1994 where
1995 R: CommitSelector<Blobs>,
1996 {
1997 let patch = spec.select(self)?;
1998 let commits = patch.iter().map(|raw| Value::new(*raw));
1999 self.checkout_commits(commits)
2000 }
2001
2002 pub fn checkout_metadata<R>(
2005 &mut self,
2006 spec: R,
2007 ) -> Result<
2008 TribleSet,
2009 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
2010 >
2011 where
2012 R: CommitSelector<Blobs>,
2013 {
2014 let patch = spec.select(self)?;
2015 let commits = patch.iter().map(|raw| Value::new(*raw));
2016 self.checkout_commits_metadata(commits)
2017 }
2018
2019 pub fn checkout_with_metadata<R>(
2022 &mut self,
2023 spec: R,
2024 ) -> Result<
2025 (TribleSet, TribleSet),
2026 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
2027 >
2028 where
2029 R: CommitSelector<Blobs>,
2030 {
2031 let patch = spec.select(self)?;
2032 let commits = patch.iter().map(|raw| Value::new(*raw));
2033 self.checkout_commits_with_metadata(commits)
2034 }
2035}
2036
/// Errors surfaced while selecting or materializing commits in a workspace.
#[derive(Debug)]
pub enum WorkspaceCheckoutError<GetErr: Error> {
    /// Fetching or decoding a blob from local or base storage failed.
    Storage(GetErr),
    /// A commit's metadata did not have the expected shape (e.g. a
    /// duplicated `content` or `metadata` attribute).
    BadCommitMetadata(),
}
2044
2045impl<E: Error + fmt::Debug> fmt::Display for WorkspaceCheckoutError<E> {
2046 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2047 match self {
2048 WorkspaceCheckoutError::Storage(e) => write!(f, "storage error: {e}"),
2049 WorkspaceCheckoutError::BadCommitMetadata() => {
2050 write!(f, "commit metadata malformed")
2051 }
2052 }
2053 }
2054}
2055
// Marker impl: the default `Error` methods suffice given Display/Debug.
impl<E: Error + fmt::Debug> Error for WorkspaceCheckoutError<E> {}
2057
2058fn collect_reachable<Blobs: BlobStore<Blake3>>(
2059 ws: &mut Workspace<Blobs>,
2060 from: CommitHandle,
2061) -> Result<
2062 CommitSet,
2063 WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
2064> {
2065 let mut visited = HashSet::new();
2066 let mut stack = vec![from];
2067 let mut result = CommitSet::new();
2068
2069 while let Some(commit) = stack.pop() {
2070 if !visited.insert(commit) {
2071 continue;
2072 }
2073 result.insert(&Entry::new(&commit.raw));
2074
2075 let meta: TribleSet = ws
2076 .local_blobs
2077 .reader()
2078 .unwrap()
2079 .get(commit)
2080 .or_else(|_| ws.base_blobs.get(commit))
2081 .map_err(WorkspaceCheckoutError::Storage)?;
2082
2083 for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
2084 stack.push(p);
2085 }
2086 }
2087
2088 Ok(result)
2089}