#![allow(clippy::type_complexity)]
pub mod branch;
pub mod commit;
pub mod hybridstore;
pub mod memoryrepo;
#[cfg(feature = "object-store")]
pub mod objectstore;
pub mod pile;
/// Storage backends that need an explicit shutdown step (flushing buffers,
/// releasing file locks, etc.) implement this trait.
pub trait StorageClose {
    /// Error reported when the backend fails to shut down cleanly.
    type Error: std::error::Error;
    /// Consumes the storage and performs the shutdown.
    fn close(self) -> Result<(), Self::Error>;
}
impl<Storage> Repository<Storage>
where
    Storage: BlobStore<Blake3> + BranchStore<Blake3> + StorageClose,
{
    /// Consumes the repository and closes its underlying storage.
    ///
    /// Only available when the storage backend implements [`StorageClose`].
    pub fn close(self) -> Result<(), <Storage as StorageClose>::Error> {
        self.storage.close()
    }
}
use crate::macros::pattern;
use std::collections::{HashSet, VecDeque};
use std::convert::Infallible;
use std::error::Error;
use std::fmt::Debug;
use std::fmt::{self};
use commit::commit_metadata;
use hifitime::Epoch;
use itertools::Itertools;
use crate::blob::schemas::simplearchive::UnarchiveError;
use crate::blob::schemas::UnknownBlob;
use crate::blob::Blob;
use crate::blob::BlobSchema;
use crate::blob::MemoryBlobStore;
use crate::blob::ToBlob;
use crate::blob::TryFromBlob;
use crate::find;
use crate::id::genid;
use crate::id::Id;
use crate::patch::Entry;
use crate::patch::IdentitySchema;
use crate::patch::PATCH;
use crate::prelude::valueschemas::GenId;
use crate::repo::branch::branch_metadata;
use crate::trible::TribleSet;
use crate::value::schemas::hash::Handle;
use crate::value::schemas::hash::HashProtocol;
use crate::value::Value;
use crate::value::ValueSchema;
use crate::value::VALUE_LEN;
use ed25519_dalek::SigningKey;
use crate::blob::schemas::longstring::LongString;
use crate::blob::schemas::simplearchive::SimpleArchive;
use crate::prelude::*;
use crate::value::schemas::ed25519 as ed;
use crate::value::schemas::hash::Blake3;
use crate::value::schemas::shortstring::ShortString;
use crate::value::schemas::time::NsTAIInterval;
// Attribute ids used by the repository's commit and branch metadata.
// NOTE: plain `//` comments are used here because doc comments would become
// tokens inside the macro invocation.
attributes! {
    // Handle of the commit's content archive.
    "4DD4DDD05CC31734B03ABB4E43188B1F" as pub content: Handle<Blake3, SimpleArchive>;
    // Handle of an auxiliary metadata archive attached to a commit.
    "88B59BD497540AC5AECDB7518E737C87" as pub metadata: Handle<Blake3, SimpleArchive>;
    // Handle of a parent commit; repeated for merge commits.
    "317044B612C690000D798CA660ECFD2A" as pub parent: Handle<Blake3, SimpleArchive>;
    // Long-form commit message stored as a separate blob.
    "B59D147839100B6ED4B165DF76EDF3BB" as pub message: Handle<Blake3, LongString>;
    // Inline short commit message.
    "12290C0BE0E9207E324F24DDE0D89300" as pub short_message: ShortString;
    // Handle of the commit a branch currently points at.
    "272FBC56108F336C4D2E17289468C35F" as pub head: Handle<Blake3, SimpleArchive>;
    // Branch identity.
    "8694CC73AF96A5E1C7635C677D1B928A" as pub branch: GenId;
    // Commit timestamp as a TAI nanosecond interval.
    "71FF566AB4E3119FC2C5E66A18979586" as pub timestamp: NsTAIInterval;
    // Ed25519 signature: signer public key plus R and S components.
    "ADB4FFAD247C886848161297EFF5A05B" as pub signed_by: ed::ED25519PublicKey;
    "9DF34F84959928F93A3C40AEB6E9E499" as pub signature_r: ed::ED25519RComponent;
    "1ACE03BF70242B289FDF00E4327C3BC6" as pub signature_s: ed::ED25519SComponent;
}
/// Enumeration of every blob handle held by a store reader.
pub trait BlobStoreList<H: HashProtocol> {
    /// Iterator over stored handles; items are fallible because listing may
    /// touch backing storage.
    type Iter<'a>: Iterator<Item = Result<Value<Handle<H, UnknownBlob>>, Self::Err>>
    where
        Self: 'a;
    /// Error produced while enumerating blobs.
    type Err: Error + Debug + Send + Sync + 'static;
    /// Iterates over all blob handles in the store.
    fn blobs<'a>(&'a self) -> Self::Iter<'a>;
    /// Iterates over handles not present in `_old`.
    ///
    /// Default implementation ignores `_old` and yields everything; backends
    /// may override with a cheaper incremental listing.
    fn blobs_diff<'a>(&'a self, _old: &Self) -> Self::Iter<'a> {
        self.blobs()
    }
}
/// Bookkeeping information a store tracks per blob.
#[derive(Debug, Clone)]
pub struct BlobMetadata {
    // Creation/insertion time; unit is backend-defined (TODO confirm epoch/unit).
    pub timestamp: u64,
    // Blob length in bytes.
    pub length: u64,
}
/// Access to per-blob [`BlobMetadata`].
pub trait BlobStoreMeta<H: HashProtocol> {
    /// Error produced while reading metadata.
    type MetaError: std::error::Error + Send + Sync + 'static;
    /// Returns the metadata for `handle`, or `None` if the blob is unknown.
    fn metadata<S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<Option<BlobMetadata>, Self::MetaError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
/// Explicit removal of individual blobs from a store.
pub trait BlobStoreForget<H: HashProtocol> {
    /// Error produced while forgetting a blob.
    type ForgetError: std::error::Error + Send + Sync + 'static;
    /// Removes the blob identified by `handle` from the store.
    fn forget<S>(&mut self, handle: Value<Handle<H, S>>) -> Result<(), Self::ForgetError>
    where
        S: BlobSchema + 'static,
        Handle<H, S>: ValueSchema;
}
/// Retrieval of blobs, decoded into any type convertible from the schema.
pub trait BlobStoreGet<H: HashProtocol> {
    /// Error produced by `get`; generic over the decoding error of the
    /// requested target type.
    type GetError<E: std::error::Error + Send + Sync + 'static>: Error + Send + Sync + 'static;
    /// Loads the blob behind `handle` and converts it to `T`.
    fn get<T, S>(
        &self,
        handle: Value<Handle<H, S>>,
    ) -> Result<T, Self::GetError<<T as TryFromBlob<S>>::Error>>
    where
        S: BlobSchema + 'static,
        T: TryFromBlob<S>,
        Handle<H, S>: ValueSchema;
}
/// Insertion of blobs into a store.
pub trait BlobStorePut<H: HashProtocol> {
    /// Error produced while storing a blob.
    type PutError: Error + Debug + Send + Sync + 'static;
    /// Encodes `item` under schema `S`, stores it, and returns its handle.
    fn put<S, T>(&mut self, item: T) -> Result<Value<Handle<H, S>>, Self::PutError>
    where
        S: BlobSchema + 'static,
        T: ToBlob<S>,
        Handle<H, S>: ValueSchema;
}
/// A writable blob store that can hand out consistent read snapshots.
pub trait BlobStore<H: HashProtocol>: BlobStorePut<H> {
    /// Snapshot type used for reads; cheap to clone and comparable so callers
    /// can detect when two readers observe the same state.
    type Reader: BlobStoreGet<H> + BlobStoreList<H> + Clone + Send + PartialEq + Eq + 'static;
    /// Error produced while creating a reader.
    type ReaderError: Error + Debug + Send + Sync + 'static;
    /// Creates a read snapshot of the current store contents.
    fn reader(&mut self) -> Result<Self::Reader, Self::ReaderError>;
}
/// Garbage-collection hook: mark the given handles as live.
pub trait BlobStoreKeep<H: HashProtocol> {
    /// Marks every handle in `handles` as reachable/retained.
    fn keep<I>(&mut self, handles: I)
    where
        I: IntoIterator<Item = Value<Handle<H, UnknownBlob>>>;
}
/// Discovery of blob-to-blob references for reachability traversals.
pub trait BlobChildren<H: HashProtocol>: BlobStoreGet<H> {
    /// Returns the handles of stored blobs referenced by `handle`.
    ///
    /// The default implementation reinterprets the blob's payload as a dense
    /// sequence of `VALUE_LEN`-byte candidates and keeps each one that
    /// resolves to a blob in this store. A missing blob yields no children.
    fn children(
        &self,
        handle: Value<Handle<H, UnknownBlob>>,
    ) -> Vec<Value<Handle<H, UnknownBlob>>> {
        // Nothing to scan if the blob itself cannot be loaded.
        let Ok(blob) = self.get::<Blob<UnknownBlob>, UnknownBlob>(handle) else {
            return Vec::new();
        };
        // Walk the payload in exact VALUE_LEN windows; a trailing partial
        // window can never encode a handle and is ignored.
        blob.bytes
            .as_ref()
            .chunks_exact(VALUE_LEN)
            .filter_map(|window| {
                let raw: [u8; VALUE_LEN] =
                    window.try_into().expect("chunk is exactly VALUE_LEN bytes");
                let candidate = Value::<Handle<H, UnknownBlob>>::new(raw);
                // Only candidates that actually resolve count as children.
                if self.get::<anybytes::Bytes, UnknownBlob>(candidate).is_ok() {
                    Some(candidate)
                } else {
                    None
                }
            })
            .collect()
    }
}
/// Outcome of a compare-and-set branch update.
#[derive(Debug)]
pub enum PushResult<H>
where
    H: HashProtocol,
{
    /// The update was applied.
    Success(),
    /// The expected old head did not match; carries the head actually found.
    Conflict(Option<Value<Handle<H, SimpleArchive>>>),
}
/// Storage of branch heads, keyed by branch [`Id`].
pub trait BranchStore<H: HashProtocol> {
    /// Error produced while listing branches.
    type BranchesError: Error + Debug + Send + Sync + 'static;
    /// Error produced while reading a branch head.
    type HeadError: Error + Debug + Send + Sync + 'static;
    /// Error produced while updating a branch head.
    type UpdateError: Error + Debug + Send + Sync + 'static;
    /// Iterator over branch ids.
    type ListIter<'a>: Iterator<Item = Result<Id, Self::BranchesError>>
    where
        Self: 'a;
    /// Lists all branch ids known to the store.
    fn branches<'a>(&'a mut self) -> Result<Self::ListIter<'a>, Self::BranchesError>;
    /// Returns the current head (branch-metadata handle) of `id`, if any.
    fn head(&mut self, id: Id) -> Result<Option<Value<Handle<H, SimpleArchive>>>, Self::HeadError>;
    /// Compare-and-set: moves `id` from `old` to `new`, reporting a
    /// [`PushResult::Conflict`] if the stored head is not `old`.
    fn update(
        &mut self,
        id: Id,
        old: Option<Value<Handle<H, SimpleArchive>>>,
        new: Option<Value<Handle<H, SimpleArchive>>>,
    ) -> Result<PushResult<H>, Self::UpdateError>;
}
/// Error raised while copying blobs between stores (see [`transfer`]),
/// tagged by the phase that failed.
#[derive(Debug)]
pub enum TransferError<ListErr, LoadErr, StoreErr> {
    /// Enumerating the source store failed.
    List(ListErr),
    /// Loading a blob from the source failed.
    Load(LoadErr),
    /// Storing a blob into the target failed.
    Store(StoreErr),
}
impl<ListErr, LoadErr, StoreErr> fmt::Display for TransferError<ListErr, LoadErr, StoreErr> {
    /// Human-readable summary naming the transfer phase that failed.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Previously every variant rendered the same message, which made
        // list/load/store failures indistinguishable in logs. Name the phase
        // here; the underlying cause remains available via `Error::source`.
        let phase = match self {
            Self::List(_) => "list",
            Self::Load(_) => "load",
            Self::Store(_) => "store",
        };
        write!(f, "failed to transfer blob ({phase})")
    }
}
impl<ListErr, LoadErr, StoreErr> Error for TransferError<ListErr, LoadErr, StoreErr>
where
    ListErr: Debug + Error + 'static,
    LoadErr: Debug + Error + 'static,
    StoreErr: Debug + Error + 'static,
{
    /// Exposes the phase-specific error as the source of this failure.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match self {
            Self::List(e) => Some(e),
            Self::Load(e) => Some(e),
            Self::Store(e) => Some(e),
        }
    }
}
/// Copies the blobs named by `handles` from `source` into `target`.
///
/// Returns a lazy iterator that, for each input handle, yields the pair of
/// (source handle, newly assigned target handle) — the two may differ because
/// the stores can use different hash protocols. Failures to load or store an
/// individual blob surface as [`TransferError`] items; the iterator itself
/// never lists, so the `List` slot is [`Infallible`].
pub fn transfer<'a, BS, BT, HS, HT, Handles>(
    source: &'a BS,
    target: &'a mut BT,
    handles: Handles,
) -> impl Iterator<
    Item = Result<
        (
            Value<Handle<HS, UnknownBlob>>,
            Value<Handle<HT, UnknownBlob>>,
        ),
        TransferError<
            Infallible,
            <BS as BlobStoreGet<HS>>::GetError<Infallible>,
            <BT as BlobStorePut<HT>>::PutError,
        >,
    >,
> + 'a
where
    BS: BlobStoreGet<HS> + 'a,
    BT: BlobStorePut<HT> + 'a,
    HS: 'static + HashProtocol,
    HT: 'static + HashProtocol,
    Handles: IntoIterator<Item = Value<Handle<HS, UnknownBlob>>> + 'a,
    Handles::IntoIter: 'a,
{
    handles.into_iter().map(move |source_handle| {
        // Load from the source first; a miss fails only this element.
        let blob: Blob<UnknownBlob> = match source.get(source_handle) {
            Ok(blob) => blob,
            Err(e) => return Err(TransferError::Load(e)),
        };
        // Re-insert into the target and pair the old handle with the new one.
        match target.put(blob) {
            Ok(target_handle) => Ok((source_handle, target_handle)),
            Err(e) => Err(TransferError::Store(e)),
        }
    })
}
/// Breadth-first traversal state over the blob reference graph rooted at a
/// set of handles; produced by [`reachable`].
pub struct ReachableHandles<'a, BS, H>
where
    BS: BlobChildren<H>,
    H: 'static + HashProtocol,
{
    // Store used to resolve children of each visited blob.
    source: &'a BS,
    // Frontier of handles still to visit.
    queue: VecDeque<Value<Handle<H, UnknownBlob>>>,
    // Raw bytes of handles already emitted, to break cycles.
    visited: HashSet<[u8; VALUE_LEN]>,
}
impl<'a, BS, H> ReachableHandles<'a, BS, H>
where
BS: BlobChildren<H>,
H: 'static + HashProtocol,
{
fn new(source: &'a BS, roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>) -> Self {
let mut queue = VecDeque::new();
for handle in roots {
queue.push_back(handle);
}
Self {
source,
queue,
visited: HashSet::new(),
}
}
}
impl<'a, BS, H> Iterator for ReachableHandles<'a, BS, H>
where
    BS: BlobChildren<H>,
    H: 'static + HashProtocol,
{
    type Item = Value<Handle<H, UnknownBlob>>;
    /// Yields the next unvisited handle in breadth-first order, enqueueing
    /// its children for later visits.
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            // Frontier exhausted => traversal complete.
            let current = self.queue.pop_front()?;
            // `insert` returning false means we already emitted this handle.
            if !self.visited.insert(current.raw) {
                continue;
            }
            // Schedule unseen children; visited ones would be skipped above
            // anyway, but filtering here keeps the queue small.
            for child in self.source.children(current) {
                if !self.visited.contains(&child.raw) {
                    self.queue.push_back(child);
                }
            }
            return Some(current);
        }
    }
}
/// Returns an iterator over every blob handle reachable from `roots` through
/// the reference graph exposed by [`BlobChildren::children`].
pub fn reachable<'a, BS, H>(
    source: &'a BS,
    roots: impl IntoIterator<Item = Value<Handle<H, UnknownBlob>>>,
) -> ReachableHandles<'a, BS, H>
where
    BS: BlobChildren<H>,
    H: 'static + HashProtocol,
{
    ReachableHandles::new(source, roots)
}
/// Interprets every value in `set` (via its value-ordered `vae` index) as a
/// candidate blob handle.
///
/// This is a heuristic: any value's leading `VALUE_LEN` bytes are reinterpreted
/// as a handle, whether or not a blob with that hash actually exists.
pub fn potential_handles<'a, H>(
    set: &'a TribleSet,
) -> impl Iterator<Item = Value<Handle<H, UnknownBlob>>> + 'a
where
    H: HashProtocol,
{
    set.vae.iter().map(|raw| {
        // The value occupies the leading VALUE_LEN bytes of each vae entry.
        let bytes: [u8; VALUE_LEN] = raw[0..VALUE_LEN]
            .try_into()
            .expect("vae entries start with VALUE_LEN value bytes");
        Value::<Handle<H, UnknownBlob>>::new(bytes)
    })
}
/// Error raised while persisting a commit, distinguishing whether the commit
/// content or the commit metadata blob failed to store.
#[derive(Debug)]
pub enum CreateCommitError<BlobErr: Error + Debug + Send + Sync + 'static> {
    /// Storing the commit's content blob failed.
    ContentStorageError(BlobErr),
    /// Storing the commit's metadata blob failed.
    CommitStorageError(BlobErr),
}
impl<BlobErr: Error + Debug + Send + Sync + 'static> fmt::Display for CreateCommitError<BlobErr> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            CreateCommitError::ContentStorageError(e) => write!(f, "Content storage failed: {e}"),
            CreateCommitError::CommitStorageError(e) => {
                write!(f, "Commit metadata storage failed: {e}")
            }
        }
    }
}
impl<BlobErr: Error + Debug + Send + Sync + 'static> Error for CreateCommitError<BlobErr> {
    /// Exposes the underlying storage error as the source.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match self {
            CreateCommitError::ContentStorageError(e) => Some(e),
            CreateCommitError::CommitStorageError(e) => Some(e),
        }
    }
}
/// Error raised while merging two workspaces.
#[derive(Debug)]
pub enum MergeError {
    /// The workspaces do not share a repository / branch lineage.
    DifferentRepos(),
}
/// Error raised by [`Repository::push`] / [`Repository::try_push`].
#[derive(Debug)]
pub enum PushError<Storage: BranchStore<Blake3> + BlobStore<Blake3>> {
    /// Listing branches failed.
    StorageBranches(Storage::BranchesError),
    /// Creating a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// The compare-and-set branch update failed.
    BranchUpdate(Storage::UpdateError),
    /// The stored branch metadata did not have the expected shape.
    BadBranchMetadata(),
    /// Merging conflicting workspaces failed.
    MergeError(MergeError),
}
impl<Storage> From<MergeError> for PushError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Lets `?` lift merge failures into push failures.
    fn from(e: MergeError) -> Self {
        PushError::MergeError(e)
    }
}
/// Error raised while creating branches.
#[derive(Debug)]
pub enum BranchError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Creating a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// Writing a blob failed.
    StoragePut(<Storage as BlobStorePut<Blake3>>::PutError),
    /// Reading the branch head failed.
    BranchHead(Storage::HeadError),
    /// Updating the branch head failed.
    BranchUpdate(Storage::UpdateError),
    /// A branch with the freshly generated id already exists.
    AlreadyExists(),
    /// No branch with the given id was found.
    BranchNotFound(Id),
}
/// Error raised while resolving a branch by name.
#[derive(Debug)]
pub enum LookupError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Listing branches failed.
    StorageBranches(Storage::BranchesError),
    /// Reading a branch head failed.
    BranchHead(Storage::HeadError),
    /// Creating a blob reader failed.
    StorageReader(<Storage as BlobStore<Blake3>>::ReaderError),
    /// Reading a blob failed.
    StorageGet(
        <<Storage as BlobStore<Blake3>>::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
    ),
    /// More than one branch carries the requested name.
    NameConflict(Vec<Id>),
    /// The stored branch metadata did not have the expected shape.
    BadBranchMetadata(),
}
/// Error raised by [`Repository::ensure_branch`], covering both the lookup
/// and the creation fallback.
#[derive(Debug)]
pub enum EnsureBranchError<Storage>
where
    Storage: BranchStore<Blake3> + BlobStore<Blake3>,
{
    /// Resolving the branch name failed.
    Lookup(LookupError<Storage>),
    /// Creating the branch failed.
    Create(BranchError<Storage>),
}
/// A trible repository: blob + branch storage plus the signing key and the
/// commit-metadata template used for new commits.
pub struct Repository<Storage: BlobStore<Blake3> + BranchStore<Blake3>> {
    // Combined blob/branch backend.
    storage: Storage,
    // Default key used to sign branches and commits.
    signing_key: SigningKey,
    // Handle of the metadata blob stored by `Repository::new`.
    commit_metadata: MetadataHandle,
}
/// Error raised by [`Repository::pull`] / [`Repository::pull_with_key`].
pub enum PullError<BranchStorageErr, BlobReaderErr, BlobStorageErr>
where
    BranchStorageErr: Error,
    BlobReaderErr: Error,
    BlobStorageErr: Error,
{
    /// The requested branch id has no head in the branch store.
    BranchNotFound(Id),
    /// Reading the branch head failed.
    BranchStorage(BranchStorageErr),
    /// Creating a blob reader failed.
    BlobReader(BlobReaderErr),
    /// Reading the branch-metadata blob failed.
    BlobStorage(BlobStorageErr),
    /// The branch metadata did not contain a single well-formed head.
    BadBranchMetadata(),
}
// Manual Debug so the impl only requires Debug on the error parameters
// actually embedded in each variant.
impl<B, R, C> fmt::Debug for PullError<B, R, C>
where
    B: Error + fmt::Debug,
    R: Error + fmt::Debug,
    C: Error + fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            PullError::BranchNotFound(id) => f.debug_tuple("BranchNotFound").field(id).finish(),
            PullError::BranchStorage(e) => f.debug_tuple("BranchStorage").field(e).finish(),
            PullError::BlobReader(e) => f.debug_tuple("BlobReader").field(e).finish(),
            PullError::BlobStorage(e) => f.debug_tuple("BlobStorage").field(e).finish(),
            PullError::BadBranchMetadata() => f.debug_tuple("BadBranchMetadata").finish(),
        }
    }
}
impl<Storage> Repository<Storage>
where
    Storage: BlobStore<Blake3> + BranchStore<Blake3>,
{
    /// Builds a repository over `storage`, persisting `commit_metadata` as a
    /// blob so later commits can reference it by handle.
    pub fn new(
        mut storage: Storage,
        signing_key: SigningKey,
        commit_metadata: TribleSet,
    ) -> Result<Self, <Storage as BlobStorePut<Blake3>>::PutError> {
        let commit_metadata = storage.put(commit_metadata)?;
        Ok(Self {
            storage,
            signing_key,
            commit_metadata,
        })
    }
    /// Consumes the repository and returns the underlying storage.
    pub fn into_storage(self) -> Storage {
        self.storage
    }
    /// Shared access to the underlying storage.
    pub fn storage(&self) -> &Storage {
        &self.storage
    }
    /// Exclusive access to the underlying storage.
    pub fn storage_mut(&mut self) -> &mut Storage {
        &mut self.storage
    }
    /// Replaces the default signing key used for new branches and workspaces.
    pub fn set_signing_key(&mut self, signing_key: SigningKey) {
        self.signing_key = signing_key;
    }
    /// Handle of the commit-metadata blob stored at construction time.
    pub fn commit_metadata(&self) -> MetadataHandle {
        self.commit_metadata
    }
    /// Creates a branch named `branch_name`, optionally pointing at `commit`,
    /// signed with the repository's default key.
    pub fn create_branch(
        &mut self,
        branch_name: &str,
        commit: Option<CommitHandle>,
    ) -> Result<ExclusiveId, BranchError<Storage>> {
        self.create_branch_with_key(branch_name, commit, self.signing_key.clone())
    }
    /// Creates a branch signed with an explicit `signing_key`.
    ///
    /// Stores the branch name as a blob, builds (signed) branch metadata, and
    /// installs it via compare-and-set with `old = None`; a conflict means the
    /// generated id unexpectedly already has a head.
    pub fn create_branch_with_key(
        &mut self,
        branch_name: &str,
        commit: Option<CommitHandle>,
        signing_key: SigningKey,
    ) -> Result<ExclusiveId, BranchError<Storage>> {
        let branch_id = genid();
        // Persist the branch name and keep its handle for the metadata.
        let name_blob = branch_name.to_owned().to_blob();
        let name_handle = name_blob.get_handle::<Blake3>();
        self.storage
            .put(name_blob)
            .map_err(|e| BranchError::StoragePut(e))?;
        let branch_set = if let Some(commit) = commit {
            // Load the referenced commit so the branch metadata can embed it
            // and be signed over it.
            let reader = self
                .storage
                .reader()
                .map_err(|e| BranchError::StorageReader(e))?;
            let set: TribleSet = reader.get(commit).map_err(|e| BranchError::StorageGet(e))?;
            branch::branch_metadata(&signing_key, *branch_id, name_handle, Some(set.to_blob()))
        } else {
            // No initial commit: unsigned metadata with an empty head.
            branch::branch_unsigned(*branch_id, name_handle, None)
        };
        let branch_blob = branch_set.to_blob();
        let branch_handle = self
            .storage
            .put(branch_blob)
            .map_err(|e| BranchError::StoragePut(e))?;
        // CAS from "no head" to the freshly stored metadata.
        let push_result = self
            .storage
            .update(*branch_id, None, Some(branch_handle))
            .map_err(|e| BranchError::BranchUpdate(e))?;
        match push_result {
            PushResult::Success() => Ok(branch_id),
            PushResult::Conflict(_) => Err(BranchError::AlreadyExists()),
        }
    }
    /// Resolves a branch name to its id by scanning every branch's metadata.
    ///
    /// Returns `Ok(None)` if no branch matches and
    /// [`LookupError::NameConflict`] if several do. Branches whose metadata or
    /// name blob cannot be read/parsed are silently skipped.
    pub fn lookup_branch(&mut self, name: &str) -> Result<Option<Id>, LookupError<Storage>> {
        let branch_ids: Vec<Id> = self
            .storage
            .branches()
            .map_err(LookupError::StorageBranches)?
            .collect::<Result<Vec<_>, _>>()
            .map_err(LookupError::StorageBranches)?;
        let mut matches = Vec::new();
        for branch_id in branch_ids {
            // Branch without a head: nothing to inspect.
            let Some(meta_handle) = self
                .storage
                .head(branch_id)
                .map_err(LookupError::BranchHead)?
            else {
                continue;
            };
            let reader = self.storage.reader().map_err(LookupError::StorageReader)?;
            let meta_set: TribleSet = reader.get(meta_handle).map_err(LookupError::StorageGet)?;
            // Metadata must contain exactly one name attribute to qualify.
            let Ok((name_handle,)) = find!(
                (n: Value<Handle<Blake3, LongString>>),
                pattern!(&meta_set, [{ crate::metadata::name: ?n }])
            )
            .exactly_one() else {
                continue;
            };
            // Unreadable name blobs are skipped rather than treated as errors.
            let Ok(branch_name): Result<anybytes::View<str>, _> = reader.get(name_handle) else {
                continue;
            };
            if branch_name.as_ref() == name {
                matches.push(branch_id);
            }
        }
        match matches.len() {
            0 => Ok(None),
            1 => Ok(Some(matches[0])),
            _ => Err(LookupError::NameConflict(matches)),
        }
    }
    /// Returns the id of the branch named `name`, creating it (optionally at
    /// `commit`) when it does not exist yet.
    pub fn ensure_branch(
        &mut self,
        name: &str,
        commit: Option<CommitHandle>,
    ) -> Result<Id, EnsureBranchError<Storage>> {
        match self
            .lookup_branch(name)
            .map_err(EnsureBranchError::Lookup)?
        {
            Some(id) => Ok(id),
            None => {
                let id = self
                    .create_branch(name, commit)
                    .map_err(EnsureBranchError::Create)?;
                Ok(*id)
            }
        }
    }
    /// Checks out `branch_id` into a new [`Workspace`] using the repository's
    /// default signing key.
    pub fn pull(
        &mut self,
        branch_id: Id,
    ) -> Result<
        Workspace<Storage>,
        PullError<
            Storage::HeadError,
            Storage::ReaderError,
            <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
        >,
    > {
        self.pull_with_key(branch_id, self.signing_key.clone())
    }
    /// Checks out `branch_id` into a new [`Workspace`] that will sign with
    /// `signing_key`.
    ///
    /// Loads the branch metadata, extracts its (at most one) `head` commit,
    /// and snapshots the blob store as the workspace's base.
    pub fn pull_with_key(
        &mut self,
        branch_id: Id,
        signing_key: SigningKey,
    ) -> Result<
        Workspace<Storage>,
        PullError<
            Storage::HeadError,
            Storage::ReaderError,
            <Storage::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>,
        >,
    > {
        let base_branch_meta_handle = match self.storage.head(branch_id) {
            Ok(Some(handle)) => handle,
            Ok(None) => return Err(PullError::BranchNotFound(branch_id)),
            Err(e) => return Err(PullError::BranchStorage(e)),
        };
        let reader = self.storage.reader().map_err(PullError::BlobReader)?;
        let base_branch_meta: TribleSet = match reader.get(base_branch_meta_handle) {
            Ok(meta_set) => meta_set,
            Err(e) => return Err(PullError::BlobStorage(e)),
        };
        // A branch may legitimately have no head yet; more than one head is
        // malformed metadata.
        let head_ = match find!(
            (head_: Value<_>),
            pattern!(&base_branch_meta, [{ head: ?head_ }])
        )
        .at_most_one()
        {
            Ok(Some((h,))) => Some(h),
            Ok(None) => None,
            Err(_) => return Err(PullError::BadBranchMetadata()),
        };
        let base_blobs = self.storage.reader().map_err(PullError::BlobReader)?;
        Ok(Workspace {
            base_blobs,
            local_blobs: MemoryBlobStore::new(),
            head: head_,
            base_head: head_,
            base_branch_id: branch_id,
            base_branch_meta: base_branch_meta_handle,
            signing_key,
            commit_metadata: self.commit_metadata,
        })
    }
    /// Pushes `workspace`, retrying on conflicts.
    ///
    /// Each conflict yields a workspace based on the winning head; the local
    /// workspace is merged into it and the push is retried until it succeeds.
    pub fn push(&mut self, workspace: &mut Workspace<Storage>) -> Result<(), PushError<Storage>> {
        while let Some(mut conflict_ws) = self.try_push(workspace)? {
            conflict_ws.merge(workspace)?;
            *workspace = conflict_ws;
        }
        Ok(())
    }
    /// Attempts to push `workspace` once.
    ///
    /// Uploads all workspace-local blobs, then CAS-updates the branch from the
    /// workspace's base metadata to freshly signed metadata for the new head.
    /// Returns `Ok(None)` on success (or when there is nothing to push) and
    /// `Ok(Some(conflict_ws))` when another writer moved the branch first.
    pub fn try_push(
        &mut self,
        workspace: &mut Workspace<Storage>,
    ) -> Result<Option<Workspace<Storage>>, PushError<Storage>> {
        // Copy every local blob into repository storage first so the new head
        // is fully resolvable once the branch moves. The in-memory store's
        // enumeration/reads cannot fail, hence the expects.
        let workspace_reader = workspace.local_blobs.reader().unwrap();
        for handle in workspace_reader.blobs() {
            let handle = handle.expect("infallible blob enumeration");
            let blob: Blob<UnknownBlob> =
                workspace_reader.get(handle).expect("infallible blob read");
            self.storage.put(blob).map_err(PushError::StoragePut)?;
        }
        // Head unchanged => nothing to push.
        if workspace.base_head == workspace.head {
            return Ok(None);
        }
        let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
        let base_branch_meta: TribleSet = repo_reader
            .get(workspace.base_branch_meta)
            .map_err(PushError::StorageGet)?;
        // The branch name is carried over from the existing metadata.
        let Ok((branch_name,)) = find!(
            (name: Value<Handle<Blake3, LongString>>),
            pattern!(base_branch_meta, [{ crate::metadata::name: ?name }])
        )
        .exactly_one() else {
            return Err(PushError::BadBranchMetadata());
        };
        let head_handle = workspace.head.ok_or(PushError::BadBranchMetadata())?;
        let head_: TribleSet = repo_reader
            .get(head_handle)
            .map_err(PushError::StorageGet)?;
        // Sign new branch metadata over the new head commit.
        let branch_meta = branch_metadata(
            &workspace.signing_key,
            workspace.base_branch_id,
            branch_name,
            Some(head_.to_blob()),
        );
        let branch_meta_handle = self
            .storage
            .put(branch_meta)
            .map_err(PushError::StoragePut)?;
        // CAS: only move the branch if it still points at our base metadata.
        let result = self
            .storage
            .update(
                workspace.base_branch_id,
                Some(workspace.base_branch_meta),
                Some(branch_meta_handle),
            )
            .map_err(PushError::BranchUpdate)?;
        match result {
            PushResult::Success() => {
                // Rebase the workspace onto the now-published state and drop
                // the local blobs that were uploaded above.
                workspace.base_branch_meta = branch_meta_handle;
                workspace.base_head = workspace.head;
                workspace.base_blobs = self.storage.reader().map_err(PushError::StorageReader)?;
                workspace.local_blobs = MemoryBlobStore::new();
                Ok(None)
            }
            PushResult::Conflict(conflicting_meta) => {
                // Someone else won the race: build a workspace on top of the
                // winning metadata so the caller can merge and retry.
                let conflicting_meta = conflicting_meta.ok_or(PushError::BadBranchMetadata())?;
                let repo_reader = self.storage.reader().map_err(PushError::StorageReader)?;
                let branch_meta: TribleSet = repo_reader
                    .get(conflicting_meta)
                    .map_err(PushError::StorageGet)?;
                let head_ = match find!((head_: Value<_>),
                    pattern!(&branch_meta, [{ head: ?head_ }])
                )
                .at_most_one()
                {
                    Ok(Some((h,))) => Some(h),
                    Ok(None) => None,
                    Err(_) => return Err(PushError::BadBranchMetadata()),
                };
                let conflict_ws = Workspace {
                    base_blobs: self.storage.reader().map_err(PushError::StorageReader)?,
                    local_blobs: MemoryBlobStore::new(),
                    head: head_,
                    base_head: head_,
                    base_branch_id: workspace.base_branch_id,
                    base_branch_meta: conflicting_meta,
                    signing_key: workspace.signing_key.clone(),
                    commit_metadata: workspace.commit_metadata,
                };
                Ok(Some(conflict_ws))
            }
        }
    }
}
/// Handle of a commit-metadata archive.
pub type CommitHandle = Value<Handle<Blake3, SimpleArchive>>;
// Handle of a commit-metadata-template archive (see `Repository::new`).
type MetadataHandle = Value<Handle<Blake3, SimpleArchive>>;
/// Set of commit handles, keyed by their raw `VALUE_LEN` bytes.
pub type CommitSet = PATCH<VALUE_LEN, IdentitySchema, ()>;
// Handle of a branch-metadata archive.
type BranchMetaHandle = Value<Handle<Blake3, SimpleArchive>>;
/// Result of materializing a selection of commits: the union of their facts
/// plus the set of commits that contributed them.
#[derive(Debug, Clone)]
pub struct Checkout {
    // Union of the selected commits' content tribles.
    facts: TribleSet,
    // The commits the facts were drawn from.
    commits: CommitSet,
}
/// A checkout compares equal to a plain [`TribleSet`] when its facts match;
/// the commit set is deliberately ignored.
impl PartialEq<TribleSet> for Checkout {
    fn eq(&self, other: &TribleSet) -> bool {
        self.facts.eq(other)
    }
}
/// Mirror impl so comparisons work in either operand order.
impl PartialEq<Checkout> for TribleSet {
    fn eq(&self, other: &Checkout) -> bool {
        self.eq(&other.facts)
    }
}
impl Checkout {
    /// Borrows the combined facts of the checkout.
    pub fn facts(&self) -> &TribleSet {
        &self.facts
    }
    /// Returns a clone of the commits that produced these facts.
    pub fn commits(&self) -> CommitSet {
        self.commits.clone()
    }
    /// Consumes the checkout, keeping only the facts.
    pub fn into_facts(self) -> TribleSet {
        self.facts
    }
}
/// Lets a checkout be used wherever a `&TribleSet` is expected (e.g. in
/// queries over its facts).
impl std::ops::Deref for Checkout {
    type Target = TribleSet;
    fn deref(&self) -> &TribleSet {
        &self.facts
    }
}
/// In-place union of two checkouts: facts and contributing commits are both
/// merged.
impl std::ops::AddAssign<&Checkout> for Checkout {
    fn add_assign(&mut self, rhs: &Checkout) {
        // Clones are needed because `rhs` is only borrowed.
        self.facts += rhs.facts.clone();
        self.commits.union(rhs.commits.clone());
    }
}
/// Consuming union of two checkouts.
impl std::ops::Add for Checkout {
    type Output = Self;
    fn add(mut self, rhs: Self) -> Self {
        self.facts += rhs.facts;
        self.commits.union(rhs.commits);
        self
    }
}
/// Union with a borrowed right-hand side; delegates to `AddAssign`.
impl std::ops::Add<&Checkout> for Checkout {
    type Output = Self;
    fn add(mut self, rhs: &Checkout) -> Self {
        self += rhs;
        self
    }
}
/// A mutable working copy of a branch, produced by [`Repository::pull`].
///
/// New blobs are buffered in an in-memory store until pushed; the base fields
/// record the branch state the workspace was created from so pushes can be
/// performed as compare-and-set updates.
pub struct Workspace<Blobs: BlobStore<Blake3>> {
    // Blobs created locally and not yet pushed.
    local_blobs: MemoryBlobStore<Blake3>,
    // Snapshot of the repository's blob store at pull/last-push time.
    base_blobs: Blobs::Reader,
    // Branch this workspace tracks.
    base_branch_id: Id,
    // Branch metadata the workspace is based on (CAS `old` value for pushes).
    base_branch_meta: BranchMetaHandle,
    // Current local head commit, if any.
    head: Option<CommitHandle>,
    // Head as of pull/last successful push; equality with `head` means
    // there is nothing to push.
    base_head: Option<CommitHandle>,
    // Key used to sign commits and branch metadata created here.
    signing_key: SigningKey,
    // Commit-metadata template handle inherited from the repository.
    commit_metadata: MetadataHandle,
}
// Manual Debug: omits the signing key so secrets never end up in logs.
impl<Blobs> fmt::Debug for Workspace<Blobs>
where
    Blobs: BlobStore<Blake3>,
    Blobs::Reader: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Workspace")
            .field("local_blobs", &self.local_blobs)
            .field("base_blobs", &self.base_blobs)
            .field("base_branch_id", &self.base_branch_id)
            .field("base_branch_meta", &self.base_branch_meta)
            .field("base_head", &self.base_head)
            .field("head", &self.head)
            .field("commit_metadata", &self.commit_metadata)
            .finish()
    }
}
/// A query over a workspace's commit graph that resolves to a [`CommitSet`].
///
/// Implemented by concrete handles, collections of handles, the selector
/// combinators below, and the standard range types.
pub trait CommitSelector<Blobs: BlobStore<Blake3>> {
    /// Evaluates the selector against `ws`, reading commit metadata as needed.
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >;
}
/// Selector: the seed commits plus all of their transitive ancestors.
pub struct Ancestors<S>(pub S);
/// Convenience constructor for [`Ancestors`].
pub fn ancestors<S>(selector: S) -> Ancestors<S> {
    Ancestors(selector)
}
/// Selector: the commits exactly `n` parent-steps above the seeds.
pub struct NthAncestors<S>(pub S, pub usize);
/// Convenience constructor for [`NthAncestors`].
pub fn nth_ancestors<S>(selector: S, n: usize) -> NthAncestors<S> {
    NthAncestors(selector, n)
}
/// Selector: the direct parents of the seed commits.
pub struct Parents<S>(pub S);
/// Convenience constructor for [`Parents`].
pub fn parents<S>(selector: S) -> Parents<S> {
    Parents(selector)
}
/// Selector: commits reachable from exactly one of the two sides.
pub struct SymmetricDiff<A, B>(pub A, pub B);
/// Convenience constructor for [`SymmetricDiff`].
pub fn symmetric_diff<A, B>(a: A, b: B) -> SymmetricDiff<A, B> {
    SymmetricDiff(a, b)
}
/// Selector: union of two selections.
pub struct Union<A, B> {
    left: A,
    right: B,
}
/// Convenience constructor for [`Union`].
pub fn union<A, B>(left: A, right: B) -> Union<A, B> {
    Union { left, right }
}
/// Selector: intersection of two selections.
pub struct Intersect<A, B> {
    left: A,
    right: B,
}
/// Convenience constructor for [`Intersect`].
pub fn intersect<A, B>(left: A, right: B) -> Intersect<A, B> {
    Intersect { left, right }
}
/// Selector: commits in `left` that are not in `right`.
pub struct Difference<A, B> {
    left: A,
    right: B,
}
/// Convenience constructor for [`Difference`].
pub fn difference<A, B>(left: A, right: B) -> Difference<A, B> {
    Difference { left, right }
}
/// Selector: ancestors of the head whose timestamp interval overlaps the
/// given `[start, end]` range.
pub struct TimeRange(pub Epoch, pub Epoch);
/// Convenience constructor for [`TimeRange`].
pub fn time_range(start: Epoch, end: Epoch) -> TimeRange {
    TimeRange(start, end)
}
/// Selector: commits from `selector` accepted by `filter(metadata, content)`.
pub struct Filter<S, F> {
    selector: S,
    filter: F,
}
/// Convenience constructor for [`Filter`].
pub fn filter<S, F>(selector: S, filter: F) -> Filter<S, F> {
    Filter { selector, filter }
}
/// A single commit handle selects exactly itself.
impl<Blobs> CommitSelector<Blobs> for CommitHandle
where
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        _ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // Singleton set containing just this handle's raw bytes.
        let mut selection = CommitSet::new();
        selection.insert(&Entry::new(&self.raw));
        Ok(selection)
    }
}
/// A pre-built commit set selects itself verbatim.
impl<Blobs> CommitSelector<Blobs> for CommitSet
where
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        _ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        Ok(self)
    }
}
/// A vector of handles selects exactly those commits.
impl<Blobs> CommitSelector<Blobs> for Vec<CommitHandle>
where
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        _ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let mut selection = CommitSet::new();
        for handle in self.iter() {
            selection.insert(&Entry::new(&handle.raw));
        }
        Ok(selection)
    }
}
/// A slice of handles selects exactly those commits.
impl<Blobs> CommitSelector<Blobs> for &[CommitHandle]
where
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        _ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let mut selection = CommitSet::new();
        for handle in self.iter() {
            selection.insert(&Entry::new(&handle.raw));
        }
        Ok(selection)
    }
}
/// `Some(handle)` selects that commit; `None` selects nothing.
impl<Blobs> CommitSelector<Blobs> for Option<CommitHandle>
where
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        _ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let mut selection = CommitSet::new();
        // `Option` iterates zero or one times.
        for handle in self {
            selection.insert(&Entry::new(&handle.raw));
        }
        Ok(selection)
    }
}
/// Seeds plus all transitively reachable ancestors.
impl<S, Blobs> CommitSelector<Blobs> for Ancestors<S>
where
    S: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let seeds = self.0.select(ws)?;
        collect_reachable_from_patch(ws, seeds)
    }
}
/// Commits exactly `n` parent-steps above the seeds.
impl<Blobs, S> CommitSelector<Blobs> for NthAncestors<S>
where
    Blobs: BlobStore<Blake3>,
    S: CommitSelector<Blobs>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // Step the frontier through `parent` edges `self.1` times; the walk
        // terminates early once no commit has parents left.
        let mut frontier = self.0.select(ws)?;
        let mut remaining = self.1;
        while remaining > 0 && !frontier.is_empty() {
            let keys: Vec<[u8; VALUE_LEN]> = frontier.iter().copied().collect();
            let mut next_frontier = CommitSet::new();
            for raw in keys {
                let handle = CommitHandle::new(raw);
                let meta: TribleSet = ws.get(handle).map_err(WorkspaceCheckoutError::Storage)?;
                for (p,) in find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }])) {
                    next_frontier.insert(&Entry::new(&p.raw));
                }
            }
            frontier = next_frontier;
            remaining -= 1;
        }
        Ok(frontier)
    }
}
/// Direct parents of the seed commits (one step, seeds excluded).
impl<S, Blobs> CommitSelector<Blobs> for Parents<S>
where
    S: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let seeds = self.0.select(ws)?;
        let mut result = CommitSet::new();
        for raw in seeds.iter() {
            let handle = Value::new(*raw);
            let meta: TribleSet = ws.get(handle).map_err(WorkspaceCheckoutError::Storage)?;
            for (p,) in find!((p: Value<_>), pattern!(&meta, [{ parent: ?p }])) {
                result.insert(&Entry::new(&p.raw));
            }
        }
        Ok(result)
    }
}
/// Commits reachable from exactly one of the two sides: (A ∪ B) \ (A ∩ B)
/// computed over the full reachable closures of both seed sets.
impl<A, B, Blobs> CommitSelector<Blobs> for SymmetricDiff<A, B>
where
    A: CommitSelector<Blobs>,
    B: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let seeds_a = self.0.select(ws)?;
        let seeds_b = self.1.select(ws)?;
        // Expand both seed sets to their reachable closures first.
        let a = collect_reachable_from_patch(ws, seeds_a)?;
        let b = collect_reachable_from_patch(ws, seeds_b)?;
        let inter = a.intersect(&b);
        let mut union = a;
        union.union(b);
        Ok(union.difference(&inter))
    }
}
/// Union of two selections (no reachability expansion).
impl<A, B, Blobs> CommitSelector<Blobs> for Union<A, B>
where
    A: CommitSelector<Blobs>,
    B: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let mut left = self.left.select(ws)?;
        let right = self.right.select(ws)?;
        left.union(right);
        Ok(left)
    }
}
/// Intersection of two selections.
impl<A, B, Blobs> CommitSelector<Blobs> for Intersect<A, B>
where
    A: CommitSelector<Blobs>,
    B: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let left = self.left.select(ws)?;
        let right = self.right.select(ws)?;
        Ok(left.intersect(&right))
    }
}
/// Commits selected by `left` but not by `right`.
impl<A, B, Blobs> CommitSelector<Blobs> for Difference<A, B>
where
    A: CommitSelector<Blobs>,
    B: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let left = self.left.select(ws)?;
        let right = self.right.select(ws)?;
        Ok(left.difference(&right))
    }
}
/// Keeps only the commits whose `(metadata, content)` pair satisfies the
/// predicate.
impl<S, F, Blobs> CommitSelector<Blobs> for Filter<S, F>
where
    Blobs: BlobStore<Blake3>,
    S: CommitSelector<Blobs>,
    F: for<'x, 'y> Fn(&'x TribleSet, &'y TribleSet) -> bool,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        let patch = self.selector.select(ws)?;
        let mut result = CommitSet::new();
        let filter = self.filter;
        for raw in patch.iter() {
            let handle = Value::new(*raw);
            // Load both the commit metadata and its content payload so the
            // predicate can inspect either.
            let meta: TribleSet = ws.get(handle).map_err(WorkspaceCheckoutError::Storage)?;
            // A commit must reference exactly one content blob; anything else
            // is malformed metadata.
            let Ok((content_handle,)) = find!(
                (c: Value<_>),
                pattern!(&meta, [{ content: ?c }])
            )
            .exactly_one() else {
                return Err(WorkspaceCheckoutError::BadCommitMetadata());
            };
            let payload: TribleSet = ws
                .get(content_handle)
                .map_err(WorkspaceCheckoutError::Storage)?;
            if filter(&meta, &payload) {
                result.insert(&Entry::new(raw));
            }
        }
        Ok(result)
    }
}
/// Selector: all head ancestors whose content mentions a given entity.
pub struct HistoryOf(pub Id);
/// Convenience constructor for [`HistoryOf`].
pub fn history_of(entity: Id) -> HistoryOf {
    HistoryOf(entity)
}
impl<Blobs> CommitSelector<Blobs> for HistoryOf
where
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // No head yet means no history at all.
        let Some(head_) = ws.head else {
            return Ok(CommitSet::new());
        };
        let entity = self.0;
        // Walk the full ancestry of the head and keep commits whose content
        // contains at least one trible with `entity` as subject.
        filter(
            ancestors(head_),
            move |_: &TribleSet, payload: &TribleSet| payload.iter().any(|t| t.e() == &entity),
        )
        .select(ws)
    }
}
/// Unions the reachable closure of every commit in `patch`.
///
/// Delegates per-seed traversal to `collect_reachable` (defined elsewhere in
/// this module).
fn collect_reachable_from_patch<Blobs: BlobStore<Blake3>>(
    ws: &mut Workspace<Blobs>,
    patch: CommitSet,
) -> Result<
    CommitSet,
    WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
> {
    let mut result = CommitSet::new();
    for raw in patch.iter() {
        let handle = Value::new(*raw);
        let reach = collect_reachable(ws, handle)?;
        result.union(reach);
    }
    Ok(result)
}
/// Depth-first walk from `seeds` along `parent` edges, pruning at any commit
/// contained in `stop` (the stop commits themselves are excluded from the
/// result).
fn collect_reachable_from_patch_until<Blobs: BlobStore<Blake3>>(
    ws: &mut Workspace<Blobs>,
    seeds: CommitSet,
    stop: &CommitSet,
) -> Result<
    CommitSet,
    WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
> {
    let mut visited = HashSet::new();
    let mut stack: Vec<CommitHandle> = seeds.iter().map(|raw| Value::new(*raw)).collect();
    let mut result = CommitSet::new();
    while let Some(commit) = stack.pop() {
        // Skip commits we have already expanded (graphs can share ancestors).
        if !visited.insert(commit) {
            continue;
        }
        // Do not cross the stop boundary.
        if stop.get(&commit.raw).is_some() {
            continue;
        }
        result.insert(&Entry::new(&commit.raw));
        // Prefer the workspace-local blobs, falling back to the base store.
        let meta: TribleSet = ws
            .local_blobs
            .reader()
            .unwrap()
            .get(commit)
            .or_else(|_| ws.base_blobs.get(commit))
            .map_err(WorkspaceCheckoutError::Storage)?;
        for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
            stack.push(p);
        }
    }
    Ok(result)
}
impl<T, Blobs> CommitSelector<Blobs> for std::ops::Range<T>
where
T: CommitSelector<Blobs>,
Blobs: BlobStore<Blake3>,
{
fn select(
self,
ws: &mut Workspace<Blobs>,
) -> Result<
CommitSet,
WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
> {
let end_patch = self.end.select(ws)?;
let start_patch = self.start.select(ws)?;
collect_reachable_from_patch_until(ws, end_patch, &start_patch)
}
}
/// `start..`: commits reachable from the current workspace head, excluding
/// everything reachable from the `start` selector's result.
impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeFrom<T>
where
    T: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // Without a head there is no history to walk.
        let head = match ws.head {
            Some(h) => h,
            None => return Ok(CommitSet::new()),
        };
        let excluded = self.start.select(ws)?;
        let mut seeds = CommitSet::new();
        seeds.insert(&Entry::new(&head.raw));
        collect_reachable_from_patch_until(ws, seeds, &excluded)
    }
}
/// `..end`: every commit reachable from the `end` selector's result, with
/// no lower bound.
impl<T, Blobs> CommitSelector<Blobs> for std::ops::RangeTo<T>
where
    T: CommitSelector<Blobs>,
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // Resolve the bound, then collect its full ancestry.
        let seeds = self.end.select(ws)?;
        collect_reachable_from_patch(ws, seeds)
    }
}
/// `..`: the entire history reachable from the current workspace head.
impl<Blobs> CommitSelector<Blobs> for std::ops::RangeFull
where
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // A workspace without a head selects nothing.
        match ws.head {
            Some(head) => collect_reachable(ws, head),
            None => Ok(CommitSet::new()),
        }
    }
}
/// Selects the ancestors of the current head whose `created_at` timestamp
/// interval overlaps the inclusive range `self.0..=self.1`.
impl<Blobs> CommitSelector<Blobs> for TimeRange
where
    Blobs: BlobStore<Blake3>,
{
    fn select(
        self,
        ws: &mut Workspace<Blobs>,
    ) -> Result<
        CommitSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    > {
        // No head means there are no commits to filter.
        let Some(head_) = ws.head else {
            return Ok(CommitSet::new());
        };
        let start = self.0;
        let end = self.1;
        filter(
            ancestors(head_),
            move |meta: &TribleSet, _payload: &TribleSet| {
                // A commit passes only when it carries exactly one
                // `created_at` interval (at_most_one + Some) and that
                // interval overlaps [start, end]. Commits with zero or
                // multiple timestamps are rejected.
                if let Ok(Some(((ts_start, ts_end),))) =
                    find!((t: (Epoch, Epoch)), pattern!(meta, [{ crate::metadata::created_at: ?t }])).at_most_one()
                {
                    // Standard interval-overlap test.
                    ts_start <= end && ts_end >= start
                } else {
                    false
                }
            },
        )
        .select(ws)
    }
}
/// Mutable working copy of a branch. New blobs and commits accumulate in a
/// workspace-local blob store (`local_blobs`); reads fall back from the
/// local store to the base repository store (`base_blobs`).
impl<Blobs: BlobStore<Blake3>> Workspace<Blobs> {
    /// Returns the id of the branch this workspace is based on.
    pub fn branch_id(&self) -> Id {
        self.base_branch_id
    }
    /// Returns the current head commit handle, or `None` if the workspace
    /// has no commits yet.
    pub fn head(&self) -> Option<CommitHandle> {
        self.head
    }
    /// Returns the metadata handle that [`Workspace::commit`] attaches to
    /// new commits.
    pub fn metadata(&self) -> MetadataHandle {
        self.commit_metadata
    }
    /// Stores `item` in the workspace-local blob store and returns its
    /// content-addressed handle.
    pub fn put<S, T>(&mut self, item: T) -> Value<Handle<Blake3, S>>
    where
        S: BlobSchema + 'static,
        T: ToBlob<S>,
        Handle<Blake3, S>: ValueSchema,
    {
        // Local puts are treated as infallible by this code path.
        self.local_blobs.put(item).expect("infallible blob put")
    }
    /// Loads and converts the blob behind `handle`, trying the
    /// workspace-local store first and falling back to the base store.
    ///
    /// # Errors
    ///
    /// Returns the base store's error when the blob is found in neither
    /// store or fails conversion to `T`.
    pub fn get<T, S>(
        &mut self,
        handle: Value<Handle<Blake3, S>>,
    ) -> Result<T, <Blobs::Reader as BlobStoreGet<Blake3>>::GetError<<T as TryFromBlob<S>>::Error>>
    where
        S: BlobSchema + 'static,
        T: TryFromBlob<S>,
        Handle<Blake3, S>: ValueSchema,
    {
        self.local_blobs
            .reader()
            .unwrap()
            .get(handle)
            .or_else(|_| self.base_blobs.get(handle))
    }
    /// Creates a commit containing `content_` with message `message_`,
    /// using the workspace's default metadata handle, and advances the head.
    pub fn commit(&mut self, content_: impl Into<TribleSet>, message_: &str) {
        let content_ = content_.into();
        self.commit_internal(content_, Some(self.commit_metadata), Some(message_));
    }
    /// Like [`Workspace::commit`] but with an explicitly provided metadata
    /// handle instead of the workspace default.
    pub fn commit_with_metadata(
        &mut self,
        content_: impl Into<TribleSet>,
        metadata_: MetadataHandle,
        message_: &str,
    ) {
        let content_ = content_.into();
        self.commit_internal(content_, Some(metadata_), Some(message_));
    }
    /// Shared commit path: builds the signed commit metadata, stores the
    /// message, content, and commit blobs locally, and moves the head to
    /// the new commit. The current head (if any) becomes the sole parent.
    fn commit_internal(
        &mut self,
        content_: TribleSet,
        metadata_handle: Option<MetadataHandle>,
        message_: Option<&str>,
    ) {
        let content_blob = content_.to_blob();
        // The message (if any) is stored as its own blob and referenced by
        // handle from the commit metadata.
        let message_handle = message_.map(|m| self.put(m.to_string()));
        let parents = self.head.iter().copied();
        let commit_set = crate::repo::commit::commit_metadata(
            &self.signing_key,
            parents,
            message_handle,
            Some(content_blob.clone()),
            metadata_handle,
        );
        // The content blob must be stored so later checkouts can resolve it.
        let _ = self
            .local_blobs
            .put(content_blob)
            .expect("failed to put content blob");
        let commit_handle = self
            .local_blobs
            .put(commit_set)
            .expect("failed to put commit blob");
        self.head = Some(commit_handle);
    }
    /// Merges `other` into this workspace: copies all of `other`'s local
    /// blobs into this workspace's local store, then merges the heads via
    /// [`Workspace::merge_commit`].
    ///
    /// Returns the resulting head, or this workspace's existing head when
    /// `other` has no head.
    pub fn merge(
        &mut self,
        other: &mut Workspace<Blobs>,
    ) -> Result<Option<CommitHandle>, MergeError> {
        let other_local = other.local_blobs.reader().unwrap();
        // Transfer every local blob so the merged history is fully
        // resolvable from this workspace.
        for r in other_local.blobs() {
            let handle = r.expect("infallible blob enumeration");
            let blob: Blob<UnknownBlob> = other_local.get(handle).expect("infallible blob read");
            self.local_blobs.put(blob).expect("infallible blob put");
        }
        match other.head {
            Some(other_head) => Ok(Some(self.merge_commit(other_head)?)),
            None => Ok(self.head),
        }
    }
    /// Merges the commit `other` into the current head.
    ///
    /// Fast-forwards when one side is already an ancestor of the other;
    /// otherwise creates a two-parent merge commit (with no content,
    /// message, or metadata) and advances the head to it.
    ///
    /// NOTE(review): storage errors during the ancestry checks are
    /// swallowed by `.ok()` and treated as "not an ancestor", which then
    /// falls through to creating a merge commit — confirm this is intended.
    pub fn merge_commit(
        &mut self,
        other: Value<Handle<Blake3, SimpleArchive>>,
    ) -> Result<CommitHandle, MergeError> {
        let local_head = match self.head {
            // No local history: adopt `other` directly.
            None => {
                self.head = Some(other);
                return Ok(other);
            }
            // Identical heads: nothing to merge.
            Some(h) if h == other => {
                return Ok(h);
            }
            Some(h) => h,
        };
        // `other` already contained in local history: keep the local head.
        let remote_in_local = ancestors(local_head)
            .select(self)
            .ok()
            .map(|set| set.get(&other.raw).is_some())
            .unwrap_or(false);
        if remote_in_local {
            return Ok(local_head);
        }
        // Local head contained in `other`'s history: fast-forward to it.
        let local_in_remote = ancestors(other)
            .select(self)
            .ok()
            .map(|set| set.get(&local_head.raw).is_some())
            .unwrap_or(false);
        if local_in_remote {
            self.head = Some(other);
            return Ok(other);
        }
        // Diverged histories: create a merge commit with both parents.
        let parents = self.head.iter().copied().chain(Some(other));
        let merge_commit = commit_metadata(&self.signing_key, parents, None, None, None);
        let commit_handle = self
            .local_blobs
            .put(merge_commit)
            .expect("failed to put merge commit blob");
        self.head = Some(commit_handle);
        Ok(commit_handle)
    }
    /// Moves the head to `commit` without any ancestry or validity checks.
    pub fn set_head(&mut self, commit: CommitHandle) {
        self.head = Some(commit);
    }
    /// Loads and unions the `content` TribleSets of all `commits`.
    /// Commits without a `content` attribute contribute nothing.
    ///
    /// # Errors
    ///
    /// `Storage` when a blob cannot be loaded from either store;
    /// `BadCommitMetadata` when a commit carries more than one `content`
    /// attribute.
    fn checkout_commits<I>(
        &mut self,
        commits: I,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        I: IntoIterator<Item = CommitHandle>,
    {
        let local = self.local_blobs.reader().unwrap();
        let mut result = TribleSet::new();
        for commit in commits {
            // Resolve the commit metadata (local first, base fallback).
            let meta: TribleSet = local
                .get(commit)
                .or_else(|_| self.base_blobs.get(commit))
                .map_err(WorkspaceCheckoutError::Storage)?;
            // At most one content handle per commit; more is malformed.
            let content_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };
            if let Some(c) = content_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                result += set;
            } else {
                // Content-less commit (e.g. a merge commit): skip it.
                continue;
            }
        }
        Ok(result)
    }
    /// Loads and unions the `metadata` TribleSets of all `commits`.
    /// Commits without a `metadata` attribute contribute nothing.
    ///
    /// # Errors
    ///
    /// `Storage` when a blob cannot be loaded from either store;
    /// `BadCommitMetadata` when a commit carries more than one `metadata`
    /// attribute.
    fn checkout_commits_metadata<I>(
        &mut self,
        commits: I,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        I: IntoIterator<Item = CommitHandle>,
    {
        let local = self.local_blobs.reader().unwrap();
        let mut result = TribleSet::new();
        for commit in commits {
            let meta: TribleSet = local
                .get(commit)
                .or_else(|_| self.base_blobs.get(commit))
                .map_err(WorkspaceCheckoutError::Storage)?;
            // At most one metadata handle per commit; more is malformed.
            let metadata_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };
            if let Some(c) = metadata_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                result += set;
            }
        }
        Ok(result)
    }
    /// Loads both the `content` and `metadata` TribleSets of all `commits`
    /// in a single pass, returning `(content_union, metadata_union)`.
    ///
    /// # Errors
    ///
    /// Same as [`Workspace::checkout_commits`] /
    /// [`Workspace::checkout_commits_metadata`].
    fn checkout_commits_with_metadata<I>(
        &mut self,
        commits: I,
    ) -> Result<
        (TribleSet, TribleSet),
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        I: IntoIterator<Item = CommitHandle>,
    {
        let local = self.local_blobs.reader().unwrap();
        let mut data = TribleSet::new();
        let mut metadata_set = TribleSet::new();
        for commit in commits {
            let meta: TribleSet = local
                .get(commit)
                .or_else(|_| self.base_blobs.get(commit))
                .map_err(WorkspaceCheckoutError::Storage)?;
            // At most one content handle per commit; more is malformed.
            let content_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ content: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };
            if let Some(c) = content_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                data += set;
            }
            // At most one metadata handle per commit; more is malformed.
            let metadata_opt =
                match find!((c: Value<_>), pattern!(&meta, [{ metadata: ?c }])).at_most_one() {
                    Ok(Some((c,))) => Some(c),
                    Ok(None) => None,
                    Err(_) => return Err(WorkspaceCheckoutError::BadCommitMetadata()),
                };
            if let Some(c) = metadata_opt {
                let set: TribleSet = local
                    .get(c)
                    .or_else(|_| self.base_blobs.get(c))
                    .map_err(WorkspaceCheckoutError::Storage)?;
                metadata_set += set;
            }
        }
        Ok((data, metadata_set))
    }
    /// Resolves `spec` to a commit set and returns a [`Checkout`] holding
    /// the selected commits together with the union of their content.
    pub fn checkout<R>(
        &mut self,
        spec: R,
    ) -> Result<
        Checkout,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        R: CommitSelector<Blobs>,
    {
        let commits = spec.select(self)?;
        let facts = self.checkout_commits(commits.iter().map(|raw| Value::new(*raw)))?;
        Ok(Checkout { facts, commits })
    }
    /// Resolves `spec` to a commit set and returns the union of those
    /// commits' `metadata` TribleSets.
    pub fn checkout_metadata<R>(
        &mut self,
        spec: R,
    ) -> Result<
        TribleSet,
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        R: CommitSelector<Blobs>,
    {
        let patch = spec.select(self)?;
        let commits = patch.iter().map(|raw| Value::new(*raw));
        self.checkout_commits_metadata(commits)
    }
    /// Resolves `spec` to a commit set and returns `(content, metadata)`
    /// unions for those commits in a single pass.
    pub fn checkout_with_metadata<R>(
        &mut self,
        spec: R,
    ) -> Result<
        (TribleSet, TribleSet),
        WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
    >
    where
        R: CommitSelector<Blobs>,
    {
        let patch = spec.select(self)?;
        let commits = patch.iter().map(|raw| Value::new(*raw));
        self.checkout_commits_with_metadata(commits)
    }
}
/// Errors produced while selecting or materializing commits from a
/// `Workspace`.
#[derive(Debug)]
pub enum WorkspaceCheckoutError<GetErr: Error> {
    /// A blob could not be loaded from either the workspace-local or the
    /// base blob store.
    Storage(GetErr),
    /// A commit's metadata did not have the expected shape (e.g. more than
    /// one `content` or `metadata` attribute on a single commit).
    BadCommitMetadata(),
}
impl<E: Error + fmt::Debug> fmt::Display for WorkspaceCheckoutError<E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
WorkspaceCheckoutError::Storage(e) => write!(f, "storage error: {e}"),
WorkspaceCheckoutError::BadCommitMetadata() => {
write!(f, "commit metadata malformed")
}
}
}
}
// Marker impl: relies on the default `Error` methods; messages come from
// the `Display`/`Debug` implementations above.
impl<E: Error + fmt::Debug> Error for WorkspaceCheckoutError<E> {}
/// Collects `from` and every commit reachable from it via `parent` edges.
///
/// # Errors
///
/// Returns [`WorkspaceCheckoutError::Storage`] when a commit's metadata blob
/// cannot be loaded from either the workspace-local or the base blob store.
fn collect_reachable<Blobs: BlobStore<Blake3>>(
    ws: &mut Workspace<Blobs>,
    from: CommitHandle,
) -> Result<
    CommitSet,
    WorkspaceCheckoutError<<Blobs::Reader as BlobStoreGet<Blake3>>::GetError<UnarchiveError>>,
> {
    // Obtain the local reader once instead of once per visited commit;
    // this matches the pattern used by `Workspace::checkout_commits`.
    let local = ws.local_blobs.reader().unwrap();
    let mut visited = HashSet::new();
    let mut stack = vec![from];
    let mut result = CommitSet::new();
    while let Some(commit) = stack.pop() {
        // A commit can be reachable along multiple paths; expand it once.
        if !visited.insert(commit) {
            continue;
        }
        result.insert(&Entry::new(&commit.raw));
        // Resolve commit metadata locally first, falling back to the base
        // repository store.
        let meta: TribleSet = local
            .get(commit)
            .or_else(|_| ws.base_blobs.get(commit))
            .map_err(WorkspaceCheckoutError::Storage)?;
        for (p,) in find!((p: Value<_>,), pattern!(&meta, [{ parent: ?p }])) {
            stack.push(p);
        }
    }
    Ok(result)
}