use std::cmp::Ordering;
use std::collections::HashMap;
use std::collections::HashSet;
use std::slice;
use std::sync::Arc;
use itertools::Itertools as _;
use thiserror::Error;
use crate::dag_walk;
use crate::object_id::HexPrefix;
use crate::object_id::PrefixResolution;
use crate::op_heads_store;
use crate::op_heads_store::OpHeadResolutionError;
use crate::op_heads_store::OpHeadsStore;
use crate::op_heads_store::OpHeadsStoreError;
use crate::op_store::OpStore;
use crate::op_store::OpStoreError;
use crate::op_store::OpStoreResult;
use crate::op_store::OperationId;
use crate::operation::Operation;
use crate::repo::ReadonlyRepo;
use crate::repo::Repo as _;
use crate::repo::RepoLoader;
/// Error that may occur while evaluating an operation-set expression
/// (e.g. `"@"`, an ID prefix, or a `-`/`+` suffixed form).
///
/// All variants are transparent wrappers so the underlying error's message
/// is surfaced directly to the caller.
#[derive(Debug, Error)]
pub enum OpsetEvaluationError {
    /// The expression itself could not be resolved to a single operation.
    #[error(transparent)]
    OpsetResolution(#[from] OpsetResolutionError),
    /// Failed to access the operation-heads store.
    #[error(transparent)]
    OpHeadsStore(#[from] OpHeadsStoreError),
    /// Failed to resolve divergent operation heads.
    #[error(transparent)]
    OpHeadResolution(#[from] OpHeadResolutionError),
    /// Failed to read from the operation store.
    #[error(transparent)]
    OpStore(#[from] OpStoreError),
}
/// Error caused by bad operation-set expression syntax or an expression that
/// does not resolve to exactly one operation.
#[derive(Debug, Error)]
pub enum OpsetResolutionError {
    /// The expression matched multiple operations where one was required
    /// (e.g. `"@"` with divergent heads, or a `-`/`+` step with multiple
    /// parents/children).
    #[error(r#"The "{expr}" expression resolved to more than one operation"#)]
    MultipleOperations {
        /// Source expression as written by the user.
        expr: String,
        /// Candidate operation ids the expression matched.
        candidates: Vec<OperationId>,
    },
    /// The expression matched no operations (e.g. stepping past the ends of
    /// the operation log).
    #[error(r#"The "{0}" expression resolved to no operations"#)]
    EmptyOperations(String),
    /// The operation id was not valid hexadecimal (or was empty).
    #[error(r#"Operation ID "{0}" is not a valid hexadecimal prefix"#)]
    InvalidIdPrefix(String),
    /// No operation in the store matched the given id prefix.
    #[error(r#"No operation ID matching "{0}""#)]
    NoSuchOperation(String),
    /// More than one operation in the store matched the given id prefix.
    #[error(r#"Operation ID prefix "{0}" is ambiguous"#)]
    AmbiguousIdPrefix(String),
}
/// Resolves `op_str` to a single operation using the loader's stores.
///
/// `"@"` refers to the current head; when there are multiple heads this
/// attempts to merge them via `resolve_op_heads` and reports
/// `MultipleOperations` if that fails.
pub fn resolve_op_for_load(
    repo_loader: &RepoLoader,
    op_str: &str,
) -> Result<Operation, OpsetEvaluationError> {
    let op_store = repo_loader.op_store();
    let op_heads_store = repo_loader.op_heads_store().as_ref();
    let get_current_op = || {
        op_heads_store::resolve_op_heads(op_heads_store, op_store, |op_heads| {
            let candidates = op_heads.iter().map(|op| op.id().clone()).collect();
            Err(OpsetResolutionError::MultipleOperations {
                expr: "@".to_owned(),
                candidates,
            }
            .into())
        })
    };
    resolve_single_op(
        op_store,
        get_current_op,
        || get_current_head_ops(op_store, op_heads_store),
        op_str,
    )
}
/// Resolves `op_str` to a single operation, treating the operation the repo
/// was loaded at as the only head (`"@"`).
pub fn resolve_op_with_repo(
    repo: &ReadonlyRepo,
    op_str: &str,
) -> Result<Operation, OpsetEvaluationError> {
    let head_ops = slice::from_ref(repo.operation());
    resolve_op_at(repo.op_store(), head_ops, op_str)
}
/// Resolves `op_str` to a single operation against the given `head_ops`.
///
/// `"@"` resolves to the sole head; zero heads yields `EmptyOperations` and
/// multiple heads yields `MultipleOperations`.
pub fn resolve_op_at(
    op_store: &Arc<dyn OpStore>,
    head_ops: &[Operation],
    op_str: &str,
) -> Result<Operation, OpsetEvaluationError> {
    let get_current_op = || {
        if let [single] = head_ops {
            Ok(single.clone())
        } else if head_ops.is_empty() {
            Err(OpsetResolutionError::EmptyOperations("@".to_owned()).into())
        } else {
            Err(OpsetResolutionError::MultipleOperations {
                expr: "@".to_owned(),
                candidates: head_ops.iter().map(|op| op.id().clone()).collect(),
            }
            .into())
        }
    };
    resolve_single_op(op_store, get_current_op, || Ok(head_ops.to_vec()), op_str)
}
/// Core resolver: parses `op_str` into a base symbol (`"@"` or an id prefix)
/// followed by any number of `-` (parent) / `+` (child) steps, and walks the
/// operation graph accordingly.
fn resolve_single_op(
    op_store: &Arc<dyn OpStore>,
    get_current_op: impl FnOnce() -> Result<Operation, OpsetEvaluationError>,
    get_head_ops: impl FnOnce() -> Result<Vec<Operation>, OpsetEvaluationError>,
    op_str: &str,
) -> Result<Operation, OpsetEvaluationError> {
    let base_str = op_str.trim_end_matches(['-', '+']);
    let suffix = &op_str[base_str.len()..];
    // Head operations are only needed to find children, so fetch them lazily,
    // and only once, when the suffix contains at least one '+'.
    let head_ops = if suffix.contains('+') {
        Some(get_head_ops()?)
    } else {
        None
    };
    let mut op = if base_str == "@" {
        get_current_op()?
    } else {
        resolve_single_op_from_store(op_store, base_str)?
    };
    // Apply each '-'/'+' step in turn; every step must land on exactly one
    // neighbor or the whole expression is ambiguous/empty.
    for step in suffix.chars() {
        let mut neighbors: Vec<Operation> = match step {
            '-' => op.parents().try_collect()?,
            '+' => find_child_ops(head_ops.as_ref().unwrap(), op.id())?,
            _ => unreachable!("suffix contains only '-' and '+'"),
        };
        if neighbors.is_empty() {
            return Err(OpsetResolutionError::EmptyOperations(op_str.to_owned()).into());
        }
        if neighbors.len() > 1 {
            return Err(OpsetResolutionError::MultipleOperations {
                expr: op_str.to_owned(),
                candidates: neighbors.iter().map(|op| op.id().clone()).collect(),
            }
            .into());
        }
        op = neighbors.pop().unwrap();
    }
    Ok(op)
}
/// Looks up a single operation by hexadecimal id prefix in `op_store`.
fn resolve_single_op_from_store(
    op_store: &Arc<dyn OpStore>,
    op_str: &str,
) -> Result<Operation, OpsetEvaluationError> {
    // Reject the empty string explicitly so it never matches every operation.
    if op_str.is_empty() {
        return Err(OpsetResolutionError::InvalidIdPrefix(op_str.to_owned()).into());
    }
    let Some(prefix) = HexPrefix::new(op_str) else {
        return Err(OpsetResolutionError::InvalidIdPrefix(op_str.to_owned()).into());
    };
    let resolution = op_store.resolve_operation_id_prefix(&prefix)?;
    match resolution {
        PrefixResolution::SingleMatch(op_id) => {
            let data = op_store.read_operation(&op_id)?;
            Ok(Operation::new(op_store.clone(), op_id, data))
        }
        PrefixResolution::NoMatch => {
            Err(OpsetResolutionError::NoSuchOperation(op_str.to_owned()).into())
        }
        PrefixResolution::AmbiguousMatch => {
            Err(OpsetResolutionError::AmbiguousIdPrefix(op_str.to_owned()).into())
        }
    }
}
/// Loads all current head operations from `op_heads_store`, sorted by their
/// recorded end time (oldest first).
pub fn get_current_head_ops(
    op_store: &Arc<dyn OpStore>,
    op_heads_store: &dyn OpHeadsStore,
) -> Result<Vec<Operation>, OpsetEvaluationError> {
    let head_ids = op_heads_store.get_op_heads()?;
    let mut head_ops = Vec::with_capacity(head_ids.len());
    for id in head_ids {
        let data = op_store.read_operation(&id)?;
        head_ops.push(Operation::new(op_store.clone(), id, data));
    }
    head_ops.sort_by_key(|op| op.metadata().end_time.timestamp);
    Ok(head_ops)
}
/// Collects the direct children of `root_op_id` among the ancestors of
/// `head_ops`.
///
/// The ancestor walk is stopped at `root_op_id` itself; since the walk is in
/// reverse topological order, every child has been visited by then.
fn find_child_ops(
    head_ops: &[Operation],
    root_op_id: &OperationId,
) -> OpStoreResult<Vec<Operation>> {
    let mut children = Vec::new();
    for ancestor in walk_ancestors(head_ops) {
        let op = ancestor?;
        if op.id() == root_op_id {
            break;
        }
        if op.parent_ids().iter().any(|id| id == root_op_id) {
            children.push(op);
        }
    }
    Ok(children)
}
/// Newtype ordering operations primarily by their metadata end time, falling
/// back to `Operation`'s own ordering to break ties deterministically.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct OperationByEndTime(Operation);

impl Ord for OperationByEndTime {
    fn cmp(&self, other: &Self) -> Ordering {
        let by_end_time = self
            .0
            .metadata()
            .end_time
            .cmp(&other.0.metadata().end_time);
        match by_end_time {
            Ordering::Equal => self.0.cmp(&other.0),
            unequal => unequal,
        }
    }
}

impl PartialOrd for OperationByEndTime {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Total order is defined by `Ord` above.
        Some(self.cmp(other))
    }
}
pub fn walk_ancestors(head_ops: &[Operation]) -> impl Iterator<Item = OpStoreResult<Operation>> {
let mut head_ops = head_ops
.iter()
.cloned()
.map(OperationByEndTime)
.collect_vec();
head_ops.sort_unstable_by(|op1, op2| op1.cmp(op2).reverse());
dag_walk::topo_order_reverse_lazy_ok(
head_ops.into_iter().map(Ok),
|OperationByEndTime(op)| op.id().clone(),
|OperationByEndTime(op)| op.parents().map_ok(OperationByEndTime).collect_vec(),
)
.map_ok(|OperationByEndTime(op)| op)
}
/// Summary of an operation-log reparenting performed by `reparent_range`.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ReparentStats {
    // Rewritten ids corresponding to the input head operations (a head that
    // was not itself rewritten maps to the destination operation's id).
    pub new_head_ids: Vec<OperationId>,
    // Number of operations that were rewritten onto the new parent.
    pub rewritten_count: usize,
    // Number of operations left unreachable from the destination after the
    // rewrite.
    pub unreachable_count: usize,
}
/// Rewrites the operations in the range `root_ops..head_ops` so that they
/// are parented onto `dest_op` instead, writing the rewritten operations to
/// `op_store` and reporting what changed.
pub fn reparent_range(
    op_store: &dyn OpStore,
    root_ops: &[Operation],
    head_ops: &[Operation],
    dest_op: &Operation,
) -> OpStoreResult<ReparentStats> {
    // Ancestors of the roots are the operations we do *not* want to rewrite.
    let mut unwanted_ids: HashSet<_> = walk_ancestors(root_ops)
        .map_ok(|op| op.id().clone())
        .try_collect()?;
    // Ancestors of the heads minus the unwanted set form the range to
    // rewrite, collected newest-first (walk order).
    let ops_to_reparent: Vec<_> = walk_ancestors(head_ops)
        .filter_ok(|op| !unwanted_ids.contains(op.id()))
        .try_collect()?;
    // Unwanted operations that are also ancestors of the destination remain
    // reachable; whatever is left becomes unreachable after the rewrite.
    for op in walk_ancestors(slice::from_ref(dest_op)) {
        unwanted_ids.remove(op?.id());
    }
    let unreachable_ids = unwanted_ids;
    // The walk is newest-first, so the last element is the earliest operation
    // in the range; it must not be the store's root operation.
    assert!(
        ops_to_reparent
            .last()
            .map_or(true, |op| op.id() != op_store.root_operation_id()),
        "root operation cannot be rewritten"
    );
    let mut rewritten_ids = HashMap::new();
    // Rewrite oldest-first so that each operation's rewritten parents are
    // already present in `rewritten_ids` when it is processed.
    for old_op in ops_to_reparent.into_iter().rev() {
        let mut data = old_op.store_operation().clone();
        // Each parent id is mapped to its rewritten id if there is one;
        // otherwise the *first* such parent is replaced by `dest_op` and any
        // further unmapped parents are dropped (`dest_once` yields only once).
        let mut dest_once = Some(dest_op.id());
        data.parents = data
            .parents
            .iter()
            .filter_map(|id| rewritten_ids.get(id).or_else(|| dest_once.take()))
            .cloned()
            .collect();
        let new_id = op_store.write_operation(&data)?;
        rewritten_ids.insert(old_op.id().clone(), new_id);
    }
    // Same first-unmapped-becomes-dest trick for the heads themselves: a head
    // outside the rewritten range maps to `dest_op` (at most once).
    let mut dest_once = Some(dest_op.id());
    let new_head_ids = head_ops
        .iter()
        .filter_map(|op| rewritten_ids.get(op.id()).or_else(|| dest_once.take()))
        .cloned()
        .collect();
    Ok(ReparentStats {
        new_head_ids,
        rewritten_count: rewritten_ids.len(),
        unreachable_count: unreachable_ids.len(),
    })
}