1use std::cmp::Ordering;
18use std::collections::HashMap;
19use std::collections::HashSet;
20use std::slice;
21use std::sync::Arc;
22
23use itertools::Itertools as _;
24use thiserror::Error;
25
26use crate::dag_walk;
27use crate::object_id::HexPrefix;
28use crate::object_id::PrefixResolution;
29use crate::op_heads_store;
30use crate::op_heads_store::OpHeadResolutionError;
31use crate::op_heads_store::OpHeadsStore;
32use crate::op_heads_store::OpHeadsStoreError;
33use crate::op_store::OpStore;
34use crate::op_store::OpStoreError;
35use crate::op_store::OpStoreResult;
36use crate::op_store::OperationId;
37use crate::operation::Operation;
38use crate::repo::ReadonlyRepo;
39use crate::repo::Repo as _;
40use crate::repo::RepoLoader;
41
/// Error that may occur during evaluation of an operation-set expression.
///
/// Wraps the resolution-specific error plus the store-level errors that can
/// surface while reading operations or op heads; all variants convert
/// transparently via `#[from]`.
#[derive(Debug, Error)]
pub enum OpsetEvaluationError {
    /// The expression itself could not be resolved to an operation.
    #[error(transparent)]
    OpsetResolution(#[from] OpsetResolutionError),
    /// Failed to access the op-heads store.
    #[error(transparent)]
    OpHeadsStore(#[from] OpHeadsStoreError),
    /// Failed to resolve the op heads to a single current operation.
    #[error(transparent)]
    OpHeadResolution(#[from] OpHeadResolutionError),
    /// Failed to read from or write to the operation store.
    #[error(transparent)]
    OpStore(#[from] OpStoreError),
}
58
/// Error that may occur while resolving an operation-set expression string
/// (e.g. `"@"`, an ID prefix, or either followed by `-`/`+` suffixes).
#[derive(Debug, Error)]
pub enum OpsetResolutionError {
    /// The expression matched more than one operation where exactly one was
    /// required.
    #[error(r#"The "{expr}" expression resolved to more than one operation"#)]
    MultipleOperations {
        /// The (sub-)expression that was ambiguous.
        expr: String,
        /// All operations the expression matched.
        candidates: Vec<OperationId>,
    },
    /// The expression matched no operations.
    #[error(r#"The "{0}" expression resolved to no operations"#)]
    EmptyOperations(String),
    /// The given string is not a valid hexadecimal operation-ID prefix.
    #[error(r#"Operation ID "{0}" is not a valid hexadecimal prefix"#)]
    InvalidIdPrefix(String),
    /// No operation ID starts with the given prefix.
    #[error(r#"No operation ID matching "{0}""#)]
    NoSuchOperation(String),
    /// More than one operation ID starts with the given prefix.
    #[error(r#"Operation ID prefix "{0}" is ambiguous"#)]
    AmbiguousIdPrefix(String),
}
86
87pub fn resolve_op_for_load(
89 repo_loader: &RepoLoader,
90 op_str: &str,
91) -> Result<Operation, OpsetEvaluationError> {
92 let op_store = repo_loader.op_store();
93 let op_heads_store = repo_loader.op_heads_store().as_ref();
94 let get_current_op = || {
95 op_heads_store::resolve_op_heads(op_heads_store, op_store, |op_heads| {
96 Err(OpsetResolutionError::MultipleOperations {
97 expr: "@".to_owned(),
98 candidates: op_heads.iter().map(|op| op.id().clone()).collect(),
99 }
100 .into())
101 })
102 };
103 let get_head_ops = || get_current_head_ops(op_store, op_heads_store);
104 resolve_single_op(op_store, get_current_op, get_head_ops, op_str)
105}
106
107pub fn resolve_op_with_repo(
111 repo: &ReadonlyRepo,
112 op_str: &str,
113) -> Result<Operation, OpsetEvaluationError> {
114 resolve_op_at(repo.op_store(), slice::from_ref(repo.operation()), op_str)
115}
116
117pub fn resolve_op_at(
119 op_store: &Arc<dyn OpStore>,
120 head_ops: &[Operation],
121 op_str: &str,
122) -> Result<Operation, OpsetEvaluationError> {
123 let get_current_op = || match head_ops {
124 [head_op] => Ok(head_op.clone()),
125 [] => Err(OpsetResolutionError::EmptyOperations("@".to_owned()).into()),
126 _ => Err(OpsetResolutionError::MultipleOperations {
127 expr: "@".to_owned(),
128 candidates: head_ops.iter().map(|op| op.id().clone()).collect(),
129 }
130 .into()),
131 };
132 let get_head_ops = || Ok(head_ops.to_vec());
133 resolve_single_op(op_store, get_current_op, get_head_ops, op_str)
134}
135
136fn resolve_single_op(
139 op_store: &Arc<dyn OpStore>,
140 get_current_op: impl FnOnce() -> Result<Operation, OpsetEvaluationError>,
141 get_head_ops: impl FnOnce() -> Result<Vec<Operation>, OpsetEvaluationError>,
142 op_str: &str,
143) -> Result<Operation, OpsetEvaluationError> {
144 let op_symbol = op_str.trim_end_matches(['-', '+']);
145 let op_postfix = &op_str[op_symbol.len()..];
146 let head_ops = op_postfix.contains('+').then(get_head_ops).transpose()?;
147 let mut operation = match op_symbol {
148 "@" => get_current_op(),
149 s => resolve_single_op_from_store(op_store, s),
150 }?;
151 for (i, c) in op_postfix.chars().enumerate() {
152 let mut neighbor_ops = match c {
153 '-' => operation.parents().try_collect()?,
154 '+' => find_child_ops(head_ops.as_ref().unwrap(), operation.id())?,
155 _ => unreachable!(),
156 };
157 operation = match neighbor_ops.len() {
158 0 => Err(OpsetResolutionError::EmptyOperations(op_str.to_owned()))?,
167 1 => neighbor_ops.pop().unwrap(),
168 _ => Err(OpsetResolutionError::MultipleOperations {
171 expr: op_str[..=op_symbol.len() + i].to_owned(),
172 candidates: neighbor_ops.iter().map(|op| op.id().clone()).collect(),
173 })?,
174 };
175 }
176 Ok(operation)
177}
178
179fn resolve_single_op_from_store(
180 op_store: &Arc<dyn OpStore>,
181 op_str: &str,
182) -> Result<Operation, OpsetEvaluationError> {
183 if op_str.is_empty() {
184 return Err(OpsetResolutionError::InvalidIdPrefix(op_str.to_owned()).into());
185 }
186 let prefix = HexPrefix::new(op_str)
187 .ok_or_else(|| OpsetResolutionError::InvalidIdPrefix(op_str.to_owned()))?;
188 match op_store.resolve_operation_id_prefix(&prefix)? {
189 PrefixResolution::NoMatch => {
190 Err(OpsetResolutionError::NoSuchOperation(op_str.to_owned()).into())
191 }
192 PrefixResolution::SingleMatch(op_id) => {
193 let data = op_store.read_operation(&op_id)?;
194 Ok(Operation::new(op_store.clone(), op_id, data))
195 }
196 PrefixResolution::AmbiguousMatch => {
197 Err(OpsetResolutionError::AmbiguousIdPrefix(op_str.to_owned()).into())
198 }
199 }
200}
201
202pub fn get_current_head_ops(
205 op_store: &Arc<dyn OpStore>,
206 op_heads_store: &dyn OpHeadsStore,
207) -> Result<Vec<Operation>, OpsetEvaluationError> {
208 let mut head_ops: Vec<_> = op_heads_store
209 .get_op_heads()?
210 .into_iter()
211 .map(|id| -> OpStoreResult<Operation> {
212 let data = op_store.read_operation(&id)?;
213 Ok(Operation::new(op_store.clone(), id, data))
214 })
215 .try_collect()?;
216 head_ops.sort_by_key(|op| op.metadata().end_time.timestamp);
218 Ok(head_ops)
219}
220
221fn find_child_ops(
226 head_ops: &[Operation],
227 root_op_id: &OperationId,
228) -> OpStoreResult<Vec<Operation>> {
229 walk_ancestors(head_ops)
230 .take_while(|res| res.as_ref().map_or(true, |op| op.id() != root_op_id))
231 .filter_ok(|op| op.parent_ids().iter().any(|id| id == root_op_id))
232 .try_collect()
233}
234
/// `Operation` newtype ordered primarily by the metadata end time (see the
/// `Ord` impl below); used to bias the ancestor walk toward newer ops.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct OperationByEndTime(Operation);
237
238impl Ord for OperationByEndTime {
239 fn cmp(&self, other: &Self) -> Ordering {
240 let self_end_time = &self.0.metadata().end_time;
241 let other_end_time = &other.0.metadata().end_time;
242 self_end_time
243 .cmp(other_end_time)
244 .then_with(|| self.0.cmp(&other.0)) }
246}
247
impl PartialOrd for OperationByEndTime {
    // Canonical delegation to `Ord` for a totally ordered type.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
253
254pub fn walk_ancestors(
256 head_ops: &[Operation],
257) -> impl Iterator<Item = OpStoreResult<Operation>> + use<> {
258 let mut head_ops = head_ops
260 .iter()
261 .cloned()
262 .map(OperationByEndTime)
263 .collect_vec();
264 head_ops.sort_unstable_by(|op1, op2| op1.cmp(op2).reverse());
265 dag_walk::topo_order_reverse_lazy_ok(
268 head_ops.into_iter().map(Ok),
269 |OperationByEndTime(op)| op.id().clone(),
270 |OperationByEndTime(op)| op.parents().map_ok(OperationByEndTime).collect_vec(),
271 )
272 .map_ok(|OperationByEndTime(op)| op)
273}
274
/// Summary statistics returned by `reparent_range()`.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ReparentStats {
    /// IDs of the head operations after reparenting (rewritten heads, or the
    /// destination itself when a head was not rewritten).
    pub new_head_ids: Vec<OperationId>,
    /// Number of operations that were rewritten onto the new parent.
    pub rewritten_count: usize,
    /// Number of ancestor operations of the roots that are not reachable
    /// from the destination operation.
    pub unreachable_count: usize,
}
286
/// Rewrites the operations in the range `root_ops..head_ops` onto `dest_op`
/// as their new ancestor, writing the rewritten operations to `op_store`.
///
/// Ancestors of `root_ops` are excluded from the rewrite; those of them not
/// reachable from `dest_op` are counted as unreachable in the returned
/// stats. Panics if the range would require rewriting the root operation.
pub fn reparent_range(
    op_store: &dyn OpStore,
    root_ops: &[Operation],
    head_ops: &[Operation],
    dest_op: &Operation,
) -> OpStoreResult<ReparentStats> {
    // IDs we must NOT rewrite: everything at or below the roots.
    let mut unwanted_ids: HashSet<_> = walk_ancestors(root_ops)
        .map_ok(|op| op.id().clone())
        .try_collect()?;
    // Reverse topological order: children before parents.
    let ops_to_reparent: Vec<_> = walk_ancestors(head_ops)
        .filter_ok(|op| !unwanted_ids.contains(op.id()))
        .try_collect()?;
    // Whatever remains after removing dest's ancestors is unreachable from
    // the new history.
    for op in walk_ancestors(slice::from_ref(dest_op)) {
        unwanted_ids.remove(op?.id());
    }
    let unreachable_ids = unwanted_ids;

    // The last element of the reverse-topo list is the oldest op in the
    // range; it must not be the immutable root operation.
    assert!(
        ops_to_reparent
            .last()
            .is_none_or(|op| op.id() != op_store.root_operation_id()),
        "root operation cannot be rewritten"
    );
    let mut rewritten_ids = HashMap::new();
    // Rewrite oldest-first so each op's parents are already rewritten.
    for old_op in ops_to_reparent.into_iter().rev() {
        let mut data = old_op.store_operation().clone();
        let mut dest_once = Some(dest_op.id());
        // Map each parent to its rewritten ID; the first parent outside the
        // rewritten set is replaced by dest_op (at most once — further
        // unrewritten parents are dropped via `take()` yielding None).
        data.parents = data
            .parents
            .iter()
            .filter_map(|id| rewritten_ids.get(id).or_else(|| dest_once.take()))
            .cloned()
            .collect();
        let new_id = op_store.write_operation(&data)?;
        rewritten_ids.insert(old_op.id().clone(), new_id);
    }

    // Same substitution for the heads: rewritten ID if any, else dest_op
    // itself (used at most once).
    let mut dest_once = Some(dest_op.id());
    let new_head_ids = head_ops
        .iter()
        .filter_map(|op| rewritten_ids.get(op.id()).or_else(|| dest_once.take()))
        .cloned()
        .collect();
    Ok(ReparentStats {
        new_head_ids,
        rewritten_count: rewritten_ids.len(),
        unreachable_count: unreachable_ids.len(),
    })
}