// mnem_core/repo/readonly.rs
1//! [`ReadonlyRepo`] - a repository view pinned to a single `OperationId`.
2//!
3//! Cheap to clone (every field is `Arc`-wrapped). Loaned / cloned into a
4//! [`Transaction`] via [`ReadonlyRepo::start_transaction`]; after a
5//! `Transaction::commit`, a new `ReadonlyRepo` pinned to the next op is
6//! returned.
7//!
8//! [`Transaction`]: crate::repo::transaction::Transaction
9
10use std::sync::Arc;
11use std::time::{SystemTime, UNIX_EPOCH};
12
13use crate::codec::{from_canonical_bytes, hash_to_cid};
14use crate::error::{Error, RepoError, StoreError};
15use crate::id::{Cid, NodeId};
16use crate::objects::node::Embedding;
17use crate::objects::{Commit, Edge, EmbeddingBucket, Node, Operation, RefTarget, View};
18use crate::prolly::{self, ProllyKey};
19use crate::store::{Blockstore, OpHeadsStore};
20
21use super::transaction::Transaction;
22
/// Current microseconds since Unix epoch. Used throughout the repo
/// layer for timestamps on new Operations.
///
/// A clock set before the epoch degrades to `0` rather than panicking
/// (`unwrap_or_default` on the failed `duration_since`).
pub(crate) fn now_micros() -> u64 {
    let since_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default();
    // u128 -> u64 truncation is safe for any realistic wall-clock value.
    since_epoch.as_micros() as u64
}
31
/// A view of the repository pinned to a single `OperationId`.
///
/// `ReadonlyRepo` does not mutate state. To make changes, call
/// [`start_transaction`] and then [`Transaction::commit`] - which returns
/// a fresh `ReadonlyRepo` pinned to the new op.
///
/// All fields are behind `Arc`, so `clone()` is cheap. Sharing across
/// threads is safe.
///
/// [`start_transaction`]: ReadonlyRepo::start_transaction
#[derive(Clone)]
pub struct ReadonlyRepo {
    /// Content-addressed storage for every object this repo reads or
    /// writes (Views, Operations, Commits, Nodes, Prolly pages, ...).
    pub(crate) blockstore: Arc<dyn Blockstore>,
    /// Mutable "current op-heads" register - the only piece of state
    /// that is not content-addressed.
    pub(crate) op_heads: Arc<dyn OpHeadsStore>,
    /// CID of the Operation this view is pinned to.
    pub(crate) op_id: Cid,
    /// The decoded Operation identified by `op_id`.
    pub(crate) op: Arc<Operation>,
    /// The View snapshotted by `op` (heads, refs, tombstones).
    pub(crate) view: Arc<View>,
    /// Head commit of the current view. `None` for a freshly-initialized
    /// repository (root-View exception, SPEC §4.6 / §7.5).
    pub(crate) commit: Option<Arc<Commit>>,
}
53
54impl ReadonlyRepo {
55 // ---------------- Construction ----------------
56
57 /// Initialize a fresh repository per SPEC §7.5.
58 ///
59 /// Writes one root View (empty heads, empty refs) and one root
60 /// Operation into the blockstore, registers the op as the sole
61 /// op-head, and returns a `ReadonlyRepo` pinned to that op.
62 ///
63 /// # Errors
64 ///
65 /// Returns a store or codec error if blockstore writes fail.
66 pub fn init(
67 blockstore: Arc<dyn Blockstore>,
68 op_heads: Arc<dyn OpHeadsStore>,
69 ) -> Result<Self, Error> {
70 let now = now_micros();
71
72 // Root view: empty heads, empty refs (SPEC §7.5).
73 let root_view = View::new();
74 let (view_bytes, view_cid) = hash_to_cid(&root_view)?;
75 blockstore.put(view_cid.clone(), view_bytes)?;
76
77 // Root operation: parents=[], description="init".
78 let root_op = Operation::new(view_cid, "", now, "init");
79 let (op_bytes, op_cid) = hash_to_cid(&root_op)?;
80 blockstore.put(op_cid.clone(), op_bytes)?;
81
82 // Advance op-heads: root_op with no supersedes.
83 op_heads.update(op_cid.clone(), &[])?;
84
85 Ok(Self {
86 blockstore,
87 op_heads,
88 op_id: op_cid,
89 op: Arc::new(root_op),
90 view: Arc::new(root_view),
91 commit: None,
92 })
93 }
94
95 /// Open an existing repository pinned to the current op-head.
96 ///
97 /// If the op-heads store has more than one current head (concurrent
98 /// writers landed against the same base), the 3-way merge from
99 /// [`crate::repo::merge`] runs transparently: it finds the op-DAG
100 /// common ancestor, 3-way merges each head's View (emitting
101 /// [`RefTarget::Conflicted`] for divergent refs), writes a synthetic
102 /// merge Operation, and advances op-heads. The returned
103 /// `ReadonlyRepo` is pinned to that merge op.
104 ///
105 /// # Errors
106 ///
107 /// - [`RepoError::Uninitialized`] if the op-heads store is empty
108 /// - call [`ReadonlyRepo::init`] first.
109 /// - [`RepoError::NoCommonAncestor`] if the op-DAG is malformed.
110 /// - Store / codec errors if loading objects fails.
111 pub fn open(
112 blockstore: Arc<dyn Blockstore>,
113 op_heads: Arc<dyn OpHeadsStore>,
114 ) -> Result<Self, Error> {
115 let heads = op_heads.current()?;
116 match heads.len() {
117 0 => Err(RepoError::Uninitialized.into()),
118 1 => Self::load_at(blockstore, op_heads, heads.into_iter().next().unwrap()),
119 _ => {
120 let merge_cid = super::merge::merge_op_heads(&blockstore, &op_heads, heads)?;
121 Self::load_at(blockstore, op_heads, merge_cid)
122 }
123 }
124 }
125
126 /// Load a repository view pinned to a specific `OperationId`.
127 ///
128 /// Does not consult the op-heads store. Used internally by
129 /// [`open`] and [`Transaction::commit`].
130 ///
131 /// # Errors
132 ///
133 /// Store / codec errors if loading objects fails.
134 ///
135 /// [`open`]: Self::open
136 /// [`Transaction::commit`]: crate::repo::Transaction::commit
137 pub fn load_at(
138 blockstore: Arc<dyn Blockstore>,
139 op_heads: Arc<dyn OpHeadsStore>,
140 op_id: Cid,
141 ) -> Result<Self, Error> {
142 let op: Operation = decode_from_store(&*blockstore, &op_id)?;
143 let view: View = decode_from_store(&*blockstore, &op.view)?;
144 let commit = if let Some(head) = view.heads.first() {
145 let c: Commit = decode_from_store(&*blockstore, head)?;
146 Some(Arc::new(c))
147 } else {
148 None
149 };
150 Ok(Self {
151 blockstore,
152 op_heads,
153 op_id,
154 op: Arc::new(op),
155 view: Arc::new(view),
156 commit,
157 })
158 }
159
160 // ---------------- Accessors ----------------
161
    /// The CID of the Operation this view is pinned to.
    ///
    /// Stable for the lifetime of this value; committing a transaction
    /// yields a *new* `ReadonlyRepo` with a new id.
    #[must_use]
    pub const fn op_id(&self) -> &Cid {
        &self.op_id
    }
167
    /// The Operation this view is pinned to (already decoded; no
    /// blockstore access).
    #[must_use]
    pub fn operation(&self) -> &Operation {
        &self.op
    }
173
    /// The View snapshotted by the current Operation (heads, refs,
    /// tombstones).
    #[must_use]
    pub fn view(&self) -> &View {
        &self.view
    }
179
    /// The head Commit of the current view. `None` on a freshly-
    /// initialized repository that hasn't yet received any commits.
    ///
    /// Borrows through the internal `Arc`; clone only if you need
    /// ownership.
    #[must_use]
    pub fn head_commit(&self) -> Option<&Commit> {
        self.commit.as_deref()
    }
186
    /// Access the underlying blockstore (borrowed `Arc`; clone the
    /// `Arc` to share it).
    #[must_use]
    pub fn blockstore(&self) -> &Arc<dyn Blockstore> {
        &self.blockstore
    }
192
    /// Access the underlying op-heads store (borrowed `Arc`; clone the
    /// `Arc` to share it).
    #[must_use]
    pub fn op_heads_store(&self) -> &Arc<dyn OpHeadsStore> {
        &self.op_heads
    }
198
199 // ---------------- Read operations ----------------
200
201 /// Look up a node by its stable [`NodeId`] in the current commit's
202 /// node tree. Returns `None` if absent or if the repository has no
203 /// commits yet.
204 ///
205 /// # Errors
206 ///
207 /// Store or codec errors while walking the Prolly tree.
208 pub fn lookup_node(&self, id: &NodeId) -> Result<Option<Node>, Error> {
209 let Some(commit) = self.commit.as_ref() else {
210 return Ok(None);
211 };
212 let key = ProllyKey::from(*id);
213 match prolly::lookup(&*self.blockstore, &commit.nodes, &key)? {
214 Some(node_cid) => {
215 let node: Node = decode_from_store(&*self.blockstore, &node_cid)?;
216 Ok(Some(node))
217 }
218 None => Ok(None),
219 }
220 }
221
222 /// Look up the embedding for a node by its content-addressed
223 /// `NodeCid` and a model identifier, walking the
224 /// [`Commit::embeddings`](crate::objects::Commit::embeddings)
225 /// Prolly sidecar. Returns `None` when:
226 ///
227 /// - the repo has no commits yet,
228 /// - the head commit has no embedding sidecar (`embeddings = None`),
229 /// - the sidecar tree has no entry for this `NodeCid`, or
230 /// - the bucket exists but does not carry a vector under the
231 /// requested `model` string.
232 ///
233 /// The Prolly key is derived via the same helper
234 /// (`embedding_key_for_node_cid`) the write side uses, so a
235 /// `Transaction::set_embedding` write and a subsequent
236 /// `embedding_for` read are guaranteed to agree on the bucket
237 /// location.
238 ///
239 /// # Why not on `Node`?
240 ///
241 /// The same trade documented on
242 /// [`Commit::embeddings`](crate::objects::Commit::embeddings):
243 /// dense vector bytes drift in the last bit across ORT thread
244 /// counts, so storing them on the `Node` would couple `NodeCid`
245 /// to thread count. The sidecar separates identity (Node) from
246 /// derived bytes (Embedding) so `NodeCid` stays stable.
247 ///
248 /// # Errors
249 ///
250 /// Store or codec errors while walking the Prolly tree or
251 /// decoding the bucket. A missing key is `Ok(None)`, not an error.
252 pub fn embedding_for(&self, node_cid: &Cid, model: &str) -> Result<Option<Embedding>, Error> {
253 let Some(commit) = self.commit.as_ref() else {
254 return Ok(None);
255 };
256 let Some(embeddings_root) = commit.embeddings.as_ref() else {
257 return Ok(None);
258 };
259 let key = super::transaction::embedding_key_for_node_cid(node_cid);
260 let Some(bucket_cid) = prolly::lookup(&*self.blockstore, embeddings_root, &key)? else {
261 return Ok(None);
262 };
263 let bucket: EmbeddingBucket = decode_from_store(&*self.blockstore, &bucket_cid)?;
264 Ok(bucket.get(model).cloned())
265 }
266
267 /// All outgoing edges from `src` in the current commit, optionally
268 /// filtered by edge-type label. Returns an empty vec if the node
269 /// has no adjacency bucket (no authored out-edges), or if the repo
270 /// has no commits yet.
271 ///
272 /// Used by graph-aware retrieval (`Retriever::with_graph_expand`)
273 /// to expand a seed set via 1-hop neighborhood traversal.
274 ///
275 /// # Errors
276 ///
277 /// Store or codec errors while walking the adjacency index or
278 /// decoding Edge blocks.
279 pub fn outgoing_edges(
280 &self,
281 src: &NodeId,
282 etype_filter: Option<&[&str]>,
283 ) -> Result<Vec<Edge>, Error> {
284 let Some(commit) = self.commit.as_ref() else {
285 return Ok(Vec::new());
286 };
287 let Some(indexes_cid) = commit.indexes.as_ref() else {
288 return Ok(Vec::new());
289 };
290 let indexes: crate::objects::IndexSet = decode_from_store(&*self.blockstore, indexes_cid)?;
291 let Some(adj_root) = &indexes.outgoing else {
292 return Ok(Vec::new());
293 };
294 let key = ProllyKey::from(*src);
295 let Some(bucket_cid) = prolly::lookup(&*self.blockstore, adj_root, &key)? else {
296 return Ok(Vec::new());
297 };
298 let bucket: crate::objects::AdjacencyBucket =
299 decode_from_store(&*self.blockstore, &bucket_cid)?;
300 let mut out = Vec::new();
301 for ae in &bucket.edges {
302 if let Some(want) = etype_filter
303 && !want.contains(&ae.label.as_str())
304 {
305 continue;
306 }
307 let edge: Edge = decode_from_store(&*self.blockstore, &ae.edge)?;
308 out.push(edge);
309 }
310 Ok(out)
311 }
312
313 /// All incoming edges pointing at `dst` in the current commit,
314 /// optionally filtered by edge-type label. Returns an empty vec if
315 /// the node has no incoming-adjacency bucket, if the commit's
316 /// `IndexSet` has no `incoming` tree (pre-0.3 repos), or if the
317 /// repo has no commits yet.
318 ///
319 /// Symmetric mirror of [`Self::outgoing_edges`]. Use this from
320 /// agent-side callers that want "who points at this node" without
321 /// constructing a full [`crate::index::Query`].
322 ///
323 /// # Errors
324 ///
325 /// Store or codec errors while walking the incoming-adjacency
326 /// index or decoding Edge blocks.
327 pub fn incoming_edges(
328 &self,
329 dst: &NodeId,
330 etype_filter: Option<&[&str]>,
331 ) -> Result<Vec<Edge>, Error> {
332 self.incoming_edges_capped(
333 dst,
334 etype_filter,
335 crate::index::Query::DEFAULT_ADJACENCY_CAP,
336 )
337 }
338
339 /// Explicit-cap variant of [`Self::incoming_edges`]. Use this
340 /// when a caller is prepared to handle truncation (e.g. an MCP
341 /// tool that streams the bucket and renders its own
342 /// "clipped at N" marker). Default [`Self::incoming_edges`]
343 /// applies [`crate::index::Query::DEFAULT_ADJACENCY_CAP`] so a single
344 /// high-fan-in dst can't `DoS` the agent-side caller.
345 ///
346 /// # Errors
347 ///
348 /// Store or codec errors while walking the incoming-adjacency
349 /// index or decoding Edge blocks.
350 pub fn incoming_edges_capped(
351 &self,
352 dst: &NodeId,
353 etype_filter: Option<&[&str]>,
354 cap: usize,
355 ) -> Result<Vec<Edge>, Error> {
356 let Some(commit) = self.commit.as_ref() else {
357 return Ok(Vec::new());
358 };
359 let Some(indexes_cid) = commit.indexes.as_ref() else {
360 return Ok(Vec::new());
361 };
362 let indexes: crate::objects::IndexSet = decode_from_store(&*self.blockstore, indexes_cid)?;
363 let Some(inc_root) = &indexes.incoming else {
364 return Ok(Vec::new());
365 };
366 let key = ProllyKey::from(*dst);
367 let Some(bucket_cid) = prolly::lookup(&*self.blockstore, inc_root, &key)? else {
368 return Ok(Vec::new());
369 };
370 let bucket: crate::objects::IncomingAdjacencyBucket =
371 decode_from_store(&*self.blockstore, &bucket_cid)?;
372 let mut out = Vec::with_capacity(bucket.edges.len().min(cap));
373 for ae in &bucket.edges {
374 if out.len() >= cap {
375 break;
376 }
377 if let Some(want) = etype_filter
378 && !want.contains(&ae.label.as_str())
379 {
380 continue;
381 }
382 let edge: Edge = decode_from_store(&*self.blockstore, &ae.edge)?;
383 out.push(edge);
384 }
385 Ok(out)
386 }
387
    /// Whether `id` is listed in the current View's tombstone map.
    ///
    /// `true` means a prior commit on this view recorded a
    /// [`Tombstone`](crate::objects::Tombstone) against the node -
    /// retrieval paths filter it out by default. The underlying Node
    /// block may still exist in the node Prolly tree and remains
    /// addressable by CID; only the "show this to an agent" decision
    /// changes.
    ///
    /// Pure map lookup on the in-memory View; no blockstore access.
    #[must_use]
    pub fn is_tombstoned(&self, id: &NodeId) -> bool {
        self.view.tombstones.contains_key(id)
    }
400
    /// Fetch the tombstone record for `id`, if any. Pure map lookup on
    /// the in-memory View; no blockstore access.
    #[must_use]
    pub fn tombstone_for(&self, id: &NodeId) -> Option<&crate::objects::Tombstone> {
        self.view.tombstones.get(id)
    }
406
407 // ---------------- Mutation entrypoint ----------------
408
    /// Start a transaction. The returned [`Transaction`] holds a cheap
    /// clone of the current repo state; multiple transactions can be
    /// started concurrently but only the first to commit wins (subsequent
    /// commits against stale heads will land on a concurrent op-head in
    /// M8.5's merge model).
    #[must_use]
    pub fn start_transaction(&self) -> Transaction {
        // Cheap: every field is Arc-wrapped, so cloning is refcount bumps.
        Transaction::new(self.clone())
    }
418
419 // ---------------- Query entrypoint ----------------
420
    /// Convenience: `Query::new(self)`. One-liner entry point for the
    /// agent-facing retrieval API. The returned query borrows `self`
    /// for its lifetime.
    ///
    /// ```no_run
    /// # use mnem_core::repo::ReadonlyRepo;
    /// # fn demo(repo: &ReadonlyRepo) -> Result<(), Box<dyn std::error::Error>> {
    /// let hits = repo.query().label("Person").where_eq("name", "Alice").execute()?;
    /// # Ok(()) }
    /// ```
    #[must_use]
    pub const fn query(&self) -> crate::index::Query<'_> {
        crate::index::Query::new(self)
    }
434
    /// Build a full-corpus vector index over every node whose
    /// [`crate::objects::Embedding::model`] equals `model`. Dimensions
    /// are inferred from the first matching embedding; subsequent
    /// embeddings with a different dim are silently skipped.
    ///
    /// Each index binds to a single `(model, dim)` - agents who use
    /// multiple embedding models build one index per model.
    ///
    /// # Errors
    ///
    /// - [`RepoError::Uninitialized`] if the repo has no head commit.
    /// - Store / codec errors from walking the node Prolly tree.
    /// - [`crate::error::ObjectError::EmbeddingSizeMismatch`] on a
    ///   corrupted embedding (vector length disagrees with
    ///   `dim * bytes_per_dtype`).
    pub fn build_vector_index(
        &self,
        model: &str,
    ) -> Result<crate::index::BruteForceVectorIndex, Error> {
        // Thin delegation: the index module owns the corpus scan.
        crate::index::BruteForceVectorIndex::build_from_repo(self, model)
    }
456
    /// Start an agent-facing retrieval builder that composes the
    /// structured query, dense vector similarity, and learned-sparse
    /// retrieval under a token budget. See [`crate::retrieve`] for the
    /// full model. The returned builder borrows `self` for its lifetime.
    ///
    /// ```no_run
    /// # use mnem_core::repo::ReadonlyRepo;
    /// # fn demo(repo: &ReadonlyRepo, embedding: Vec<f32>) -> Result<(), Box<dyn std::error::Error>> {
    /// let result = repo
    ///     .retrieve()
    ///     .label("Document")
    ///     .vector("openai:text-embedding-3-small", embedding)
    ///     .token_budget(2000)
    ///     .execute()?;
    /// # Ok(()) }
    /// ```
    #[must_use]
    pub fn retrieve(&self) -> crate::retrieve::Retriever<'_> {
        crate::retrieve::Retriever::new(self)
    }
477
478 // ---------------- Compare-and-swap on refs (SPEC §6.4) ----------------
479
480 /// Atomically update a named ref, subject to an expected-previous
481 /// check (SPEC §6.4).
482 ///
483 /// Semantics:
484 ///
485 /// 1. Read the current value of `name` in the current view's `refs`.
486 /// 2. If the current value does not `==`-compare to `expected_prev`
487 /// (structurally equal, not byte-exact - our `RefTarget` derives
488 /// `PartialEq` and constructs canonical form), return
489 /// [`RepoError::Stale`].
490 /// 3. Otherwise, build a new View with the ref updated (insert if
491 /// `new` is `Some`, remove if `new` is `None`), a new Operation
492 /// wrapping it, advance op-heads, and return a fresh repo.
493 ///
494 /// Per SPEC §6.4, CAS guarantees **no lost update** - two
495 /// concurrent CAS attempts against the same base both succeed at
496 /// the op-log layer, and the next read sees a conflicted refs state.
497 /// For **exactly-one-winner** semantics, combine with
498 /// [`Transaction::commit_opts`]'s `linearize: true` or with an
499 /// out-of-process coordinator.
500 ///
501 /// # Errors
502 ///
503 /// - [`RepoError::Stale`] on mismatch with `expected_prev`.
504 /// - Codec / store errors on write.
505 pub fn update_ref(
506 &self,
507 name: &str,
508 expected_prev: Option<&RefTarget>,
509 new: Option<RefTarget>,
510 author: &str,
511 ) -> Result<Self, Error> {
512 let current = self.view.refs.get(name);
513 if current != expected_prev {
514 return Err(RepoError::Stale.into());
515 }
516
517 let bs = self.blockstore.clone();
518 let ohs = self.op_heads.clone();
519
520 // Build the new View.
521 let mut new_view: View = (*self.view).clone();
522 match new {
523 Some(target) => {
524 new_view.refs.insert(name.to_string(), target);
525 }
526 None => {
527 new_view.refs.remove(name);
528 }
529 }
530 let (view_bytes, view_cid) = hash_to_cid(&new_view)?;
531 bs.put(view_cid.clone(), view_bytes)?;
532
533 // Build the new Operation wrapping the new view.
534 let op = Operation::new(
535 view_cid,
536 author,
537 now_micros(),
538 format!("update_ref: {name}"),
539 )
540 .with_parent(self.op_id.clone());
541 let (op_bytes, op_cid) = hash_to_cid(&op)?;
542 bs.put(op_cid.clone(), op_bytes)?;
543
544 // Advance op-heads.
545 ohs.update(op_cid.clone(), std::slice::from_ref(&self.op_id))?;
546
547 Self::load_at(bs, ohs, op_cid)
548 }
549
550 // Remote-v0 insertion point: `update_remote_ref(remote_name,
551 // ref_name, target) -> Result<Self, Error>` will mutate
552 // `View.remote_refs[remote][ref]` atomically (same
553 // Operation-wrapping pattern as `update_ref` above). Called by
554 // the `mnem fetch` path after a successful
555 // `GET /remote/v1/refs` + `POST /remote/v1/fetch-blocks` round.
556 // Must NOT mutate `View.refs` (local heads stay untouched until
557 // `mnem pull` merges). See
558 // `docs/ROADMAP.md#remote-v0-work-items-tracked-inline-in-src`
    // item 3.
560}
561
562/// Helper: fetch and decode a typed object from a blockstore.
563pub(crate) fn decode_from_store<T, B>(store: &B, cid: &Cid) -> Result<T, Error>
564where
565 B: Blockstore + ?Sized,
566 T: serde::de::DeserializeOwned,
567{
568 let bytes = store
569 .get(cid)?
570 .ok_or_else(|| StoreError::NotFound { cid: cid.clone() })?;
571 Ok(from_canonical_bytes(&bytes)?)
572}
573
// Manual Debug: keep output compact (op id + head summary + commit
// presence) instead of dumping the full decoded Operation/View/Commit.
impl std::fmt::Debug for ReadonlyRepo {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ReadonlyRepo")
            .field("op_id", &self.op_id)
            .field("heads", &self.view.heads)
            .field("has_commit", &self.commit.is_some())
            .finish()
    }
}
583
#[cfg(test)]
mod tests {
    use super::*;
    use crate::id::{CODEC_RAW, Multihash};
    use crate::store::{MemoryBlockstore, MemoryOpHeadsStore};

    /// Fresh pair of in-memory stores for an isolated test repo.
    fn stores() -> (Arc<dyn Blockstore>, Arc<dyn OpHeadsStore>) {
        (
            Arc::new(MemoryBlockstore::new()),
            Arc::new(MemoryOpHeadsStore::new()),
        )
    }

    /// Deterministic raw-codec CID derived from `seed` - used as an
    /// arbitrary ref target; it does not resolve to a stored object.
    fn raw_cid(seed: u32) -> Cid {
        Cid::new(CODEC_RAW, Multihash::sha2_256(&seed.to_be_bytes()))
    }

    #[test]
    fn init_creates_a_valid_root() {
        let (bs, ohs) = stores();
        let repo = ReadonlyRepo::init(bs.clone(), ohs.clone()).unwrap();
        // Root repo: no head commit, empty heads, exactly one op-head
        // that equals the pinned op id.
        assert!(repo.head_commit().is_none());
        assert!(repo.view().heads.is_empty());
        assert_eq!(ohs.current().unwrap().len(), 1);
        assert_eq!(ohs.current().unwrap()[0], *repo.op_id());
    }

    #[test]
    fn open_on_uninitialized_errors() {
        let (bs, ohs) = stores();
        let err = ReadonlyRepo::open(bs, ohs).unwrap_err();
        match err {
            Error::Repo(RepoError::Uninitialized) => {}
            e => panic!("unexpected variant: {e:?}"),
        }
    }

    #[test]
    fn open_after_init_returns_the_same_op() {
        let (bs, ohs) = stores();
        let first = ReadonlyRepo::init(bs.clone(), ohs.clone()).unwrap();
        let second = ReadonlyRepo::open(bs, ohs).unwrap();
        // Single op-head: open() pins to it without synthesizing a merge.
        assert_eq!(first.op_id(), second.op_id());
    }

    #[test]
    fn update_ref_creates_new_ref() {
        let (bs, ohs) = stores();
        let repo = ReadonlyRepo::init(bs, ohs).unwrap();
        let target = RefTarget::normal(raw_cid(1));
        // expected_prev = None: the ref must not exist yet for CAS to pass.
        let r1 = repo
            .update_ref("refs/heads/main", None, Some(target.clone()), "alice")
            .unwrap();
        assert_eq!(r1.view().refs.get("refs/heads/main"), Some(&target));
    }

    #[test]
    fn update_ref_returns_stale_on_expected_mismatch() {
        let (bs, ohs) = stores();
        let repo = ReadonlyRepo::init(bs, ohs).unwrap();
        // Ref doesn't exist yet, but we claim it was at some CID.
        let stale = RefTarget::normal(raw_cid(99));
        let err = repo
            .update_ref("refs/heads/main", Some(&stale), None, "alice")
            .unwrap_err();
        assert!(matches!(err, Error::Repo(RepoError::Stale)));
    }

    #[test]
    fn update_ref_cas_sequence_then_delete() {
        let (bs, ohs) = stores();
        let repo = ReadonlyRepo::init(bs, ohs).unwrap();
        let v1 = RefTarget::normal(raw_cid(1));
        let v2 = RefTarget::normal(raw_cid(2));

        // create -> replace -> delete, each step CAS-ing on the previous.
        let r1 = repo
            .update_ref("refs/heads/x", None, Some(v1.clone()), "a")
            .unwrap();
        let r2 = r1
            .update_ref("refs/heads/x", Some(&v1), Some(v2.clone()), "a")
            .unwrap();
        assert_eq!(r2.view().refs.get("refs/heads/x"), Some(&v2));

        let r3 = r2.update_ref("refs/heads/x", Some(&v2), None, "a").unwrap();
        assert!(!r3.view().refs.contains_key("refs/heads/x"));
    }
}
670}