// jj_lib/backend.rs
1// Copyright 2020 The Jujutsu Authors
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// https://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15//! Defines the commit backend trait and related types. This is the lowest-level
16//! trait for reading and writing commits, trees, files, etc.
17
18use std::any::Any;
19use std::fmt::Debug;
20use std::pin::Pin;
21use std::slice;
22use std::time::SystemTime;
23
24use async_trait::async_trait;
25use chrono::TimeZone as _;
26use futures::stream::BoxStream;
27use thiserror::Error;
28use tokio::io::AsyncRead;
29
30use crate::content_hash::ContentHash;
31use crate::hex_util;
32use crate::index::Index;
33use crate::merge::Merge;
34use crate::object_id::ObjectId as _;
35use crate::object_id::id_type;
36use crate::repo_path::RepoPath;
37use crate::repo_path::RepoPathBuf;
38use crate::repo_path::RepoPathComponent;
39use crate::repo_path::RepoPathComponentBuf;
40use crate::signing::SignResult;
41
id_type!(
    /// Identifier for a [`Commit`] based on its content. When a commit is
    /// rewritten, its `CommitId` changes.
    pub CommitId { hex() }
);
id_type!(
    /// Stable identifier for a [`Commit`]. Unlike the `CommitId`, the `ChangeId`
    /// follows the commit and is not updated when the commit is rewritten.
    pub ChangeId { reverse_hex() }
);
id_type!(
    /// Identifier for a tree object.
    pub TreeId { hex() }
);
id_type!(
    /// Identifier for a file content.
    pub FileId { hex() }
);
id_type!(
    /// Identifier for a symlink.
    pub SymlinkId { hex() }
);
id_type!(
    /// Identifier for a copy history.
    pub CopyId { hex() }
);
68
69impl ChangeId {
70 /// Parses the given "reverse" hex string into a `ChangeId`.
71 pub fn try_from_reverse_hex(hex: impl AsRef<[u8]>) -> Option<Self> {
72 hex_util::decode_reverse_hex(hex).map(Self)
73 }
74
75 /// Returns the hex string representation of this ID, which uses `z-k`
76 /// "digits" instead of `0-9a-f`.
77 pub fn reverse_hex(&self) -> String {
78 hex_util::encode_reverse_hex(&self.0)
79 }
80}
81
82impl CopyId {
83 /// Returns a placeholder copy id to be used when we don't have a real copy
84 /// id yet.
85 // TODO: Delete this
86 pub fn placeholder() -> Self {
87 Self::new(vec![])
88 }
89}
90
/// Error that may occur when converting a `Timestamp` to a `DateTime`.
#[derive(Debug, Error)]
#[error("Out-of-range date")]
pub struct TimestampOutOfRange;
95
/// The number of milliseconds since the Unix epoch. May be negative for
/// instants before the epoch.
#[derive(ContentHash, Debug, PartialEq, Eq, Clone, Copy, PartialOrd, Ord)]
pub struct MillisSinceEpoch(pub i64);
99
/// A timestamp with millisecond precision and a time zone offset.
#[derive(ContentHash, Debug, PartialEq, Eq, Clone, Copy, PartialOrd, Ord)]
pub struct Timestamp {
    /// The number of milliseconds since the Unix epoch.
    pub timestamp: MillisSinceEpoch,
    /// Timezone offset in minutes relative to UTC.
    pub tz_offset: i32,
}
108
109impl Timestamp {
110 /// Returns the current local time as a `Timestamp`.
111 pub fn now() -> Self {
112 Self::from_datetime(chrono::offset::Local::now())
113 }
114
115 /// Creates a `Timestamp` from the given `DateTime`.
116 pub fn from_datetime<Tz: chrono::TimeZone<Offset = chrono::offset::FixedOffset>>(
117 datetime: chrono::DateTime<Tz>,
118 ) -> Self {
119 Self {
120 timestamp: MillisSinceEpoch(datetime.timestamp_millis()),
121 tz_offset: datetime.offset().local_minus_utc() / 60,
122 }
123 }
124
125 /// Converts this `Timestamp` to a `DateTime`.
126 pub fn to_datetime(
127 &self,
128 ) -> Result<chrono::DateTime<chrono::FixedOffset>, TimestampOutOfRange> {
129 let utc = match chrono::Utc.timestamp_opt(
130 self.timestamp.0.div_euclid(1000),
131 (self.timestamp.0.rem_euclid(1000)) as u32 * 1000000,
132 ) {
133 chrono::LocalResult::None => {
134 return Err(TimestampOutOfRange);
135 }
136 chrono::LocalResult::Single(x) => x,
137 chrono::LocalResult::Ambiguous(y, _z) => y,
138 };
139
140 Ok(utc.with_timezone(
141 &chrono::FixedOffset::east_opt(self.tz_offset * 60)
142 .unwrap_or_else(|| chrono::FixedOffset::east_opt(0).unwrap()),
143 ))
144 }
145}
146
147impl serde::Serialize for Timestamp {
148 fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
149 where
150 S: serde::Serializer,
151 {
152 // TODO: test is_human_readable() to use raw format?
153 let t = self.to_datetime().map_err(serde::ser::Error::custom)?;
154 t.serialize(serializer)
155 }
156}
157
/// Represents a person/entity and a timestamp for when they authored or
/// committed a commit.
#[derive(ContentHash, Debug, PartialEq, Eq, Clone, serde::Serialize)]
pub struct Signature {
    /// The name of the person/entity.
    pub name: String,
    /// The email address of the person/entity.
    pub email: String,
    /// The timestamp for when the person/entity authored or committed the
    /// commit.
    pub timestamp: Timestamp,
}
170
/// Represents a cryptographically signed [`Commit`] signature.
#[derive(ContentHash, Debug, PartialEq, Eq, Clone)]
pub struct SecureSig {
    /// The raw data that was signed to produce this signature.
    pub data: Vec<u8>,
    /// The signature itself.
    pub sig: Vec<u8>,
}
179
/// Function called to sign a commit. The input is the raw data to sign, and the
/// output is the signature (or a signing error via [`SignResult`]).
pub type SigningFn<'a> = dyn FnMut(&[u8]) -> SignResult<Vec<u8>> + Send + 'a;
183
/// Represents a commit object, which contains a reference to the contents at
/// that point in time, along with metadata about the commit.
#[derive(ContentHash, Debug, PartialEq, Eq, Clone, serde::Serialize)]
pub struct Commit {
    /// The parent commits of this commit. Commits typically have one parent,
    /// but they can have any number of parents. Only the root commit has no
    /// parents.
    pub parents: Vec<CommitId>,
    /// The predecessor commits of this commit, i.e. commits that were rewritten
    /// to create this commit.
    //
    // TODO: delete commit.predecessors when we can assume that most commits are
    // tracked by op.commit_predecessors. (in jj 0.42 or so?)
    #[serde(skip)] // deprecated
    pub predecessors: Vec<CommitId>,
    /// The tree at the root directory in this commit.
    #[serde(skip)] // TODO: should be exposed?
    pub root_tree: Merge<TreeId>,
    /// If resolved, must be empty string. Otherwise, must have same number of
    /// terms as `root_tree`.
    #[serde(skip)]
    pub conflict_labels: Merge<String>,
    /// The change ID of this commit. This is a stable identifier that follows
    /// the commit when it's rewritten.
    pub change_id: ChangeId,
    /// The description (commit message).
    pub description: String,
    /// The person/entity that authored this commit.
    pub author: Signature,
    /// The person/entity that committed this commit.
    pub committer: Signature,
    /// A cryptographic signature of this commit.
    #[serde(skip)] // raw data wouldn't be useful
    pub secure_sig: Option<SecureSig>,
}
219
/// An individual copy event, from file A -> B.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct CopyRecord {
    /// The destination of the copy, B.
    pub target: RepoPathBuf,
    /// The CommitId where the copy took place.
    pub target_commit: CommitId,
    /// The source path a target was copied from.
    ///
    /// It is not required that the source path is different than the target
    /// path. A custom backend may choose to represent 'rollbacks' as copies
    /// from a file unto itself, from a specific prior commit.
    pub source: RepoPathBuf,
    /// The file id of the source file.
    pub source_file: FileId,
    /// The source commit the target was copied from. Backends may use this
    /// field to implement 'integration' logic, where a source may be
    /// periodically merged into a target, similar to a branch, but the
    /// branching occurs at the file level rather than the repository level. It
    /// also follows naturally that any copy source targeted to a specific
    /// commit should avoid copy propagation on rebasing, which is desirable
    /// for 'fork' style copies.
    ///
    /// It is required that the commit id is an ancestor of the commit with
    /// which this copy source is associated.
    pub source_commit: CommitId,
}
247
/// Describes the copy history of a file. The copy object is unchanged when a
/// file is modified.
#[derive(ContentHash, Debug, PartialEq, Eq, Clone, PartialOrd, Ord)]
pub struct CopyHistory {
    /// The file's current path.
    pub current_path: RepoPathBuf,
    /// IDs of the files that became the current incarnation of this file.
    ///
    /// A newly created file has no parents. A regular copy or rename has one
    /// parent. A merge of multiple files has multiple parents.
    pub parents: Vec<CopyId>,
    /// An optional piece of data to give the Copy object a different ID. May be
    /// randomly generated. This allows a commit to say that a file was replaced
    /// by a new incarnation of it, indicating a logically distinct file
    /// taking the place of the previous file at the path.
    pub salt: Vec<u8>,
}
265
/// A `CopyHistory` along with its `CopyId`.
#[derive(Debug, Eq, PartialEq)]
pub struct RelatedCopy {
    /// The copy id.
    pub id: CopyId,
    /// The copy history.
    pub history: CopyHistory,
}
274
/// Error that may occur during backend initialization.
#[derive(Debug, Error)]
#[error(transparent)]
pub struct BackendInitError(pub Box<dyn std::error::Error + Send + Sync>);
279
/// Error that may occur during backend loading.
#[derive(Debug, Error)]
#[error(transparent)]
pub struct BackendLoadError(pub Box<dyn std::error::Error + Send + Sync>);
284
/// Commit-backend error that may occur after the backend is loaded.
#[derive(Debug, Error)]
pub enum BackendError {
    /// The caller attempted to read an object by specifying an ID with an
    /// invalid hash length for this backend.
    #[error(
        "Invalid hash length for object of type {object_type} (expected {expected} bytes, got \
         {actual} bytes): {hash}"
    )]
    InvalidHashLength {
        /// The expected length of the hash in bytes for this backend.
        expected: usize,
        /// The actual length of the hash in bytes that was provided.
        actual: usize,
        /// The type of the object that we attempted to read, e.g. "commit" or
        /// "tree".
        object_type: String,
        /// The hex hash that had an invalid length.
        hash: String,
    },
    /// The caller attempted to read an object that is internally stored as
    /// invalid UTF-8, such as a symlink target with invalid UTF-8 stored in the
    /// Git backend.
    #[error("Invalid UTF-8 for object {hash} of type {object_type}")]
    InvalidUtf8 {
        /// The type of the object that we attempted to read, e.g. "commit" or
        /// "tree".
        object_type: String,
        /// The hex hash of the object that had invalid UTF-8.
        hash: String,
        /// The source error.
        source: std::str::Utf8Error,
    },
    /// The caller attempted to read an object that doesn't exist.
    #[error("Object {hash} of type {object_type} not found")]
    ObjectNotFound {
        /// The type of the object that we attempted to read, e.g. "commit" or
        /// "tree".
        object_type: String,
        /// The hex hash of the object that was not found.
        hash: String,
        /// The source error.
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// Failed to read an object due to an I/O error or other unexpected error.
    #[error("Error when reading object {hash} of type {object_type}")]
    ReadObject {
        /// The type of the object that we attempted to read, e.g. "commit" or
        /// "tree".
        object_type: String,
        /// The hex hash of the object that we failed to read.
        hash: String,
        /// The source error.
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// The caller attempted to read an object but doesn't have permission to
    /// read it.
    #[error("Access denied to read object {hash} of type {object_type}")]
    ReadAccessDenied {
        /// The type of the object that we attempted to read, e.g. "commit" or
        /// "tree".
        object_type: String,
        /// The hex hash of the object that the caller doesn't have permission
        /// to read.
        hash: String,
        /// The source error.
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// Failed to read a file's content due to an I/O error or other unexpected
    /// error.
    #[error(
        "Error when reading file content for file {path} with id {id}",
        path = path.as_internal_file_string()
    )]
    ReadFile {
        /// The path of the file we failed to read.
        path: RepoPathBuf,
        /// The ID of the file we failed to read.
        id: FileId,
        /// The source error.
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// Failed to write an object due to an I/O error or other unexpected error.
    #[error("Could not write object of type {object_type}")]
    WriteObject {
        /// The type of the object that we attempted to write, e.g. "commit" or
        /// "tree".
        object_type: &'static str,
        /// The source error.
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// Some other error that doesn't fit into the above categories.
    #[error(transparent)]
    Other(Box<dyn std::error::Error + Send + Sync>),
    /// A valid operation was attempted, but it failed because it isn't
    /// supported by the particular backend.
    #[error("{0}")]
    Unsupported(String),
}
384
/// A specialized [`Result`] type for commit backend errors.
pub type BackendResult<T> = Result<T, BackendError>;
387
/// Identifies the content at a given path in a tree.
#[derive(ContentHash, Debug, PartialEq, Eq, Clone, Hash)]
pub enum TreeValue {
    // TODO: When there's a CopyId here, the copy object's path must match
    // the path identified by the tree.
    /// This path is a regular file, possibly executable.
    File {
        /// The file's content ID.
        id: FileId,
        /// Whether the file is executable.
        executable: bool,
        /// The copy id.
        copy_id: CopyId,
    },
    /// This path is a symbolic link.
    Symlink(SymlinkId),
    /// This path is a directory.
    Tree(TreeId),
    /// This path is a Git submodule.
    GitSubmodule(CommitId),
}
409
410impl TreeValue {
411 /// The copy id if this value represents a file.
412 pub fn copy_id(&self) -> Option<&CopyId> {
413 match self {
414 Self::File { copy_id, .. } => Some(copy_id),
415 _ => None,
416 }
417 }
418}
419
/// An entry in a `Tree` consisting of a basename and a `TreeValue`.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct TreeEntry<'a> {
    // Basename of the entry within its parent tree.
    name: &'a RepoPathComponent,
    // The value (file, symlink, subtree, or submodule) at that name.
    value: &'a TreeValue,
}
426
427impl<'a> TreeEntry<'a> {
428 /// Creates a new `TreeEntry` with the given name and value.
429 pub fn new(name: &'a RepoPathComponent, value: &'a TreeValue) -> Self {
430 Self { name, value }
431 }
432
433 /// Returns the basename at this path.
434 pub fn name(&self) -> &'a RepoPathComponent {
435 self.name
436 }
437
438 /// Returns the tree value at this path.
439 pub fn value(&self) -> &'a TreeValue {
440 self.value
441 }
442}
443
/// Iterator over the direct entries in a `Tree`.
pub struct TreeEntriesNonRecursiveIterator<'a> {
    // Borrowed iterator over the tree's sorted (name, value) pairs.
    iter: slice::Iter<'a, (RepoPathComponentBuf, TreeValue)>,
}
448
449impl<'a> Iterator for TreeEntriesNonRecursiveIterator<'a> {
450 type Item = TreeEntry<'a>;
451
452 fn next(&mut self) -> Option<Self::Item> {
453 self.iter
454 .next()
455 .map(|(name, value)| TreeEntry { name, value })
456 }
457}
458
/// A tree object, which represents a directory. It contains the direct entries
/// of the directory. Subdirectories are represented by the `TreeValue::Tree`
/// variant. The `Tree` object associated with the root directory thus
/// represents the entire repository at a given point in time.
///
/// The entries must be sorted (by `RepoPathComponentBuf`'s ordering) and must
/// not contain duplicate names.
#[derive(ContentHash, Default, PartialEq, Eq, Debug, Clone)]
pub struct Tree {
    // Sorted, duplicate-free (name, value) pairs; invariant upheld by
    // `from_sorted_entries` and relied upon by `entry`'s binary search.
    entries: Vec<(RepoPathComponentBuf, TreeValue)>,
}
470
471impl Tree {
472 /// Creates a new `Tree` from the given entries. The entries must be sorted
473 /// by name and must not contain duplicate names.
474 pub fn from_sorted_entries(entries: Vec<(RepoPathComponentBuf, TreeValue)>) -> Self {
475 debug_assert!(entries.is_sorted_by(|(a, _), (b, _)| a < b));
476 Self { entries }
477 }
478
479 /// Checks if this tree has no entries.
480 pub fn is_empty(&self) -> bool {
481 self.entries.is_empty()
482 }
483
484 /// Returns an iterator over the names of the entries in this tree.
485 pub fn names(&self) -> impl Iterator<Item = &RepoPathComponent> {
486 self.entries.iter().map(|(name, _)| name.as_ref())
487 }
488
489 /// Returns an iterator over the entries in this tree.
490 pub fn entries(&self) -> TreeEntriesNonRecursiveIterator<'_> {
491 TreeEntriesNonRecursiveIterator {
492 iter: self.entries.iter(),
493 }
494 }
495
496 /// Returns the entry at the given basename, if it exists.
497 pub fn entry(&self, name: &RepoPathComponent) -> Option<TreeEntry<'_>> {
498 let index = self
499 .entries
500 .binary_search_by_key(&name, |(name, _)| name)
501 .ok()?;
502 let (name, value) = &self.entries[index];
503 Some(TreeEntry { name, value })
504 }
505
506 /// Returns the value at the given basename, if it exists.
507 pub fn value(&self, name: &RepoPathComponent) -> Option<&TreeValue> {
508 self.entry(name).map(|entry| entry.value)
509 }
510}
511
512/// Creates a root commit object.
513pub fn make_root_commit(root_change_id: ChangeId, empty_tree_id: TreeId) -> Commit {
514 let timestamp = Timestamp {
515 timestamp: MillisSinceEpoch(0),
516 tz_offset: 0,
517 };
518 let signature = Signature {
519 name: String::new(),
520 email: String::new(),
521 timestamp,
522 };
523 Commit {
524 parents: vec![],
525 predecessors: vec![],
526 root_tree: Merge::resolved(empty_tree_id),
527 conflict_labels: Merge::resolved(String::new()),
528 change_id: root_change_id,
529 description: String::new(),
530 author: signature.clone(),
531 committer: signature,
532 secure_sig: None,
533 }
534}
535
/// Defines the interface for commit backends.
#[async_trait]
pub trait Backend: Any + Send + Sync + Debug {
    /// A unique name that identifies this backend. Written to
    /// `.jj/repo/store/type` when the repo is created.
    fn name(&self) -> &str;

    /// The length of commit IDs in bytes.
    fn commit_id_length(&self) -> usize;

    /// The length of change IDs in bytes.
    fn change_id_length(&self) -> usize;

    /// The root commit's ID.
    ///
    /// The root commit is a possibly virtual commit that is an ancestor of all
    /// commits in the repository. It is the only commit that has no parents.
    fn root_commit_id(&self) -> &CommitId;

    /// The root commit's change ID.
    fn root_change_id(&self) -> &ChangeId;

    /// The empty tree's ID. All empty trees must have the same ID regardless of
    /// the path.
    fn empty_tree_id(&self) -> &TreeId;

    /// An estimate of how many concurrent requests this backend handles well. A
    /// local backend like the Git backend (at least until it supports partial
    /// clones) may want to set this to 1. A cloud-backed backend may want to
    /// set it to 100 or so.
    ///
    /// It is not guaranteed that at most this number of concurrent requests are
    /// sent. It is the backend's responsibility to make sure it doesn't put
    /// too much load on its storage, e.g. by queueing requests if necessary.
    fn concurrency(&self) -> usize;

    /// Returns a reader for reading the contents of a file from the backend.
    async fn read_file(
        &self,
        path: &RepoPath,
        id: &FileId,
    ) -> BackendResult<Pin<Box<dyn AsyncRead + Send>>>;

    /// Writes the contents of the writer to the backend. Returns the ID of the
    /// written file.
    async fn write_file(
        &self,
        path: &RepoPath,
        contents: &mut (dyn AsyncRead + Send + Unpin),
    ) -> BackendResult<FileId>;

    /// Reads the target of a symlink from the backend. Returns the target path.
    /// It is not a `RepoPath` because it doesn't necessarily point within the
    /// repository.
    async fn read_symlink(&self, path: &RepoPath, id: &SymlinkId) -> BackendResult<String>;

    /// Writes a symlink with the given target to the backend and returns its
    /// ID.
    async fn write_symlink(&self, path: &RepoPath, target: &str) -> BackendResult<SymlinkId>;

    /// Read the specified `CopyHistory` object.
    ///
    /// Backends that don't support copy tracking may return
    /// `BackendError::Unsupported`.
    async fn read_copy(&self, id: &CopyId) -> BackendResult<CopyHistory>;

    /// Write the `CopyHistory` object and return its ID.
    ///
    /// Backends that don't support copy tracking may return
    /// `BackendError::Unsupported`.
    async fn write_copy(&self, copy: &CopyHistory) -> BackendResult<CopyId>;

    /// Find all copy histories that are related to the specified one. This is
    /// defined as those that are ancestors of the given specified one, plus
    /// all descendants of those ancestors. Children must be returned before
    /// parents.
    ///
    /// It is valid (but wasteful) to include other copy histories, such as
    /// siblings, or even completely unrelated copy histories.
    ///
    /// Backends that don't support copy tracking may return
    /// `BackendError::Unsupported`.
    async fn get_related_copies(&self, copy_id: &CopyId) -> BackendResult<Vec<RelatedCopy>>;

    /// Reads the tree at the given path with the given ID.
    async fn read_tree(&self, path: &RepoPath, id: &TreeId) -> BackendResult<Tree>;

    /// Writes the given tree at the given path to the backend and returns its
    /// ID.
    async fn write_tree(&self, path: &RepoPath, contents: &Tree) -> BackendResult<TreeId>;

    /// Reads the commit with the given ID.
    async fn read_commit(&self, id: &CommitId) -> BackendResult<Commit>;

    /// Writes a commit and returns its ID and the commit itself. The commit
    /// should contain the data that was actually written, which may differ
    /// from the data passed in. For example, the backend may change the
    /// committer name to an authenticated user's name, or the backend's
    /// timestamps may have less precision than the millisecond precision in
    /// `Commit`.
    ///
    /// The `sign_with` parameter could contain a function to cryptographically
    /// sign some binary representation of the commit.
    /// If the backend supports it, it could call it and store the result in
    /// an implementation specific fashion, and both `read_commit` and the
    /// return of `write_commit` should read it back as the `secure_sig`
    /// field.
    async fn write_commit(
        &self,
        contents: Commit,
        sign_with: Option<&mut SigningFn>,
    ) -> BackendResult<(CommitId, Commit)>;

    /// Get copy records for the dag range `root..head`. If `paths` is None
    /// include all paths, otherwise restrict to only `paths`.
    ///
    /// The exact order these are returned is unspecified, but it is guaranteed
    /// to be reverse-topological. That is, for any two copy records with
    /// different commit ids A and B, if A is an ancestor of B, A is streamed
    /// after B.
    ///
    /// Streaming by design to better support large backends which may have very
    /// large single-file histories. This also allows more iterative algorithms
    /// like blame/annotate to short-circuit after a point without wasting
    /// unnecessary resources.
    fn get_copy_records(
        &self,
        paths: Option<&[RepoPathBuf]>,
        root: &CommitId,
        head: &CommitId,
    ) -> BackendResult<BoxStream<'_, BackendResult<CopyRecord>>>;

    /// Perform garbage collection.
    ///
    /// All commits found in the `index` won't be removed. In addition to that,
    /// objects created after `keep_newer` will be preserved. This mitigates a
    /// risk of deleting new commits created concurrently by another process.
    fn gc(&self, index: &dyn Index, keep_newer: SystemTime) -> BackendResult<()>;
}
675
676impl dyn Backend {
677 /// Returns reference of the implementation type.
678 pub fn downcast_ref<T: Backend>(&self) -> Option<&T> {
679 (self as &dyn Any).downcast_ref()
680 }
681}