jj_lib/local_working_copy.rs

// Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![expect(missing_docs)]

use std::borrow::Cow;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::collections::HashSet;
use std::error::Error;
use std::fs;
use std::fs::DirEntry;
use std::fs::File;
use std::fs::Metadata;
use std::fs::OpenOptions;
use std::io;
use std::io::Read as _;
use std::io::Write as _;
use std::iter;
use std::mem;
use std::ops::Range;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt as _;
use std::path::Path;
use std::path::PathBuf;
use std::slice;
use std::sync::Arc;
use std::sync::OnceLock;
use std::sync::mpsc::Sender;
use std::sync::mpsc::channel;
use std::time::SystemTime;

use async_trait::async_trait;
use either::Either;
use futures::StreamExt as _;
use itertools::EitherOrBoth;
use itertools::Itertools as _;
use once_cell::unsync::OnceCell;
use pollster::FutureExt as _;
use prost::Message as _;
use rayon::iter::IntoParallelIterator as _;
use rayon::prelude::IndexedParallelIterator as _;
use rayon::prelude::ParallelIterator as _;
use tempfile::NamedTempFile;
use thiserror::Error;
use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt as _;
use tracing::instrument;
use tracing::trace_span;

use crate::backend::BackendError;
use crate::backend::CopyId;
use crate::backend::FileId;
use crate::backend::MillisSinceEpoch;
use crate::backend::SymlinkId;
use crate::backend::TreeId;
use crate::backend::TreeValue;
use crate::commit::Commit;
use crate::config::ConfigGetError;
use crate::conflict_labels::ConflictLabels;
use crate::conflicts;
use crate::conflicts::ConflictMarkerStyle;
use crate::conflicts::ConflictMaterializeOptions;
use crate::conflicts::MIN_CONFLICT_MARKER_LEN;
use crate::conflicts::MaterializedTreeValue;
use crate::conflicts::choose_materialized_conflict_marker_len;
use crate::conflicts::materialize_merge_result_to_bytes;
use crate::conflicts::materialize_tree_value;
pub use crate::eol::EolConversionMode;
use crate::eol::TargetEolStrategy;
use crate::file_util::BlockingAsyncReader;
use crate::file_util::FileIdentity;
use crate::file_util::check_symlink_support;
use crate::file_util::copy_async_to_sync;
use crate::file_util::persist_temp_file;
use crate::file_util::symlink_file;
use crate::fsmonitor::FsmonitorSettings;
#[cfg(feature = "watchman")]
use crate::fsmonitor::WatchmanConfig;
#[cfg(feature = "watchman")]
use crate::fsmonitor::watchman;
use crate::gitignore::GitIgnoreFile;
use crate::lock::FileLock;
use crate::matchers::DifferenceMatcher;
use crate::matchers::EverythingMatcher;
use crate::matchers::FilesMatcher;
use crate::matchers::IntersectionMatcher;
use crate::matchers::Matcher;
use crate::matchers::PrefixMatcher;
use crate::matchers::UnionMatcher;
use crate::merge::Merge;
use crate::merge::MergeBuilder;
use crate::merge::MergedTreeValue;
use crate::merge::SameChange;
use crate::merged_tree::MergedTree;
use crate::merged_tree::TreeDiffEntry;
use crate::merged_tree_builder::MergedTreeBuilder;
use crate::object_id::ObjectId as _;
use crate::op_store::OperationId;
use crate::ref_name::WorkspaceName;
use crate::ref_name::WorkspaceNameBuf;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::repo_path::RepoPathComponent;
use crate::settings::UserSettings;
use crate::store::Store;
use crate::working_copy::CheckoutError;
use crate::working_copy::CheckoutStats;
use crate::working_copy::LockedWorkingCopy;
use crate::working_copy::ResetError;
use crate::working_copy::SnapshotError;
use crate::working_copy::SnapshotOptions;
use crate::working_copy::SnapshotProgress;
use crate::working_copy::SnapshotStats;
use crate::working_copy::UntrackedReason;
use crate::working_copy::WorkingCopy;
use crate::working_copy::WorkingCopyFactory;
use crate::working_copy::WorkingCopyStateError;

fn symlink_target_convert_to_store(path: &Path) -> Option<Cow<'_, str>> {
    let path = path.to_str()?;
    if std::path::MAIN_SEPARATOR == '/' {
        Some(Cow::Borrowed(path))
    } else {
        // When storing the symlink target on Windows, convert "\" to "/", so that the
        // symlink remains valid on Unix.
        //
        // Note that we don't use std::path to handle the conversion, because it
        // performs poorly with Windows verbatim paths like \\?\Global\C:\file.txt.
        Some(Cow::Owned(path.replace(std::path::MAIN_SEPARATOR_STR, "/")))
    }
}

fn symlink_target_convert_to_disk(path: &str) -> PathBuf {
    let path = if std::path::MAIN_SEPARATOR == '/' {
        Cow::Borrowed(path)
    } else {
        // Use the main separator to reformat the input path to avoid creating a broken
        // symlink with the incorrect separator "/".
        //
        // See https://github.com/jj-vcs/jj/issues/6934 for the relevant bug.
        Cow::Owned(path.replace('/', std::path::MAIN_SEPARATOR_STR))
    };
    PathBuf::from(path.as_ref())
}
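
// A minimal sketch of the round trip provided by the two helpers above. It is
// gated to Windows because on Unix both functions pass the target through
// unchanged; the concrete paths are illustrative assumptions.
#[cfg(all(test, windows))]
mod symlink_target_conversion_example {
    use super::*;

    #[test]
    fn round_trips_path_separators() {
        // "\" is normalized to "/" before the target is stored...
        let stored = symlink_target_convert_to_store(Path::new(r"..\other\file.txt")).unwrap();
        assert_eq!(stored, "../other/file.txt");
        // ...and converted back to the platform separator at checkout time.
        assert_eq!(
            symlink_target_convert_to_disk(&stored),
            PathBuf::from(r"..\other\file.txt")
        );
    }
}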

/// How to propagate executable bit changes in file metadata to/from the repo.
///
/// On Windows, executable bits are always ignored. On Unix they are respected
/// by default, but may be ignored by user settings or if we find that the
/// filesystem of the working copy doesn't support executable bits.
#[derive(Clone, Copy, Debug)]
enum ExecChangePolicy {
    Ignore,
    #[cfg_attr(windows, expect(dead_code))]
    Respect,
}

/// The executable bit change setting as exposed to the user.
#[derive(Clone, Copy, Debug, Default, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum ExecChangeSetting {
    Ignore,
    Respect,
    #[default]
    Auto,
}

impl ExecChangePolicy {
    /// Get the executable bit policy based on user settings and executable bit
    /// support in the working copy's state path.
    ///
    /// On Unix we check whether executable bits are supported in the working
    /// copy to decide between respecting and ignoring them, defaulting to
    /// respect.
    #[cfg_attr(windows, expect(unused_variables))]
    fn new(exec_change_setting: ExecChangeSetting, state_path: &Path) -> Self {
        #[cfg(windows)]
        return Self::Ignore;
        #[cfg(unix)]
        return match exec_change_setting {
            ExecChangeSetting::Ignore => Self::Ignore,
            ExecChangeSetting::Respect => Self::Respect,
            ExecChangeSetting::Auto => {
                match crate::file_util::check_executable_bit_support(state_path) {
                    Ok(false) => Self::Ignore,
                    Ok(true) => Self::Respect,
                    Err(err) => {
                        tracing::warn!(?err, "Error when checking for executable bit support");
                        Self::Respect
                    }
                }
            }
        };
    }
}

/// On-disk state of a file's executable bit as cached in the file states.
/// This does *not* necessarily equal the `executable` field of
/// [`TreeValue::File`]: the two are allowed to diverge if and only if we're
/// ignoring executable bit changes.
///
/// This will only ever be true on Windows if the repo is also being accessed
/// from a Unix version of jj, such as when accessed from WSL.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct ExecBit(bool);

impl ExecBit {
    /// Get the executable bit for a tree value to write to the repo store.
    ///
    /// If we're ignoring the executable bit, then we fall back to the previous
    /// in-repo executable bit if present.
    fn for_tree_value(
        self,
        exec_policy: ExecChangePolicy,
        prev_in_repo: impl FnOnce() -> Option<bool>,
    ) -> bool {
        match exec_policy {
            ExecChangePolicy::Ignore => prev_in_repo().unwrap_or(false),
            ExecChangePolicy::Respect => self.0,
        }
    }

    /// Compute the on-disk executable bit to write, based on the in-repo bit
    /// or the previous on-disk executable bit.
    ///
    /// On Windows, we return `false` because when we later write files, we
    /// always create them anew, and the executable bit will be `false` even if
    /// shared with a Unix machine.
    ///
    /// `prev_on_disk` is a closure because it is somewhat expensive and is only
    /// used if ignoring the executable bit on Unix.
    fn new_from_repo(
        in_repo: bool,
        exec_policy: ExecChangePolicy,
        prev_on_disk: impl FnOnce() -> Option<Self>,
    ) -> Self {
        match exec_policy {
            _ if cfg!(windows) => Self(false),
            ExecChangePolicy::Ignore => prev_on_disk().unwrap_or(Self(false)),
            ExecChangePolicy::Respect => Self(in_repo),
        }
    }

    /// Load the on-disk executable bit from file metadata.
    #[cfg_attr(windows, expect(unused_variables))]
    fn new_from_disk(metadata: &Metadata) -> Self {
        #[cfg(unix)]
        return Self(metadata.permissions().mode() & 0o111 != 0);
        #[cfg(windows)]
        return Self(false);
    }
}
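
// A minimal sketch of how the policy decides which bit wins. Gated to Unix so
// that using the `Respect` variant doesn't conflict with its Windows-only
// dead-code expectation above.
#[cfg(all(test, unix))]
mod exec_bit_policy_example {
    use super::*;

    #[test]
    fn policy_decides_which_bit_wins() {
        let on_disk = ExecBit(true);
        // Ignore: the previous in-repo bit (here `false`) wins over on-disk.
        assert!(!on_disk.for_tree_value(ExecChangePolicy::Ignore, || Some(false)));
        // Respect: the on-disk bit wins; the fallback is never consulted.
        assert!(on_disk.for_tree_value(ExecChangePolicy::Respect, || unreachable!()));
        // Writing to disk under Ignore keeps the previous on-disk bit, or
        // clears it when there is no previous file.
        assert_eq!(
            ExecBit::new_from_repo(true, ExecChangePolicy::Ignore, || None),
            ExecBit(false)
        );
    }
}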

/// Set the executable bit of a file on-disk. This is a no-op on Windows.
///
/// On Unix, we manually set the executable bit to the previous value on-disk.
/// This is necessary because we write all files by creating them new, so files
/// won't preserve their permissions naturally.
#[cfg_attr(windows, expect(unused_variables))]
fn set_executable(exec_bit: ExecBit, disk_path: &Path) -> Result<(), io::Error> {
    #[cfg(unix)]
    {
        let mode = if exec_bit.0 { 0o755 } else { 0o644 };
        fs::set_permissions(disk_path, fs::Permissions::from_mode(mode))?;
    }
    Ok(())
}

#[derive(Debug, PartialEq, Eq, Clone)]
pub enum FileType {
    Normal { exec_bit: ExecBit },
    Symlink,
    GitSubmodule,
}

#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct MaterializedConflictData {
    pub conflict_marker_len: u32,
}

#[derive(Debug, PartialEq, Eq, Clone)]
pub struct FileState {
    pub file_type: FileType,
    pub mtime: MillisSinceEpoch,
    pub size: u64,
    pub materialized_conflict_data: Option<MaterializedConflictData>,
    /* TODO: What else do we need here? Git stores a lot of fields.
     * TODO: Could possibly handle case-insensitive file systems keeping an
     *       Option<PathBuf> with the actual path here. */
}

impl FileState {
    /// Check whether a file state appears clean compared to a previous file
    /// state, ignoring materialized conflict data.
    pub fn is_clean(&self, old_file_state: &Self) -> bool {
        self.file_type == old_file_state.file_type
            && self.mtime == old_file_state.mtime
            && self.size == old_file_state.size
    }

    /// Indicates that a file exists in the tree but that it needs to be
    /// re-stat'ed on the next snapshot.
    fn placeholder() -> Self {
        Self {
            file_type: FileType::Normal {
                exec_bit: ExecBit(false),
            },
            mtime: MillisSinceEpoch(0),
            size: 0,
            materialized_conflict_data: None,
        }
    }

    fn for_file(
        exec_bit: ExecBit,
        size: u64,
        metadata: &Metadata,
    ) -> Result<Self, MtimeOutOfRange> {
        Ok(Self {
            file_type: FileType::Normal { exec_bit },
            mtime: mtime_from_metadata(metadata)?,
            size,
            materialized_conflict_data: None,
        })
    }

    fn for_symlink(metadata: &Metadata) -> Result<Self, MtimeOutOfRange> {
        // When using fscrypt, the reported size is not the content size. So if
        // we were to record the content size here (like we do for regular files), we
        // would end up thinking the file has changed every time we snapshot.
        Ok(Self {
            file_type: FileType::Symlink,
            mtime: mtime_from_metadata(metadata)?,
            size: metadata.len(),
            materialized_conflict_data: None,
        })
    }

    fn for_gitsubmodule() -> Self {
        Self {
            file_type: FileType::GitSubmodule,
            mtime: MillisSinceEpoch(0),
            size: 0,
            materialized_conflict_data: None,
        }
    }
}
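
// A minimal sketch of the cleanliness check above: materialized conflict data
// is deliberately excluded from the comparison.
#[cfg(test)]
mod file_state_clean_example {
    use super::*;

    #[test]
    fn conflict_data_does_not_affect_is_clean() {
        let old = FileState::placeholder();
        let mut new = FileState::placeholder();
        new.materialized_conflict_data =
            Some(MaterializedConflictData { conflict_marker_len: 7 });
        // Only file_type, mtime, and size participate in the comparison.
        assert!(new.is_clean(&old));
        new.size = 1;
        assert!(!new.is_clean(&old));
    }
}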

/// Owned map of path to file states, backed by proto data.
#[derive(Clone, Debug)]
struct FileStatesMap {
    data: Vec<crate::protos::local_working_copy::FileStateEntry>,
}

impl FileStatesMap {
    fn new() -> Self {
        Self { data: Vec::new() }
    }

    fn from_proto(
        mut data: Vec<crate::protos::local_working_copy::FileStateEntry>,
        is_sorted: bool,
    ) -> Self {
        if !is_sorted {
            data.sort_unstable_by(|entry1, entry2| {
                let path1 = RepoPath::from_internal_string(&entry1.path).unwrap();
                let path2 = RepoPath::from_internal_string(&entry2.path).unwrap();
                path1.cmp(path2)
            });
        }
        debug_assert!(is_file_state_entries_proto_unique_and_sorted(&data));
        Self { data }
    }

    /// Merges changed and deleted entries into this map. The changed entries
    /// must be sorted by path.
    fn merge_in(
        &mut self,
        changed_file_states: Vec<(RepoPathBuf, FileState)>,
        deleted_files: &HashSet<RepoPathBuf>,
    ) {
        if changed_file_states.is_empty() && deleted_files.is_empty() {
            return;
        }
        debug_assert!(
            changed_file_states.is_sorted_by(|(path1, _), (path2, _)| path1 < path2),
            "changed_file_states must be sorted and have no duplicates"
        );
        self.data = itertools::merge_join_by(
            mem::take(&mut self.data),
            changed_file_states,
            |old_entry, (changed_path, _)| {
                RepoPath::from_internal_string(&old_entry.path)
                    .unwrap()
                    .cmp(changed_path)
            },
        )
        .filter_map(|diff| match diff {
            EitherOrBoth::Both(_, (path, state)) | EitherOrBoth::Right((path, state)) => {
                debug_assert!(!deleted_files.contains(&path));
                Some(file_state_entry_to_proto(path, &state))
            }
            EitherOrBoth::Left(entry) => {
                let present =
                    !deleted_files.contains(RepoPath::from_internal_string(&entry.path).unwrap());
                present.then_some(entry)
            }
        })
        .collect();
    }

    fn clear(&mut self) {
        self.data.clear();
    }

    /// Returns read-only map containing all file states.
    fn all(&self) -> FileStates<'_> {
        FileStates::from_sorted(&self.data)
    }
}
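
// A minimal sketch of `merge_in` semantics: changed entries are merged into
// the sorted map, and deleted paths drop out of it. The paths are illustrative.
#[cfg(test)]
mod file_states_map_merge_example {
    use super::*;

    #[test]
    fn merges_changes_and_deletions() {
        let mut map = FileStatesMap::new();
        map.merge_in(
            vec![
                (RepoPathBuf::from_internal_string("a").unwrap(), FileState::placeholder()),
                (RepoPathBuf::from_internal_string("b").unwrap(), FileState::placeholder()),
            ],
            &HashSet::new(),
        );
        // Deleting "a" leaves only "b" behind.
        let deleted = HashSet::from([RepoPathBuf::from_internal_string("a").unwrap()]);
        map.merge_in(vec![], &deleted);
        let paths: Vec<_> = map.all().paths().collect();
        assert_eq!(paths, [RepoPath::from_internal_string("b").unwrap()]);
    }
}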

/// Read-only map of path to file states, possibly filtered by path prefix.
#[derive(Clone, Copy, Debug)]
pub struct FileStates<'a> {
    data: &'a [crate::protos::local_working_copy::FileStateEntry],
}

impl<'a> FileStates<'a> {
    fn from_sorted(data: &'a [crate::protos::local_working_copy::FileStateEntry]) -> Self {
        debug_assert!(is_file_state_entries_proto_unique_and_sorted(data));
        Self { data }
    }

    /// Returns file states under the given directory path.
    pub fn prefixed(&self, base: &RepoPath) -> Self {
        let range = self.prefixed_range(base);
        Self::from_sorted(&self.data[range])
    }

    /// Faster version of `prefixed("<dir>/<base>")`. Requires that all entries
    /// share the same prefix `dir`.
    fn prefixed_at(&self, dir: &RepoPath, base: &RepoPathComponent) -> Self {
        let range = self.prefixed_range_at(dir, base);
        Self::from_sorted(&self.data[range])
    }

    /// Returns true if this contains no entries.
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Returns true if the given `path` exists.
    pub fn contains_path(&self, path: &RepoPath) -> bool {
        self.exact_position(path).is_some()
    }

    /// Returns file state for the given `path`.
    pub fn get(&self, path: &RepoPath) -> Option<FileState> {
        let pos = self.exact_position(path)?;
        let (_, state) = file_state_entry_from_proto(&self.data[pos]);
        Some(state)
    }

    /// Returns the executable bit state if `path` is a normal file.
    pub fn get_exec_bit(&self, path: &RepoPath) -> Option<ExecBit> {
        match self.get(path)?.file_type {
            FileType::Normal { exec_bit } => Some(exec_bit),
            FileType::Symlink | FileType::GitSubmodule => None,
        }
    }

    /// Faster version of `get("<dir>/<name>")`. Requires that all entries share
    /// the same prefix `dir`.
    fn get_at(&self, dir: &RepoPath, name: &RepoPathComponent) -> Option<FileState> {
        let pos = self.exact_position_at(dir, name)?;
        let (_, state) = file_state_entry_from_proto(&self.data[pos]);
        Some(state)
    }

    fn exact_position(&self, path: &RepoPath) -> Option<usize> {
        self.data
            .binary_search_by(|entry| {
                RepoPath::from_internal_string(&entry.path)
                    .unwrap()
                    .cmp(path)
            })
            .ok()
    }

    fn exact_position_at(&self, dir: &RepoPath, name: &RepoPathComponent) -> Option<usize> {
        debug_assert!(self.paths().all(|path| path.starts_with(dir)));
        let slash_len = usize::from(!dir.is_root());
        let prefix_len = dir.as_internal_file_string().len() + slash_len;
        self.data
            .binary_search_by(|entry| {
                let tail = entry.path.get(prefix_len..).unwrap_or("");
                match tail.split_once('/') {
                    // "<name>/*" > "<name>"
                    Some((pre, _)) => pre.cmp(name.as_internal_str()).then(Ordering::Greater),
                    None => tail.cmp(name.as_internal_str()),
                }
            })
            .ok()
    }

    fn prefixed_range(&self, base: &RepoPath) -> Range<usize> {
        let start = self
            .data
            .partition_point(|entry| RepoPath::from_internal_string(&entry.path).unwrap() < base);
        let len = self.data[start..].partition_point(|entry| {
            RepoPath::from_internal_string(&entry.path)
                .unwrap()
                .starts_with(base)
        });
        start..(start + len)
    }

    fn prefixed_range_at(&self, dir: &RepoPath, base: &RepoPathComponent) -> Range<usize> {
        debug_assert!(self.paths().all(|path| path.starts_with(dir)));
        let slash_len = usize::from(!dir.is_root());
        let prefix_len = dir.as_internal_file_string().len() + slash_len;
        let start = self.data.partition_point(|entry| {
            let tail = entry.path.get(prefix_len..).unwrap_or("");
            let entry_name = tail.split_once('/').map_or(tail, |(name, _)| name);
            entry_name < base.as_internal_str()
        });
        let len = self.data[start..].partition_point(|entry| {
            let tail = entry.path.get(prefix_len..).unwrap_or("");
            let entry_name = tail.split_once('/').map_or(tail, |(name, _)| name);
            entry_name == base.as_internal_str()
        });
        start..(start + len)
    }

    /// Iterates file state entries sorted by path.
    pub fn iter(&self) -> FileStatesIter<'a> {
        self.data.iter().map(file_state_entry_from_proto)
    }

    /// Iterates sorted file paths.
    pub fn paths(&self) -> impl ExactSizeIterator<Item = &'a RepoPath> + use<'a> {
        self.data
            .iter()
            .map(|entry| RepoPath::from_internal_string(&entry.path).unwrap())
    }
}
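
// A minimal sketch of prefix and exact lookups on `FileStates`. Matching is
// per path component, so "dir2/file" is not under the prefix "dir". The
// entries are illustrative and built via `file_state_entry_to_proto` below.
#[cfg(test)]
mod file_states_lookup_example {
    use super::*;

    #[test]
    fn prefix_and_exact_lookups() {
        let entries = vec![
            file_state_entry_to_proto(
                RepoPathBuf::from_internal_string("dir/file").unwrap(),
                &FileState::placeholder(),
            ),
            file_state_entry_to_proto(
                RepoPathBuf::from_internal_string("dir2/file").unwrap(),
                &FileState::placeholder(),
            ),
        ];
        let states = FileStates::from_sorted(&entries);
        // Only "dir/file" is under "dir"; "dir2/file" is a different component.
        let dir = RepoPath::from_internal_string("dir").unwrap();
        assert_eq!(states.prefixed(dir).paths().count(), 1);
        assert!(states.contains_path(RepoPath::from_internal_string("dir2/file").unwrap()));
        assert!(!states.contains_path(RepoPath::from_internal_string("missing").unwrap()));
    }
}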

type FileStatesIter<'a> = iter::Map<
    slice::Iter<'a, crate::protos::local_working_copy::FileStateEntry>,
    fn(&crate::protos::local_working_copy::FileStateEntry) -> (&RepoPath, FileState),
>;

impl<'a> IntoIterator for FileStates<'a> {
    type Item = (&'a RepoPath, FileState);
    type IntoIter = FileStatesIter<'a>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

fn file_state_from_proto(proto: &crate::protos::local_working_copy::FileState) -> FileState {
    let file_type = match proto.file_type() {
        crate::protos::local_working_copy::FileType::Normal => FileType::Normal {
            exec_bit: ExecBit(false),
        },
        // On Windows, `FileType::Executable` can exist if the repo is being
        // shared with a Unix version of jj, such as when accessed from WSL.
        crate::protos::local_working_copy::FileType::Executable => FileType::Normal {
            exec_bit: ExecBit(true),
        },
        crate::protos::local_working_copy::FileType::Symlink => FileType::Symlink,
        #[expect(deprecated)]
        crate::protos::local_working_copy::FileType::Conflict => FileType::Normal {
            exec_bit: ExecBit(false),
        },
        crate::protos::local_working_copy::FileType::GitSubmodule => FileType::GitSubmodule,
    };
    FileState {
        file_type,
        mtime: MillisSinceEpoch(proto.mtime_millis_since_epoch),
        size: proto.size,
        materialized_conflict_data: proto.materialized_conflict_data.as_ref().map(|data| {
            MaterializedConflictData {
                conflict_marker_len: data.conflict_marker_len,
            }
        }),
    }
}

fn file_state_to_proto(file_state: &FileState) -> crate::protos::local_working_copy::FileState {
    let mut proto = crate::protos::local_working_copy::FileState::default();
    let file_type = match &file_state.file_type {
        FileType::Normal { exec_bit } => {
            if exec_bit.0 {
                crate::protos::local_working_copy::FileType::Executable
            } else {
                crate::protos::local_working_copy::FileType::Normal
            }
        }
        FileType::Symlink => crate::protos::local_working_copy::FileType::Symlink,
        FileType::GitSubmodule => crate::protos::local_working_copy::FileType::GitSubmodule,
    };
    proto.file_type = file_type as i32;
    proto.mtime_millis_since_epoch = file_state.mtime.0;
    proto.size = file_state.size;
    proto.materialized_conflict_data = file_state.materialized_conflict_data.map(|data| {
        crate::protos::local_working_copy::MaterializedConflictData {
            conflict_marker_len: data.conflict_marker_len,
        }
    });
    proto
}

fn file_state_entry_from_proto(
    proto: &crate::protos::local_working_copy::FileStateEntry,
) -> (&RepoPath, FileState) {
    let path = RepoPath::from_internal_string(&proto.path).unwrap();
    (path, file_state_from_proto(proto.state.as_ref().unwrap()))
}

fn file_state_entry_to_proto(
    path: RepoPathBuf,
    state: &FileState,
) -> crate::protos::local_working_copy::FileStateEntry {
    crate::protos::local_working_copy::FileStateEntry {
        path: path.into_internal_string(),
        state: Some(file_state_to_proto(state)),
    }
}

fn is_file_state_entries_proto_unique_and_sorted(
    data: &[crate::protos::local_working_copy::FileStateEntry],
) -> bool {
    data.iter()
        .map(|entry| RepoPath::from_internal_string(&entry.path).unwrap())
        .is_sorted_by(|path1, path2| path1 < path2)
}

fn sparse_patterns_from_proto(
    proto: Option<&crate::protos::local_working_copy::SparsePatterns>,
) -> Vec<RepoPathBuf> {
    let mut sparse_patterns = vec![];
    if let Some(proto_sparse_patterns) = proto {
        for prefix in &proto_sparse_patterns.prefixes {
            sparse_patterns.push(RepoPathBuf::from_internal_string(prefix).unwrap());
        }
    } else {
        // For compatibility with old working copies.
        // TODO: Delete this in late 2022 or so.
        sparse_patterns.push(RepoPathBuf::root());
    }
    sparse_patterns
}

/// Creates intermediate directories from the `working_copy_path` to the
/// `repo_path` parent. Returns the disk path for the `repo_path` file.
///
/// If an intermediate path component exists and is a file or symlink, this
/// function returns `Ok(None)` to signal that the path should be skipped.
/// The `working_copy_path` directory may be a symlink.
///
/// If an existing or newly-created sub directory points to ".git" or ".jj",
/// this function returns an error.
///
/// Note that this does not prevent TOCTOU bugs caused by concurrent checkouts.
/// Another process may remove the directory created by this function and put a
/// symlink there.
fn create_parent_dirs(
    working_copy_path: &Path,
    repo_path: &RepoPath,
) -> Result<Option<PathBuf>, CheckoutError> {
    let (parent_path, basename) = repo_path.split().expect("repo path shouldn't be root");
    let mut dir_path = working_copy_path.to_owned();
    for c in parent_path.components() {
        // Ensure that the name is a normal entry of the current dir_path.
        dir_path.push(c.to_fs_name().map_err(|err| err.with_path(repo_path))?);
        // A directory named ".git" or ".jj" can be temporarily created. It
        // might trick workspace path discovery, but is harmless so long as the
        // directory is empty.
        let (new_dir_created, is_dir) = match fs::create_dir(&dir_path) {
            Ok(()) => (true, true), // New directory
            Err(err) => match dir_path.symlink_metadata() {
                Ok(m) => (false, m.is_dir()), // Existing file or directory
                Err(_) => {
                    return Err(CheckoutError::Other {
                        message: format!(
                            "Failed to create parent directories for {}",
                            repo_path.to_fs_path_unchecked(working_copy_path).display(),
                        ),
                        err: err.into(),
                    });
                }
            },
        };
        // Invalid component (e.g. "..") should have been rejected.
        // The current dir_path should be an entry of dir_path.parent().
        reject_reserved_existing_path(&dir_path).inspect_err(|_| {
            if new_dir_created {
                fs::remove_dir(&dir_path).ok();
            }
        })?;
        if !is_dir {
            return Ok(None); // Skip existing file or symlink
        }
    }

    let mut file_path = dir_path;
    file_path.push(
        basename
            .to_fs_name()
            .map_err(|err| err.with_path(repo_path))?,
    );
    Ok(Some(file_path))
}
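
// A hedged usage sketch (the paths are illustrative): for a repo path
// "sub/dir/file.txt", the function above creates "<working_copy>/sub" and
// "<working_copy>/sub/dir" as needed and returns Ok(Some(path)) for
// "<working_copy>/sub/dir/file.txt"; if "sub" turns out to be an existing
// file or symlink, it returns Ok(None) so the caller skips this path.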

/// Removes an existing file named `disk_path` if any. Returns `Ok(true)` if
/// the file was there and got removed, meaning that a new file can be safely
/// created.
///
/// If the existing file points to ".git" or ".jj", this function returns an
/// error.
fn remove_old_file(disk_path: &Path) -> Result<bool, CheckoutError> {
    reject_reserved_existing_path(disk_path)?;
    match fs::remove_file(disk_path) {
        Ok(()) => Ok(true),
        Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(false),
        // TODO: Use io::ErrorKind::IsADirectory if it gets stabilized
        Err(_) if disk_path.symlink_metadata().is_ok_and(|m| m.is_dir()) => Ok(false),
        Err(err) => Err(CheckoutError::Other {
            message: format!("Failed to remove file {}", disk_path.display()),
            err: err.into(),
        }),
    }
}

/// Checks if a new file or symlink named `disk_path` can be created.
///
/// If the file already exists, this function returns `Ok(false)` to signal
/// that the path should be skipped.
///
/// If the path may point to a ".git" or ".jj" entry, this function returns an
/// error.
///
/// This function can fail if `disk_path.parent()` isn't a directory.
fn can_create_new_file(disk_path: &Path) -> Result<bool, CheckoutError> {
    // A new file or symlink will be created by the caller. If it were reachable
    // under the name ".git" or ".jj", the git/jj CLI could be tricked into
    // loading configuration from an attacker-controlled location. So we first
    // test the path by creating an empty file.
    let new_file = match OpenOptions::new()
        .write(true)
        .create_new(true) // Don't overwrite, don't follow symlink
        .open(disk_path)
    {
        Ok(file) => Some(file),
        Err(err) if err.kind() == io::ErrorKind::AlreadyExists => None,
        // Workaround for "Access is denied. (os error 5)" error on Windows.
        Err(_) => match disk_path.symlink_metadata() {
            Ok(_) => None,
            Err(err) => {
                return Err(CheckoutError::Other {
                    message: format!("Failed to stat {}", disk_path.display()),
                    err: err.into(),
                });
            }
        },
    };

    let new_file_created = new_file.is_some();

    if let Some(new_file) = new_file {
        reject_reserved_existing_file(new_file, disk_path).inspect_err(|_| {
            // We keep the error from `reject_reserved_existing_file`
            fs::remove_file(disk_path).ok();
        })?;

        fs::remove_file(disk_path).map_err(|err| CheckoutError::Other {
            message: format!("Failed to remove temporary file {}", disk_path.display()),
            err: err.into(),
        })?;
    } else {
        reject_reserved_existing_path(disk_path)?;
    }
    Ok(new_file_created)
}

const RESERVED_DIR_NAMES: &[&str] = &[".git", ".jj"];

fn file_identity_from_symlink_path(disk_path: &Path) -> io::Result<Option<FileIdentity>> {
    match FileIdentity::from_symlink_path(disk_path) {
        Ok(identity) => Ok(Some(identity)),
        Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(None),
        Err(err) => Err(err),
    }
}

/// Wrapper for [`reject_reserved_existing_file_identity`] which avoids a
/// syscall by converting the provided `file` to a `FileIdentity` via its
/// file descriptor.
///
/// See [`reject_reserved_existing_file_identity`] for more info.
fn reject_reserved_existing_file(file: File, disk_path: &Path) -> Result<(), CheckoutError> {
    // Note: since the file is open, we don't expect `io::ErrorKind::NotFound`
    // to be returned here.
    let file_identity = FileIdentity::from_file(file).map_err(|err| CheckoutError::Other {
        message: format!("Failed to validate path {}", disk_path.display()),
        err: err.into(),
    })?;

    reject_reserved_existing_file_identity(file_identity, disk_path)
}

/// Wrapper for [`reject_reserved_existing_file_identity`] which converts
/// the provided `disk_path` to a `FileIdentity`.
///
/// See [`reject_reserved_existing_file_identity`] for more info.
///
/// # Remarks
///
/// On Windows, this incurs an additional syscall cost to open and close the
/// file `HANDLE` for `disk_path`. On Unix, `lstat()` is used.
fn reject_reserved_existing_path(disk_path: &Path) -> Result<(), CheckoutError> {
    let Some(disk_identity) =
        file_identity_from_symlink_path(disk_path).map_err(|err| CheckoutError::Other {
            message: format!("Failed to validate path {}", disk_path.display()),
            err: err.into(),
        })?
    else {
        // If the existing disk_path pointed to the reserved path, we would have
        // gotten an identity back. Since we got nothing, the file does not exist
        // and cannot be a reserved path name.
        return Ok(());
    };

    reject_reserved_existing_file_identity(disk_identity, disk_path)
}

/// Assuming `disk_path` exists, checks whether its last component points to
/// ".git" or ".jj" in the same parent directory.
///
/// `disk_identity` is expected to be an identity of the file described by
/// `disk_path`.
///
/// # Remarks
///
/// On Windows, this incurs a syscall cost to open and close a file `HANDLE` for
/// each filename in `RESERVED_DIR_NAMES`. On Unix, `lstat()` is used.
fn reject_reserved_existing_file_identity(
    disk_identity: FileIdentity,
    disk_path: &Path,
) -> Result<(), CheckoutError> {
    let parent_dir_path = disk_path.parent().expect("content path shouldn't be root");
    for name in RESERVED_DIR_NAMES {
        let reserved_path = parent_dir_path.join(name);

        let Some(reserved_identity) =
            file_identity_from_symlink_path(&reserved_path).map_err(|err| {
                CheckoutError::Other {
                    message: format!("Failed to validate path {}", disk_path.display()),
                    err: err.into(),
                }
            })?
        else {
            // If disk_path pointed to this reserved name, the reserved path
            // would exist and we would have gotten an identity back. Since we
            // got nothing, disk_path cannot be this reserved name.
            continue;
        };

        if disk_identity == reserved_identity {
            return Err(CheckoutError::ReservedPathComponent {
                path: disk_path.to_owned(),
                name,
            });
        }
    }

    Ok(())
}

#[derive(Debug, Error)]
#[error("Out-of-range file modification time")]
struct MtimeOutOfRange;

fn mtime_from_metadata(metadata: &Metadata) -> Result<MillisSinceEpoch, MtimeOutOfRange> {
    let time = metadata
        .modified()
        .expect("File mtime not supported on this platform?");
    system_time_to_millis(time).ok_or(MtimeOutOfRange)
}

fn system_time_to_millis(time: SystemTime) -> Option<MillisSinceEpoch> {
    let millis = match time.duration_since(SystemTime::UNIX_EPOCH) {
        Ok(duration) => i64::try_from(duration.as_millis()).ok()?,
        Err(err) => -i64::try_from(err.duration().as_millis()).ok()?,
    };
    Some(MillisSinceEpoch(millis))
}
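
// A minimal sketch of the conversion above: times before the Unix epoch map
// to negative milliseconds rather than an error.
#[cfg(test)]
mod millis_conversion_example {
    use super::*;
    use std::time::Duration;

    #[test]
    fn pre_epoch_times_become_negative_millis() {
        let after = SystemTime::UNIX_EPOCH + Duration::from_millis(1500);
        assert_eq!(system_time_to_millis(after), Some(MillisSinceEpoch(1500)));
        let before = SystemTime::UNIX_EPOCH - Duration::from_millis(1500);
        assert_eq!(system_time_to_millis(before), Some(MillisSinceEpoch(-1500)));
    }
}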

/// Create a new [`FileState`] from metadata.
fn file_state(metadata: &Metadata) -> Result<Option<FileState>, MtimeOutOfRange> {
    let metadata_file_type = metadata.file_type();
    let file_type = if metadata_file_type.is_dir() {
        None
    } else if metadata_file_type.is_symlink() {
        Some(FileType::Symlink)
    } else if metadata_file_type.is_file() {
        let exec_bit = ExecBit::new_from_disk(metadata);
        Some(FileType::Normal { exec_bit })
    } else {
        None
    };
    if let Some(file_type) = file_type {
        Ok(Some(FileState {
            file_type,
            mtime: mtime_from_metadata(metadata)?,
            size: metadata.len(),
            materialized_conflict_data: None,
        }))
    } else {
        Ok(None)
    }
}

struct FsmonitorMatcher {
    matcher: Option<Box<dyn Matcher>>,
    watchman_clock: Option<crate::protos::local_working_copy::WatchmanClock>,
}

/// Settings specific to the tree state of the [`LocalWorkingCopy`] backend.
#[derive(Clone, Debug)]
pub struct TreeStateSettings {
    /// Conflict marker style to use when materializing files or when checking
    /// changed files.
    pub conflict_marker_style: ConflictMarkerStyle,
    /// Configures automatic conversion of CRLF line endings to LF when a file
    /// is added to the backend, and the reverse conversion when code is
    /// checked out onto your filesystem.
    pub eol_conversion_mode: EolConversionMode,
    /// Whether to ignore changes to the executable bit for files on Unix.
    pub exec_change_setting: ExecChangeSetting,
    /// The fsmonitor (e.g. Watchman) to use, if any.
    pub fsmonitor_settings: FsmonitorSettings,
}

impl TreeStateSettings {
    /// Create [`TreeStateSettings`] from [`UserSettings`].
    pub fn try_from_user_settings(user_settings: &UserSettings) -> Result<Self, ConfigGetError> {
        Ok(Self {
            conflict_marker_style: user_settings.get("ui.conflict-marker-style")?,
            eol_conversion_mode: EolConversionMode::try_from_settings(user_settings)?,
            exec_change_setting: user_settings.get("working-copy.exec-bit-change")?,
            fsmonitor_settings: FsmonitorSettings::from_settings(user_settings)?,
        })
    }
}
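
// For reference, a sketch of how these settings might appear in user
// configuration. The keys are the ones queried above (plus `fsmonitor.backend`
// mentioned later in this file); the values shown are illustrative
// assumptions, not defaults asserted here:
//
//     [ui]
//     conflict-marker-style = "diff"
//
//     [working-copy]
//     exec-bit-change = "auto"
//
//     [fsmonitor]
//     backend = "watchman"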

pub struct TreeState {
    store: Arc<Store>,
    working_copy_path: PathBuf,
    state_path: PathBuf,
    tree: MergedTree,
    file_states: FileStatesMap,
    // Currently only path prefixes
    sparse_patterns: Vec<RepoPathBuf>,
    own_mtime: MillisSinceEpoch,
    symlink_support: bool,

    /// The most recent clock value returned by Watchman. Will only be set if
    /// the repo is configured to use the Watchman filesystem monitor and
    /// Watchman has been queried at least once.
    watchman_clock: Option<crate::protos::local_working_copy::WatchmanClock>,

    conflict_marker_style: ConflictMarkerStyle,
    exec_policy: ExecChangePolicy,
    fsmonitor_settings: FsmonitorSettings,
    target_eol_strategy: TargetEolStrategy,
}

#[derive(Debug, Error)]
pub enum TreeStateError {
    #[error("Reading tree state from {path}")]
    ReadTreeState { path: PathBuf, source: io::Error },
    #[error("Decoding tree state from {path}")]
    DecodeTreeState {
        path: PathBuf,
        source: prost::DecodeError,
    },
    #[error("Writing tree state to temporary file {path}")]
    WriteTreeState { path: PathBuf, source: io::Error },
    #[error("Persisting tree state to file {path}")]
    PersistTreeState { path: PathBuf, source: io::Error },
    #[error("Filesystem monitor error")]
    Fsmonitor(#[source] Box<dyn Error + Send + Sync>),
}

impl TreeState {
    pub fn working_copy_path(&self) -> &Path {
        &self.working_copy_path
    }

    pub fn current_tree(&self) -> &MergedTree {
        &self.tree
    }

    pub fn file_states(&self) -> FileStates<'_> {
        self.file_states.all()
    }

    pub fn sparse_patterns(&self) -> &Vec<RepoPathBuf> {
        &self.sparse_patterns
    }

    fn sparse_matcher(&self) -> Box<dyn Matcher> {
        Box::new(PrefixMatcher::new(&self.sparse_patterns))
    }

    pub fn init(
        store: Arc<Store>,
        working_copy_path: PathBuf,
        state_path: PathBuf,
        tree_state_settings: &TreeStateSettings,
    ) -> Result<Self, TreeStateError> {
        let mut wc = Self::empty(store, working_copy_path, state_path, tree_state_settings);
        wc.save()?;
        Ok(wc)
    }

    fn empty(
        store: Arc<Store>,
        working_copy_path: PathBuf,
        state_path: PathBuf,
        &TreeStateSettings {
            conflict_marker_style,
            eol_conversion_mode,
            exec_change_setting,
            ref fsmonitor_settings,
        }: &TreeStateSettings,
    ) -> Self {
        let exec_policy = ExecChangePolicy::new(exec_change_setting, &state_path);
        Self {
            store: store.clone(),
            working_copy_path,
            state_path,
            tree: store.empty_merged_tree(),
            file_states: FileStatesMap::new(),
            sparse_patterns: vec![RepoPathBuf::root()],
            own_mtime: MillisSinceEpoch(0),
            symlink_support: check_symlink_support().unwrap_or(false),
            watchman_clock: None,
            conflict_marker_style,
            exec_policy,
            fsmonitor_settings: fsmonitor_settings.clone(),
            target_eol_strategy: TargetEolStrategy::new(eol_conversion_mode),
        }
    }

    pub fn load(
        store: Arc<Store>,
        working_copy_path: PathBuf,
        state_path: PathBuf,
        tree_state_settings: &TreeStateSettings,
    ) -> Result<Self, TreeStateError> {
        let tree_state_path = state_path.join("tree_state");
        let file = match File::open(&tree_state_path) {
            Err(ref err) if err.kind() == io::ErrorKind::NotFound => {
                return Self::init(store, working_copy_path, state_path, tree_state_settings);
            }
            Err(err) => {
                return Err(TreeStateError::ReadTreeState {
                    path: tree_state_path,
                    source: err,
                });
            }
            Ok(file) => file,
        };

        let mut wc = Self::empty(store, working_copy_path, state_path, tree_state_settings);
        wc.read(&tree_state_path, file)?;
        Ok(wc)
    }

    fn update_own_mtime(&mut self) {
        if let Ok(metadata) = self.state_path.join("tree_state").symlink_metadata()
            && let Ok(mtime) = mtime_from_metadata(&metadata)
        {
            self.own_mtime = mtime;
        } else {
            self.own_mtime = MillisSinceEpoch(0);
        }
    }

    fn read(&mut self, tree_state_path: &Path, mut file: File) -> Result<(), TreeStateError> {
        self.update_own_mtime();
        let mut buf = Vec::new();
        file.read_to_end(&mut buf)
            .map_err(|err| TreeStateError::ReadTreeState {
                path: tree_state_path.to_owned(),
                source: err,
            })?;
        let proto = crate::protos::local_working_copy::TreeState::decode(&*buf).map_err(|err| {
            TreeStateError::DecodeTreeState {
                path: tree_state_path.to_owned(),
                source: err,
            }
        })?;
        #[expect(deprecated)]
        if proto.tree_ids.is_empty() {
            self.tree = MergedTree::resolved(
                self.store.clone(),
                TreeId::new(proto.legacy_tree_id.clone()),
            );
        } else {
            let tree_ids_builder: MergeBuilder<TreeId> = proto
                .tree_ids
                .iter()
                .map(|id| TreeId::new(id.clone()))
                .collect();
            self.tree = MergedTree::new(
                self.store.clone(),
                tree_ids_builder.build(),
                ConflictLabels::from_vec(proto.conflict_labels),
            );
        }
        self.file_states =
            FileStatesMap::from_proto(proto.file_states, proto.is_file_states_sorted);
        self.sparse_patterns = sparse_patterns_from_proto(proto.sparse_patterns.as_ref());
        self.watchman_clock = proto.watchman_clock;
        Ok(())
    }

    #[expect(clippy::assigning_clones, clippy::field_reassign_with_default)]
    pub fn save(&mut self) -> Result<(), TreeStateError> {
        let mut proto: crate::protos::local_working_copy::TreeState = Default::default();
        proto.tree_ids = self
            .tree
            .tree_ids()
            .iter()
            .map(|id| id.to_bytes())
            .collect();
        proto.conflict_labels = self.tree.labels().as_slice().to_owned();
        proto.file_states = self.file_states.data.clone();
        // `FileStatesMap` is guaranteed to be sorted.
        proto.is_file_states_sorted = true;
        let mut sparse_patterns = crate::protos::local_working_copy::SparsePatterns::default();
        for path in &self.sparse_patterns {
            sparse_patterns
                .prefixes
                .push(path.as_internal_file_string().to_owned());
        }
        proto.sparse_patterns = Some(sparse_patterns);
        proto.watchman_clock = self.watchman_clock.clone();

        let wrap_write_err = |source| TreeStateError::WriteTreeState {
            path: self.state_path.clone(),
            source,
        };
        let mut temp_file = NamedTempFile::new_in(&self.state_path).map_err(wrap_write_err)?;
        temp_file
            .as_file_mut()
            .write_all(&proto.encode_to_vec())
            .map_err(wrap_write_err)?;
        // Update our own write time before we rename the file, so we know
        // there is no unknown data in it.
        self.update_own_mtime();
        // TODO: Retry if persisting fails (it will on Windows if the file happened to
        // be open for read).
        let target_path = self.state_path.join("tree_state");
        persist_temp_file(temp_file, &target_path).map_err(|source| {
            TreeStateError::PersistTreeState {
                path: target_path.clone(),
                source,
            }
        })?;
        Ok(())
    }

    fn reset_watchman(&mut self) {
        self.watchman_clock.take();
    }

    #[cfg(feature = "watchman")]
    #[instrument(skip(self))]
    pub async fn query_watchman(
        &self,
        config: &WatchmanConfig,
    ) -> Result<(watchman::Clock, Option<Vec<PathBuf>>), TreeStateError> {
        let previous_clock = self.watchman_clock.clone().map(watchman::Clock::from);

        let tokio_fn = async || {
            let fsmonitor = watchman::Fsmonitor::init(&self.working_copy_path, config)
                .await
                .map_err(|err| TreeStateError::Fsmonitor(Box::new(err)))?;
            fsmonitor
                .query_changed_files(previous_clock)
                .await
                .map_err(|err| TreeStateError::Fsmonitor(Box::new(err)))
        };

        match tokio::runtime::Handle::try_current() {
            Ok(_handle) => tokio_fn().await,
            Err(_) => {
                let runtime = tokio::runtime::Builder::new_current_thread()
                    .enable_all()
                    .build()
                    .map_err(|err| TreeStateError::Fsmonitor(Box::new(err)))?;
                runtime.block_on(tokio_fn())
            }
        }
    }

    #[cfg(feature = "watchman")]
    #[instrument(skip(self))]
    pub async fn is_watchman_trigger_registered(
        &self,
        config: &WatchmanConfig,
    ) -> Result<bool, TreeStateError> {
        let tokio_fn = async || {
            let fsmonitor = watchman::Fsmonitor::init(&self.working_copy_path, config)
                .await
                .map_err(|err| TreeStateError::Fsmonitor(Box::new(err)))?;
            fsmonitor
                .is_trigger_registered()
                .await
                .map_err(|err| TreeStateError::Fsmonitor(Box::new(err)))
        };

        match tokio::runtime::Handle::try_current() {
            Ok(_handle) => tokio_fn().await,
            Err(_) => {
                let runtime = tokio::runtime::Builder::new_current_thread()
                    .enable_all()
                    .build()
                    .map_err(|err| TreeStateError::Fsmonitor(Box::new(err)))?;
                runtime.block_on(tokio_fn())
            }
        }
    }
}

/// Functions to snapshot local-disk files to the store.
impl TreeState {
    /// Look for changes to the working copy. If there are any changes, create
    /// a new tree from them.
    #[instrument(skip_all)]
    pub async fn snapshot(
        &mut self,
        options: &SnapshotOptions<'_>,
    ) -> Result<(bool, SnapshotStats), SnapshotError> {
        let &SnapshotOptions {
            ref base_ignores,
            progress,
            start_tracking_matcher,
            force_tracking_matcher,
            max_new_file_size,
        } = options;

        let sparse_matcher = self.sparse_matcher();

        let fsmonitor_clock_needs_save = self.fsmonitor_settings != FsmonitorSettings::None;
        let mut is_dirty = fsmonitor_clock_needs_save;
        let FsmonitorMatcher {
            matcher: fsmonitor_matcher,
            watchman_clock,
        } = self
            .make_fsmonitor_matcher(&self.fsmonitor_settings)
            .await?;
        let fsmonitor_matcher = match fsmonitor_matcher.as_ref() {
            None => &EverythingMatcher,
            Some(fsmonitor_matcher) => fsmonitor_matcher.as_ref(),
        };

        let matcher = IntersectionMatcher::new(
            sparse_matcher.as_ref(),
            UnionMatcher::new(fsmonitor_matcher, force_tracking_matcher),
        );
        if matcher.visit(RepoPath::root()).is_nothing() {
            // No need to load the current tree, set up channels, etc.
            self.watchman_clock = watchman_clock;
            return Ok((is_dirty, SnapshotStats::default()));
        }

        let (tree_entries_tx, tree_entries_rx) = channel();
        let (file_states_tx, file_states_rx) = channel();
        let (untracked_paths_tx, untracked_paths_rx) = channel();
        let (deleted_files_tx, deleted_files_rx) = channel();

        trace_span!("traverse filesystem").in_scope(|| -> Result<(), SnapshotError> {
            let snapshotter = FileSnapshotter {
                tree_state: self,
                current_tree: &self.tree,
                matcher: &matcher,
                start_tracking_matcher,
                force_tracking_matcher,
                // Move tx sides so they'll be dropped at the end of the scope.
                tree_entries_tx,
                file_states_tx,
                untracked_paths_tx,
                deleted_files_tx,
                error: OnceLock::new(),
                progress,
                max_new_file_size,
            };
            let directory_to_visit = DirectoryToVisit {
                dir: RepoPathBuf::root(),
                disk_dir: self.working_copy_path.clone(),
                git_ignore: base_ignores.clone(),
                file_states: self.file_states.all(),
            };
            // Here we use scope as a queue of per-directory jobs.
            rayon::scope(|scope| {
                snapshotter.spawn_ok(scope, |scope| {
                    snapshotter.visit_directory(directory_to_visit, scope)
                });
            });
            snapshotter.into_result()
        })?;

        let stats = SnapshotStats {
            untracked_paths: untracked_paths_rx.into_iter().collect(),
        };
        let mut tree_builder = MergedTreeBuilder::new(self.tree.clone());
        trace_span!("process tree entries").in_scope(|| {
            for (path, tree_values) in &tree_entries_rx {
                tree_builder.set_or_remove(path, tree_values);
            }
        });
        let deleted_files = trace_span!("process deleted tree entries").in_scope(|| {
            let deleted_files = HashSet::from_iter(deleted_files_rx);
            is_dirty |= !deleted_files.is_empty();
            for file in &deleted_files {
                tree_builder.set_or_remove(file.clone(), Merge::absent());
            }
            deleted_files
        });
        trace_span!("process file states").in_scope(|| {
            let changed_file_states = file_states_rx
                .iter()
                .sorted_unstable_by(|(path1, _), (path2, _)| path1.cmp(path2))
                .collect_vec();
            is_dirty |= !changed_file_states.is_empty();
            self.file_states
                .merge_in(changed_file_states, &deleted_files);
        });
        trace_span!("write tree").in_scope(|| -> Result<(), BackendError> {
            let new_tree = tree_builder.write_tree()?;
            is_dirty |= new_tree.tree_ids_and_labels() != self.tree.tree_ids_and_labels();
            self.tree = new_tree.clone();
            Ok(())
        })?;
        if cfg!(debug_assertions) {
            let tree_paths: HashSet<_> = self
                .tree
                .entries_matching(sparse_matcher.as_ref())
                .filter_map(|(path, result)| result.is_ok().then_some(path))
                .collect();
            let file_states = self.file_states.all();
            let state_paths: HashSet<_> = file_states.paths().map(|path| path.to_owned()).collect();
            assert_eq!(state_paths, tree_paths);
        }
        // Since untracked paths aren't cached in the tree state, we'll need to
        // rescan the working directory changes to report or track them later.
        // TODO: store untracked paths and update watchman_clock?
        if stats.untracked_paths.is_empty() || watchman_clock.is_none() {
            self.watchman_clock = watchman_clock;
        } else {
            tracing::info!("not updating watchman clock because there are untracked files");
        }
        Ok((is_dirty, stats))
    }
1382
1383    #[instrument(skip_all)]
1384    async fn make_fsmonitor_matcher(
1385        &self,
1386        fsmonitor_settings: &FsmonitorSettings,
1387    ) -> Result<FsmonitorMatcher, SnapshotError> {
1388        let (watchman_clock, changed_files) = match fsmonitor_settings {
1389            FsmonitorSettings::None => (None, None),
1390            FsmonitorSettings::Test { changed_files } => (None, Some(changed_files.clone())),
1391            #[cfg(feature = "watchman")]
1392            FsmonitorSettings::Watchman(config) => match self.query_watchman(config).await {
1393                Ok((watchman_clock, changed_files)) => (Some(watchman_clock.into()), changed_files),
1394                Err(err) => {
1395                    tracing::warn!(?err, "Failed to query filesystem monitor");
1396                    (None, None)
1397                }
1398            },
1399            #[cfg(not(feature = "watchman"))]
1400            FsmonitorSettings::Watchman(_) => {
1401                return Err(SnapshotError::Other {
1402                    message: "Failed to query the filesystem monitor".to_string(),
1403                    err: "Cannot query Watchman because jj was not compiled with the `watchman` \
1404                          feature (consider disabling `fsmonitor.backend`)"
1405                        .into(),
1406                });
1407            }
1408        };
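        // Turn the changed files into a matcher. An illustrative example: if
        // the monitor reports ["src/main.rs", "src/.gitignore"], we match
        // those files plus everything under "src", since the .gitignore
        // change may have unignored arbitrary paths in that directory.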
1409        let matcher: Option<Box<dyn Matcher>> = match changed_files {
1410            None => None,
1411            Some(changed_files) => {
1412                let (repo_paths, gitignore_prefixes) = trace_span!("processing fsmonitor paths")
1413                    .in_scope(|| {
1414                        let repo_paths = changed_files
1415                            .iter()
1416                            .filter_map(|path| RepoPathBuf::from_relative_path(path).ok())
1417                            .collect_vec();
1418                        // .gitignore changes require rescanning parent directories to pick up newly
1419                        // unignored files.
1420                        let gitignore_prefixes = repo_paths
1421                            .iter()
1422                            .filter_map(|repo_path| {
1423                                let (parent, basename) = repo_path.split()?;
1424                                (basename.as_internal_str() == ".gitignore")
1425                                    .then(|| parent.to_owned())
1426                            })
1427                            .collect_vec();
1428                        (repo_paths, gitignore_prefixes)
1429                    });
1430
1431                let matcher: Box<dyn Matcher> = if gitignore_prefixes.is_empty() {
1432                    Box::new(FilesMatcher::new(repo_paths))
1433                } else {
1434                    Box::new(UnionMatcher::new(
1435                        FilesMatcher::new(repo_paths),
1436                        PrefixMatcher::new(gitignore_prefixes),
1437                    ))
1438                };
1439
1440                Some(matcher)
1441            }
1442        };
1443        Ok(FsmonitorMatcher {
1444            matcher,
1445            watchman_clock,
1446        })
1447    }
1448}
1449
1450struct DirectoryToVisit<'a> {
1451    dir: RepoPathBuf,
1452    disk_dir: PathBuf,
1453    git_ignore: Arc<GitIgnoreFile>,
1454    file_states: FileStates<'a>,
1455}
1456
1457#[derive(Clone, Copy, Debug, Eq, PartialEq)]
1458enum PresentDirEntryKind {
1459    Dir,
1460    File,
1461}
1462
1463#[derive(Clone, Debug)]
1464struct PresentDirEntries {
1465    dirs: HashSet<String>,
1466    files: HashSet<String>,
1467}
1468
1469/// Helper to scan local-disk directories and files in parallel.
1470struct FileSnapshotter<'a> {
1471    tree_state: &'a TreeState,
1472    current_tree: &'a MergedTree,
1473    matcher: &'a dyn Matcher,
1474    start_tracking_matcher: &'a dyn Matcher,
1475    force_tracking_matcher: &'a dyn Matcher,
1476    tree_entries_tx: Sender<(RepoPathBuf, MergedTreeValue)>,
1477    file_states_tx: Sender<(RepoPathBuf, FileState)>,
1478    untracked_paths_tx: Sender<(RepoPathBuf, UntrackedReason)>,
1479    deleted_files_tx: Sender<RepoPathBuf>,
1480    error: OnceLock<SnapshotError>,
1481    progress: Option<&'a SnapshotProgress<'a>>,
1482    max_new_file_size: u64,
1483}
1484
1485impl FileSnapshotter<'_> {
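    /// Spawns `body` on the rayon scope unless a previous job has already
    /// failed. Only the first error is recorded; later errors are dropped.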
1486    fn spawn_ok<'scope, F>(&'scope self, scope: &rayon::Scope<'scope>, body: F)
1487    where
1488        F: FnOnce(&rayon::Scope<'scope>) -> Result<(), SnapshotError> + Send + 'scope,
1489    {
1490        scope.spawn(|scope| {
1491            if self.error.get().is_some() {
1492                return;
1493            }
1494            match body(scope) {
1495                Ok(()) => {}
1496                Err(err) => self.error.set(err).unwrap_or(()),
1497            }
1498        });
1499    }
1500
1501    /// Extracts the result of the snapshot.
1502    fn into_result(self) -> Result<(), SnapshotError> {
1503        match self.error.into_inner() {
1504            Some(err) => Err(err),
1505            None => Ok(()),
1506        }
1507    }
1508
1509    /// Visits the directory entries and spawns jobs to recurse into
1510    /// subdirectories.
1511    fn visit_directory<'scope>(
1512        &'scope self,
1513        directory_to_visit: DirectoryToVisit<'scope>,
1514        scope: &rayon::Scope<'scope>,
1515    ) -> Result<(), SnapshotError> {
1516        let DirectoryToVisit {
1517            dir,
1518            disk_dir,
1519            git_ignore,
1520            file_states,
1521        } = directory_to_visit;
1522
1523        let git_ignore = git_ignore
1524            .chain_with_file(&dir.to_internal_dir_string(), disk_dir.join(".gitignore"))?;
1525        let dir_entries: Vec<_> = disk_dir
1526            .read_dir()
1527            .and_then(|entries| entries.try_collect())
1528            .map_err(|err| SnapshotError::Other {
1529                message: format!("Failed to read directory {}", disk_dir.display()),
1530                err: err.into(),
1531            })?;
1532        let (dirs, files) = dir_entries
1533            .into_par_iter()
1534            // Don't split into too many small jobs. For a small directory,
1535            // a sequential scan should be fast enough.
1536            .with_min_len(100)
1537            .filter_map(|entry| {
1538                self.process_dir_entry(&dir, &git_ignore, file_states, &entry, scope)
1539                    .transpose()
1540            })
1541            .map(|item| match item {
1542                Ok((PresentDirEntryKind::Dir, name)) => Ok(Either::Left(name)),
1543                Ok((PresentDirEntryKind::File, name)) => Ok(Either::Right(name)),
1544                Err(err) => Err(err),
1545            })
1546            .collect::<Result<_, _>>()?;
1547        let present_entries = PresentDirEntries { dirs, files };
1548        self.emit_deleted_files(&dir, file_states, &present_entries);
1549        Ok(())
1550    }
1551
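    /// Processes a single directory entry: spawns a job to recurse into a
    /// subdirectory, or snapshots a file unless it is ignored, not
    /// auto-tracked, or too large to start tracking. Returns the entry kind
    /// and name if the entry is considered present on disk.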
1552    fn process_dir_entry<'scope>(
1553        &'scope self,
1554        dir: &RepoPath,
1555        git_ignore: &Arc<GitIgnoreFile>,
1556        file_states: FileStates<'scope>,
1557        entry: &DirEntry,
1558        scope: &rayon::Scope<'scope>,
1559    ) -> Result<Option<(PresentDirEntryKind, String)>, SnapshotError> {
1560        let file_type = entry.file_type().unwrap();
1561        let file_name = entry.file_name();
1562        let name_string = file_name
1563            .into_string()
1564            .map_err(|path| SnapshotError::InvalidUtf8Path { path })?;
1565
1566        if RESERVED_DIR_NAMES.contains(&name_string.as_str()) {
1567            return Ok(None);
1568        }
1569        let name = RepoPathComponent::new(&name_string).unwrap();
1570        let path = dir.join(name);
1571        let maybe_current_file_state = file_states.get_at(dir, name);
1572        if let Some(file_state) = &maybe_current_file_state
1573            && file_state.file_type == FileType::GitSubmodule
1574        {
1575            return Ok(None);
1576        }
1577
1578        if file_type.is_dir() {
1579            let file_states = file_states.prefixed_at(dir, name);
1580            // If a submodule was added in commit C, and a user decides to run
1581            // `jj new <something before C>` from after C, then the submodule
1582            // files stick around but it is no longer seen as a submodule.
1583            // We need to ensure that it is not tracked as if it was added to
1584            // the main repo.
1585            // See https://github.com/jj-vcs/jj/issues/4349.
1586            // To solve this, we ignore all nested repos entirely.
1587            let disk_dir = entry.path();
1588            for &name in RESERVED_DIR_NAMES {
1589                if disk_dir.join(name).symlink_metadata().is_ok() {
1590                    return Ok(None);
1591                }
1592            }
1593
1594            if git_ignore.matches(&path.to_internal_dir_string())
1595                && self.force_tracking_matcher.visit(&path).is_nothing()
1596            {
1597            // If the whole directory is ignored by .gitignore, visit only
1598            // paths we're already tracking, since a .gitignore inside an
1599            // ignored directory must itself be ignored. It's also more efficient.
1600                // start_tracking_matcher is NOT tested here because we need to
1601                // scan directory entries to report untracked paths.
1602                self.spawn_ok(scope, move |_| self.visit_tracked_files(file_states));
1603            } else if !self.matcher.visit(&path).is_nothing() {
1604                let directory_to_visit = DirectoryToVisit {
1605                    dir: path,
1606                    disk_dir,
1607                    git_ignore: git_ignore.clone(),
1608                    file_states,
1609                };
1610                self.spawn_ok(scope, |scope| {
1611                    self.visit_directory(directory_to_visit, scope)
1612                });
1613            }
1614            // Whether or not the directory path matches, any child file entries
1615            // shouldn't be touched within the current recursion step.
1616            Ok(Some((PresentDirEntryKind::Dir, name_string)))
1617        } else if self.matcher.matches(&path) {
1618            if let Some(progress) = self.progress {
1619                progress(&path);
1620            }
1621            if maybe_current_file_state.is_none()
1622                && (git_ignore.matches(path.as_internal_file_string())
1623                    && !self.force_tracking_matcher.matches(&path))
1624            {
1625                // If it wasn't already tracked and it matches
1626                // the ignored paths, then ignore it.
1627                Ok(None)
1628            } else if maybe_current_file_state.is_none()
1629                && !self.start_tracking_matcher.matches(&path)
1630            {
1631                // Leave the file untracked
1632                self.untracked_paths_tx
1633                    .send((path, UntrackedReason::FileNotAutoTracked))
1634                    .ok();
1635                Ok(None)
1636            } else {
1637                let metadata = entry.metadata().map_err(|err| SnapshotError::Other {
1638                    message: format!("Failed to stat file {}", entry.path().display()),
1639                    err: err.into(),
1640                })?;
1641                if maybe_current_file_state.is_none()
1642                    && (metadata.len() > self.max_new_file_size
1643                        && !self.force_tracking_matcher.matches(&path))
1644                {
1645                    // Leave the large file untracked
1646                    let reason = UntrackedReason::FileTooLarge {
1647                        size: metadata.len(),
1648                        max_size: self.max_new_file_size,
1649                    };
1650                    self.untracked_paths_tx.send((path, reason)).ok();
1651                    Ok(None)
1652                } else if let Some(new_file_state) = file_state(&metadata)
1653                    .map_err(|err| snapshot_error_for_mtime_out_of_range(err, &entry.path()))?
1654                {
1655                    self.process_present_file(
1656                        path,
1657                        &entry.path(),
1658                        maybe_current_file_state.as_ref(),
1659                        new_file_state,
1660                    )?;
1661                    Ok(Some((PresentDirEntryKind::File, name_string)))
1662                } else {
1663                    // Special file is not considered present
1664                    Ok(None)
1665                }
1666            }
1667        } else {
1668            Ok(None)
1669        }
1670    }
1671
1672    /// Visits only paths we're already tracking.
1673    fn visit_tracked_files(&self, file_states: FileStates<'_>) -> Result<(), SnapshotError> {
1674        for (tracked_path, current_file_state) in file_states {
1675            if current_file_state.file_type == FileType::GitSubmodule {
1676                continue;
1677            }
1678            if !self.matcher.matches(tracked_path) {
1679                continue;
1680            }
1681            let disk_path = tracked_path.to_fs_path(&self.tree_state.working_copy_path)?;
1682            let metadata = match disk_path.symlink_metadata() {
1683                Ok(metadata) => Some(metadata),
1684                Err(err) if err.kind() == io::ErrorKind::NotFound => None,
1685                Err(err) => {
1686                    return Err(SnapshotError::Other {
1687                        message: format!("Failed to stat file {}", disk_path.display()),
1688                        err: err.into(),
1689                    });
1690                }
1691            };
1692            if let Some(metadata) = &metadata
1693                && let Some(new_file_state) = file_state(metadata)
1694                    .map_err(|err| snapshot_error_for_mtime_out_of_range(err, &disk_path))?
1695            {
1696                self.process_present_file(
1697                    tracked_path.to_owned(),
1698                    &disk_path,
1699                    Some(&current_file_state),
1700                    new_file_state,
1701                )?;
1702            } else {
1703                self.deleted_files_tx.send(tracked_path.to_owned()).ok();
1704            }
1705        }
1706        Ok(())
1707    }
1708
1709    fn process_present_file(
1710        &self,
1711        path: RepoPathBuf,
1712        disk_path: &Path,
1713        maybe_current_file_state: Option<&FileState>,
1714        mut new_file_state: FileState,
1715    ) -> Result<(), SnapshotError> {
1716        let update = self.get_updated_tree_value(
1717            &path,
1718            disk_path,
1719            maybe_current_file_state,
1720            &new_file_state,
1721        )?;
1722        // Preserve materialized conflict data for normal, non-resolved files
1723        if matches!(new_file_state.file_type, FileType::Normal { .. })
1724            && !update.as_ref().is_some_and(|update| update.is_resolved())
1725        {
1726            new_file_state.materialized_conflict_data =
1727                maybe_current_file_state.and_then(|state| state.materialized_conflict_data);
1728        }
1729        if let Some(tree_value) = update {
1730            self.tree_entries_tx.send((path.clone(), tree_value)).ok();
1731        }
1732        if Some(&new_file_state) != maybe_current_file_state {
1733            self.file_states_tx.send((path, new_file_state)).ok();
1734        }
1735        Ok(())
1736    }
1737
1738    /// Emits file paths that don't exist in `present_entries`.
1739    fn emit_deleted_files(
1740        &self,
1741        dir: &RepoPath,
1742        file_states: FileStates<'_>,
1743        present_entries: &PresentDirEntries,
1744    ) {
1745        let file_state_chunks = file_states.iter().chunk_by(|(path, _state)| {
1746            // Extract <name> from <dir>, <dir>/<name>, or <dir>/<name>/**.
1747            // (file_states may contain <dir> file on file->dir transition.)
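            // For example, with dir = "foo": "foo/bar" yields (File, "bar"),
            // "foo/bar/baz" yields (Dir, "bar"), and "foo" itself yields
            // (File, "").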
1748            debug_assert!(path.starts_with(dir));
1749            let slash = usize::from(!dir.is_root());
1750            let len = dir.as_internal_file_string().len() + slash;
1751            let tail = path.as_internal_file_string().get(len..).unwrap_or("");
1752            match tail.split_once('/') {
1753                Some((name, _)) => (PresentDirEntryKind::Dir, name),
1754                None => (PresentDirEntryKind::File, tail),
1755            }
1756        });
1757        file_state_chunks
1758            .into_iter()
1759            .filter(|&((kind, name), _)| match kind {
1760                PresentDirEntryKind::Dir => !present_entries.dirs.contains(name),
1761                PresentDirEntryKind::File => !present_entries.files.contains(name),
1762            })
1763            .flat_map(|(_, chunk)| chunk)
1764            // Whether or not the entry exists, submodules should be ignored
1765            .filter(|(_, state)| state.file_type != FileType::GitSubmodule)
1766            .filter(|(path, _)| self.matcher.matches(path))
1767            .try_for_each(|(path, _)| self.deleted_files_tx.send(path.to_owned()))
1768            .ok();
1769    }
1770
1771    fn get_updated_tree_value(
1772        &self,
1773        repo_path: &RepoPath,
1774        disk_path: &Path,
1775        maybe_current_file_state: Option<&FileState>,
1776        new_file_state: &FileState,
1777    ) -> Result<Option<MergedTreeValue>, SnapshotError> {
1778        let clean = match maybe_current_file_state {
1779            None => {
1780                // untracked
1781                false
1782            }
1783            Some(current_file_state) => {
1784                // If the file's mtime was set at the same time as this state file's own mtime,
1785                // then we don't know if the file was modified before or after this state file.
1786                new_file_state.is_clean(current_file_state)
1787                    && current_file_state.mtime < self.tree_state.own_mtime
1788            }
1789        };
1790        if clean {
1791            Ok(None)
1792        } else {
1793            let current_tree_values = self.current_tree.path_value(repo_path)?;
1794            let new_file_type = if !self.tree_state.symlink_support {
1795                let mut new_file_type = new_file_state.file_type.clone();
1796                if matches!(new_file_type, FileType::Normal { .. })
1797                    && matches!(current_tree_values.as_normal(), Some(TreeValue::Symlink(_)))
1798                {
1799                    new_file_type = FileType::Symlink;
1800                }
1801                new_file_type
1802            } else {
1803                new_file_state.file_type.clone()
1804            };
1805            let new_tree_values = match new_file_type {
1806                FileType::Normal { exec_bit } => self
1807                    .write_path_to_store(
1808                        repo_path,
1809                        disk_path,
1810                        &current_tree_values,
1811                        exec_bit,
1812                        maybe_current_file_state.and_then(|state| state.materialized_conflict_data),
1813                    )
1814                    .block_on()?,
1815                FileType::Symlink => {
1816                    let id = self
1817                        .write_symlink_to_store(repo_path, disk_path)
1818                        .block_on()?;
1819                    Merge::normal(TreeValue::Symlink(id))
1820                }
1821                FileType::GitSubmodule => panic!("git submodule cannot be written to store"),
1822            };
1823            if new_tree_values != current_tree_values {
1824                Ok(Some(new_tree_values))
1825            } else {
1826                Ok(None)
1827            }
1828        }
1829    }
1830
1831    fn store(&self) -> &Store {
1832        &self.tree_state.store
1833    }
1834
1835    async fn write_path_to_store(
1836        &self,
1837        repo_path: &RepoPath,
1838        disk_path: &Path,
1839        current_tree_values: &MergedTreeValue,
1840        exec_bit: ExecBit,
1841        materialized_conflict_data: Option<MaterializedConflictData>,
1842    ) -> Result<MergedTreeValue, SnapshotError> {
1843        if let Some(current_tree_value) = current_tree_values.as_resolved() {
1844            let id = self.write_file_to_store(repo_path, disk_path).await?;
1845            // On Windows, we preserve the executable bit from the current tree.
1846            let executable = exec_bit.for_tree_value(self.tree_state.exec_policy, || {
1847                if let Some(TreeValue::File {
1848                    id: _,
1849                    executable,
1850                    copy_id: _,
1851                }) = current_tree_value
1852                {
1853                    Some(*executable)
1854                } else {
1855                    None
1856                }
1857            });
1858            // Preserve the copy id from the current tree
1859            let copy_id = {
1860                if let Some(TreeValue::File {
1861                    id: _,
1862                    executable: _,
1863                    copy_id,
1864                }) = current_tree_value
1865                {
1866                    copy_id.clone()
1867                } else {
1868                    CopyId::placeholder()
1869                }
1870            };
1871            Ok(Merge::normal(TreeValue::File {
1872                id,
1873                executable,
1874                copy_id,
1875            }))
1876        } else if let Some(old_file_ids) = current_tree_values.to_file_merge() {
1877            // Safe to unwrap because the copy id exists exactly on the file variant
1878            let copy_id_merge = current_tree_values.to_copy_id_merge().unwrap();
1879            let copy_id = copy_id_merge
1880                .resolve_trivial(SameChange::Accept)
1881                .cloned()
1882                .flatten()
1883                .unwrap_or_else(CopyId::placeholder);
1884            let mut contents = vec![];
1885            let file = File::open(disk_path).map_err(|err| SnapshotError::Other {
1886                message: format!("Failed to open file {}", disk_path.display()),
1887                err: err.into(),
1888            })?;
1889            self.tree_state
1890                .target_eol_strategy
1891                .convert_eol_for_snapshot(BlockingAsyncReader::new(file))
1892                .await
1893                .map_err(|err| SnapshotError::Other {
1894                    message: "Failed to convert the EOL".to_string(),
1895                    err: err.into(),
1896                })?
1897                .read_to_end(&mut contents)
1898                .await
1899                .map_err(|err| SnapshotError::Other {
1900                    message: "Failed to read the EOL converted contents".to_string(),
1901                    err: err.into(),
1902                })?;
1903            // If the file contained a conflict before and is a normal file on
1904            // disk, we try to parse any conflict markers in the file into a
1905            // conflict.
1906            let new_file_ids = conflicts::update_from_content(
1907                &old_file_ids,
1908                self.store(),
1909                repo_path,
1910                &contents,
1911                materialized_conflict_data.map_or(MIN_CONFLICT_MARKER_LEN, |data| {
1912                    data.conflict_marker_len as usize
1913                }),
1914            )
1915            .await?;
1916            match new_file_ids.into_resolved() {
1917                Ok(file_id) => {
1918                    // On Windows, we preserve the executable bit from the merged trees.
1919                    let executable = exec_bit.for_tree_value(self.tree_state.exec_policy, || {
1920                        current_tree_values
1921                            .to_executable_merge()
1922                            .as_ref()
1923                            .and_then(conflicts::resolve_file_executable)
1924                    });
1925                    Ok(Merge::normal(TreeValue::File {
1926                        id: file_id.unwrap(),
1927                        executable,
1928                        copy_id,
1929                    }))
1930                }
1931                Err(new_file_ids) => {
1932                    if new_file_ids != old_file_ids {
1933                        Ok(current_tree_values.with_new_file_ids(&new_file_ids))
1934                    } else {
1935                        Ok(current_tree_values.clone())
1936                    }
1937                }
1938            }
1939        } else {
1940            Ok(current_tree_values.clone())
1941        }
1942    }
1943
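    /// Writes the file at `disk_path` to the store, applying any configured
    /// EOL conversion, and returns the new file id.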
1944    async fn write_file_to_store(
1945        &self,
1946        path: &RepoPath,
1947        disk_path: &Path,
1948    ) -> Result<FileId, SnapshotError> {
1949        let file = File::open(disk_path).map_err(|err| SnapshotError::Other {
1950            message: format!("Failed to open file {}", disk_path.display()),
1951            err: err.into(),
1952        })?;
1953        let mut contents = self
1954            .tree_state
1955            .target_eol_strategy
1956            .convert_eol_for_snapshot(BlockingAsyncReader::new(file))
1957            .await
1958            .map_err(|err| SnapshotError::Other {
1959                message: "Failed to convert the EOL".to_string(),
1960                err: err.into(),
1961            })?;
1962        Ok(self.store().write_file(path, &mut contents).await?)
1963    }
1964
1965    async fn write_symlink_to_store(
1966        &self,
1967        path: &RepoPath,
1968        disk_path: &Path,
1969    ) -> Result<SymlinkId, SnapshotError> {
1970        if self.tree_state.symlink_support {
1971            let target = disk_path.read_link().map_err(|err| SnapshotError::Other {
1972                message: format!("Failed to read symlink {}", disk_path.display()),
1973                err: err.into(),
1974            })?;
1975            let str_target = symlink_target_convert_to_store(&target).ok_or_else(|| {
1976                SnapshotError::InvalidUtf8SymlinkTarget {
1977                    path: disk_path.to_path_buf(),
1978                }
1979            })?;
1980            Ok(self.store().write_symlink(path, &str_target).await?)
1981        } else {
1982            let target = fs::read(disk_path).map_err(|err| SnapshotError::Other {
1983                message: format!("Failed to read file {}", disk_path.display()),
1984                err: err.into(),
1985            })?;
1986            let string_target =
1987                String::from_utf8(target).map_err(|_| SnapshotError::InvalidUtf8SymlinkTarget {
1988                    path: disk_path.to_path_buf(),
1989                })?;
1990            Ok(self.store().write_symlink(path, &string_target).await?)
1991        }
1992    }
1993}
1994
1995fn snapshot_error_for_mtime_out_of_range(err: MtimeOutOfRange, path: &Path) -> SnapshotError {
1996    SnapshotError::Other {
1997        message: format!("Failed to process file metadata {}", path.display()),
1998        err: err.into(),
1999    }
2000}
2001
2002/// Functions to update local-disk files from the store.
2003impl TreeState {
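    /// Writes `contents` to a new file at `disk_path`, setting the
    /// executable bit and optionally applying EOL conversion, then returns
    /// the resulting `FileState` read back from the open file descriptor.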
2004    async fn write_file(
2005        &self,
2006        disk_path: &Path,
2007        contents: impl AsyncRead + Send + Unpin,
2008        exec_bit: ExecBit,
2009        apply_eol_conversion: bool,
2010    ) -> Result<FileState, CheckoutError> {
2011        let mut file = File::options()
2012            .write(true)
2013            .create_new(true) // Don't overwrite un-ignored file. Don't follow symlink.
2014            .open(disk_path)
2015            .map_err(|err| CheckoutError::Other {
2016                message: format!("Failed to open file {} for writing", disk_path.display()),
2017                err: err.into(),
2018            })?;
2019        let contents = if apply_eol_conversion {
2020            self.target_eol_strategy
2021                .convert_eol_for_update(contents)
2022                .await
2023                .map_err(|err| CheckoutError::Other {
2024                    message: "Failed to convert the EOL for the content".to_string(),
2025                    err: err.into(),
2026                })?
2027        } else {
2028            Box::new(contents)
2029        };
2030        let size = copy_async_to_sync(contents, &mut file)
2031            .await
2032            .map_err(|err| CheckoutError::Other {
2033                message: format!(
2034                    "Failed to write the content to the file {}",
2035                    disk_path.display()
2036                ),
2037                err: err.into(),
2038            })?;
2039        set_executable(exec_bit, disk_path)
2040            .map_err(|err| checkout_error_for_stat_error(err, disk_path))?;
2041        // Read the file state from the file descriptor. That way, we know that the
2042        // file exists and is of the expected type, and the stat information is most
2043        // likely accurate, barring other processes modifying the file concurrently.
2044        // (The mtime is set at write time and won't change when we close the file.)
2045        let metadata = file
2046            .metadata()
2047            .map_err(|err| checkout_error_for_stat_error(err, disk_path))?;
2048        FileState::for_file(exec_bit, size as u64, &metadata)
2049            .map_err(|err| checkout_error_for_mtime_out_of_range(err, disk_path))
2050    }
2051
2052    fn write_symlink(&self, disk_path: &Path, target: String) -> Result<FileState, CheckoutError> {
2053        let target = symlink_target_convert_to_disk(&target);
2054
2055        if cfg!(windows) {
2056            // On Windows, "/" can't be part of a valid file name, and "/" is also not a valid
2057            // separator for the symlink target. See an example of this issue in
2058            // https://github.com/jj-vcs/jj/issues/6934.
2059            //
2060            // We use debug_assert_* instead of assert_* because we want to avoid panic in
2061            // release build, and we are sure that we shouldn't create invalid symlinks in
2062            // tests.
2063            debug_assert_ne!(
2064                target.as_os_str().to_str().map(|path| path.contains('/')),
2065                Some(true),
2066                "Expected the symlink target not to contain \"/\", but got an invalid \
2067                 symlink target: {}.",
2068                target.display()
2069            );
2070        }
2071
2072        // On Windows, this will create a nonfunctional link for directories,
2073        // but at the moment we don't have enough information in the tree to
2074        // determine whether the symlink target is a file or a directory.
2075        symlink_file(&target, disk_path).map_err(|err| CheckoutError::Other {
2076            message: format!(
2077                "Failed to create symlink from {} to {}",
2078                disk_path.display(),
2079                target.display()
2080            ),
2081            err: err.into(),
2082        })?;
2083        let metadata = disk_path
2084            .symlink_metadata()
2085            .map_err(|err| checkout_error_for_stat_error(err, disk_path))?;
2086        FileState::for_symlink(&metadata)
2087            .map_err(|err| checkout_error_for_mtime_out_of_range(err, disk_path))
2088    }
2089
2090    async fn write_conflict(
2091        &self,
2092        disk_path: &Path,
2093        contents: &[u8],
2094        exec_bit: ExecBit,
2095    ) -> Result<FileState, CheckoutError> {
2096        let contents = self
2097            .target_eol_strategy
2098            .convert_eol_for_update(contents)
2099            .await
2100            .map_err(|err| CheckoutError::Other {
2101                message: "Failed to convert the EOL when writing a merge conflict".to_string(),
2102                err: err.into(),
2103            })?;
2104        let mut file = OpenOptions::new()
2105            .write(true)
2106            .create_new(true) // Don't overwrite un-ignored file. Don't follow symlink.
2107            .open(disk_path)
2108            .map_err(|err| CheckoutError::Other {
2109                message: format!("Failed to open file {} for writing", disk_path.display()),
2110                err: err.into(),
2111            })?;
2112        let size = copy_async_to_sync(contents, &mut file)
2113            .await
2114            .map_err(|err| CheckoutError::Other {
2115                message: format!("Failed to write conflict to file {}", disk_path.display()),
2116                err: err.into(),
2117            })? as u64;
2118        set_executable(exec_bit, disk_path)
2119            .map_err(|err| checkout_error_for_stat_error(err, disk_path))?;
2120        let metadata = file
2121            .metadata()
2122            .map_err(|err| checkout_error_for_stat_error(err, disk_path))?;
2123        FileState::for_file(exec_bit, size, &metadata)
2124            .map_err(|err| checkout_error_for_mtime_out_of_range(err, disk_path))
2125    }
2126
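    /// Checks out `new_tree`, updating the files on disk to match it.
    ///
    /// A minimal usage sketch (illustrative; assumes a loaded `TreeState`
    /// and a target `MergedTree` are in scope):
    ///
    /// ```ignore
    /// let stats = tree_state.check_out(&new_tree)?;
    /// println!("added {}, removed {}", stats.added_files, stats.removed_files);
    /// ```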
2127    pub fn check_out(&mut self, new_tree: &MergedTree) -> Result<CheckoutStats, CheckoutError> {
2128        let old_tree = self.tree.clone();
2129        let stats = self
2130            .update(&old_tree, new_tree, self.sparse_matcher().as_ref())
2131            .block_on()?;
2132        self.tree = new_tree.clone();
2133        Ok(stats)
2134    }
2135
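    /// Replaces the sparse patterns, materializing newly matched paths from
    /// the current tree and removing paths that are no longer matched.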
2136    pub fn set_sparse_patterns(
2137        &mut self,
2138        sparse_patterns: Vec<RepoPathBuf>,
2139    ) -> Result<CheckoutStats, CheckoutError> {
2140        let tree = self.tree.clone();
2141        let old_matcher = PrefixMatcher::new(&self.sparse_patterns);
2142        let new_matcher = PrefixMatcher::new(&sparse_patterns);
2143        let added_matcher = DifferenceMatcher::new(&new_matcher, &old_matcher);
2144        let removed_matcher = DifferenceMatcher::new(&old_matcher, &new_matcher);
2145        let empty_tree = self.store.empty_merged_tree();
2146        let added_stats = self.update(&empty_tree, &tree, &added_matcher).block_on()?;
2147        let removed_stats = self
2148            .update(&tree, &empty_tree, &removed_matcher)
2149            .block_on()?;
2150        self.sparse_patterns = sparse_patterns;
2151        assert_eq!(added_stats.updated_files, 0);
2152        assert_eq!(added_stats.removed_files, 0);
2153        assert_eq!(removed_stats.updated_files, 0);
2154        assert_eq!(removed_stats.added_files, 0);
2155        assert_eq!(removed_stats.skipped_files, 0);
2156        Ok(CheckoutStats {
2157            updated_files: 0,
2158            added_files: added_stats.added_files,
2159            removed_files: removed_stats.removed_files,
2160            skipped_files: added_stats.skipped_files,
2161        })
2162    }
2163
2164    async fn update(
2165        &mut self,
2166        old_tree: &MergedTree,
2167        new_tree: &MergedTree,
2168        matcher: &dyn Matcher,
2169    ) -> Result<CheckoutStats, CheckoutError> {
2170        // TODO: maybe it's better not to include the skipped counts in the "intended"
2171        // counts
2172        let mut stats = CheckoutStats {
2173            updated_files: 0,
2174            added_files: 0,
2175            removed_files: 0,
2176            skipped_files: 0,
2177        };
2178        let mut changed_file_states = Vec::new();
2179        let mut deleted_files = HashSet::new();
2180        let mut prev_created_path: RepoPathBuf = RepoPathBuf::root();
2181
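        // Applies a single diff entry to the disk: updates the stats,
        // removes or (re)creates the file, symlink, or materialized
        // conflict, and records the resulting file state.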
2182        let mut process_diff_entry = async |path: RepoPathBuf,
2183                                            before: MergedTreeValue,
2184                                            after: MaterializedTreeValue|
2185               -> Result<(), CheckoutError> {
2186            if after.is_absent() {
2187                stats.removed_files += 1;
2188            } else if before.is_absent() {
2189                stats.added_files += 1;
2190            } else {
2191                stats.updated_files += 1;
2192            }
2193
2194            // An existing Git submodule can be a non-empty directory on disk. We
2195            // shouldn't attempt to manage it as a tracked path.
2196            //
2197            // TODO: It might be better to add general support for paths not
2198            // tracked by jj than processing submodules specially. For example,
2199            // paths excluded by .gitignore can be marked as such so that
2200            // newly-"unignored" paths won't be snapshotted automatically.
2201            if matches!(before.as_normal(), Some(TreeValue::GitSubmodule(_)))
2202                && matches!(after, MaterializedTreeValue::GitSubmodule(_))
2203            {
2204                eprintln!("ignoring git submodule at {path:?}");
2205                // Not updating the file state as if there were no diffs. Leave
2206                // the state type as FileType::GitSubmodule if it was before.
2207                return Ok(());
2208            }
2209
2210            // This path and the previously processed path may have a common prefix. We
2211            // can adjust the "working copy" path to the parent directory which we know
2212            // is already created. If there is no common prefix, this will default to
2213            // RepoPath::root() as the common prefix.
2214            let (common_prefix, adjusted_diff_file_path) =
2215                path.split_common_prefix(&prev_created_path);
2216
2217            let disk_path = if adjusted_diff_file_path.is_root() {
2218                // The path being "root" here implies that the entire path has already been
2219                // created.
2220                //
2221                // e.g. we may have already processed a path like "foo/bar/baz", and this is
2222                // our `prev_created_path`.
2223                //
2224                // and the current path is:
2225                // "foo/bar"
2226                //
2227                // This results in a common prefix of "foo/bar" with an empty string for
2228                // the remainder, since its entire prefix has already been created.
2229                // This means that we don't need to create its parent dirs
2230                // either.
2231
2232                path.to_fs_path(self.working_copy_path())?
2233            } else {
2234                let adjusted_working_copy_path =
2235                    common_prefix.to_fs_path(self.working_copy_path())?;
2236
2237                // Create parent directories whether or not after.is_present(). This
2238                // ensures that the path never traverses symlinks.
2239                let Some(disk_path) =
2240                    create_parent_dirs(&adjusted_working_copy_path, adjusted_diff_file_path)?
2241                else {
2242                    changed_file_states.push((path, FileState::placeholder()));
2243                    stats.skipped_files += 1;
2244                    return Ok(());
2245                };
2246
2247                // Cache this path for the next iteration. This must occur after
2248                // `create_parent_dirs` to ensure that the path is only set when
2249                // no symlinks are encountered. Otherwise there could be
2250                // opportunity for a filesystem write-what-where attack.
2251                prev_created_path = path
2252                    .parent()
2253                    .map(RepoPath::to_owned)
2254                    .expect("diff path has no parent");
2255
2256                disk_path
2257            };
2258
2259            // If the path was present, check reserved path first and delete it.
2260            let present_file_deleted = before.is_present() && remove_old_file(&disk_path)?;
2261            // If not, create a temporary file to test the path's validity.
2262            if !present_file_deleted && !can_create_new_file(&disk_path)? {
2263                changed_file_states.push((path, FileState::placeholder()));
2264                stats.skipped_files += 1;
2265                return Ok(());
2266            }
2267
2268            // We get the previous executable bit from the file states and not
2269            // the tree value because only the file states store the on-disk
2270            // executable bit.
2271            let get_prev_exec = || self.file_states().get_exec_bit(&path);
2272
2273            // TODO: Check that the file has not changed before overwriting/removing it.
2274            let file_state = match after {
2275                MaterializedTreeValue::Absent | MaterializedTreeValue::AccessDenied(_) => {
2276                    // Reset the previous path to avoid scenarios where this path is deleted,
2277                    // then on the next iteration recreation is skipped because of this
2278                    // optimization.
2279                    prev_created_path = RepoPathBuf::root();
2280
2281                    let mut parent_dir = disk_path.parent().unwrap();
2282                    loop {
2283                        if fs::remove_dir(parent_dir).is_err() {
2284                            break;
2285                        }
2286
2287                        parent_dir = parent_dir.parent().unwrap();
2288                    }
2289                    deleted_files.insert(path);
2290                    return Ok(());
2291                }
2292                MaterializedTreeValue::File(file) => {
2293                    let exec_bit =
2294                        ExecBit::new_from_repo(file.executable, self.exec_policy, get_prev_exec);
2295                    self.write_file(&disk_path, file.reader, exec_bit, true)
2296                        .await?
2297                }
2298                MaterializedTreeValue::Symlink { id: _, target } => {
2299                    if self.symlink_support {
2300                        self.write_symlink(&disk_path, target)?
2301                    } else {
2302                        // The fake symlink file shouldn't be executable.
2303                        self.write_file(&disk_path, target.as_bytes(), ExecBit(false), false)
2304                            .await?
2305                    }
2306                }
2307                MaterializedTreeValue::GitSubmodule(_) => {
2308                    eprintln!("ignoring git submodule at {path:?}");
2309                    FileState::for_gitsubmodule()
2310                }
2311                MaterializedTreeValue::Tree(_) => {
2312                    panic!("unexpected tree entry in diff at {path:?}");
2313                }
2314                MaterializedTreeValue::FileConflict(file) => {
2315                    let conflict_marker_len =
2316                        choose_materialized_conflict_marker_len(&file.contents);
2317                    let options = ConflictMaterializeOptions {
2318                        marker_style: self.conflict_marker_style,
2319                        marker_len: Some(conflict_marker_len),
2320                        merge: self.store.merge_options().clone(),
2321                    };
2322                    let exec_bit = ExecBit::new_from_repo(
2323                        file.executable.unwrap_or(false),
2324                        self.exec_policy,
2325                        get_prev_exec,
2326                    );
2327                    let contents =
2328                        materialize_merge_result_to_bytes(&file.contents, &file.labels, &options);
2329                    let mut file_state =
2330                        self.write_conflict(&disk_path, &contents, exec_bit).await?;
2331                    file_state.materialized_conflict_data = Some(MaterializedConflictData {
2332                        conflict_marker_len: conflict_marker_len.try_into().unwrap_or(u32::MAX),
2333                    });
2334                    file_state
2335                }
2336                MaterializedTreeValue::OtherConflict { id, labels } => {
2337                    // Unless all terms are regular files, we can't do much
2338                    // better than trying to describe the merge.
2339                    let contents = id.describe(&labels);
2340                    // Since this is a dummy file, it shouldn't be executable.
2341                    self.write_conflict(&disk_path, contents.as_bytes(), ExecBit(false))
2342                        .await?
2343                }
2344            };
2345            changed_file_states.push((path, file_state));
2346            Ok(())
2347        };
2348
2349        let mut diff_stream = old_tree
2350            .diff_stream_for_file_system(new_tree, matcher)
2351            .map(async |TreeDiffEntry { path, values }| match values {
2352                Ok(diff) => {
2353                    let result =
2354                        materialize_tree_value(&self.store, &path, diff.after, new_tree.labels())
2355                            .await;
2356                    (path, result.map(|value| (diff.before, value)))
2357                }
2358                Err(err) => (path, Err(err)),
2359            })
2360            .buffered(self.store.concurrency().max(1));
2361
2362        // If a conflicted file didn't change between the two trees, but the conflict
2363        // labels did, we still need to re-materialize it in the working copy. We don't
2364        // need to do this if the conflicts have different numbers of sides, though:
2365        // such conflicts are considered different, so they will already be
2366        // materialized by `MergedTree::diff_stream_for_file_system`.
2367        let mut conflicts_to_rematerialize: HashMap<RepoPathBuf, MergedTreeValue> =
2368            if old_tree.tree_ids().num_sides() == new_tree.tree_ids().num_sides()
2369                && old_tree.labels() != new_tree.labels()
2370            {
2371                // TODO: it might be better to use an async stream here and merge it with the
2372                // other diff stream, but it could be difficult since the diff stream is not
2373                // sorted in the same order as the conflicts iterator.
2374                new_tree
2375                    .conflicts_matching(matcher)
2376                    .map(|(path, value)| value.map(|value| (path, value)))
2377                    .try_collect()?
2378            } else {
2379                HashMap::new()
2380            };
2381
2382        while let Some((path, data)) = diff_stream.next().await {
2383            let (before, after) = data?;
2384            conflicts_to_rematerialize.remove(&path);
2385            process_diff_entry(path, before, after).await?;
2386        }
2387
2388        if !conflicts_to_rematerialize.is_empty() {
2389            for (path, conflict) in conflicts_to_rematerialize {
2390                let materialized =
2391                    materialize_tree_value(&self.store, &path, conflict.clone(), new_tree.labels())
2392                        .await?;
2393                process_diff_entry(path, conflict, materialized).await?;
2394            }
2395
2396            // We need to re-sort the changed file states since we may have inserted a
2397            // conflicted file out of order.
2398            changed_file_states.sort_unstable_by(|(path1, _), (path2, _)| path1.cmp(path2));
2399        }
2400
2401        self.file_states
2402            .merge_in(changed_file_states, &deleted_files);
2403        Ok(stats)
2404    }
2405
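    /// Resets the recorded state to `new_tree` without touching the files on
    /// disk. Changed paths get dummy metadata (zero mtime and size), so the
    /// next snapshot will rescan them.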
2406    pub async fn reset(&mut self, new_tree: &MergedTree) -> Result<(), ResetError> {
2407        let matcher = self.sparse_matcher();
2408        let mut changed_file_states = Vec::new();
2409        let mut deleted_files = HashSet::new();
2410        let mut diff_stream = self
2411            .tree
2412            .diff_stream_for_file_system(new_tree, matcher.as_ref());
2413        while let Some(TreeDiffEntry { path, values }) = diff_stream.next().await {
2414            let after = values?.after;
2415            if after.is_absent() {
2416                deleted_files.insert(path);
2417            } else {
2418                let file_type = match after.into_resolved() {
2419                    Ok(value) => match value.unwrap() {
2420                        TreeValue::File {
2421                            id: _,
2422                            executable,
2423                            copy_id: _,
2424                        } => {
2425                            let get_prev_exec = || self.file_states().get_exec_bit(&path);
2426                            let exec_bit =
2427                                ExecBit::new_from_repo(executable, self.exec_policy, get_prev_exec);
2428                            FileType::Normal { exec_bit }
2429                        }
2430                        TreeValue::Symlink(_id) => FileType::Symlink,
2431                        TreeValue::GitSubmodule(_id) => {
2432                            eprintln!("ignoring git submodule at {path:?}");
2433                            FileType::GitSubmodule
2434                        }
2435                        TreeValue::Tree(_id) => {
2436                            panic!("unexpected tree entry in diff at {path:?}");
2437                        }
2438                    },
2439                    Err(_values) => {
2440                        // TODO: Try to set the executable bit based on the conflict
2441                        FileType::Normal {
2442                            exec_bit: ExecBit(false),
2443                        }
2444                    }
2445                };
2446                let file_state = FileState {
2447                    file_type,
2448                    mtime: MillisSinceEpoch(0),
2449                    size: 0,
2450                    materialized_conflict_data: None,
2451                };
2452                changed_file_states.push((path, file_state));
2453            }
2454        }
2455        self.file_states
2456            .merge_in(changed_file_states, &deleted_files);
2457        self.tree = new_tree.clone();
2458        Ok(())
2459    }
2460
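    /// Rebuilds the state from scratch: clears all file states, then resets
    /// to `new_tree` starting from an empty tree.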
2461    pub async fn recover(&mut self, new_tree: &MergedTree) -> Result<(), ResetError> {
2462        self.file_states.clear();
2463        self.tree = self.store.empty_merged_tree();
2464        self.reset(new_tree).await
2465    }
2466}
2467
2468fn checkout_error_for_stat_error(err: io::Error, path: &Path) -> CheckoutError {
2469    CheckoutError::Other {
2470        message: format!("Failed to stat file {}", path.display()),
2471        err: err.into(),
2472    }
2473}
2474
2475fn checkout_error_for_mtime_out_of_range(err: MtimeOutOfRange, path: &Path) -> CheckoutError {
2476    CheckoutError::Other {
2477        message: format!("Failed to process file metadata {}", path.display()),
2478        err: err.into(),
2479    }
2480}
2481
2482/// Working copy state stored in "checkout" file.
2483#[derive(Clone, Debug)]
2484struct CheckoutState {
2485    operation_id: OperationId,
2486    workspace_name: WorkspaceNameBuf,
2487}
2488
2489impl CheckoutState {
2490    fn load(state_path: &Path) -> Result<Self, WorkingCopyStateError> {
2491        let wrap_err = |err| WorkingCopyStateError {
2492            message: "Failed to read checkout state".to_owned(),
2493            err,
2494        };
2495        let buf = fs::read(state_path.join("checkout")).map_err(|err| wrap_err(err.into()))?;
2496        let proto = crate::protos::local_working_copy::Checkout::decode(&*buf)
2497            .map_err(|err| wrap_err(err.into()))?;
2498        Ok(Self {
2499            operation_id: OperationId::new(proto.operation_id),
2500            workspace_name: if proto.workspace_name.is_empty() {
2501                // For compatibility with old working copies.
2502                // TODO: Delete in mid 2022 or so
2503                WorkspaceName::DEFAULT.to_owned()
2504            } else {
2505                proto.workspace_name.into()
2506            },
2507        })
2508    }
2509
2510    #[instrument(skip_all)]
2511    fn save(&self, state_path: &Path) -> Result<(), WorkingCopyStateError> {
2512        let wrap_err = |err| WorkingCopyStateError {
2513            message: "Failed to write checkout state".to_owned(),
2514            err,
2515        };
2516        let proto = crate::protos::local_working_copy::Checkout {
2517            operation_id: self.operation_id.to_bytes(),
2518            workspace_name: (*self.workspace_name).into(),
2519        };
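        // Write to a temporary file and atomically move it into place so
        // that concurrent readers never observe a partially written
        // checkout file.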
2520        let mut temp_file =
2521            NamedTempFile::new_in(state_path).map_err(|err| wrap_err(err.into()))?;
2522        temp_file
2523            .as_file_mut()
2524            .write_all(&proto.encode_to_vec())
2525            .map_err(|err| wrap_err(err.into()))?;
2526        // TODO: Retry if persisting fails (it will on Windows if the file happened to
2527        // be open for read).
2528        persist_temp_file(temp_file, state_path.join("checkout"))
2529            .map_err(|err| wrap_err(err.into()))?;
2530        Ok(())
2531    }
2532}
2533
2534pub struct LocalWorkingCopy {
2535    store: Arc<Store>,
2536    working_copy_path: PathBuf,
2537    state_path: PathBuf,
2538    checkout_state: CheckoutState,
2539    tree_state: OnceCell<TreeState>,
2540    tree_state_settings: TreeStateSettings,
2541}
2542
2543impl WorkingCopy for LocalWorkingCopy {
2544    fn name(&self) -> &str {
2545        Self::name()
2546    }
2547
2548    fn workspace_name(&self) -> &WorkspaceName {
2549        &self.checkout_state.workspace_name
2550    }
2551
2552    fn operation_id(&self) -> &OperationId {
2553        &self.checkout_state.operation_id
2554    }
2555
2556    fn tree(&self) -> Result<&MergedTree, WorkingCopyStateError> {
2557        Ok(self.tree_state()?.current_tree())
2558    }
2559
2560    fn sparse_patterns(&self) -> Result<&[RepoPathBuf], WorkingCopyStateError> {
2561        Ok(self.tree_state()?.sparse_patterns())
2562    }
2563
2564    fn start_mutation(&self) -> Result<Box<dyn LockedWorkingCopy>, WorkingCopyStateError> {
2565        let lock_path = self.state_path.join("working_copy.lock");
2566        let lock = FileLock::lock(lock_path).map_err(|err| WorkingCopyStateError {
2567            message: "Failed to lock working copy".to_owned(),
2568            err: err.into(),
2569        })?;
2570
2571        let wc = Self {
2572            store: self.store.clone(),
2573            working_copy_path: self.working_copy_path.clone(),
2574            state_path: self.state_path.clone(),
2575            // Re-read the state after taking the lock
2576            checkout_state: CheckoutState::load(&self.state_path)?,
2577            // Empty so we re-read the state after taking the lock
2578            // TODO: It's expensive to reload the whole tree. We should copy it from `self` if it
2579            // hasn't changed.
2580            tree_state: OnceCell::new(),
2581            tree_state_settings: self.tree_state_settings.clone(),
2582        };
2583        let old_operation_id = wc.operation_id().clone();
2584        let old_tree = wc.tree()?.clone();
2585        Ok(Box::new(LockedLocalWorkingCopy {
2586            wc,
2587            old_operation_id,
2588            old_tree,
2589            tree_state_dirty: false,
2590            new_workspace_name: None,
2591            _lock: lock,
2592        }))
2593    }
2594}
2595
2596impl LocalWorkingCopy {
2597    pub fn name() -> &'static str {
2598        "local"
2599    }
2600
2601    /// Initializes a new working copy at `working_copy_path`. The working
2602    /// copy's state will be stored in the `state_path` directory. The working
2603    /// copy will have the empty tree checked out.
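    ///
    /// A minimal usage sketch (illustrative; assumes a `Store`, the paths,
    /// an `OperationId`, a workspace name, and `UserSettings` are already
    /// available):
    ///
    /// ```ignore
    /// let wc = LocalWorkingCopy::init(
    ///     store,
    ///     working_copy_path,
    ///     state_path,
    ///     operation_id,
    ///     workspace_name,
    ///     &user_settings,
    /// )?;
    /// ```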
2604    pub fn init(
2605        store: Arc<Store>,
2606        working_copy_path: PathBuf,
2607        state_path: PathBuf,
2608        operation_id: OperationId,
2609        workspace_name: WorkspaceNameBuf,
2610        user_settings: &UserSettings,
2611    ) -> Result<Self, WorkingCopyStateError> {
2612        let checkout_state = CheckoutState {
2613            operation_id,
2614            workspace_name,
2615        };
2616        checkout_state.save(&state_path)?;
2617        let tree_state_settings = TreeStateSettings::try_from_user_settings(user_settings)
2618            .map_err(|err| WorkingCopyStateError {
2619                message: "Failed to read the tree state settings".to_string(),
2620                err: err.into(),
2621            })?;
2622        let tree_state = TreeState::init(
2623            store.clone(),
2624            working_copy_path.clone(),
2625            state_path.clone(),
2626            &tree_state_settings,
2627        )
2628        .map_err(|err| WorkingCopyStateError {
2629            message: "Failed to initialize working copy state".to_string(),
2630            err: err.into(),
2631        })?;
2632        Ok(Self {
2633            store,
2634            working_copy_path,
2635            state_path,
2636            checkout_state,
2637            tree_state: OnceCell::with_value(tree_state),
2638            tree_state_settings,
2639        })
2640    }
2641
    pub fn load(
        store: Arc<Store>,
        working_copy_path: PathBuf,
        state_path: PathBuf,
        user_settings: &UserSettings,
    ) -> Result<Self, WorkingCopyStateError> {
        let checkout_state = CheckoutState::load(&state_path)?;
        let tree_state_settings = TreeStateSettings::try_from_user_settings(user_settings)
            .map_err(|err| WorkingCopyStateError {
                message: "Failed to read the tree state settings".to_string(),
                err: err.into(),
            })?;
        Ok(Self {
            store,
            working_copy_path,
            state_path,
            checkout_state,
            tree_state: OnceCell::new(),
            tree_state_settings,
        })
    }

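    /// Returns the directory in which this working copy's state is stored.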
    pub fn state_path(&self) -> &Path {
        &self.state_path
    }

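    /// Returns the tree state, loading it from `state_path` on first access.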
    #[instrument(skip_all)]
    fn tree_state(&self) -> Result<&TreeState, WorkingCopyStateError> {
        self.tree_state.get_or_try_init(|| {
            TreeState::load(
                self.store.clone(),
                self.working_copy_path.clone(),
                self.state_path.clone(),
                &self.tree_state_settings,
            )
            .map_err(|err| WorkingCopyStateError {
                message: "Failed to read working copy state".to_string(),
                err: err.into(),
            })
        })
    }

    fn tree_state_mut(&mut self) -> Result<&mut TreeState, WorkingCopyStateError> {
        self.tree_state()?; // Ensure loaded so that `get_mut()` below succeeds
        Ok(self.tree_state.get_mut().unwrap())
    }

    pub fn file_states(&self) -> Result<FileStates<'_>, WorkingCopyStateError> {
        Ok(self.tree_state()?.file_states())
    }

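    /// Queries the filesystem monitor, returning the new watchman clock and,
    /// if watchman could determine them, the paths changed since the
    /// previously recorded clock (`None` means the caller should rescan
    /// everything).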
    #[cfg(feature = "watchman")]
    pub async fn query_watchman(
        &self,
        config: &WatchmanConfig,
    ) -> Result<(watchman::Clock, Option<Vec<PathBuf>>), WorkingCopyStateError> {
        self.tree_state()?
            .query_watchman(config)
            .await
            .map_err(|err| WorkingCopyStateError {
                message: "Failed to query watchman".to_string(),
                err: err.into(),
            })
    }

    #[cfg(feature = "watchman")]
    pub async fn is_watchman_trigger_registered(
        &self,
        config: &WatchmanConfig,
    ) -> Result<bool, WorkingCopyStateError> {
        self.tree_state()?
            .is_watchman_trigger_registered(config)
            .await
            .map_err(|err| WorkingCopyStateError {
                message: "Failed to query watchman".to_string(),
                err: err.into(),
            })
    }
}

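/// A [`WorkingCopyFactory`] that creates and loads [`LocalWorkingCopy`]
/// instances.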
pub struct LocalWorkingCopyFactory {}

impl WorkingCopyFactory for LocalWorkingCopyFactory {
    fn init_working_copy(
        &self,
        store: Arc<Store>,
        working_copy_path: PathBuf,
        state_path: PathBuf,
        operation_id: OperationId,
        workspace_name: WorkspaceNameBuf,
        settings: &UserSettings,
    ) -> Result<Box<dyn WorkingCopy>, WorkingCopyStateError> {
        Ok(Box::new(LocalWorkingCopy::init(
            store,
            working_copy_path,
            state_path,
            operation_id,
            workspace_name,
            settings,
        )?))
    }

    fn load_working_copy(
        &self,
        store: Arc<Store>,
        working_copy_path: PathBuf,
        state_path: PathBuf,
        settings: &UserSettings,
    ) -> Result<Box<dyn WorkingCopy>, WorkingCopyStateError> {
        Ok(Box::new(LocalWorkingCopy::load(
            store,
            working_copy_path,
            state_path,
            settings,
        )?))
    }
}

/// A working copy that's locked on disk. The lock is held until you call
/// `finish()` or `discard()`.
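///
/// A sketch of a typical lifecycle (not a doctest; `wc`, `commit`, and
/// `op_id` are assumed to exist, and the locking method is the one
/// implemented above on `LocalWorkingCopy`):
///
/// ```ignore
/// // Take the lock, producing a boxed LockedLocalWorkingCopy.
/// let mut locked = wc.start_mutation()?;
/// // Update the on-disk files to match `commit`'s tree.
/// let stats = locked.check_out(&commit).await?;
/// // Persist the new state and release the lock.
/// let wc = locked.finish(op_id).await?;
/// ```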
pub struct LockedLocalWorkingCopy {
    wc: LocalWorkingCopy,
    old_operation_id: OperationId,
    old_tree: MergedTree,
    tree_state_dirty: bool,
    new_workspace_name: Option<WorkspaceNameBuf>,
    _lock: FileLock,
}

#[async_trait]
impl LockedWorkingCopy for LockedLocalWorkingCopy {
    fn old_operation_id(&self) -> &OperationId {
        &self.old_operation_id
    }

    fn old_tree(&self) -> &MergedTree {
        &self.old_tree
    }

    async fn snapshot(
        &mut self,
        options: &SnapshotOptions,
    ) -> Result<(MergedTree, SnapshotStats), SnapshotError> {
        let tree_state = self.wc.tree_state_mut()?;
        let (is_dirty, stats) = tree_state.snapshot(options).await?;
        self.tree_state_dirty |= is_dirty;
        Ok((tree_state.current_tree().clone(), stats))
    }

    async fn check_out(&mut self, commit: &Commit) -> Result<CheckoutStats, CheckoutError> {
        // TODO: Write a "pending_checkout" file with the new TreeId so we can
        // continue an interrupted update if we find such a file.
        let new_tree = commit.tree();
        let tree_state = self.wc.tree_state_mut()?;
        if tree_state.tree.tree_ids_and_labels() != new_tree.tree_ids_and_labels() {
            let stats = tree_state.check_out(&new_tree)?;
            self.tree_state_dirty = true;
            Ok(stats)
        } else {
            Ok(CheckoutStats::default())
        }
    }

    fn rename_workspace(&mut self, new_name: WorkspaceNameBuf) {
        self.new_workspace_name = Some(new_name);
    }

    async fn reset(&mut self, commit: &Commit) -> Result<(), ResetError> {
        let new_tree = commit.tree();
        self.wc.tree_state_mut()?.reset(&new_tree).await?;
        self.tree_state_dirty = true;
        Ok(())
    }

    async fn recover(&mut self, commit: &Commit) -> Result<(), ResetError> {
        let new_tree = commit.tree();
        self.wc.tree_state_mut()?.recover(&new_tree).await?;
        self.tree_state_dirty = true;
        Ok(())
    }

    fn sparse_patterns(&self) -> Result<&[RepoPathBuf], WorkingCopyStateError> {
        self.wc.sparse_patterns()
    }

    async fn set_sparse_patterns(
        &mut self,
        new_sparse_patterns: Vec<RepoPathBuf>,
    ) -> Result<CheckoutStats, CheckoutError> {
        // TODO: Write a "pending_checkout" file with new sparse patterns so we can
        // continue an interrupted update if we find such a file.
        let stats = self
            .wc
            .tree_state_mut()?
            .set_sparse_patterns(new_sparse_patterns)?;
        self.tree_state_dirty = true;
        Ok(stats)
    }

    #[instrument(skip_all)]
    async fn finish(
        mut self: Box<Self>,
        operation_id: OperationId,
    ) -> Result<Box<dyn WorkingCopy>, WorkingCopyStateError> {
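        // If nothing marked the tree state dirty, the current tree must still
        // match the tree we started from.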
        assert!(
            self.tree_state_dirty
                || self.old_tree.tree_ids_and_labels() == self.wc.tree()?.tree_ids_and_labels()
        );
        if self.tree_state_dirty {
            self.wc
                .tree_state_mut()?
                .save()
                .map_err(|err| WorkingCopyStateError {
                    message: "Failed to write working copy state".to_string(),
                    err: Box::new(err),
                })?;
        }
        if self.old_operation_id != operation_id || self.new_workspace_name.is_some() {
            self.wc.checkout_state.operation_id = operation_id;
            if let Some(workspace_name) = self.new_workspace_name {
                self.wc.checkout_state.workspace_name = workspace_name;
            }
            self.wc.checkout_state.save(&self.wc.state_path)?;
        }
        // TODO: Clear the "pending_checkout" file here.
        Ok(Box::new(self.wc))
    }
}

impl LockedLocalWorkingCopy {
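    /// Clears the saved watchman clock so that the next snapshot falls back
    /// to a full filesystem scan. Marks the tree state dirty so the cleared
    /// clock is persisted when the lock is finished.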
    pub fn reset_watchman(&mut self) -> Result<(), SnapshotError> {
        self.wc.tree_state_mut()?.reset_watchman();
        self.tree_state_dirty = true;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use std::time::Duration;

    use maplit::hashset;

    use super::*;

    fn repo_path(value: &str) -> &RepoPath {
        RepoPath::from_internal_string(value).unwrap()
    }

    fn repo_path_component(value: &str) -> &RepoPathComponent {
        RepoPathComponent::new(value).unwrap()
    }

    fn new_state(size: u64) -> FileState {
        FileState {
            file_type: FileType::Normal {
                exec_bit: ExecBit(false),
            },
            mtime: MillisSinceEpoch(0),
            size,
            materialized_conflict_data: None,
        }
    }

    #[test]
    fn test_file_states_merge() {
        let new_static_entry = |path: &'static str, size| (repo_path(path), new_state(size));
        let new_owned_entry = |path: &str, size| (repo_path(path).to_owned(), new_state(size));
        let new_proto_entry = |path: &str, size| {
            file_state_entry_to_proto(repo_path(path).to_owned(), &new_state(size))
        };
        let data = vec![
            new_proto_entry("aa", 0),
            new_proto_entry("b#", 4), // '#' < '/'
            new_proto_entry("b/c", 1),
            new_proto_entry("b/d/e", 2),
            new_proto_entry("b/e", 3),
            new_proto_entry("bc", 5),
        ];
        let mut file_states = FileStatesMap::from_proto(data, false);

        let changed_file_states = vec![
            new_owned_entry("aa", 10),    // change
            new_owned_entry("b/d/f", 11), // add
            new_owned_entry("b/e", 12),   // change
            new_owned_entry("c", 13),     // add
        ];
        let deleted_files = hashset! {
            repo_path("b/c").to_owned(),
            repo_path("b#").to_owned(),
        };
        file_states.merge_in(changed_file_states, &deleted_files);
        assert_eq!(
            file_states.all().iter().collect_vec(),
            vec![
                new_static_entry("aa", 10),
                new_static_entry("b/d/e", 2),
                new_static_entry("b/d/f", 11),
                new_static_entry("b/e", 12),
                new_static_entry("bc", 5),
                new_static_entry("c", 13),
            ],
        );
    }

    #[test]
    fn test_file_states_lookup() {
        let new_proto_entry = |path: &str, size| {
            file_state_entry_to_proto(repo_path(path).to_owned(), &new_state(size))
        };
        let data = vec![
            new_proto_entry("aa", 0),
            new_proto_entry("b/c", 1),
            new_proto_entry("b/d/e", 2),
            new_proto_entry("b/e", 3),
            new_proto_entry("b#", 4), // '#' < '/'
            new_proto_entry("bc", 5),
        ];
        let file_states = FileStates::from_sorted(&data);

        assert_eq!(
            file_states.prefixed(repo_path("")).paths().collect_vec(),
            ["aa", "b/c", "b/d/e", "b/e", "b#", "bc"].map(repo_path)
        );
        assert!(file_states.prefixed(repo_path("a")).is_empty());
        assert_eq!(
            file_states.prefixed(repo_path("aa")).paths().collect_vec(),
            ["aa"].map(repo_path)
        );
        assert_eq!(
            file_states.prefixed(repo_path("b")).paths().collect_vec(),
            ["b/c", "b/d/e", "b/e"].map(repo_path)
        );
        assert_eq!(
            file_states.prefixed(repo_path("b/d")).paths().collect_vec(),
            ["b/d/e"].map(repo_path)
        );
        assert_eq!(
            file_states.prefixed(repo_path("b#")).paths().collect_vec(),
            ["b#"].map(repo_path)
        );
        assert_eq!(
            file_states.prefixed(repo_path("bc")).paths().collect_vec(),
            ["bc"].map(repo_path)
        );
        assert!(file_states.prefixed(repo_path("z")).is_empty());

        assert!(!file_states.contains_path(repo_path("a")));
        assert!(file_states.contains_path(repo_path("aa")));
        assert!(file_states.contains_path(repo_path("b/d/e")));
        assert!(!file_states.contains_path(repo_path("b/d")));
        assert!(file_states.contains_path(repo_path("b#")));
        assert!(file_states.contains_path(repo_path("bc")));
        assert!(!file_states.contains_path(repo_path("z")));

        assert_eq!(file_states.get(repo_path("a")), None);
        assert_eq!(file_states.get(repo_path("aa")), Some(new_state(0)));
        assert_eq!(file_states.get(repo_path("b/d/e")), Some(new_state(2)));
        assert_eq!(file_states.get(repo_path("bc")), Some(new_state(5)));
        assert_eq!(file_states.get(repo_path("z")), None);
    }

    #[test]
    fn test_file_states_lookup_at() {
        let new_proto_entry = |path: &str, size| {
            file_state_entry_to_proto(repo_path(path).to_owned(), &new_state(size))
        };
        let data = vec![
            new_proto_entry("b/c", 0),
            new_proto_entry("b/d/e", 1),
            new_proto_entry("b/d#", 2), // '#' < '/'
            new_proto_entry("b/e", 3),
            new_proto_entry("b#", 4), // '#' < '/'
        ];
        let file_states = FileStates::from_sorted(&data);

        // At root
        assert_eq!(
            file_states.get_at(RepoPath::root(), repo_path_component("b")),
            None
        );
        assert_eq!(
            file_states.get_at(RepoPath::root(), repo_path_component("b#")),
            Some(new_state(4))
        );

        // At prefixed dir
        let prefixed_states = file_states.prefixed_at(RepoPath::root(), repo_path_component("b"));
        assert_eq!(
            prefixed_states.paths().collect_vec(),
            ["b/c", "b/d/e", "b/d#", "b/e"].map(repo_path)
        );
        assert_eq!(
            prefixed_states.get_at(repo_path("b"), repo_path_component("c")),
            Some(new_state(0))
        );
        assert_eq!(
            prefixed_states.get_at(repo_path("b"), repo_path_component("d")),
            None
        );
        assert_eq!(
            prefixed_states.get_at(repo_path("b"), repo_path_component("d#")),
            Some(new_state(2))
        );

        // At nested prefixed dir
        let prefixed_states = prefixed_states.prefixed_at(repo_path("b"), repo_path_component("d"));
        assert_eq!(
            prefixed_states.paths().collect_vec(),
            ["b/d/e"].map(repo_path)
        );
        assert_eq!(
            prefixed_states.get_at(repo_path("b/d"), repo_path_component("e")),
            Some(new_state(1))
        );
        assert_eq!(
            prefixed_states.get_at(repo_path("b/d"), repo_path_component("#")),
            None
        );

        // At prefixed file
        let prefixed_states = file_states.prefixed_at(RepoPath::root(), repo_path_component("b#"));
        assert_eq!(prefixed_states.paths().collect_vec(), ["b#"].map(repo_path));
        assert_eq!(
            prefixed_states.get_at(repo_path("b#"), repo_path_component("#")),
            None
        );
    }

    #[test]
    fn test_system_time_to_millis() {
        let epoch = SystemTime::UNIX_EPOCH;
        assert_eq!(system_time_to_millis(epoch), Some(MillisSinceEpoch(0)));
        if let Some(time) = epoch.checked_add(Duration::from_millis(1)) {
            assert_eq!(system_time_to_millis(time), Some(MillisSinceEpoch(1)));
        }
        if let Some(time) = epoch.checked_sub(Duration::from_millis(1)) {
            assert_eq!(system_time_to_millis(time), Some(MillisSinceEpoch(-1)));
        }
        if let Some(time) = epoch.checked_add(Duration::from_millis(i64::MAX as u64)) {
            assert_eq!(
                system_time_to_millis(time),
                Some(MillisSinceEpoch(i64::MAX))
            );
        }
        if let Some(time) = epoch.checked_sub(Duration::from_millis(i64::MAX as u64)) {
            assert_eq!(
                system_time_to_millis(time),
                Some(MillisSinceEpoch(-i64::MAX))
            );
        }
        if let Some(time) = epoch.checked_sub(Duration::from_millis(i64::MAX as u64 + 1)) {
            // i64::MIN could be returned, but we don't care about such an old timestamp
            assert_eq!(system_time_to_millis(time), None);
        }
    }
}