use bitflags::bitflags;

bitflags! {
    /// The mode of an index entry, reflecting the file mode bits recorded in the corresponding tree entry.
    pub struct Mode: u32 {
        /// A directory, equivalent to a tree (only used for sparse checkouts).
        const DIR = 0o040000;
        /// A regular file.
        const FILE = 0o100644;
        /// A regular file, executable.
        const FILE_EXECUTABLE = 0o100755;
        /// A symbolic link.
        const SYMLINK = 0o120000;
        /// A commit, used to track submodules.
        const COMMIT = 0o160000;
    }
}

pub(crate) mod mode {
    impl super::Mode {
        /// Return `true` if this mode is that of a sparse directory entry, which is only used in sparse checkouts.
        pub fn is_sparse(&self) -> bool {
            *self == Self::DIR
        }
    }
}
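
// An illustrative sketch, not part of the original file: `Mode` values are the raw octal
// mode bits also found in tree entries, and only the directory mode counts as 'sparse'.
// It assumes the `bitflags` 1.x style API (`bits()`, `from_bits()`) used throughout this file.
#[cfg(test)]
mod mode_sketch {
    use super::Mode;

    #[test]
    fn modes_match_their_octal_tree_representation() {
        assert_eq!(Mode::FILE.bits(), 0o100644);
        assert_eq!(Mode::SYMLINK.bits(), 0o120000);
        assert!(Mode::DIR.is_sparse());
        assert!(!Mode::FILE.is_sparse());
    }
}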

pub(crate) mod at_rest {
    use bitflags::bitflags;

    bitflags! {
        /// Flags as they are serialized to a storage location.
        pub struct Flags: u16 {
            /// The portion of the flags that encodes the length of the path that follows.
            const PATH_LEN = 0x0fff;
            /// The mask to obtain the stage of an entry during a merge conflict.
            const STAGE_MASK = 0x3000;
            /// If set, an additional field with extended flags follows this one.
            const EXTENDED = 0x4000;
            /// If set, the entry is assumed to match the version in the working tree, as a way to avoid `lstat()` checks.
            const ASSUME_VALID = 0x8000;
        }
    }

    bitflags! {
        /// Extended flags - add flags for serialization here and offset them down to `u16`. They are stored
        /// in an additional field which is only present if `Flags::EXTENDED` is set.
        pub struct FlagsExtended: u16 {
            /// Mirrors the in-memory `INTENT_TO_ADD` flag, shifted down by 16 bits.
            const INTENT_TO_ADD = 1 << (29 - 16);
            /// Mirrors the in-memory `SKIP_WORKTREE` flag, shifted down by 16 bits.
            const SKIP_WORKTREE = 1 << (30 - 16);
        }
    }

    impl FlagsExtended {
        /// Convert these extended at-rest flags into their in-memory representation.
        pub fn to_flags(self) -> Option<super::Flags> {
            super::Flags::from_bits((self.bits as u32) << 16)
        }
    }

    impl Flags {
        /// Convert these at-rest flags into their in-memory representation, keeping only the bits shared by both.
        pub fn to_memory(self) -> super::Flags {
            super::Flags::from_bits((self & (Flags::PATH_LEN | Flags::STAGE_MASK | Flags::ASSUME_VALID)).bits as u32)
                .expect("PATH_LEN, STAGE_MASK and ASSUME_VALID are all part of the in-memory representation")
        }
    }

    #[cfg(test)]
    mod tests {
        use super::*;

        #[test]
        fn flags_from_bits_with_conflict() {
            let input = 0b1110_0010_1000_1011;
            assert_eq!(Flags::from_bits(input).unwrap().bits, input);
        }
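
        #[test]
        fn at_rest_flags_convert_into_their_in_memory_counterparts() {
            // An illustrative sketch, not part of the original test suite: the extended at-rest bits
            // are shifted back up by 16, and `to_memory()` drops EXTENDED while keeping PATH_LEN,
            // STAGE_MASK and ASSUME_VALID in place. Assumes the `bitflags` 1.x API used in this file.
            assert_eq!(
                FlagsExtended::INTENT_TO_ADD.to_flags(),
                Some(super::super::Flags::INTENT_TO_ADD)
            );
            assert_eq!(
                FlagsExtended::SKIP_WORKTREE.to_flags(),
                Some(super::super::Flags::SKIP_WORKTREE)
            );

            let on_disk = Flags::EXTENDED | Flags::ASSUME_VALID | Flags::from_bits(42).expect("42 fits into PATH_LEN");
            assert_eq!(
                on_disk.to_memory().bits(),
                0x8000 | 42,
                "EXTENDED is dropped, all other bits keep their position"
            );
        }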
    }
}

bitflags! {
    /// In-memory flags
    pub struct Flags: u32 {
        /// The mask to obtain the stage of an entry during a merge conflict.
        const STAGE_MASK = 0x3000;
        // TODO: could we use the pathlen ourselves to save 8 bytes? And how to handle longer paths than that? 0 as sentinel maybe?
        const PATH_LEN = 0x0fff;
        /// If set, the entry is assumed to match the version in the working tree, as a way to avoid `lstat()` checks.
        const ASSUME_VALID = 1 << 15;
        const UPDATE = 1 << 16;
        const REMOVE = 1 << 17;
        const UPTODATE = 1 << 18;
        const ADDED = 1 << 19;

        const HASHED = 1 << 20;
        const FSMONITOR_VALID = 1 << 21;
        /// Remove in work directory
        const WORKTREE_REMOVE = 1 << 22;
        const CONFLICTED = 1 << 23;

        const UNPACKED = 1 << 24;
        const NEW_SKIP_WORKTREE = 1 << 25;

        /// Temporarily mark paths matched by a pathspec.
        const PATHSPEC_MATCHED = 1 << 26;

        const UPDATE_IN_BASE = 1 << 27;
        const STRIP_NAME = 1 << 28;

        const INTENT_TO_ADD = 1 << 29; // stored at rest, see at_rest::FlagsExtended
        const SKIP_WORKTREE = 1 << 30; // stored at rest
    }
}

impl Flags {
    /// Return the stage of this entry, as encoded in the `STAGE_MASK` bits.
    pub fn stage(&self) -> u32 {
        (*self & Flags::STAGE_MASK).bits >> 12
    }
}
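
// An illustrative sketch, not part of the original file: the stage occupies bits 12 and 13,
// so stage 2 ("ours" during a merge conflict) corresponds to the raw value 0x2000.
#[cfg(test)]
mod stage_sketch {
    use super::Flags;

    #[test]
    fn stage_is_extracted_from_the_stage_mask_bits() {
        assert_eq!(Flags::empty().stage(), 0);
        let ours = Flags::from_bits(0x2000).expect("0x2000 is covered by STAGE_MASK");
        assert_eq!(ours.stage(), 2);
    }
}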

#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Copy)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Time {
    /// The number of seconds elapsed since the UNIX epoch.
    pub secs: u32,
    /// The number of nanoseconds elapsed within the current second, ranging from 0 to 999,999,999.
    pub nsecs: u32,
}

#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Copy)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Stat {
    /// The time of the last modification of the file's contents.
    pub mtime: Time,
    /// The time of the last status change of the file's metadata.
    pub ctime: Time,
    /// The ID of the device containing the file.
    pub dev: u32,
    /// The file's inode number.
    pub ino: u32,
    /// The user ID of the file's owner.
    pub uid: u32,
    /// The group ID of the file's owner.
    pub gid: u32,
    /// The size of the file in bytes. Capped to `u32`, so files bigger than that will need thorough checking (and hopefully never make it).
    pub size: u32,
}

mod access {
    use bstr::{BStr, ByteSlice};

    use crate::{Entry, State};

    impl Entry {
        /// Return this entry's path, looked up in the path backing of `state`.
        pub fn path<'a>(&self, state: &'a State) -> &'a BStr {
            (&state.path_backing[self.path.clone()]).as_bstr()
        }

        /// Return this entry's path, looked up in the given path `backing`.
        pub fn path_in<'backing>(&self, backing: &'backing crate::PathStorageRef) -> &'backing BStr {
            (backing[self.path.clone()]).as_bstr()
        }

        /// Return this entry's stage, as encoded in its flags.
        pub fn stage(&self) -> u32 {
            self.flags.stage()
        }
    }
}

mod _impls {
    use std::cmp::Ordering;

    use crate::{Entry, State};

    impl Entry {
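        /// Compare this entry to `other`, using `state` to look up both paths: paths are compared
        /// byte-wise over their common prefix, with ties broken first by path length and then by stage.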
        pub fn cmp(&self, other: &Self, state: &State) -> Ordering {
            let lhs = self.path(state);
            let rhs = other.path(state);
            let common_len = lhs.len().min(rhs.len());
            lhs[..common_len]
                .cmp(&rhs[..common_len])
                .then_with(|| lhs.len().cmp(&rhs.len()))
                .then_with(|| self.stage().cmp(&other.stage()))
        }
    }
}