1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
use std::{cell::RefCell, collections::HashMap, fmt, fs, io::Read, mem, path::PathBuf, rc::Rc};

use zerocopy::{FromBytes, FromZeroes};

use crate::{
    cache_address::CacheAddr,
    error::{self, CCPResult},
    time::WindowsEpochMicroseconds,
};
use static_assertions as sa;

/// Magic number identifying a Chromium block file (checked in `LazyBlockFile::header`).
const BLOCK_MAGIC: u32 = 0xc104cac3;
/// Size in bytes of the fixed header at the start of every block file; block
/// data begins at this offset.
const BLOCK_HEADER_SIZE: usize = 8192;
/// Maximum number of blocks one file can track: one bit per block in the
/// allocation bitmap, which fills the header after its 80 bytes of fixed fields.
const MAX_BLOCKS: usize = (BLOCK_HEADER_SIZE - 80) * 8;
/// Number of key bytes stored inline in a cache entry; longer keys spill to
/// the address in `BlockFileCacheEntry::long_key`.
const INLINE_KEY_SIZE: usize = 160;

/// Block-allocation bitmap embedded in the block file header: one bit per
/// block, packed into `u32` words. Layout must match the on-disk format, so
/// it is transmuted in place via zerocopy rather than parsed.
#[derive(Debug, FromZeroes, FromBytes)]
#[repr(C)]
struct AllocBitmap {
    data: [u32; MAX_BLOCKS / 32],
}

/// On-disk LRU bookkeeping node for a cache entry (Chromium `RankingsNode`).
/// `packed(4)` matches the on-disk 36-byte layout, which the assertion below
/// pins at compile time.
#[derive(Debug, FromZeroes, FromBytes, Clone)]
#[repr(C, packed(4))]
pub struct RankingsNode {
    pub last_used: WindowsEpochMicroseconds,
    pub last_modified: WindowsEpochMicroseconds,
    // Doubly-linked LRU list pointers.
    pub next: CacheAddr,
    pub prev: CacheAddr,
    // Address of the cache entry this node ranks.
    pub contents: CacheAddr,
    pub dirty: i32,
    pub self_hash: u32,
}

// Guard against layout drift: the on-disk node is exactly 36 bytes.
sa::const_assert_eq!(mem::size_of::<RankingsNode>(), 36);

// See: https://chromium.googlesource.com/chromium/src/net/+/ddbc6c5954c4bee29902082eb9052405e83abc02/disk_cache/disk_format_base.h
/// Header occupying the first `BLOCK_HEADER_SIZE` bytes of a block file.
/// Transmuted in place from the file buffer; field order and types mirror the
/// Chromium on-disk format exactly.
#[derive(Debug, FromZeroes, FromBytes)]
#[repr(C)]
struct BlockFileHeader {
    // Must equal `BLOCK_MAGIC` for the file to be accepted.
    pub magic: u32,
    pub version: u32,
    pub this_file: i16,
    pub next_file: i16,
    // Size in bytes of each block in this file; used to slice entries.
    pub entry_size: i32,
    pub num_entries: i32,
    pub max_entries: i32,
    pub empty: [i32; 4],
    pub hints: [i32; 4],
    pub updating: i32,
    pub user: [i32; 5],
    pub allocation_map: AllocBitmap,
}

/// Fixed-size inline key storage inside a cache entry. Shorter keys are
/// NUL-padded; keys longer than `INLINE_KEY_SIZE` live at the entry's
/// `long_key` address instead.
#[derive(FromZeroes, FromBytes, Clone)]
pub struct InlineCacheKey {
    key: [u8; INLINE_KEY_SIZE],
}

impl fmt::Debug for InlineCacheKey {
    /// Formats the raw inline key bytes for diagnostics.
    ///
    /// Uses a lossy UTF-8 conversion so corrupt (non-UTF-8) key bytes render
    /// as U+FFFD replacement characters instead of panicking — the previous
    /// `str::from_utf8(...).unwrap()` would abort formatting of any entry
    /// with a damaged key, and a `Debug` impl must never panic.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", String::from_utf8_lossy(&self.key))
    }
}

impl fmt::Display for InlineCacheKey {
    /// Writes the key as text with its trailing NUL padding removed.
    ///
    /// Signals `fmt::Error` when the stored bytes are not valid UTF-8.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match std::str::from_utf8(&self.key) {
            Ok(text) => write!(f, "{}", text.trim_end_matches('\0')),
            Err(_) => Err(fmt::Error),
        }
    }
}

// See: https://chromium.googlesource.com/chromium/src/net/+/ddbc6c5954c4bee29902082eb9052405e83abc02/disk_cache/disk_format.h#101
/// On-disk cache entry record (Chromium `EntryStore`). Entries with the same
/// index-table hash bucket are chained through `next`. The compile-time
/// assertion below pins the 256-byte on-disk size.
#[derive(Debug, FromZeroes, FromBytes, Clone)]
#[repr(C)]
pub struct BlockFileCacheEntry {
    pub hash: u32,
    // Next entry in the same hash-bucket chain; see the lazy iterator below.
    pub next: CacheAddr,
    // Address of this entry's `RankingsNode`.
    pub rankings_node: CacheAddr,
    pub reuse_count: i32,
    pub refetch_count: i32,
    pub state: i32,
    pub creation_time: WindowsEpochMicroseconds,
    pub key_len: i32,
    // Address of the full key when it does not fit in `key` below.
    pub long_key: CacheAddr,
    // Sizes and addresses of the (up to four) data streams.
    pub data_size: [i32; 4],
    pub data_addr: [CacheAddr; 4],
    pub flags: u32,
    pad: [u32; 4],
    pub self_hash: u32,
    // Inline key storage, NUL-padded; see `InlineCacheKey`.
    pub key: InlineCacheKey,
}

// Guard against layout drift: the on-disk entry is exactly 256 bytes.
sa::const_assert_eq!(mem::size_of::<BlockFileCacheEntry>(), 256);

/// An iterator over the logical entries in a map of block files. Data files are lazily loaded and
/// cached. An entry in the chrome cache is a node in a linked list of entries in the block files.
/// The index file is a hash table that maps keys to the first entry in the linked list.
///
/// The next node in a given linked list is not guaranteed to be in the same block file, so each
/// entry needs a reference to all of the data files.
///
/// By storing the reference to the data files, we can lazily evaluate the actual entries without
/// copying the underlying buffer. The iterator yields a parser with a shared reference to the
/// underlying data required for transmutation.
///
/// `LazyBlockFileCacheEntryIterator` is to be instantiated with the cache address of the first
/// entry and yields any subsequent entries in the linked list.
pub struct LazyBlockFileCacheEntryIterator {
    // Address of the next entry to yield; `None` once the chain is exhausted.
    current: Option<CacheAddr>,
    // Shared, lazily-populated map of block files the chain may span.
    data_files: Rc<RefCell<DataFiles>>,
}

impl LazyBlockFileCacheEntryIterator {
    pub fn new(
        data_files: Rc<RefCell<DataFiles>>,
        start: CacheAddr,
    ) -> LazyBlockFileCacheEntryIterator {
        LazyBlockFileCacheEntryIterator {
            current: Some(start),
            data_files,
        }
    }
}

/// A map of data files, lazily loaded and cached. Provides a method to get a cache entry from a
/// cache address, selecting the appropriate data file by the file number in the cache address.
pub struct DataFiles {
    // Loaded block files keyed by file number (`data_<n>` on disk).
    data_files: HashMap<u32, LazyBlockFile>,
    // Cache directory containing the `data_<n>` files.
    path: PathBuf,
}

impl DataFiles {
    /// Builds a `DataFiles` over the cache directory `path`, optionally
    /// pre-seeded with already-loaded block files keyed by file number.
    pub fn new(data_files: HashMap<u32, LazyBlockFile>, path: PathBuf) -> DataFiles {
        DataFiles { data_files, path }
    }

    /// Returns the block file `data_<file_number>`, reading and caching it on
    /// first access.
    ///
    /// # Panics
    /// Panics if the file cannot be read; the panic message names the path so
    /// the failing cache directory can be identified (the previous bare
    /// `unwrap()`s gave no context).
    fn get(&mut self, file_number: u32) -> &LazyBlockFile {
        self.data_files.entry(file_number).or_insert_with(|| {
            let file_path = self.path.join(format!("data_{}", file_number));
            // `fs::read` sizes the buffer from file metadata up front,
            // avoiding the incremental growth of open + `read_to_end`.
            let buf = fs::read(&file_path)
                .unwrap_or_else(|e| panic!("failed to read block file {:?}: {}", file_path, e));
            LazyBlockFile::new(Rc::new(buf))
        })
    }

    /// Resolves `addr` to the slice of its block file that holds the entry.
    ///
    /// # Errors
    /// Propagates any error from parsing the selected block file's header.
    pub fn get_entry(&mut self, addr: &CacheAddr) -> CCPResult<BufferSlice> {
        self.get(addr.file_number()).entry(addr)
    }
}

impl Iterator for LazyBlockFileCacheEntryIterator {
    type Item = LazyBlockFileCacheEntry;

    /// Yields the entry at the current address, then advances to that entry's
    /// `next` pointer.
    ///
    /// Iteration ends when there is no current address, or when the current
    /// address cannot be resolved to a buffer slice (note: the `get_entry`
    /// error is silently discarded by `ok()?`). An entry that resolves but
    /// fails to parse is still yielded; the chain simply ends after it
    /// because `self.current` is never re-set.
    ///
    /// NOTE(review): a cyclic `next` chain would make this iterate forever —
    /// confirm the on-disk format guarantees acyclic lists.
    fn next(&mut self) -> Option<Self::Item> {
        // Take the address out of `self.current` so that any early return
        // below leaves the iterator exhausted.
        let current = self.current.take()?;

        let mut data_files = (*self.data_files).borrow_mut();

        let current = data_files.get_entry(&current).ok()?;
        let current = LazyBlockFileCacheEntry::new(current, Rc::clone(&self.data_files));

        // Advance only if the entry parses and links to an initialized
        // successor.
        if let Ok(current) = current.get() {
            let next = current.next;
            if next.is_initialized() {
                self.current = Some(next);
            }
        }

        Some(current)
    }
}

/// A rankings node that is parsed on demand from a shared buffer slice via
/// `LazyRankingsNode::get`.
pub struct LazyRankingsNode {
    buffer: BufferSlice,
}

/// A slice to a shared buffer. Enables us to pass a reference to the buffer to all of the
/// transmuters.
pub struct BufferSlice {
    // Shared backing storage (one whole block file).
    buffer: Rc<Vec<u8>>,
    // Byte offset of the slice within `buffer`.
    start: usize,
    // Length of the slice in bytes.
    size: usize,
}

impl BufferSlice {
    /// Wraps `size` bytes of `buffer` beginning at byte offset `start`.
    pub fn new(buffer: Rc<Vec<u8>>, start: usize, size: usize) -> BufferSlice {
        BufferSlice { buffer, start, size }
    }

    /// Borrows the wrapped byte range.
    ///
    /// Panics if `start + size` exceeds the buffer length, per the usual
    /// slice-indexing semantics.
    pub fn get(&self) -> &[u8] {
        let end = self.start + self.size;
        &self.buffer[self.start..end]
    }
}

impl LazyRankingsNode {
    pub fn get(&self) -> CCPResult<&RankingsNode> {
        RankingsNode::ref_from(self.buffer.get()).ok_or(error::CCPError::DataMisalignment(format!(
            "rankings node at {}",
            self.buffer.start
        )))
    }
}

/// A cache entry that is parsed on demand from a shared buffer slice, holding
/// the data-files map so addresses inside the entry can be resolved lazily.
pub struct LazyBlockFileCacheEntry {
    buffer: BufferSlice,
    data_files: Rc<RefCell<DataFiles>>,
}

impl LazyBlockFileCacheEntry {
    pub fn new(
        buffer: BufferSlice,
        block_files: Rc<RefCell<DataFiles>>,
    ) -> LazyBlockFileCacheEntry {
        LazyBlockFileCacheEntry {
            buffer,
            data_files: block_files,
        }
    }

    /// Parse the entry from the buffer and return a reference to it.
    pub fn get(&self) -> CCPResult<&BlockFileCacheEntry> {
        BlockFileCacheEntry::ref_from(self.buffer.get()).ok_or(error::CCPError::DataMisalignment(
            format!("block file cache entry at {}", self.buffer.start),
        ))
    }

    pub fn get_rankings_node(&mut self) -> CCPResult<LazyRankingsNode> {
        let cache_entry = self.get()?;

        if !cache_entry.rankings_node.is_initialized() {
            return Err(error::CCPError::InvalidData(
                "rankings node not initialized".to_string(),
            ));
        }

        let mut data_files = self.data_files.borrow_mut();
        let ranking_entry = data_files.get_entry(&cache_entry.rankings_node)?;

        Ok(LazyRankingsNode {
            buffer: ranking_entry,
        })
    }
}

/// A whole block file held as a shared byte buffer; header and entries are
/// parsed from it on demand.
pub struct LazyBlockFile {
    buffer: Rc<Vec<u8>>,
}

/// Represents a block file in the chrome cache. It has a header, providing some metadata about the
/// file, followed by a series of contiguous blocks of a fixed size, defined by a field within the
/// header.
impl LazyBlockFile {
    pub fn new(buffer: Rc<Vec<u8>>) -> LazyBlockFile {
        LazyBlockFile { buffer }
    }

    fn header(&self) -> CCPResult<&BlockFileHeader> {
        let header = BlockFileHeader::ref_from(&self.buffer[0..mem::size_of::<BlockFileHeader>()])
            .ok_or(error::CCPError::DataMisalignment(
                "block file header".to_string(),
            ))?;

        if header.magic != BLOCK_MAGIC {
            return Err(error::CCPError::InvalidData(format!(
                "expected block magic {:x}, got {:x}",
                BLOCK_MAGIC, header.magic
            )));
        }
        Ok(header)
    }

    /// Returns a lazily evaluated cache entry at the given address.
    pub fn entry(&self, addr: &CacheAddr) -> CCPResult<BufferSlice> {
        let header = self.header()?;
        Ok(BufferSlice::new(
            Rc::clone(&self.buffer),
            BLOCK_HEADER_SIZE + addr.start_block() as usize * header.entry_size as usize,
            header.entry_size as usize,
        ))
    }
}