use std::{
    collections::HashMap,
    fs::{File, OpenOptions},
    io::{BufReader, Read, Seek, SeekFrom},
    path::PathBuf,
    sync::{Arc, Mutex, MutexGuard},
};

use databuffer::DataBuffer;

use crate::util::{decompress_container_data, CacheBuilder};

pub mod util;

type IdxFileOpt<'a> = Option<&'a mut CacheIndex>;

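/// A loaded game cache: the shared `.dat2` data file plus one [`CacheIndex`]
/// per `.idx` file (the reference index is stored under id 255).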
pub struct Cache {
    pub data_file: Arc<Mutex<BufReader<File>>>,
    pub indices: HashMap<u8, CacheIndex>,
}

impl Cache {
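    /// Opens the cache described by `builder`: the reference file
    /// (`<base>.idx255`), the data file (`<base>.dat2`) and every numbered
    /// `.idx` file, parsing each index's reference table up front.
    ///
    /// Returns `None` if the reference file or the data file cannot be opened.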
    pub fn with(builder: CacheBuilder) -> Option<Self> {
        let mut path_buff = PathBuf::new();
        path_buff.push(&builder.cache_path);
        path_buff.push(format!("{}.idx255", &builder.base_file_name));

        let mut info_file = match OpenOptions::new().read(true).open(&path_buff) {
            Ok(n) => n,
            Err(e) => {
                println!("Failed opening info/reference file: {:?}, Error: {}", &path_buff, e);
                return None;
            }
        };

        path_buff.clear();
        path_buff.push(&builder.cache_path);
        path_buff.push(format!("{}.dat2", &builder.base_file_name));

        let data_file = match OpenOptions::new().read(true).open(&path_buff) {
            Ok(n) => Arc::new(Mutex::new(BufReader::new(n))),
            Err(e) => {
                println!("Failed opening data file: {:?}, Error: {}", &path_buff, e);
                return None;
            }
        };

        // Every entry in the reference file is 6 bytes, so the entry count is
        // also the number of .idx files to load.
        let num_files = info_file.metadata().unwrap().len() / 6;
        println!("Indices described by reference file: {}", num_files);
        let _ = info_file.seek(SeekFrom::Start(0));

        let mut info = CacheIndex::from(255, 500_000, BufReader::new(info_file), IdxContainerInfo::new());
        let mut indices = HashMap::<u8, CacheIndex>::new();

        for i in 0..num_files {
            path_buff.clear();
            path_buff.push(&builder.cache_path);
            path_buff.push(format!("{}.idx{}", &builder.base_file_name, &i));

            let file = match OpenOptions::new().read(true).open(&path_buff) {
                Ok(n) => BufReader::new(n),
                Err(e) => {
                    println!("Error reading idx {}: {}", i, e);
                    continue;
                }
            };

            // Pull this index's packed reference table out of the data file so
            // the index can be constructed with its container metadata.
            let container_data = match info.container_data(data_file.lock().unwrap(), i as u32) {
                Some(n) => n,
                None => {
                    println!("Unable to get container data for index {}.", i);
                    Vec::new()
                }
            };

            let container_info = IdxContainerInfo::from(container_data, builder.calculate_crc32);

            let index = CacheIndex::from(i as u8, 1_000_000, file, container_info);
            indices.insert(i as u8, index);
        }

        indices.insert(255, info);

        Some(Self {
            data_file,
            indices,
        })
    }

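    /// Returns a mutable reference to the index with the given id, or `None`
    /// (after logging) if no such index was loaded.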
    pub fn index(&mut self, idx: usize) -> IdxFileOpt {
        match self.indices.get_mut(&(idx as u8)) {
            Some(n) => Some(n),
            None => {
                println!("No such index exists: {}", idx);
                None
            }
        }
    }

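    /// Drops any raw file data cached inside every container of every index,
    /// freeing memory while keeping the parsed metadata.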
    pub fn clear_raw_data(&mut self) {
        for index in self.indices.values_mut() {
            for container in index.container_info.containers.values_mut() {
                container.clear_filedata();
            }
        }
    }
}

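/// A single `.idx` file together with the parsed reference-table metadata
/// ([`IdxContainerInfo`]) describing its containers.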
pub struct CacheIndex {
    file_id: u8,
    file: BufReader<File>,
    max_container_size: u32,
    pub container_info: IdxContainerInfo,
    last_archive_id: u32,
}

impl CacheIndex {
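    /// Builds an index around an already-opened `.idx` reader and its parsed
    /// container info. `max_size` caps the size of any container read from it.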
    fn from(file_id: u8, max_size: u32, file: BufReader<File>, container_info: IdxContainerInfo) -> Self {
        Self {
            file_id,
            max_container_size: max_size,
            file,
            container_info,
            last_archive_id: 0,
        }
    }

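    /// Looks up the id of a container whose name hash matches `hash`; falls
    /// back to returning `hash` itself when no container matches.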
    fn get_container_by_name_hash(&mut self, hash: u32) -> u32 {
        match self.container_info.containers.iter().find(|(_, c)| c.name_hash == hash) {
            Some((id, _)) => *id,
            None => hash,
        }
    }

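    /// Reads the raw (still compressed) data of `archive_id` out of the
    /// shared `.dat2` file.
    ///
    /// Each entry of the `.idx` file is 6 bytes: a 24-bit container size
    /// followed by the 24-bit number of the first 520-byte sector holding the
    /// data. Every sector starts with an 8-byte header (container id, part
    /// number, next sector, index file id) followed by up to 512 bytes of
    /// payload; sectors are chained via the header until `container_size`
    /// bytes have been collected.
    ///
    /// Returns `None` when the size or the sector chain is inconsistent.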
    pub fn container_data(&mut self, mut data_file: MutexGuard<BufReader<File>>, archive_id: u32) -> Option<Vec<u8>> {
        let mut file_buff: [u8; 520] = [0; 520];
        let mut data: [u8; 6] = [0; 6];

        // Each index entry is 6 bytes wide.
        let _ = self.file.seek(SeekFrom::Start(6 * archive_id as u64));

        self.last_archive_id = archive_id;

        if let Err(e) = self.file.read(&mut data) {
            println!("Error reading from info file: {}", e);
        }

        // First 3 bytes: container size; last 3 bytes: first sector number.
        let container_size = ((data[0] as u32) << 16) | ((data[1] as u32) << 8) | (data[2] as u32);
        let mut sector = ((data[3] as i32) << 16) | ((data[4] as i32) << 8) | (data[5] as i32);

        if container_size > self.max_container_size {
            println!("Container Size greater than Max Container Size! {} > {}", container_size, self.max_container_size);
            None
        } else if sector <= 0 {
            println!("Sector <= 0! {}", sector);
            None
        } else {
            let mut container_data = Vec::<u8>::new();

            let mut data_read_count = 0;
            let mut part: u32 = 0;

            let initial_dfile_pos = data_file.seek(SeekFrom::Start(520 * (sector as u64))).unwrap() as i64;

            while container_size > data_read_count {
                if sector == 0 {
                    println!("Sector == 0!");
                    return None;
                }

                // Only seek when the reader is not already positioned at the
                // start of the next sector.
                let seek_target: i64 = 520 * (sector as i64);
                let current_pos = initial_dfile_pos + (data_read_count as i64) + (part as i64 * 8);

                if current_pos != seek_target {
                    let _ = data_file.seek(SeekFrom::Start(seek_target as u64));
                }

                // A sector carries at most 512 bytes of payload after its 8-byte header.
                let mut data_to_read = container_size - data_read_count;

                if data_to_read > 512 {
                    data_to_read = 512;
                }

                let bytes_read = data_file.read(&mut file_buff).unwrap();

                // Short read: re-seek to the sector start and try once more.
                if data_to_read + 8 > bytes_read as u32 {
                    let _ = data_file.seek(SeekFrom::Start(520 * (sector as u64)));
                    let _ = data_file.read(&mut file_buff);
                }

                // Sector header: container id (u16), part (u16), next sector (u24), idx file id (u8).
                let current_container_id = ((file_buff[0] as u32) << 8) | (file_buff[1] as u32);
                let current_part = ((file_buff[2] as u32) << 8) | (file_buff[3] as u32);
                let next_sector = ((file_buff[4] as u32) << 16) | ((file_buff[5] as u32) << 8) | (file_buff[6] as u32);
                let current_idx_file_id = file_buff[7] as u32;

                if archive_id != current_container_id || current_part != part || self.file_id != (current_idx_file_id as u8) {
                    println!("Multipart failure! {} != {} || {} != {} || {} != {}", archive_id, current_container_id, current_part, part, self.file_id, current_idx_file_id);
                    return None;
                }

                // Append this sector's payload and follow the chain.
                let upper_bound = 8 + data_to_read as usize;
                container_data.extend_from_slice(&file_buff[8..upper_bound]);
                data_read_count += data_to_read;

                part += 1;
                sector = next_sector as i32;
            }

            Some(container_data)
        }
    }

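    /// Returns the total number of files in this index: every container
    /// except the last is assumed to hold 256 files, and the last container's
    /// actual file count is added on top.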
    pub fn get_total_files(&mut self) -> u32 {
        self.container_info.container_indices.sort_unstable();

        let last_archive_id = *self.container_info.container_indices.last().unwrap();
        let last_archive = self.container_info.containers.get(&last_archive_id).unwrap();

        let last_archive_file_amount = last_archive.file_indices.len();
        let other_file_amounts = (self.container_info.container_indices.len() - 1) * 256;

        (last_archive_file_amount + other_file_amounts) as u32
    }
}

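/// Parsed reference table of an index: protocol/revision metadata plus the
/// containers (archives) it describes, keyed by container id.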
#[allow(dead_code)]
#[derive(Default)]
pub struct IdxContainerInfo {
    pub protocol: u8,
    pub revision: u32,
    pub crc: u32,
    container_indices: Vec<u32>,
    pub containers: HashMap<u32, IdxContainer>,
    named_files: bool,
    whirlpool: bool,
}

impl IdxContainerInfo {
    pub fn new() -> Self {
        Self::default()
    }

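    /// Parses a packed (compressed) reference table.
    ///
    /// After optional CRC-32 calculation the payload is decompressed and read
    /// in order: protocol (u8, must be 5 or 6), revision (u32, protocol 6
    /// only), a settings byte (bit 0 = named files, bit 1 = whirlpool
    /// hashes), the delta-encoded container ids, then per container the
    /// optional name hash, optional whirlpool digest, CRC, version, file
    /// count, delta-encoded file ids and optional per-file name hashes.
    ///
    /// Returns an empty `Self` when decompression fails or the protocol is
    /// unknown.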
    pub fn from(packed_data: Vec<u8>, gencrc: bool) -> Self {
        let mut crc = 0;

        if gencrc {
            let mut crc_hasher = crc32fast::Hasher::new();
            crc_hasher.update(&packed_data);
            crc = crc_hasher.finalize();
        }

        let mut data = match decompress_container_data(packed_data) {
            Some(n) => DataBuffer::with_vec(n),
            None => {
                println!("Unable to decompress container data.");
                return Self::new();
            }
        };

        let protocol = data.read_u8();

        if protocol != 5 && protocol != 6 {
            println!("Invalid protocol while parsing container info: {}", protocol);
            Self::new()
        } else {
            // Protocol 6 added a revision number; protocol 5 has none.
            let revision = match protocol {
                5 => 0,
                _ => data.read_u32(),
            };

            let settings_hash = data.read_u8();
            let files_named = (0x1 & settings_hash) != 0;
            let whirlpool = (0x2 & settings_hash) != 0;

            let mut containers = HashMap::<u32, IdxContainer>::new();
            let mut container_indices = Vec::<u32>::new();
            let num_indices = data.read_u16();

            // Container ids are delta-encoded: each u16 is added to the
            // previously decoded id to produce the next one.
            for i in 0..num_indices {
                container_indices.push((data.read_u16() as u32) + match i {
                    0 => 0,
                    _ => *container_indices.last().unwrap(),
                });

                containers.insert(*container_indices.last().unwrap(), IdxContainer::new());
            }

            if files_named {
                for c in container_indices.iter().take(num_indices as usize) {
                    containers.get_mut(c).unwrap().name_hash = data.read_u32();
                }
            }

            // Whirlpool digests (64 bytes per container) are read into a side
            // table first and applied after the file lists are known.
            let mut file_hashes: HashMap<u32, [u8; 64]> = HashMap::new();

            if whirlpool {
                for c in container_indices.iter().take(num_indices as usize) {
                    let mut buf: [u8; 64] = [0; 64];
                    let _ = data.read(&mut buf);
                    file_hashes.insert(*c, buf);
                }
            }

            for c in container_indices.iter().take(num_indices as usize) {
                let container = containers.get_mut(c).unwrap();
                container.crc = data.read_i32();
            }

            for c in container_indices.iter().take(num_indices as usize) {
                let container = containers.get_mut(c).unwrap();
                container.version = data.read_i32();
            }

            let mut container_index_counts = HashMap::<u32, u16>::new();

            for c in container_indices.iter().take(num_indices as usize) {
                container_index_counts.insert(*c, data.read_u16());
            }

            // File ids within a container are delta-encoded as well.
            for c in container_indices.iter().take(num_indices as usize) {
                let container = containers.get_mut(c).unwrap();

                for f in 0..(*container_index_counts.get(c).unwrap() as usize) {
                    container.file_indices.push((data.read_u16() as u32) + match f {
                        0 => 0,
                        _ => container.file_indices[f - 1],
                    });

                    container.file_containers.insert(container.file_indices[f], IdxFileContainer::new());
                }
            }

            if whirlpool {
                // Copy the byte of each container's whirlpool digest at the
                // file id's offset onto that file as its version marker.
                for container_id in container_indices.iter() {
                    for file_index in 0..containers.get(container_id).unwrap().file_containers.len() {
                        let file_id = containers.get(container_id).unwrap().file_indices[file_index];

                        containers.get_mut(container_id).unwrap()
                            .file_containers.get_mut(&file_id).unwrap()
                            .version = file_hashes.get(container_id).unwrap()[file_id as usize];
                    }
                }
            }

            if files_named {
                for c in container_indices.iter().take(num_indices as usize) {
                    let container = containers.get_mut(c).unwrap();

                    for f in 0..container.file_indices.len() {
                        let file = container.file_containers.get_mut(&container.file_indices[f]).unwrap();
                        file.name_hash = data.read_u32();
                    }
                }
            }

            Self {
                crc,
                protocol,
                revision,
                container_indices,
                containers,
                named_files: files_named,
                whirlpool,
            }
        }
    }
}

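/// A single container (archive) within an index: its version, CRC, name hash
/// and the files it holds.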
#[derive(Default)]
pub struct IdxContainer {
    pub version: i32,
    name_hash: u32,
    pub crc: i32,
    file_indices: Vec<u32>,
    file_containers: HashMap<u32, IdxFileContainer>,
}

impl IdxContainer {
    pub fn new() -> Self {
        Self::default()
    }

    /// Releases the raw data held by every file in this container.
    pub fn clear_filedata(&mut self) {
        for f in self.file_containers.values_mut() {
            f.data = Vec::new();
        }
    }
}

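/// A single file entry inside a container: version byte, name hash, CRC and
/// (optionally) its raw data.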
#[allow(dead_code)]
#[derive(Default)]
pub struct IdxFileContainer {
    version: u8,
    name_hash: u32,
    crc: i32,
    data: Vec<u8>,
}

impl IdxFileContainer {
    pub fn new() -> Self {
        Self::default()
    }
}