use alloc::{string::String, vec, vec::Vec};
use core::cmp::Ordering;

use flate2::Compress;
use hashbrown::HashMap;
use macros::generator;

use crate::{
    base::{
        pkg_path_hash, PkgState, RawFlags, ReadSeekWriteRequest, Response, SeekFrom, BUFFER_SIZE,
        ENTRY_SIZE, HEADER_SIZE, MAGIC,
    },
    EntryCompression, Flags,
};

use super::{
    CreateError, Entry, InsertError, RawReadWriteHandle, ReadSeekWriteTruncateRequest, RemoveError,
    RenameError, RepackError, ReplaceError,
};

const PREALLOCATED_PATH_LEN: u64 = 30;
const PREALLOCATED_ENTRY_COUNT: u64 = 64;

impl PkgState {
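    /// Writes out a fresh, empty archive: the magic bytes, the header, and a
    /// zeroed entry list and path region preallocated for
    /// [`PREALLOCATED_ENTRY_COUNT`] entries.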
    #[generator(static, yield ReadSeekWriteRequest -> Response)]
    pub fn create() -> Result<PkgState, CreateError> {
        request!(rewind);

        request!(write all MAGIC);
        request!(write u16 be HEADER_SIZE as u16);
        request!(write u16 be ENTRY_SIZE as u16);

        let initial_entry_count = PREALLOCATED_ENTRY_COUNT;
        let initial_path_region_size = initial_entry_count * PREALLOCATED_PATH_LEN;
        request!(write u32 be initial_entry_count as u32);
        request!(write u32 be initial_path_region_size as u32);

        request!(write repeated 0, initial_path_region_size + initial_entry_count * ENTRY_SIZE);

        Ok(PkgState {
            path_region_size: initial_path_region_size as u32,
            path_region_empty_offset: 0,
            entries: vec![None; initial_entry_count as usize],
            path_to_entry_index_map: HashMap::default(),
        })
    }

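    /// Moves every entry whose data starts before `offset` to the end of the
    /// file, rewriting each moved entry's on-disk record. Used to make room
    /// when the entry list or path region has to grow.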
    #[generator(static, yield ReadSeekWriteRequest -> Response)]
    pub fn push_back_data_region(&mut self, offset: u64) {
        log::trace!(target: "silpkg", "Moving data region to {offset}");
        let entries_to_move = self
            .entries
            .iter()
            .enumerate()
            .filter_map(|(i, opt)| opt.as_ref().map(|entry| (i, entry)))
            .filter_map(|(i, entry)| {
                if (entry.data_offset as u64) < offset {
                    Some(i)
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        log::trace!("Moving {} entries", entries_to_move.len());
        for i in entries_to_move {
            let mut entry = self.entries[i].take().unwrap();
            let new_offset = request!(seek SeekFrom::End(0));
            let old_offset = core::mem::replace(&mut entry.data_offset, new_offset as u32);

            request!(copy old_offset.into(), entry.data_size.into(), new_offset);
            request!(seek SeekFrom::Start(
                PkgState::entry_list_offset() + i as u64 * ENTRY_SIZE,
            ));
            entry.write().await;

            self.entries[i] = Some(entry);
        }
    }

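    /// Relocates the path region to `offset` and resizes it to `new_size`,
    /// pushing the data region back first so the copy has room.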
    #[generator(static, yield ReadSeekWriteRequest -> Response)]
    pub fn push_back_and_resize_path_region(&mut self, offset: u64, new_size: u64) {
        log::trace!(target: "silpkg", "Moving path region to {} with a new size of {}", offset, new_size);
        self.push_back_data_region(offset + new_size).await;

        request!(copy self.path_region_offset(), self.path_region_size as u64, offset);

        self.path_region_size = new_size as u32;
        request!(seek SeekFrom::Start(MAGIC.len() as u64 + 8));
        request!(write u32 be self.path_region_size);
    }

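    /// Grows the path region by `amount` bytes, pushing the data region back
    /// and zero-filling the newly gained space.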
    #[generator(static, yield ReadSeekWriteRequest -> Response)]
    pub fn reserve_path_space(&mut self, amount: u32) {
        log::trace!(target: "silpkg", "Resizing path region");
        let new_path_region_size = self.path_region_size + amount;
        let new_path_region_start = self.path_region_offset();
        let new_path_region_end = new_path_region_start as u32 + new_path_region_size;

        self.push_back_data_region(new_path_region_end as u64).await;

        request!(seek SeekFrom::Start(
            new_path_region_start + self.path_region_empty_offset as u64,
        ));

        request!(write repeated 0, (new_path_region_size - self.path_region_empty_offset).into());

        self.path_region_size = new_path_region_size;

        request!(seek SeekFrom::Start(MAGIC.len() as u64 + 8));
        request!(write u32 be self.path_region_size);
    }

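    /// Appends `amount` empty entry slots, moving the path and data regions
    /// back to make room and updating the entry count in the header.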
    #[generator(static, yield ReadSeekWriteRequest -> Response)]
    pub fn reserve_entries(&mut self, amount: u64) {
        log::trace!(target: "silpkg", "Resizing entry list");
        let required_extra_entry_space = (amount * ENTRY_SIZE) as u32;
        let required_extra_path_space = (amount * PREALLOCATED_PATH_LEN) as u32;

        let entry_list_grow_start =
            PkgState::entry_list_offset() + self.entries.len() as u64 * ENTRY_SIZE;

        let new_path_region_offset = entry_list_grow_start as u32 + required_extra_entry_space;

        self.push_back_and_resize_path_region(
            new_path_region_offset as u64,
            self.path_region_size as u64 + required_extra_path_space as u64,
        )
        .await;

        request!(seek SeekFrom::Start(entry_list_grow_start));
        request!(write repeated 0, required_extra_entry_space.into());

        self.entries.reserve_exact(amount as usize);
        for _ in 0..amount {
            self.entries.push(None);
        }

        request!(seek SeekFrom::Start(MAGIC.len() as u64 + 4));
        request!(write u32 be self.entries.len() as u32);
    }

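    /// Appends `path`, NUL-terminated, at the first free spot in the path
    /// region (growing the region if necessary) and returns its offset
    /// relative to the region's start.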
    #[generator(static, yield ReadSeekWriteRequest -> Response)]
    pub fn insert_path_into_path_region(&mut self, path: &str) -> u32 {
        log::trace!(target: "silpkg",
            "Inserting path {path} at {}/{}",
            self.path_region_empty_offset, self.path_region_size
        );
        if self.path_region_empty_offset + path.len() as u32 + 1 >= self.path_region_size {
            self.reserve_path_space(path.len() as u32 + 1 + PREALLOCATED_PATH_LEN as u32 * 32)
                .await;
        }
        let offset = self.path_region_empty_offset;

        request!(seek SeekFrom::Start(
            self.path_region_offset() + self.path_region_empty_offset as u64,
        ));

        request!(write all path);
        request!(write u8 0);

        self.path_region_empty_offset += path.len() as u32 + 1;

        offset
    }

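    /// Removes the entry stored under `path` by zeroing its on-disk record.
    /// The entry's data and path bytes are left behind as dead space until
    /// the archive is repacked.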
    #[generator(static, yield ReadSeekWriteRequest -> Response)]
    pub fn remove(&mut self, path: &str) -> Result<(), RemoveError> {
        if let Some(entry_idx) = self.path_to_entry_index_map.remove(path) {
            self.entries[entry_idx] = None;

            request!(seek SeekFrom::Start(
                Self::entry_list_offset() + entry_idx as u64 * ENTRY_SIZE,
            ));
            request!(write all [0x00; ENTRY_SIZE as usize]);

            Ok(())
        } else {
            Err(RemoveError::NotFound)
        }
    }

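    /// Renames the entry at `src` to `dst` without touching its data.
    ///
    /// Fails with [`RenameError::NotFound`] if `src` does not exist and with
    /// [`RenameError::AlreadyExists`] if `dst` is already taken.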
    #[generator(static, yield ReadSeekWriteRequest -> Response)]
    pub fn rename(&mut self, src: &str, dst: String) -> Result<(), RenameError> {
        if !self.path_to_entry_index_map.contains_key(src) {
            return Err(RenameError::NotFound);
        }

        if self.path_to_entry_index_map.contains_key(&dst) {
            return Err(RenameError::AlreadyExists);
        }

        let entry_idx = self.path_to_entry_index_map.remove(src).unwrap();
        let mut entry = self.entries[entry_idx].as_mut().unwrap();

        debug_assert_eq!(src, entry.path);
        entry.path = dst.clone();

        // If the old path is the last one in the region, overwrite it in
        // place; otherwise leak the old bytes and append the new path.
        // (`path_region_empty_offset` points one past the trailing NUL, hence
        // the `+ 1` in the comparison.)
        if entry.relative_path_offset + src.len() as u32 + 1 == self.path_region_empty_offset {
            let relative_path_offset = entry.relative_path_offset;
            // Only grow the region when the new path is longer than the old one.
            self.reserve_path_space(dst.len().saturating_sub(src.len()) as u32)
                .await;

            // Seek relative to the path region, not the file start.
            request!(seek SeekFrom::Start(
                self.path_region_offset() + relative_path_offset as u64,
            ));
            request!(write all dst.clone().into_bytes());
            request!(write u8 0);

            // The rewritten path is still the last one in the region.
            self.path_region_empty_offset = relative_path_offset + dst.len() as u32 + 1;
            entry = self.entries[entry_idx].as_mut().unwrap();
        } else {
            let new_relative_path_offset = self.insert_path_into_path_region(&dst).await;
            entry = self.entries[entry_idx].as_mut().unwrap();
            entry.relative_path_offset = new_relative_path_offset;
        }

        self.path_to_entry_index_map.insert(dst, entry_idx);

        request!(seek SeekFrom::Start(
            Self::entry_list_offset() + entry_idx as u64 * ENTRY_SIZE,
        ));
        entry.write().await;

        Ok(())
    }

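    /// Points the entry at `dst` to the data of the entry at `src`, removing
    /// `src`. When `dst` does not exist yet, this degenerates to a rename.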
    #[generator(static, yield ReadSeekWriteRequest -> Response)]
    pub fn replace(&mut self, src: &str, dst: String) -> Result<(), ReplaceError> {
        let res = (
            self.path_to_entry_index_map.get(src).copied(),
            self.path_to_entry_index_map.get(&dst).copied(),
        );
        match res {
            (Some(one_idx), Some(two_idx)) => {
                let one = self.entries[one_idx].take().unwrap();
                self.path_to_entry_index_map.remove(src);

                let two = self.entries[two_idx].as_mut().unwrap();
                two.data_offset = one.data_offset;
                two.data_size = one.data_size;
                two.unpacked_size = one.unpacked_size;
                two.flags = one.flags;

                request!(seek SeekFrom::Start(
                    Self::entry_list_offset() + one_idx as u64 * ENTRY_SIZE,
                ));
                Entry::write_empty().await;

                request!(seek SeekFrom::Start(
                    Self::entry_list_offset() + two_idx as u64 * ENTRY_SIZE,
                ));
                two.write().await;

                Ok(())
            }
            (Some(_), None) => {
                self.rename(src, dst).await.map_err(|x| match x {
                    RenameError::NotFound | RenameError::AlreadyExists => unreachable!(),
                    RenameError::Io(err) => ReplaceError::Io(err),
                })?;

                self.path_to_entry_index_map.remove(src);

                Ok(())
            }
            (None, _) => Err(ReplaceError::NotFound),
        }
    }

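    /// Writes every entry's path back to back starting at `offset`, updating
    /// the entries' relative path offsets, and returns the packed region's
    /// total size. Assumes all entry slots are occupied.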
    #[generator(static, yield ReadSeekWriteRequest -> Response)]
    fn write_packed_path_region_at(&mut self, offset: u64) -> u64 {
        request!(seek SeekFrom::Start(offset));

        let mut size = 0;
        for entry in self.entries.iter_mut().map(|e| e.as_mut().unwrap()) {
            entry.relative_path_offset = (request!(stream pos) - offset) as u32;
            request!(write all entry.path.clone());
            request!(write u8 0);
            size += entry.path.len() + 1;
        }

        size as u64
    }

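    /// Compacts the archive in place: drops empty entry slots, packs the
    /// path region, defragments the data region, rewrites the entry list
    /// sorted by path hash, and truncates the file to its new end.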
    #[generator(static, yield ReadSeekWriteTruncateRequest -> Response)]
    pub fn repack(&mut self) -> Result<(), RepackError> {
        for entry in core::mem::take(&mut self.entries) {
            if entry.is_some() {
                self.entries.push(entry);
            }
        }

        self.entries.sort_by(|a, b| {
            let ea = a.as_ref().unwrap();
            let eb = b.as_ref().unwrap();

            match ea.data_offset.cmp(&eb.data_offset) {
                Ordering::Equal => ea.data_size.cmp(&eb.data_size),
                ord => ord,
            }
        });

        for window in self.entries.windows(2) {
            if let [Some(a), Some(b)] = window {
                if a.data_offset + a.data_size > b.data_offset {
                    return Err(RepackError::OverlappingEntries);
                }
            } else {
                unreachable!()
            };
        }

        let path_region_size: usize = self
            .entries
            .iter()
            .map(|entry| entry.as_ref().unwrap().path.len() + 1)
            .sum();

        let path_region_offset = self.path_region_offset();
        let data_region_start = path_region_offset + path_region_size as u64;

        assert!(data_region_start <= self.data_region_offset());

        log::trace!(target: "silpkg", "Packing path region");
        self.path_region_size = self.write_packed_path_region_at(path_region_offset).await as u32;
        assert_eq!(self.path_region_size, path_region_size as u32);

        let mut current_data_offset = data_region_start as u32;
        log::trace!(target: "silpkg", "Defragmenting data region");
        for entry in self.entries.iter_mut().map(|e| e.as_mut().unwrap()) {
            if current_data_offset != entry.data_offset {
                request!(copy entry.data_offset.into(), entry.data_size.into(), current_data_offset.into());

                entry.data_offset = current_data_offset;
            }

            current_data_offset += entry.data_size;
        }

        self.entries
            .sort_by_key(|entry| entry.as_ref().unwrap().path_hash);

        for (i, entry) in self.entries.iter().enumerate() {
            *self
                .path_to_entry_index_map
                .get_mut(&entry.as_ref().unwrap().path)
                .unwrap() = i;
        }

        log::trace!(target: "silpkg", "Rewriting entry list");
        request!(seek SeekFrom::Start(MAGIC.len() as u64 + 4));
        request!(write u32 be self.entries.len() as u32);
        request!(write u32 be path_region_size as u32);

        for maybe_entry in self.entries.iter() {
            match maybe_entry {
                Some(entry) => entry.write().await,
                None => request!(write repeated 0, ENTRY_SIZE),
            }
        }

        request!(truncate current_data_offset.into());

        Ok(())
    }

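    /// Allocates an entry for `path` and returns a [`WriteHandle`] that
    /// streams the entry's data to the end of the file, compressed according
    /// to `flags`.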
    #[generator(static, yield ReadSeekWriteRequest -> Response, use<'a>)]
    pub fn insert<'a>(
        &'a mut self,
        path: String,
        flags: Flags,
    ) -> Result<WriteHandle<'a>, InsertError> {
        if self.path_to_entry_index_map.contains_key(&path) {
            return Err(InsertError::AlreadyExists);
        }

        let entry_slot = match self.entries.iter().enumerate().find(|(_i, o)| o.is_none()) {
            Some((i, _o)) => i,
            None => {
                let i = self.entries.len();
                self.reserve_entries(PREALLOCATED_ENTRY_COUNT).await;
                i
            }
        };

        assert!(self
            .path_to_entry_index_map
            .insert(path.clone(), entry_slot)
            .is_none());

        let relative_path_offset = self.insert_path_into_path_region(&path).await;
        let data_offset = request!(seek SeekFrom::End(0));

        Ok(WriteHandle {
            inner: match flags.compression {
                EntryCompression::Deflate(level) => DataWriteHandle::Deflate(DeflateWriteHandle {
                    offset: data_offset,
                    size: 0,
                    unpacked_size: 0,
                    compress: Compress::new(level, true),
                }),
                EntryCompression::None => DataWriteHandle::Raw(RawReadWriteHandle {
                    cursor: 0,
                    offset: data_offset,
                    size: 0,
                }),
            },

            state: self,
            path,
            relative_path_offset,
            entry_slot,
            flags,
        })
    }
}

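/// A `Write`-like trait for generator based I/O: `write` consumes as much of
/// `buf` as it can and returns how many bytes were accepted.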
pub trait GeneratorWrite {
    #[generator(static, yield ReadSeekWriteRequest -> Response, !use)]
    fn write(&mut self, buf: &[u8]) -> usize;
}

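/// Streams DEFLATE-compressed entry data, tracking both the compressed
/// (`size`) and uncompressed (`unpacked_size`) byte counts.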
pub struct DeflateWriteHandle {
    offset: u64,
    size: u64,
    unpacked_size: u64,
    compress: flate2::Compress,
}

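/// The raw or compressed backend behind a [`WriteHandle`].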
pub enum DataWriteHandle {
    Raw(RawReadWriteHandle),
    Deflate(DeflateWriteHandle),
}

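/// An exclusive handle for writing a single entry's data. The entry record
/// is only committed to the archive by [`WriteHandle::flush`] or
/// [`WriteHandle::finish`].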
pub struct WriteHandle<'a> {
    inner: DataWriteHandle,

    state: &'a mut PkgState,
    path: String,
    relative_path_offset: u32,
    entry_slot: usize,
    flags: Flags,
}

impl<'b> WriteHandle<'b> {
    pub fn inner_mut(&mut self) -> &mut DataWriteHandle {
        &mut self.inner
    }

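    /// Finishes the compressor if there is one, then writes an entry record
    /// describing the data written so far.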
    #[generator(static, yield ReadSeekWriteRequest -> Response, use<'_, 'b>)]
    fn flush_internal(&mut self) -> () {
        match &mut self.inner {
            DataWriteHandle::Deflate(deflate) => deflate.flush().await,
            _ => (),
        }

        log::trace!("Updating entry {} with written data", self.entry_slot);

        let entry = match self.inner {
            DataWriteHandle::Raw(RawReadWriteHandle {
                offset,
                size: unpacked_size @ size,
                ..
            })
            | DataWriteHandle::Deflate(DeflateWriteHandle {
                offset,
                size,
                unpacked_size,
                ..
            }) => Entry {
                data_offset: offset as u32,
                data_size: size as u32,
                unpacked_size: unpacked_size as u32,
                path_hash: pkg_path_hash(&self.path),
                relative_path_offset: self.relative_path_offset,
                path: self.path.clone(),
                flags: match self.flags.compression {
                    EntryCompression::Deflate(_) => RawFlags::DEFLATED,
                    EntryCompression::None => RawFlags::empty(),
                },
            },
        };

        request!(seek SeekFrom::Start(
            PkgState::entry_list_offset() + self.entry_slot as u64 * ENTRY_SIZE,
        ));

        entry.write().await;
        self.state.entries[self.entry_slot] = Some(entry);

        Default::default()
    }

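    /// Flushes pending data and the entry record, then restores the stream
    /// position so that writing can continue afterwards.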
    #[generator(static, yield ReadSeekWriteRequest -> Response, use<'_, 'b>)]
    pub fn flush(&mut self) -> () {
        let (offset, cursor) = match self.inner {
            DataWriteHandle::Raw(RawReadWriteHandle { cursor, offset, .. })
            | DataWriteHandle::Deflate(DeflateWriteHandle {
                size: cursor,
                offset,
                ..
            }) => (offset, cursor),
        };

        self.flush_internal().await;
        request!(seek SeekFrom::Start(offset + cursor));

        Default::default()
    }

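    /// Consumes the handle, flushing any remaining data and committing the
    /// entry record.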
    #[generator(static, yield ReadSeekWriteRequest -> Response, use<'b>)]
    pub fn finish(mut self) {
        self.flush_internal().await;
    }
}

impl GeneratorWrite for RawReadWriteHandle {
    #[generator(static, yield ReadSeekWriteRequest -> Response, !use)]
    fn write(&mut self, buf: &[u8]) -> usize {
        log::trace!("Writing entry data to {}", self.offset);

        let written = request!(write buf);
        self.size += written as u64;
        self.cursor += written as u64;

        written
    }
}

impl GeneratorWrite for DeflateWriteHandle {
    #[generator(static, yield ReadSeekWriteRequest -> Response, !use)]
    fn write(&mut self, mut buf: &[u8]) -> usize {
        log::trace!("Writing compressed entry data at {}", self.offset);

        let mut output = 0;
        let mut written = 0;

        let mut out = Vec::with_capacity(BUFFER_SIZE as usize);
        loop {
            let prev_in = self.compress.total_in();
            let prev_out = self.compress.total_out();

            let status = self
                .compress
                .compress_vec(buf, &mut out, flate2::FlushCompress::None)
                .unwrap();

            request!(write all out.clone());
            out.clear();

            let output_now = self.compress.total_out() - prev_out;
            let written_now = (self.compress.total_in() - prev_in) as usize;

            output += output_now;
            written += written_now;
            buf = &buf[written_now..];

            match status {
                flate2::Status::Ok if buf.is_empty() => break,
                flate2::Status::Ok | flate2::Status::BufError => {}
                flate2::Status::StreamEnd => unreachable!(),
            };
        }

        self.size += output;
        self.unpacked_size += written as u64;

        written
    }
}

impl DeflateWriteHandle {
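    /// Finishes the DEFLATE stream, writing out whatever output the
    /// compressor is still holding back.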
    #[generator(static, yield ReadSeekWriteRequest -> Response)]
    pub fn flush(&mut self) {
        let mut out = Vec::with_capacity(BUFFER_SIZE as usize);

        loop {
            self.compress
                .compress_vec(&[], &mut out, flate2::FlushCompress::Finish)
                .unwrap();

            if out.is_empty() {
                break;
            } else {
                request!(write all out.clone());
                self.size += out.len() as u64;
                out.clear();
            }
        }
    }
}

impl GeneratorWrite for WriteHandle<'_> {
    #[generator(static, yield ReadSeekWriteRequest -> Response, !use)]
    fn write(&mut self, buf: &[u8]) -> usize {
        match &mut self.inner {
            DataWriteHandle::Raw(h) => h.write(buf).await,
            DataWriteHandle::Deflate(h) => h.write(buf).await,
        }
    }
}

impl WriteHandle<'_> {
    pub fn is_compressed(&self) -> bool {
        match self.inner {
            DataWriteHandle::Raw(_) => false,
            DataWriteHandle::Deflate(_) => true,
        }
    }

    pub fn is_seekable(&self) -> bool {
        !self.is_compressed()
    }
}