use std::borrow::Borrow;
use std::collections::HashMap;
use std::fs::File;
use std::io;
use std::io::{BufWriter, Read, Seek, Write};
use std::path::{Path, PathBuf};

use crate::resource::pdefs::{PartitionId, PartitionType};
use crate::resource::resource_package::{
    ChunkType, PackageHeader, PackageMetadata, PackageOffsetFlags, PackageOffsetInfo,
    PackageVersion, ResourceHeader, ResourcePackage, ResourcePackageSource,
    ResourceReferenceCountAndFlags, ResourceReferenceFlags,
};
use crate::resource::resource_partition::PatchId;
use crate::resource::runtime_resource_id::RuntimeResourceID;
use crate::{GlacierResource, GlacierResourceError, WoaVersion};
use binrw::BinWrite;
use binrw::__private::Required;
use binrw::io::Cursor;
use binrw::meta::WriteEndian;
use indexmap::{IndexMap, IndexSet};
use lzzzz::{lz4, lz4_hc};
use thiserror::Error;

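/// Where the data of a single resource comes from when the package is written:
/// a whole file on disk, a slice of an existing package file, an in-memory
/// buffer, or an already-compressed in-memory buffer.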
enum PackageResourceBlob {
    File {
        path: PathBuf,
        size: u32,
        compression_level: Option<i32>,
        should_scramble: bool,
    },
    FileAtOffset {
        path: PathBuf,
        offset: u64,
        size: u32,
        compressed_size: Option<u32>,
        is_scrambled: bool,
    },
    Memory {
        data: Vec<u8>,
        compression_level: Option<i32>,
        should_scramble: bool,
    },
    CompressedMemory {
        data: Vec<u8>,
        decompressed_size: Option<u32>,
        is_scrambled: bool,
    },
}

impl PackageResourceBlob {
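    /// Returns the size of the resource data in bytes, using the decompressed
    /// size when it is known and the stored size otherwise.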
    pub fn size(&self) -> u32 {
        match self {
            PackageResourceBlob::File { size, .. } => *size,
            PackageResourceBlob::FileAtOffset { size, .. } => *size,
            PackageResourceBlob::Memory { data, .. } => data.len() as u32,
            PackageResourceBlob::CompressedMemory {
                data,
                decompressed_size,
                ..
            } => match decompressed_size {
                Some(size) => *size,
                None => data.len() as u32,
            },
        }
    }
}

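/// Describes a single resource to be added to a package: its runtime resource
/// id, data source, type, memory requirements, and references to other
/// resources.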
pub struct PackageResourceBuilder {
    rrid: RuntimeResourceID,
    blob: PackageResourceBlob,
    resource_type: [u8; 4],
    system_memory_requirement: u32,
    video_memory_requirement: u32,
    references: Vec<(RuntimeResourceID, ResourceReferenceFlags)>,
}

#[derive(Debug, Error)]
pub enum PackageResourceBuilderError {
    #[error("Error reading the file: {0}")]
    IoError(#[from] io::Error),

    #[error("File is too large")]
    FileTooLarge,

    #[error("The offset you provided is after the end of the file")]
    InvalidFileOffset,

    #[error("The size you provided extends beyond the end of the file")]
    InvalidFileBlobSize,

    #[error("Resource types must be exactly 4 characters")]
    InvalidResourceType,

    #[error("Internal Glacier resource error")]
    GlacierResourceError(#[from] GlacierResourceError),
}

impl PackageResourceBuilder {
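    /// Converts a 4-character resource type string (e.g. "TEMP") into the
    /// reversed byte order used on disk.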
    fn resource_type_to_bytes(resource_type: &str) -> Result<[u8; 4], PackageResourceBuilderError> {
        resource_type
            .chars()
            .rev()
            .collect::<String>()
            .as_bytes()
            .try_into()
            .map_err(|_| PackageResourceBuilderError::InvalidResourceType)
    }

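    /// Creates a resource builder whose data is read from a file on disk.
    ///
    /// Pass a `compression_level` to LZ4-compress the data when the package is
    /// built, and set `should_scramble` to XOR-scramble it.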
    pub fn from_file(
        rrid: RuntimeResourceID,
        resource_type: &str,
        path: &Path,
        compression_level: Option<i32>,
        should_scramble: bool,
    ) -> Result<Self, PackageResourceBuilderError> {
        let file_size = path
            .metadata()
            .map_err(PackageResourceBuilderError::IoError)?
            .len();

        if file_size >= u32::MAX as u64 {
            return Err(PackageResourceBuilderError::FileTooLarge);
        }

        Ok(Self {
            rrid,
            resource_type: Self::resource_type_to_bytes(resource_type)?,
            system_memory_requirement: file_size as u32,
            video_memory_requirement: u32::MAX,
            references: vec![],
            blob: PackageResourceBlob::File {
                path: path.to_path_buf(),
                size: file_size as u32,
                compression_level,
                should_scramble,
            },
        })
    }

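    /// Creates a resource builder whose data is a slice of an existing file,
    /// starting at `offset`. Used when duplicating resources from another
    /// package; the data is copied verbatim, so `compressed_size` and
    /// `is_scrambled` describe how it is already stored.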
    fn from_file_at_offset(
        rrid: RuntimeResourceID,
        resource_type: &str,
        path: &Path,
        offset: u64,
        size: u32,
        compressed_size: Option<u32>,
        is_scrambled: bool,
    ) -> Result<Self, PackageResourceBuilderError> {
        let file_size = path
            .metadata()
            .map_err(PackageResourceBuilderError::IoError)?
            .len();

        if offset >= file_size {
            return Err(PackageResourceBuilderError::InvalidFileOffset);
        }

        let read_size = compressed_size.unwrap_or(size);

        if offset + read_size as u64 > file_size {
            return Err(PackageResourceBuilderError::InvalidFileBlobSize);
        }

        Ok(Self {
            rrid,
            resource_type: Self::resource_type_to_bytes(resource_type)?,
            system_memory_requirement: size,
            video_memory_requirement: u32::MAX,
            references: vec![],
            blob: PackageResourceBlob::FileAtOffset {
                path: path.to_path_buf(),
                offset,
                size,
                compressed_size,
                is_scrambled,
            },
        })
    }

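    /// Creates a resource builder from a buffer that is already compressed
    /// (and possibly scrambled). `decompressed_size` should be `None` when the
    /// buffer is not actually compressed.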
    fn from_compressed_memory(
        rrid: RuntimeResourceID,
        resource_type: &str,
        data: Vec<u8>,
        decompressed_size: Option<u32>,
        is_scrambled: bool,
    ) -> Result<Self, PackageResourceBuilderError> {
        if data.len() > u32::MAX as usize {
            return Err(PackageResourceBuilderError::FileTooLarge);
        }

        let real_size = decompressed_size.unwrap_or(data.len() as u32);

        Ok(Self {
            rrid,
            resource_type: Self::resource_type_to_bytes(resource_type)?,
            system_memory_requirement: real_size,
            video_memory_requirement: u32::MAX,
            references: vec![],
            blob: PackageResourceBlob::CompressedMemory {
                data,
                decompressed_size,
                is_scrambled,
            },
        })
    }

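    /// Creates a resource builder from an in-memory buffer. The data can
    /// optionally be LZ4-compressed and/or scrambled when the package is built.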
    pub fn from_memory(
        rrid: RuntimeResourceID,
        resource_type: &str,
        data: Vec<u8>,
        compression_level: Option<i32>,
        should_scramble: bool,
    ) -> Result<Self, PackageResourceBuilderError> {
        if data.len() > u32::MAX as usize {
            return Err(PackageResourceBuilderError::FileTooLarge);
        }

        let real_size = data.len() as u32;

        Ok(Self {
            rrid,
            resource_type: Self::resource_type_to_bytes(resource_type)?,
            system_memory_requirement: real_size,
            video_memory_requirement: u32::MAX,
            references: vec![],
            blob: PackageResourceBlob::Memory {
                data,
                compression_level,
                should_scramble,
            },
        })
    }

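    /// Creates a resource builder by serializing a [`GlacierResource`] for the
    /// given game version. Compression and scrambling follow the resource's
    /// own `should_compress` / `should_scramble` hints.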
    pub fn from_glacier_resource<G: GlacierResource>(
        rrid: RuntimeResourceID,
        glacier_resource: &G,
        woa_version: WoaVersion,
    ) -> Result<Self, PackageResourceBuilderError> {
        let system_memory_requirement = glacier_resource.system_memory_requirement();
        let video_memory_requirement = glacier_resource.video_memory_requirement();
        let data = glacier_resource
            .serialize(woa_version)
            .map_err(PackageResourceBuilderError::GlacierResourceError)?;

        Ok(Self {
            rrid,
            resource_type: G::resource_type()
                .into_iter()
                .rev()
                .collect::<Vec<_>>()
                .try_into()
                .unwrap(),
            system_memory_requirement: u32::try_from(system_memory_requirement).unwrap_or(u32::MAX),
            video_memory_requirement: u32::try_from(video_memory_requirement).unwrap_or(u32::MAX),
            references: vec![],
            blob: PackageResourceBlob::Memory {
                data,
                compression_level: if glacier_resource.should_compress() {
                    Some(12)
                } else {
                    None
                },
                should_scramble: glacier_resource.should_scramble(),
            },
        })
    }

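    /// Adds a reference to another resource with the given flags.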
    pub fn with_reference(
        &mut self,
        rrid: RuntimeResourceID,
        flags: ResourceReferenceFlags,
    ) -> &mut Self {
        self.references.push((rrid, flags));
        self
    }

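    /// Adds multiple references to other resources.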
    pub fn with_references<I, P>(&mut self, refs: I) -> &mut Self
    where
        I: IntoIterator<Item = P>,
        P: Borrow<(RuntimeResourceID, ResourceReferenceFlags)>,
        (RuntimeResourceID, ResourceReferenceFlags): Copy,
    {
        self.references.extend(refs.into_iter().map(|p| *p.borrow()));
        self
    }

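    /// Overrides the system and video memory requirements of the resource.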
    pub fn with_memory_requirements(
        &mut self,
        system_memory_requirement: u32,
        video_memory_requirement: u32,
    ) -> &mut Self {
        self.system_memory_requirement = system_memory_requirement;
        self.video_memory_requirement = video_memory_requirement;
        self
    }
}

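/// Builder for creating resource packages.
///
/// Resources are added via [`PackageResourceBuilder`]s and the final package is
/// written with one of the `build_*` methods. A rough usage sketch, with
/// placeholder values (`rrid`, `data`, `output_dir`) and import paths omitted
/// since they depend on how the crate re-exports these types:
///
/// ```ignore
/// let mut builder = PackageBuilder::new(0, ChunkType::Standard);
/// let resource = PackageResourceBuilder::from_memory(
///     rrid,   // RuntimeResourceID of the new resource
///     "TEMP", // four-character resource type
///     data,   // raw resource bytes
///     None,   // no LZ4 compression
///     false,  // no scrambling
/// )?;
/// builder.with_resource(resource);
/// builder.build_to_file(PackageVersion::RPKGv2, output_dir)?;
/// ```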
pub struct PackageBuilder {
    partition_id: PartitionId,
    patch_id: PatchId,
    use_legacy_references: bool,
    resources: IndexMap<RuntimeResourceID, PackageResourceBuilder>,
    unneeded_resources: IndexSet<RuntimeResourceID>,
}

#[derive(Debug, Error)]
pub enum PackageBuilderError {
    #[error("Error writing the file: {0}")]
    IoError(#[from] io::Error),

    #[error("Error serializing the package: {0}")]
    SerializationError(#[from] binrw::Error),

    #[error("Unneeded resources are only supported when building a patch package")]
    UnneededResourcesNotSupported,

    #[error("Building patch but no patch ID was provided")]
    NoPatchId,

    #[error("Too many resources in the package")]
    TooManyResources,

    #[error("A resource has too many references")]
    TooManyReferences,

    #[error("Resource type is not valid")]
    InvalidResourceType,

    #[error("Cannot build from a resource package without a source")]
    NoSource,

    #[error("Could not duplicate resource {0} from the source package: {1}")]
    CannotDuplicateResource(RuntimeResourceID, PackageResourceBuilderError),

    #[error("LZ4 compression error: {0}")]
    Lz4CompressionError(#[from] lzzzz::Error),

419 #[error("Invalid partition id index cannot be greater than 255")]
420 InvalidPartitionIdIndex,
421
422 #[error("Patch id cannot be greater than 255")]
423 InvalidPatchId,
424}
425
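/// Size and entry positions produced by the offset-table pass, used later to
/// backpatch each resource's final offset info.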
struct OffsetTableResult {
    offset_table_size: u32,
    resource_entry_offsets: HashMap<RuntimeResourceID, u64>,
}

struct MetadataTableResult {
    metadata_table_size: u32,
}

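/// Writer adapter that XOR-scrambles each byte with a fixed 8-byte key before
/// forwarding it to the underlying writer.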
struct XorWriter<'a, W: Write + Seek> {
    writer: &'a mut W,
}

impl<W: Write + Seek> Write for XorWriter<'_, W> {
    fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
        let str_xor = [0xdc, 0x45, 0xa6, 0x9c, 0xd3, 0x72, 0x4c, 0xab];

        for (index, byte) in buf.iter().enumerate() {
            let xored_byte = *byte ^ str_xor[index % str_xor.len()];
            self.writer.write_all(&[xored_byte])?;
        }

        Ok(buf.len())
    }

    fn flush(&mut self) -> Result<(), io::Error> {
        self.writer.flush()
    }
}

impl PackageBuilder {
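    /// Creates a new package builder for the given chunk id and type,
    /// targeting a base (non-patch) package.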
    pub fn new(chunk_id: u8, chunk_type: ChunkType) -> Self {
        Self {
            partition_id: PartitionId {
                part_type: match chunk_type {
                    ChunkType::Standard => PartitionType::Standard,
                    ChunkType::Addon => PartitionType::Addon,
                },
                index: chunk_id as usize,
            },
            use_legacy_references: false,
            patch_id: PatchId::Base,
            resources: IndexMap::new(),
            unneeded_resources: IndexSet::new(),
        }
    }

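    /// Creates a new package builder for the given partition and patch id.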
    pub fn new_with_patch_id(partition_id: PartitionId, patch_id: PatchId) -> Self {
        Self {
            partition_id,
            patch_id,
            use_legacy_references: false,
            resources: IndexMap::new(),
            unneeded_resources: IndexSet::new(),
        }
    }

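    /// Creates a builder that duplicates an existing [`ResourcePackage`]:
    /// partition and patch ids, every resource (with its references and memory
    /// requirements), and the unneeded-resource list. Fails with
    /// [`PackageBuilderError::NoSource`] if the package has no backing source.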
    pub fn from_resource_package(
        resource_package: &ResourcePackage,
    ) -> Result<Self, PackageBuilderError> {
        let source = resource_package
            .source
            .as_ref()
            .ok_or(PackageBuilderError::NoSource)?;

        let mut package = Self {
            partition_id: PartitionId {
                part_type: match resource_package
                    .metadata
                    .as_ref()
                    .map(|m| m.chunk_type)
                    .unwrap_or_default()
                {
                    ChunkType::Standard => PartitionType::Standard,
                    ChunkType::Addon => PartitionType::Addon,
                },
                index: resource_package
                    .metadata
                    .as_ref()
                    .map(|m| m.chunk_id)
                    .unwrap_or_default() as usize,
            },
            patch_id: match resource_package
                .metadata
                .as_ref()
                .map(|m| m.patch_id)
                .unwrap_or_default()
            {
                0 => PatchId::Base,
                x => PatchId::Patch(x as usize),
            },
            use_legacy_references: false,
            resources: IndexMap::new(),
            unneeded_resources: IndexSet::new(),
        };

        for (rrid, resource) in &resource_package.resources {
            let mut builder = match source {
                ResourcePackageSource::File(source_path) => {
                    PackageResourceBuilder::from_file_at_offset(
                        *rrid,
                        &resource.data_type(),
                        source_path,
                        resource.entry.data_offset,
                        resource.header.data_size,
                        resource.compressed_size(),
                        resource.is_scrambled(),
                    )
                    .map_err(|e| PackageBuilderError::CannotDuplicateResource(*rrid, e))?
                }

                ResourcePackageSource::Memory(source_data) => {
                    let read_size = resource
                        .compressed_size()
                        .unwrap_or(resource.header.data_size);

                    let start_offset = resource.entry.data_offset as usize;
                    let end_offset = start_offset + read_size as usize;

                    let decompressed_size = if resource.is_compressed() {
                        Some(resource.header.data_size)
                    } else {
                        None
                    };

                    PackageResourceBuilder::from_compressed_memory(
                        *rrid,
                        &resource.data_type(),
                        source_data[start_offset..end_offset].to_vec(),
                        decompressed_size,
                        resource.is_scrambled(),
                    )
                    .map_err(|e| PackageBuilderError::CannotDuplicateResource(*rrid, e))?
                }
            };

            builder.with_memory_requirements(
                resource.system_memory_requirement(),
                resource.video_memory_requirement(),
            );

            for (rrid, flags) in resource.references() {
                builder.with_reference(*rrid, *flags);
            }

            package.with_resource(builder);
        }

        for rrid in resource_package.unneeded_resource_ids() {
            package.with_unneeded_resource(*rrid);
        }

        Ok(package)
    }

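    /// Sets the partition id of the package.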
    pub fn with_partition_id(&mut self, partition_id: &PartitionId) -> &mut Self {
        self.partition_id = partition_id.clone();
        self
    }

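    /// Sets the patch id of the package.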
    pub fn with_patch_id(&mut self, patch_id: &PatchId) -> &mut Self {
        self.patch_id = *patch_id;
        self
    }

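    /// Writes resource references in the legacy layout (resource ids followed
    /// by legacy flags) instead of the newer flags-then-ids layout.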
    pub fn use_legacy_references(&mut self) -> &mut Self {
        self.use_legacy_references = true;
        self
    }

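    /// Adds a resource to the package, replacing any existing resource with
    /// the same runtime resource id.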
    pub fn with_resource(&mut self, resource: PackageResourceBuilder) -> &mut Self {
        self.resources.insert(resource.rrid, resource);
        self
    }

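    /// Adds multiple resources to the package.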
    pub fn with_resources<I>(&mut self, resources: I) -> &mut Self
    where
        I: IntoIterator<Item = PackageResourceBuilder>,
    {
        self.resources
            .extend(resources.into_iter().map(|r| (r.rrid, r)));
        self
    }

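    /// Marks a resource id as unneeded. Only supported when building a patch
    /// package.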
    pub fn with_unneeded_resource(&mut self, rrid: RuntimeResourceID) -> &mut Self {
        self.unneeded_resources.insert(rrid);
        self
    }

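    /// Marks multiple resource ids as unneeded.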
    pub fn with_unneeded_resources<I, P>(&mut self, rrids: I) -> &mut Self
    where
        I: IntoIterator<Item = P>,
        P: Borrow<RuntimeResourceID>,
        RuntimeResourceID: Copy,
    {
        self.unneeded_resources
            .extend(rrids.into_iter().map(|p| *p.borrow()));
        self
    }

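    /// Seeks back to `patch_offset`, rewrites `data` there, and restores the
    /// previous stream position. Used to fill in values (offsets, sizes) that
    /// are only known after later parts of the package have been written.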
    fn backpatch<W: Write + Seek, T: BinWrite + WriteEndian>(
        writer: &mut W,
        patch_offset: u64,
        data: &T,
    ) -> Result<(), PackageBuilderError>
    where
        for<'a> T::Args<'a>: Required,
    {
        let current_offset = writer
            .stream_position()
            .map_err(PackageBuilderError::IoError)?;
        writer
            .seek(io::SeekFrom::Start(patch_offset))
            .map_err(PackageBuilderError::IoError)?;
        data.write(writer)
            .map_err(PackageBuilderError::SerializationError)?;
        writer
            .seek(io::SeekFrom::Start(current_offset))
            .map_err(PackageBuilderError::IoError)?;
        Ok(())
    }

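    /// Writes a placeholder offset-table entry for every resource and records
    /// each entry's position so it can be backpatched with the real data
    /// offset once the resource data has been written.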
    fn write_offset_table<W: Write + Seek>(
        &self,
        writer: &mut W,
    ) -> Result<OffsetTableResult, PackageBuilderError> {
        let mut resource_entry_offsets = HashMap::new();
        let offset_table_start = writer
            .stream_position()
            .map_err(PackageBuilderError::IoError)?;

        for (rrid, _) in &self.resources {
            let current_offset = writer
                .stream_position()
                .map_err(PackageBuilderError::IoError)?;

            let resource_entry = PackageOffsetInfo {
                runtime_resource_id: *rrid,
                data_offset: 0,
                flags: PackageOffsetFlags::new(),
            };

            resource_entry
                .write(writer)
                .map_err(PackageBuilderError::SerializationError)?;
            resource_entry_offsets.insert(*rrid, current_offset);
        }

        let offset_table_end = writer
            .stream_position()
            .map_err(PackageBuilderError::IoError)?;
        let offset_table_size = offset_table_end - offset_table_start;

        if offset_table_size > u32::MAX as u64 {
            return Err(PackageBuilderError::TooManyResources);
        }

        Ok(OffsetTableResult {
            offset_table_size: offset_table_size as u32,
            resource_entry_offsets,
        })
    }

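    /// Writes the metadata table: one [`ResourceHeader`] per resource followed
    /// by its reference table, backpatching each header once the reference
    /// table size is known.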
    fn write_metadata_table<W: Write + Seek>(
        &self,
        writer: &mut W,
        legacy_references: bool,
    ) -> Result<MetadataTableResult, PackageBuilderError> {
        let metadata_table_start = writer
            .stream_position()
            .map_err(PackageBuilderError::IoError)?;

        for (_, resource) in &self.resources {
            let metadata_offset = writer
                .stream_position()
                .map_err(PackageBuilderError::IoError)?;

            let mut resource_metadata = ResourceHeader {
                resource_type: resource.resource_type,
                references_chunk_size: 0,
                states_chunk_size: 0,
                data_size: resource.blob.size(),
                system_memory_requirement: resource.system_memory_requirement,
                video_memory_requirement: resource.video_memory_requirement,
                references: Vec::new(),
            };

            resource_metadata
                .write(writer)
                .map_err(PackageBuilderError::SerializationError)?;

            if !resource.references.is_empty() {
                let reference_table_start = writer
                    .stream_position()
                    .map_err(PackageBuilderError::IoError)?;

                let reference_count_and_flags = ResourceReferenceCountAndFlags::new()
                    .with_reference_count(resource.references.len() as u32)
                    .with_is_new_format(!legacy_references)
                    .with_always_true(true);

                reference_count_and_flags
                    .write(writer)
                    .map_err(PackageBuilderError::SerializationError)?;

                if legacy_references {
                    // Legacy layout: all resource ids first, then the legacy flags.
                    for (rrid, _) in &resource.references {
                        rrid.write(writer)
                            .map_err(PackageBuilderError::SerializationError)?;
                    }

                    for (_, flags) in &resource.references {
                        flags
                            .to_legacy()
                            .write(writer)
                            .map_err(PackageBuilderError::SerializationError)?;
                    }
                } else {
                    // New layout: all flags first, then the resource ids.
                    for (_, flags) in &resource.references {
                        flags
                            .to_standard()
                            .write(writer)
                            .map_err(PackageBuilderError::SerializationError)?;
                    }

                    for (rrid, _) in &resource.references {
                        rrid.write(writer)
                            .map_err(PackageBuilderError::SerializationError)?;
                    }
                }

                let reference_table_end = writer
                    .stream_position()
                    .map_err(PackageBuilderError::IoError)?;
                let reference_table_size = reference_table_end - reference_table_start;

                if reference_table_size > u32::MAX as u64 {
                    return Err(PackageBuilderError::TooManyReferences);
                }

                // Now that the reference table size is known, backpatch the header.
                resource_metadata.references_chunk_size = reference_table_size as u32;
                PackageBuilder::backpatch(writer, metadata_offset, &resource_metadata)?;
            }
        }

        let metadata_table_end = writer
            .stream_position()
            .map_err(PackageBuilderError::IoError)?;
        let metadata_table_size = metadata_table_end - metadata_table_start;

        if metadata_table_size > u32::MAX as u64 {
            return Err(PackageBuilderError::TooManyResources);
        }

        Ok(MetadataTableResult {
            metadata_table_size: metadata_table_size as u32,
        })
    }

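    /// Writes the complete package (header, offset table, metadata table, and
    /// resource data) to the given writer.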
    fn build_internal<W: Write + Seek>(
        &self,
        version: PackageVersion,
        writer: &mut W,
    ) -> Result<(), PackageBuilderError> {
        if !self.unneeded_resources.is_empty() && self.patch_id.is_base() {
            return Err(PackageBuilderError::UnneededResourcesNotSupported);
        }

        // Write the header with zeroed table sizes; they are backpatched once
        // the offset and metadata tables have been written.
        let mut header = ResourcePackage {
            source: None,
            magic: match version {
                PackageVersion::RPKGv1 => *b"GKPR",
                PackageVersion::RPKGv2 => *b"2KPR",
            },
            metadata: match version {
                PackageVersion::RPKGv1 => None,
                PackageVersion::RPKGv2 => Some(PackageMetadata {
                    unknown: 1,
                    chunk_id: self.partition_id.index as u8,
                    chunk_type: match self.partition_id.part_type {
                        PartitionType::Addon => ChunkType::Addon,
                        _ => ChunkType::Standard,
                    },
                    patch_id: match self.patch_id {
                        PatchId::Base => 0,
                        PatchId::Patch(x) => x as u8,
                    },
                    language_tag: *b"xx",
                }),
            },
            header: PackageHeader {
                file_count: self.resources.len() as u32,
                offset_table_size: 0,
                metadata_table_size: 0,
            },
            unneeded_resource_count: self.unneeded_resources.len() as u32,
            unneeded_resources: Some(self.unneeded_resources.iter().copied().collect()),
            resources: IndexMap::new(),
        };

        header
            .write_args(writer, (self.patch_id.is_patch(),))
            .map_err(PackageBuilderError::SerializationError)?;

        let offset_table_result = self.write_offset_table(writer)?;
        let metadata_table_result =
            self.write_metadata_table(writer, self.use_legacy_references)?;

        header.header.offset_table_size = offset_table_result.offset_table_size;
        header.header.metadata_table_size = metadata_table_result.metadata_table_size;
        PackageBuilder::backpatch(writer, 0, &header)?;

        // Write the data for each resource, then backpatch its offset-table
        // entry with the final offset, compressed size, and scramble flag.
        for (rrid, resource) in &self.resources {
            let data_offset = writer
                .stream_position()
                .map_err(PackageBuilderError::IoError)?;

            let (compressed_size, is_scrambled) = match &resource.blob {
                PackageResourceBlob::File {
                    path,
                    size,
                    compression_level,
                    should_scramble,
                } => {
                    let mut file = File::open(path).map_err(PackageBuilderError::IoError)?;

                    let mut data_writer: Box<dyn Write> = match should_scramble {
                        true => Box::new(XorWriter { writer }),
                        false => Box::new(&mut *writer),
                    };

                    let compressed_size = match compression_level {
                        Some(level) => {
                            let mut compressed_buffer =
                                vec![0; lz4::max_compressed_size(*size as usize)];
                            let mut decompressed_data = vec![0; *size as usize];
                            file.read_exact(&mut decompressed_data)
                                .map_err(PackageBuilderError::IoError)?;

                            let compressed_size = match version {
                                PackageVersion::RPKGv1 => lz4::compress(
                                    &decompressed_data,
                                    &mut compressed_buffer,
                                    *level,
                                )?,
                                PackageVersion::RPKGv2 => lz4_hc::compress(
                                    &decompressed_data,
                                    &mut compressed_buffer,
                                    *level,
                                )?,
                            };

                            data_writer
                                .write_all(&compressed_buffer[..compressed_size])
                                .map_err(PackageBuilderError::IoError)?;

                            Some(compressed_size as u32)
                        }

                        None => {
                            io::copy(&mut file, &mut data_writer)
                                .map_err(PackageBuilderError::IoError)?;
                            None
                        }
                    };

                    (compressed_size, *should_scramble)
                }

                PackageResourceBlob::FileAtOffset {
                    path,
                    offset,
                    size,
                    compressed_size,
                    is_scrambled,
                } => {
                    let size_to_copy = compressed_size.unwrap_or_else(|| *size);

                    let mut file = File::open(path).map_err(PackageBuilderError::IoError)?;
                    file.seek(io::SeekFrom::Start(*offset))
                        .map_err(PackageBuilderError::IoError)?;
                    io::copy(&mut file.take(size_to_copy as u64), writer)
                        .map_err(PackageBuilderError::IoError)?;

                    (*compressed_size, *is_scrambled)
                }

                PackageResourceBlob::CompressedMemory {
                    data,
                    decompressed_size,
                    is_scrambled,
                } => {
                    writer
                        .write_all(data)
                        .map_err(PackageBuilderError::IoError)?;
                    // The data is already compressed; if a decompressed size is
                    // known, the stored length is the compressed size.
                    let compressed_size = decompressed_size.map(|_| data.len() as u32);
                    (compressed_size, *is_scrambled)
                }

                PackageResourceBlob::Memory {
                    data,
                    compression_level,
                    should_scramble,
                } => {
                    let mut data_writer: Box<dyn Write> = match should_scramble {
                        true => Box::new(XorWriter { writer }),
                        false => Box::new(&mut *writer),
                    };

                    let compressed_size = match compression_level {
                        Some(level) => {
                            let mut compressed_buffer =
                                vec![0; lz4::max_compressed_size(data.len())];
                            let compressed_size = match version {
                                PackageVersion::RPKGv1 => {
                                    lz4::compress(data, &mut compressed_buffer, *level)?
                                }
                                PackageVersion::RPKGv2 => {
                                    lz4_hc::compress(data, &mut compressed_buffer, *level)?
                                }
                            };

                            data_writer
                                .write_all(&compressed_buffer[..compressed_size])
                                .map_err(PackageBuilderError::IoError)?;

                            Some(compressed_size as u32)
                        }

                        None => {
                            data_writer
                                .write_all(data)
                                .map_err(PackageBuilderError::IoError)?;
                            None
                        }
                    };

                    (compressed_size, *should_scramble)
                }
            };

            let final_compressed_size = compressed_size.unwrap_or(0);

            let offset_info = PackageOffsetInfo {
                runtime_resource_id: *rrid,
                data_offset,
                flags: PackageOffsetFlags::new()
                    .with_compressed_size(final_compressed_size)
                    .with_is_scrambled(is_scrambled),
            };

            let patch_offset = offset_table_result.resource_entry_offsets[rrid];
            PackageBuilder::backpatch(writer, patch_offset, &offset_info)?;
        }

        Ok(())
    }

    #[deprecated(since = "1.1.1", note = "use `build_to_file` instead")]
    pub fn build(
        self,
        version: PackageVersion,
        output_path: &Path,
    ) -> Result<(), PackageBuilderError> {
        self.build_to_file(version, output_path)
    }

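    /// Builds the package for the given version and writes it to the provided
    /// writer.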
    pub fn build_to_writer<W: Write + Seek>(
        self,
        version: PackageVersion,
        writer: &mut W,
    ) -> Result<(), PackageBuilderError> {
        self.build_internal(version, writer)
    }

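    /// Builds the package and writes it to `output_path`. If the path is an
    /// existing directory, the file name is derived from the partition and
    /// patch ids.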
    pub fn build_to_file<P: AsRef<Path>>(
        self,
        version: PackageVersion,
        output_path: P,
    ) -> Result<(), PackageBuilderError> {
        let output_path: &Path = output_path.as_ref();
        let output_file = match output_path.is_dir() {
            true => output_path.join(self.partition_id.to_filename(self.patch_id)),
            false => output_path.to_path_buf(),
        };

        let file = File::create(output_file).map_err(PackageBuilderError::IoError)?;
        let mut writer = BufWriter::new(file);
        let result = self.build_internal(version, &mut writer);
        writer.flush()?;
        result
    }

    #[deprecated(since = "1.1.1", note = "use `build_to_vec` instead")]
    pub fn build_in_memory(self, version: PackageVersion) -> Result<Vec<u8>, PackageBuilderError> {
        self.build_to_vec(version)
    }

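    /// Builds the package in memory and returns the resulting bytes.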
    pub fn build_to_vec(self, version: PackageVersion) -> Result<Vec<u8>, PackageBuilderError> {
        let mut writer = Cursor::new(vec![]);

        self.build_internal(version, &mut writer)?;
        Ok(writer.into_inner())
    }
}