rpkg_rs/resource/
package_builder.rs

1use std::collections::HashMap;
2use std::fs::File;
3use std::io;
4use std::io::{BufWriter, Read, Seek, Write};
5use std::path::{Path, PathBuf};
6
7use crate::resource::pdefs::{PartitionId, PartitionType};
8use crate::resource::resource_package::{
9    ChunkType, PackageHeader, PackageMetadata, PackageOffsetFlags, PackageOffsetInfo,
10    PackageVersion, ResourceHeader, ResourcePackage, ResourcePackageSource,
11    ResourceReferenceCountAndFlags, ResourceReferenceFlags,
12};
13use crate::resource::resource_partition::PatchId;
14use crate::resource::runtime_resource_id::RuntimeResourceID;
15use crate::{GlacierResource, GlacierResourceError, WoaVersion};
16use binrw::BinWrite;
17use binrw::__private::Required;
18use binrw::io::Cursor;
19use binrw::meta::WriteEndian;
20use indexmap::{IndexMap, IndexSet};
21use lzzzz::{lz4, lz4_hc};
22use thiserror::Error;
23
/// `PackageResourceBlob` is an enum representing various types of package resource stores, which can
/// include files, file sections, and memory buffers, optionally compressed or scrambled.
enum PackageResourceBlob {
    /// A whole file on disk; compression/scrambling is applied when the package is built.
    File {
        path: PathBuf,
        // Uncompressed size of the file, in bytes.
        size: u32,
        // LZ4 compression level to apply at build time, or `None` to store uncompressed.
        compression_level: Option<i32>,
        // Whether the data should be scrambled (XOR'd) when written.
        should_scramble: bool,
    },
    /// A slice of a file on disk whose bytes are already in their final stored form
    /// (possibly compressed and/or scrambled); copied through as-is at build time.
    FileAtOffset {
        path: PathBuf,
        // Byte offset within the file where this resource's data starts.
        offset: u64,
        // Uncompressed size of the data, in bytes.
        size: u32,
        // Size of the stored (compressed) data, or `None` if it is not compressed.
        compressed_size: Option<u32>,
        // Whether the stored data is already scrambled.
        is_scrambled: bool,
    },
    /// An uncompressed in-memory buffer; compression/scrambling is applied at build time.
    Memory {
        data: Vec<u8>,
        // LZ4 compression level to apply at build time, or `None` to store uncompressed.
        compression_level: Option<i32>,
        // Whether the data should be scrambled (XOR'd) when written.
        should_scramble: bool,
    },
    /// An in-memory buffer already in its final stored form (possibly compressed
    /// and/or scrambled); copied through as-is at build time.
    CompressedMemory {
        data: Vec<u8>,
        // Uncompressed size of the data, or `None` if `data` is not compressed.
        decompressed_size: Option<u32>,
        // Whether the stored data is already scrambled.
        is_scrambled: bool,
    },
}
51
52impl PackageResourceBlob {
53    /// The (uncompressed) size of the resource blob in bytes.
54    pub fn size(&self) -> u32 {
55        match self {
56            PackageResourceBlob::File { size, .. } => *size,
57            PackageResourceBlob::FileAtOffset { size, .. } => *size,
58            PackageResourceBlob::Memory { data, .. } => data.len() as u32,
59            PackageResourceBlob::CompressedMemory {
60                data,
61                decompressed_size,
62                ..
63            } => match decompressed_size {
64                Some(size) => *size,
65                None => data.len() as u32,
66            },
67        }
68    }
69}
70
/// A builder for creating a resource within a ResourcePackage
pub struct PackageResourceBuilder {
    // The runtime resource ID this resource is registered under.
    rrid: RuntimeResourceID,
    // Where the resource's data comes from (file, file slice, or memory buffer).
    blob: PackageResourceBlob,
    // The 4-byte resource type tag, stored byte-reversed (little endian).
    resource_type: [u8; 4],
    // Memory requirements written into the resource header.
    system_memory_requirement: u32,
    video_memory_requirement: u32,
    // We store references in a vector because their order is important and there can be duplicates.
    references: Vec<(RuntimeResourceID, ResourceReferenceFlags)>,
}
81
/// Errors that can occur while constructing a [`PackageResourceBuilder`].
#[derive(Debug, Error)]
pub enum PackageResourceBuilderError {
    /// Reading the backing file (or its metadata) failed.
    #[error("Error reading the file: {0}")]
    IoError(#[from] io::Error),

    /// The blob does not fit in the package format's 32-bit sizes.
    #[error("File is too large")]
    FileTooLarge,

    /// The requested offset lies past the end of the backing file.
    #[error("The offset you provided is after the end of the file")]
    InvalidFileOffset,

    /// Offset plus size extends past the end of the backing file.
    #[error("The size you provided extends beyond the end of the file")]
    InvalidFileBlobSize,

    /// Resource type tags must serialize to exactly 4 bytes.
    #[error("Resource types must be exactly 4 characters")]
    InvalidResourceType,

    /// Serializing a `GlacierResource` failed.
    #[error("Internal Glacier resource error")]
    GlacierResourceError(#[from] GlacierResourceError),
}
102
/// A builder for creating a resource within a ResourcePackage.
impl PackageResourceBuilder {
    /// Converts a resource type string to a byte array.
    /// Characters are reversed since everything is little endian.
    ///
    /// Fails with `InvalidResourceType` when the reversed string is not exactly 4 bytes.
    // NOTE(review): `chars().rev()` reverses characters, not bytes — equivalent for
    // the ASCII 4CC tags this is used with; confirm non-ASCII tags are never expected.
    fn resource_type_to_bytes(resource_type: &str) -> Result<[u8; 4], PackageResourceBuilderError> {
        resource_type
            .chars()
            .rev()
            .collect::<String>()
            .as_bytes()
            .try_into()
            .map_err(|_| PackageResourceBuilderError::InvalidResourceType)
    }

    /// Create a new resource builder from a file on disk.
    ///
    /// # Arguments
    /// * `rrid` - The resource ID of the resource.
    /// * `resource_type` - The type of the resource.
    /// * `path` - The path to the file.
    /// * `compression_level` - The compression level to use for the file, or None for no compression.
    /// * `should_scramble` - Whether the file data should be scrambled.
    ///
    /// # Errors
    /// Returns `IoError` if the file's metadata cannot be read, or `FileTooLarge`
    /// if its size does not fit in a `u32`.
    pub fn from_file(
        rrid: RuntimeResourceID,
        resource_type: &str,
        path: &Path,
        compression_level: Option<i32>,
        should_scramble: bool,
    ) -> Result<Self, PackageResourceBuilderError> {
        let file_size = path
            .metadata()
            .map_err(PackageResourceBuilderError::IoError)?
            .len();

        // Sizes are stored as u32 in the package format.
        if file_size >= u32::MAX as u64 {
            return Err(PackageResourceBuilderError::FileTooLarge);
        }

        Ok(Self {
            rrid,
            resource_type: Self::resource_type_to_bytes(resource_type)?,
            system_memory_requirement: file_size as u32,
            // u32::MAX appears to act as a "no requirement" sentinel — TODO confirm.
            video_memory_requirement: u32::MAX,
            references: vec![],
            blob: PackageResourceBlob::File {
                path: path.to_path_buf(),
                size: file_size as u32,
                compression_level,
                should_scramble,
            },
        })
    }

    /// Create a new resource builder from a file on disk, but only reading a part of it.
    ///
    /// The referenced bytes are assumed to already be in their final stored form
    /// (possibly compressed/scrambled) and are copied through as-is at build time.
    ///
    /// # Arguments
    /// * `rrid` - The resource ID of the resource.
    /// * `resource_type` - The type of the resource.
    /// * `path` - The path to the file.
    /// * `offset` - The offset of the file to start reading from.
    /// * `size` - The size of the data.
    /// * `compressed_size` - The compressed size of the data, if the resource is compressed.
    /// * `is_scrambled` - Whether the data is scrambled.
    fn from_file_at_offset(
        rrid: RuntimeResourceID,
        resource_type: &str,
        path: &Path,
        offset: u64,
        size: u32,
        compressed_size: Option<u32>,
        is_scrambled: bool,
    ) -> Result<Self, PackageResourceBuilderError> {
        let file_size = path
            .metadata()
            .map_err(PackageResourceBuilderError::IoError)?
            .len();

        if offset >= file_size {
            return Err(PackageResourceBuilderError::InvalidFileOffset);
        }

        // The number of bytes actually stored on disk: the compressed size when
        // present, the plain size otherwise.
        let read_size = compressed_size.unwrap_or(size);

        if offset + read_size as u64 > file_size {
            return Err(PackageResourceBuilderError::InvalidFileBlobSize);
        }

        Ok(Self {
            rrid,
            resource_type: Self::resource_type_to_bytes(resource_type)?,
            system_memory_requirement: size,
            // u32::MAX appears to act as a "no requirement" sentinel — TODO confirm.
            video_memory_requirement: u32::MAX,
            references: vec![],
            blob: PackageResourceBlob::FileAtOffset {
                path: path.to_path_buf(),
                offset,
                size,
                compressed_size,
                is_scrambled,
            },
        })
    }

    /// Create a new resource builder from a (possibly compressed) in-memory blob.
    ///
    /// # Arguments
    /// * `rrid` - The resource ID of the resource.
    /// * `resource_type` - The type of the resource.
    /// * `data` - The data of the resource.
    /// * `decompressed_size` - The decompressed size of the data, if the resource is compressed.
    /// * `is_scrambled` - Whether the data is scrambled.
    fn from_compressed_memory(
        rrid: RuntimeResourceID,
        resource_type: &str,
        data: Vec<u8>,
        decompressed_size: Option<u32>,
        is_scrambled: bool,
    ) -> Result<Self, PackageResourceBuilderError> {
        if data.len() > u32::MAX as usize {
            return Err(PackageResourceBuilderError::FileTooLarge);
        }

        // Logical (decompressed) size used for the memory requirement.
        let real_size = decompressed_size.unwrap_or(data.len() as u32);

        Ok(Self {
            rrid,
            resource_type: Self::resource_type_to_bytes(resource_type)?,
            system_memory_requirement: real_size,
            // u32::MAX appears to act as a "no requirement" sentinel — TODO confirm.
            video_memory_requirement: u32::MAX,
            references: vec![],
            blob: PackageResourceBlob::CompressedMemory {
                data,
                decompressed_size,
                is_scrambled,
            },
        })
    }

    /// Create a new resource builder from an in-memory blob.
    ///
    /// This is similar to `from_compressed_memory`, but it expects the data to be uncompressed and
    /// can optionally compress and scramble it.
    ///
    /// # Arguments
    /// * `rrid` - The resource ID of the resource.
    /// * `resource_type` - The type of the resource.
    /// * `data` - The data of the resource.
    /// * `compression_level` - The compression level to use for the data, or None for no compression.
    /// * `should_scramble` - Whether the data should be scrambled.
    pub fn from_memory(
        rrid: RuntimeResourceID,
        resource_type: &str,
        data: Vec<u8>,
        compression_level: Option<i32>,
        should_scramble: bool,
    ) -> Result<Self, PackageResourceBuilderError> {
        if data.len() > u32::MAX as usize {
            return Err(PackageResourceBuilderError::FileTooLarge);
        }

        let real_size = data.len() as u32;

        Ok(Self {
            rrid,
            resource_type: Self::resource_type_to_bytes(resource_type)?,
            system_memory_requirement: real_size,
            // u32::MAX appears to act as a "no requirement" sentinel — TODO confirm.
            video_memory_requirement: u32::MAX,
            references: vec![],
            blob: PackageResourceBlob::Memory {
                data,
                compression_level,
                should_scramble,
            },
        })
    }

    /// Create a new resource builder from a GlacierResource.
    ///
    /// # Arguments
    /// * `rrid` - The resource ID of the resource.
    /// * `glacier_resource` - A reference to an object implementing the `GlacierResource` trait.
    /// * `woa_version` - The HITMAN game version you want to construct the GlacierResource for
    pub fn from_glacier_resource<G: GlacierResource>(
        rrid: RuntimeResourceID,
        glacier_resource: &G,
        woa_version: WoaVersion
    ) -> Result<Self, PackageResourceBuilderError> {
        let system_memory_requirement = glacier_resource.system_memory_requirement();
        let video_memory_requirement = glacier_resource.video_memory_requirement();
        let data = glacier_resource
            .serialize(woa_version)
            .map_err(PackageResourceBuilderError::GlacierResourceError)?;

        Ok(Self {
            rrid,
            // Byte-reverse the 4-byte type tag (little endian), mirroring
            // `resource_type_to_bytes`. The `unwrap` cannot fail: reversing a
            // [u8; 4] always yields exactly 4 bytes.
            resource_type: G::resource_type().into_iter().rev().collect::<Vec<_>>().try_into().unwrap(),
            // Memory requirements larger than u32 are clamped to the u32::MAX sentinel.
            system_memory_requirement: u32::try_from(system_memory_requirement).unwrap_or(u32::MAX),
            video_memory_requirement: u32::try_from(video_memory_requirement).unwrap_or(u32::MAX),
            references: vec![],
            blob: PackageResourceBlob::Memory {
                data,
                // Level 12 is lz4_hc's maximum compression level.
                compression_level: if glacier_resource.should_compress() {Some(12)} else {None},
                should_scramble: glacier_resource.should_scramble(),
            },
        })
    }

    /// Adds a reference to the resource.
    ///
    /// This specifies that this resource depends on / references another resource.
    /// References are kept in insertion order and duplicates are allowed.
    ///
    /// # Arguments
    /// * `rrid` - The resource ID of the reference.
    /// * `flags` - The flags of the reference.
    pub fn with_reference(
        &mut self,
        rrid: RuntimeResourceID,
        flags: ResourceReferenceFlags,
    ) -> &mut Self {
        self.references.push((rrid, flags));
        self
    }

    /// Sets the memory requirements of the resource.
    ///
    /// Overrides the defaults chosen by the `from_*` constructors.
    ///
    /// # Arguments
    /// * `system_memory_requirement` - The system memory requirement of the resource.
    /// * `video_memory_requirement` - The video memory requirement of the resource.
    pub fn with_memory_requirements(
        &mut self,
        system_memory_requirement: u32,
        video_memory_requirement: u32,
    ) -> &mut Self {
        self.system_memory_requirement = system_memory_requirement;
        self.video_memory_requirement = video_memory_requirement;
        self
    }
}
341
/// A builder for creating a ResourcePackage.
/// ```
/// # use rpkg_rs::resource::package_builder::{PackageBuilderError, PackageResourceBuilder};
/// # use rpkg_rs::resource::pdefs::{PartitionId, PartitionType};
/// # use rpkg_rs::resource::resource_package::PackageVersion;
/// # use rpkg_rs::resource::resource_partition::PatchId;
/// # use rpkg_rs::resource::runtime_resource_id::RuntimeResourceID;
/// # use rpkg_rs::resource::package_builder::PackageBuilder;
/// # use std::error::Error;
/// # use std::fs;
/// # fn main() -> Result<(), Box<dyn Error>>{
/// #   let temp_dir = tempfile::tempdir()?;
/// #   let output_path = temp_dir.path();
///
///     let mut builder = PackageBuilder::new_with_patch_id(PartitionId::default(), PatchId::Base);
///     builder.with_resource(PackageResourceBuilder::from_memory(RuntimeResourceID::default(), "TYPE", vec![0,1,2,3,4,5], None, false).unwrap());
///     builder.build(PackageVersion::RPKGv2, output_path)?;
///
///     assert!(temp_dir.path().join("chunk0.rpkg").exists());
/// #   Ok(())
/// # }
/// ```
pub struct PackageBuilder {
    // Identifies the chunk (partition) this package belongs to.
    partition_id: PartitionId,
    // Whether this is a base package or a numbered patch.
    patch_id: PatchId,
    // When set, reference flags are serialized in the legacy format.
    use_legacy_references: bool,
    // Resources keyed by rrid; insertion order is preserved and becomes the
    // order they are written into the package.
    resources: IndexMap<RuntimeResourceID, PackageResourceBuilder>,
    // Resources that this (patch) package marks as no longer needed.
    unneeded_resources: IndexSet<RuntimeResourceID>,
}
370
/// Errors that can occur while building a package.
#[derive(Debug, Error)]
pub enum PackageBuilderError {
    /// Writing to the output stream/file failed.
    #[error("Error writing the file: {0}")]
    IoError(#[from] io::Error),

    /// binrw failed to serialize a structure.
    #[error("Error serializing the package: {0}")]
    SerializationError(#[from] binrw::Error),

    /// Unneeded resources were added but the package is a base (non-patch) package.
    #[error("Unneeded resources are only supported when building a patch package")]
    UnneededResourcesNotSupported,

    /// Building patch but no patch ID was provided.
    #[error("Building patch but no patch ID was provided")]
    NoPatchId,

    /// A table grew past the format's 32-bit size limit.
    #[error("Too many resources in the package")]
    TooManyResources,

    /// A single resource's reference table grew past the 32-bit size limit.
    #[error("A resource has too many references")]
    TooManyReferences,

    /// The resource type tag could not be serialized.
    #[error("Resource type is not valid")]
    InvalidResourceType,

    /// The source package has no backing file or memory buffer to copy data from.
    #[error("Cannot build from a resource package without a source")]
    NoSource,

    /// A resource could not be copied out of the source package.
    #[error("Could not duplicate resource {0} from the source package: {1}")]
    CannotDuplicateResource(RuntimeResourceID, PackageResourceBuilderError),

    /// LZ4 (de)compression failed.
    #[error("LZ4 compression error: {0}")]
    Lz4CompressionError(#[from] lzzzz::Error),

    /// Partition indices are stored as a single byte.
    #[error("Invalid partition id index cannot be greater than 255")]
    InvalidPartitionIdIndex,

    /// Patch ids are stored as a single byte.
    #[error("Patch id cannot be greater than 255")]
    InvalidPatchId,
}
409
/// Result of writing the (placeholder) offset table.
struct OffsetTableResult {
    // Total size of the offset table, in bytes.
    offset_table_size: u32,
    // Maps each resource id to the file offset of its table entry, so the entry
    // can be backpatched with the real data offset once the data is written.
    resource_entry_offsets: HashMap<RuntimeResourceID, u64>,
}
414
/// Result of writing the metadata table.
struct MetadataTableResult {
    // Total size of the metadata table, in bytes.
    metadata_table_size: u32,
}
418
/// A writer that xors the data with a predefined key.
///
/// Used to scramble resource data as it is written. Each call to `write` restarts
/// the 8-byte key from its beginning, matching how scrambled data is laid out.
struct XorWriter<'a, W: Write + Seek> {
    writer: &'a mut W,
}

impl<W: Write + Seek> Write for XorWriter<'_, W> {
    /// XORs `buf` against the repeating key and forwards it to the inner writer.
    ///
    /// All of `buf` is always consumed (written via `write_all`), so on success
    /// this reports `buf.len()` bytes written.
    fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
        // The fixed scrambling key used by the package format.
        const STR_XOR: [u8; 8] = [0xdc, 0x45, 0xa6, 0x9c, 0xd3, 0x72, 0x4c, 0xab];

        // XOR into a temporary buffer and issue a single write, instead of one
        // `write_all` call per byte as before — this avoids a virtual call (and
        // potentially a syscall) for every single byte.
        let scrambled: Vec<u8> = buf
            .iter()
            .enumerate()
            .map(|(index, byte)| byte ^ STR_XOR[index % STR_XOR.len()])
            .collect();

        self.writer.write_all(&scrambled)?;
        Ok(buf.len())
    }

    fn flush(&mut self) -> Result<(), io::Error> {
        self.writer.flush()
    }
}
440
441impl PackageBuilder {
442    /// Creates a new package builder.
443    ///
444    /// # Arguments
445    /// * `chunk_id` - The chunk ID of the package. e.g. chunk0
446    /// * `chunk_type` - The chunk type of the package.
447    pub fn new(chunk_id: u8, chunk_type: ChunkType) -> Self {
448        Self {
449            partition_id: PartitionId {
450                part_type: match chunk_type {
451                    ChunkType::Standard => PartitionType::Standard,
452                    ChunkType::Addon => PartitionType::Addon,
453                },
454                index: chunk_id as usize,
455            },
456            use_legacy_references: false,
457            patch_id: PatchId::Base,
458            resources: IndexMap::new(),
459            unneeded_resources: IndexSet::new(),
460        }
461    }
462
463    /// Creates a new package builder using the given partition id and patch id.
464    ///
465    /// # Arguments
466    /// * `partition_id` - The partition id of the package.
467    /// * `patch_id` - The patch id of the package.
468    pub fn new_with_patch_id(partition_id: PartitionId, patch_id: PatchId) -> Self {
469        Self {
470            partition_id,
471            patch_id,
472            use_legacy_references: false,
473            resources: IndexMap::new(),
474            unneeded_resources: IndexSet::new(),
475        }
476    }
477
    /// Creates a new package builder by duplicating an existing ResourcePackage.
    ///
    /// Copies every resource (data, references, memory requirements) and every
    /// unneeded-resource entry. Resource data is not re-encoded: it is referenced
    /// in its already-stored (possibly compressed/scrambled) form.
    ///
    /// # Arguments
    /// * `resource_package` - The ResourcePackage to duplicate.
    ///
    /// # Errors
    /// Fails with `NoSource` if the package has no backing file or memory buffer,
    /// or with `CannotDuplicateResource` if a resource cannot be copied.
    pub fn from_resource_package(
        resource_package: &ResourcePackage,
    ) -> Result<Self, PackageBuilderError> {
        // We can only duplicate a package if we know where its data lives.
        let source = resource_package
            .source
            .as_ref()
            .ok_or(PackageBuilderError::NoSource)?;

        let mut package = Self {
            partition_id: PartitionId {
                // RPKGv1 packages carry no metadata; fall back to defaults then.
                part_type: match resource_package
                    .metadata
                    .as_ref()
                    .map(|m| m.chunk_type)
                    .unwrap_or_default()
                {
                    ChunkType::Standard => PartitionType::Standard,
                    ChunkType::Addon => PartitionType::Addon,
                },
                index: resource_package
                    .metadata
                    .as_ref()
                    .map(|m| m.chunk_id)
                    .unwrap_or_default() as usize,
            },
            // A stored patch id of 0 means this is a base package.
            patch_id: match resource_package
                .metadata
                .as_ref()
                .map(|m| m.patch_id)
                .unwrap_or_default()
            {
                0 => PatchId::Base,
                x => PatchId::Patch(x as usize),
            },
            use_legacy_references: false,
            resources: IndexMap::new(),
            unneeded_resources: IndexSet::new(),
        };

        for (rrid, resource) in &resource_package.resources {
            let mut builder = match source {
                // Data lives in a file on disk: reference the stored bytes in place.
                ResourcePackageSource::File(source_path) => {
                    PackageResourceBuilder::from_file_at_offset(
                        *rrid,
                        &resource.data_type(),
                        source_path,
                        resource.entry.data_offset,
                        resource.header.data_size,
                        resource.compressed_size(),
                        resource.is_scrambled(),
                    )
                    .map_err(|e| PackageBuilderError::CannotDuplicateResource(*rrid, e))?
                }

                // Data lives in memory: copy out the stored (possibly compressed) slice.
                ResourcePackageSource::Memory(source_data) => {
                    // Number of bytes actually stored for this resource.
                    let read_size = resource
                        .compressed_size()
                        .unwrap_or(resource.header.data_size);

                    let start_offset = resource.entry.data_offset as usize;
                    let end_offset = start_offset + read_size as usize;

                    let decompressed_size = if resource.is_compressed() {
                        Some(resource.header.data_size)
                    } else {
                        None
                    };

                    // NOTE(review): this slice panics if the source package reports an
                    // offset/size outside `source_data` — assumed validated upstream.
                    PackageResourceBuilder::from_compressed_memory(
                        *rrid,
                        &resource.data_type(),
                        source_data[start_offset..end_offset].to_vec(),
                        decompressed_size,
                        resource.is_scrambled(),
                    )
                    .map_err(|e| PackageBuilderError::CannotDuplicateResource(*rrid, e))?
                }
            };

            builder.with_memory_requirements(
                resource.system_memory_requirement(),
                resource.video_memory_requirement(),
            );

            // Copy references in order; duplicates are preserved.
            for (rrid, flags) in resource.references() {
                builder.with_reference(*rrid, *flags);
            }

            package.with_resource(builder);
        }

        for rrid in resource_package.unneeded_resource_ids() {
            package.with_unneeded_resource(*rrid);
        }

        Ok(package)
    }
579
580    /// Sets the partition ID of the package.
581    pub fn with_partition_id(&mut self, partition_id: &PartitionId) -> &mut Self {
582        self.partition_id = partition_id.clone();
583        self
584    }
585
586    /// Sets the patch ID of the package.
587    pub fn with_patch_id(&mut self, patch_id: &PatchId) -> &mut Self {
588        self.patch_id = *patch_id;
589        self
590    }
591
592    /// When this flag is set it will build the reference flags with the legacy format
593    pub fn use_legacy_references(&mut self) -> &mut Self {
594        self.use_legacy_references = true;
595        self
596    }
597
598    /// Adds a resource to the package.
599    ///
600    /// If a resource with the same resource ID already exists, it will be overwritten.
601    ///
602    /// # Arguments
603    /// * `resource` - The resource to add to the package.
604    pub fn with_resource(&mut self, resource: PackageResourceBuilder) -> &mut Self {
605        self.resources.insert(resource.rrid, resource);
606        self
607    }
608
609    /// Adds an unneeded resource to the package.
610    ///
611    /// # Arguments
612    /// * `rrid` - The resource ID of the resource.
613    pub fn with_unneeded_resource(&mut self, rrid: RuntimeResourceID) -> &mut Self {
614        self.unneeded_resources.insert(rrid);
615        self
616    }
617
618    /// Patches data at a given offset and returns to the previous position.
619    fn backpatch<W: Write + Seek, T: BinWrite + WriteEndian>(
620        writer: &mut W,
621        patch_offset: u64,
622        data: &T,
623    ) -> Result<(), PackageBuilderError>
624    where
625        for<'a> T::Args<'a>: Required,
626    {
627        let current_offset = writer
628            .stream_position()
629            .map_err(PackageBuilderError::IoError)?;
630        writer
631            .seek(io::SeekFrom::Start(patch_offset))
632            .map_err(PackageBuilderError::IoError)?;
633        data.write(writer)
634            .map_err(PackageBuilderError::SerializationError)?;
635        writer
636            .seek(io::SeekFrom::Start(current_offset))
637            .map_err(PackageBuilderError::IoError)?;
638        Ok(())
639    }
640
641    /// Writes the offset table to the given writer.
642    fn write_offset_table<W: Write + Seek>(
643        &self,
644        writer: &mut W,
645    ) -> Result<OffsetTableResult, PackageBuilderError> {
646        // We need to keep a map of rrid => offset to patch the data offsets later.
647        let mut resource_entry_offsets = HashMap::new();
648        let offset_table_start = writer
649            .stream_position()
650            .map_err(PackageBuilderError::IoError)?;
651
652        for (rrid, _) in &self.resources {
653            let current_offset = writer
654                .stream_position()
655                .map_err(PackageBuilderError::IoError)?;
656
657            let resource_entry = PackageOffsetInfo {
658                runtime_resource_id: *rrid,
659                data_offset: 0,
660                flags: PackageOffsetFlags::new(),
661            };
662
663            resource_entry
664                .write(writer)
665                .map_err(PackageBuilderError::SerializationError)?;
666            resource_entry_offsets.insert(*rrid, current_offset);
667        }
668
669        // Write the offset table size.
670        let offset_table_end = writer
671            .stream_position()
672            .map_err(PackageBuilderError::IoError)?;
673        let offset_table_size = offset_table_end - offset_table_start;
674
675        if offset_table_size > u32::MAX as u64 {
676            return Err(PackageBuilderError::TooManyResources);
677        }
678
679        Ok(OffsetTableResult {
680            offset_table_size: offset_table_size as u32,
681            resource_entry_offsets,
682        })
683    }
684
    /// Writes the metadata table to the given writer.
    ///
    /// Emits one `ResourceHeader` per resource, each optionally followed by its
    /// reference table. A header's `references_chunk_size` is written as 0 first
    /// and backpatched after the table is emitted, since the size is only known then.
    fn write_metadata_table<W: Write + Seek>(
        &self,
        writer: &mut W,
        legacy_references: bool,
    ) -> Result<MetadataTableResult, PackageBuilderError> {
        let metadata_table_start = writer
            .stream_position()
            .map_err(PackageBuilderError::IoError)?;

        for (_, resource) in &self.resources {
            // Remember where this header starts so we can backpatch it below.
            let metadata_offset = writer
                .stream_position()
                .map_err(PackageBuilderError::IoError)?;

            // Write the resource metadata followed by the references table if there are any.
            // We set the references chunk size to 0, and we'll patch it later.
            let mut resource_metadata = ResourceHeader {
                resource_type: resource.resource_type,
                references_chunk_size: 0,
                states_chunk_size: 0,
                data_size: resource.blob.size(),
                system_memory_requirement: resource.system_memory_requirement,
                video_memory_requirement: resource.video_memory_requirement,
                references: Vec::new(),
            };

            resource_metadata
                .write(writer)
                .map_err(PackageBuilderError::SerializationError)?;

            // Write the references table if there are any.
            if !resource.references.is_empty() {
                let reference_table_start = writer
                    .stream_position()
                    .map_err(PackageBuilderError::IoError)?;

                let reference_count_and_flags = ResourceReferenceCountAndFlags::new()
                    .with_reference_count(resource.references.len() as u32)
                    .with_is_new_format(!legacy_references)
                    .with_always_true(true);

                reference_count_and_flags
                    .write(writer)
                    .map_err(PackageBuilderError::SerializationError)?;

                // In legacy mode, we write resource ids first, then flags.
                // In new mode, we do the opposite. We also use the appropriate version of the flags.
                if legacy_references {
                    for (rrid, _) in &resource.references {
                        rrid.write(writer)
                            .map_err(PackageBuilderError::SerializationError)?;
                    }

                    for (_, flags) in &resource.references {
                        flags
                            .to_legacy()
                            .write(writer)
                            .map_err(PackageBuilderError::SerializationError)?;
                    }
                } else {
                    for (_, flags) in &resource.references {
                        flags
                            .to_standard()
                            .write(writer)
                            .map_err(PackageBuilderError::SerializationError)?;
                    }

                    for (rrid, _) in &resource.references {
                        rrid.write(writer)
                            .map_err(PackageBuilderError::SerializationError)?;
                    }
                }

                let reference_table_end = writer
                    .stream_position()
                    .map_err(PackageBuilderError::IoError)?;
                let reference_table_size = reference_table_end - reference_table_start;

                // The header stores this size as a u32.
                if reference_table_size > u32::MAX as u64 {
                    return Err(PackageBuilderError::TooManyReferences);
                }

                // Calculate the size and patch the metadata.
                resource_metadata.references_chunk_size = reference_table_size as u32;
                PackageBuilder::backpatch(writer, metadata_offset, &resource_metadata)?;
            }
        }

        // Write the metadata table size.
        let metadata_table_end = writer
            .stream_position()
            .map_err(PackageBuilderError::IoError)?;
        let metadata_table_size = metadata_table_end - metadata_table_start;

        if metadata_table_size > u32::MAX as u64 {
            return Err(PackageBuilderError::TooManyResources);
        }

        Ok(MetadataTableResult {
            metadata_table_size: metadata_table_size as u32,
        })
    }
788
789    /// Builds the package, writing it to the given writer.
790    fn build_internal<W: Write + Seek>(
791        &self,
792        version: PackageVersion,
793        writer: &mut W,
794    ) -> Result<(), PackageBuilderError> {
795        // Perform some basic validation.
796        if !self.unneeded_resources.is_empty() && self.patch_id.is_base() {
797            return Err(PackageBuilderError::UnneededResourcesNotSupported);
798        }
799
800        // First create a base header. We'll fill it and patch it later.
801        let mut header = ResourcePackage {
802            source: None,
803            magic: match version {
804                PackageVersion::RPKGv1 => *b"GKPR",
805                PackageVersion::RPKGv2 => *b"2KPR",
806            },
807            metadata: match version {
808                PackageVersion::RPKGv1 => None,
809                PackageVersion::RPKGv2 => Some(PackageMetadata {
810                    unknown: 1,
811                    chunk_id: self.partition_id.index as u8,
812                    chunk_type: match self.partition_id.part_type {
813                        PartitionType::Addon => ChunkType::Addon,
814                        _ => ChunkType::Standard,
815                    },
816                    patch_id: match self.patch_id {
817                        PatchId::Base => 0,
818                        PatchId::Patch(x) => x as u8,
819                    },
820                    language_tag: *b"xx",
821                }),
822            },
823            header: PackageHeader {
824                file_count: self.resources.len() as u32,
825                offset_table_size: 0,
826                metadata_table_size: 0,
827            },
828            unneeded_resource_count: self.unneeded_resources.len() as u32,
829            unneeded_resources: Some(self.unneeded_resources.iter().copied().collect()),
830            resources: IndexMap::new(),
831        };
832
833        // Write the header and the tables.
834        header
835            .write_args(writer, (self.patch_id.is_patch(),))
836            .map_err(PackageBuilderError::SerializationError)?;
837
838        let offset_table_result = self.write_offset_table(writer)?;
839        let metadata_table_result =
840            self.write_metadata_table(writer, self.use_legacy_references)?;
841
842        // Now that we're done writing the tables, let's patch the header.
843        header.header.offset_table_size = offset_table_result.offset_table_size;
844        header.header.metadata_table_size = metadata_table_result.metadata_table_size;
845        PackageBuilder::backpatch(writer, 0, &header)?;
846
847        // Write the resource data.
848        for (rrid, resource) in &self.resources {
849            let data_offset = writer
850                .stream_position()
851                .map_err(PackageBuilderError::IoError)?;
852
853            let (compressed_size, is_scrambled) = match &resource.blob {
854                PackageResourceBlob::File {
855                    path,
856                    size,
857                    compression_level,
858                    should_scramble,
859                } => {
860                    let mut file = File::open(path).map_err(PackageBuilderError::IoError)?;
861
862                    // Wrap our writer in a XorWriter if we should scramble.
863                    let mut data_writer: Box<dyn Write> = match should_scramble {
864                        true => Box::new(XorWriter { writer }),
865                        false => Box::new(&mut *writer),
866                    };
867
868                    let compressed_size = match compression_level {
869                        Some(level) => {
870                            // TODO: Switch to streaming API.
871                            let mut compressed_buffer =
872                                vec![0; lz4::max_compressed_size(*size as usize)];
873                            let mut decompressed_data = vec![0; *size as usize];
874                            file.read_exact(&mut decompressed_data)
875                                .map_err(PackageBuilderError::IoError)?;
876
877                            let compressed_size = match version {
878                                PackageVersion::RPKGv1 => lz4::compress(
879                                    &decompressed_data,
880                                    &mut compressed_buffer,
881                                    *level,
882                                )?,
883                                PackageVersion::RPKGv2 => lz4_hc::compress(
884                                    &decompressed_data,
885                                    &mut compressed_buffer,
886                                    *level,
887                                )?,
888                            };
889
890                            // Write the compressed data.
891                            data_writer
892                                .write_all(&compressed_buffer[..compressed_size])
893                                .map_err(PackageBuilderError::IoError)?;
894
895                            Some(compressed_size as u32)
896                        }
897
898                        None => {
899                            io::copy(&mut file, &mut data_writer)
900                                .map_err(PackageBuilderError::IoError)?;
901                            None
902                        }
903                    };
904
905                    (compressed_size, *should_scramble)
906                }
907
908                PackageResourceBlob::FileAtOffset {
909                    path,
910                    offset,
911                    size,
912                    compressed_size,
913                    is_scrambled,
914                } => {
915                    let size_to_copy = compressed_size.unwrap_or_else(|| *size);
916
917                    let mut file = File::open(path).map_err(PackageBuilderError::IoError)?;
918                    file.seek(io::SeekFrom::Start(*offset))
919                        .map_err(PackageBuilderError::IoError)?;
920                    io::copy(&mut file.take(size_to_copy as u64), writer)
921                        .map_err(PackageBuilderError::IoError)?;
922
923                    (*compressed_size, *is_scrambled)
924                }
925
926                PackageResourceBlob::CompressedMemory {
927                    data,
928                    decompressed_size,
929                    is_scrambled,
930                } => {
931                    writer
932                        .write_all(data)
933                        .map_err(PackageBuilderError::IoError)?;
934                    let compressed_size = decompressed_size.map(|_| data.len() as u32);
935                    (compressed_size, *is_scrambled)
936                }
937
938                PackageResourceBlob::Memory {
939                    data,
940                    compression_level,
941                    should_scramble,
942                } => {
943                    // Wrap our writer in a XorWriter if we should scramble.
944                    let mut data_writer: Box<dyn Write> = match should_scramble {
945                        true => Box::new(XorWriter { writer }),
946                        false => Box::new(&mut *writer),
947                    };
948
949                    let compressed_size = match compression_level {
950                        Some(level) => {
951                            // TODO: Switch to streaming API.
952                            let mut compressed_buffer =
953                                vec![0; lz4::max_compressed_size(data.len())];
954                            let compressed_size = match version {
955                                PackageVersion::RPKGv1 => {
956                                    lz4::compress(data, &mut compressed_buffer, *level)?
957                                }
958                                PackageVersion::RPKGv2 => {
959                                    lz4_hc::compress(data, &mut compressed_buffer, *level)?
960                                }
961                            };
962
963                            // Write the compressed data.
964                            data_writer
965                                .write_all(&compressed_buffer[..compressed_size])
966                                .map_err(PackageBuilderError::IoError)?;
967
968                            Some(compressed_size as u32)
969                        }
970
971                        None => {
972                            data_writer
973                                .write_all(data)
974                                .map_err(PackageBuilderError::IoError)?;
975                            None
976                        }
977                    };
978
979                    (compressed_size, *should_scramble)
980                }
981            };
982
983            // Patch the offset info.
984            // If the resource is not compressed, we set the compressed size to 0.
985            let final_compressed_size = compressed_size.unwrap_or(0);
986
987            let offset_info = PackageOffsetInfo {
988                runtime_resource_id: *rrid,
989                data_offset,
990                flags: PackageOffsetFlags::new()
991                    .with_compressed_size(final_compressed_size)
992                    .with_is_scrambled(is_scrambled),
993            };
994
995            let patch_offset = offset_table_result.resource_entry_offsets[rrid];
996            PackageBuilder::backpatch(writer, patch_offset, &offset_info)?;
997        }
998
999        Ok(())
1000    }
1001
1002    #[deprecated(since = "1.1.1", note = "use `build_to_file` instead")]
1003    pub fn build(
1004        self,
1005        version: PackageVersion,
1006        output_path: &Path,
1007    ) -> Result<(), PackageBuilderError> {
1008        self.build_to_file(version, output_path)
1009    }
1010
1011    /// Builds the package for the given version and writes it to the given writer.
1012    ///
1013    /// # Arguments
1014    /// * `version` - The version of the package to build.
1015    /// * `writer` - The struct implementing the Write and Seek traits.
1016    pub fn build_to_writer<W: Write + Seek>(self, version: PackageVersion, writer: &mut W) -> Result<(), PackageBuilderError>{
1017        self.build_internal(version, writer)
1018    }
1019    
1020    /// Builds the package for the given version and writes it to the given path.
1021    ///
1022    /// # Arguments
1023    /// * `version` - The version of the package to build.
1024    /// * `output_path` - The path to the output file.
1025    pub fn build_to_file<P: AsRef<Path>>(self, version: PackageVersion, output_path: P) -> Result<(), PackageBuilderError> {
1026        let output_path: &Path = output_path.as_ref();
1027        let output_file = match output_path.is_dir() {
1028            true => output_path.join(self.partition_id.to_filename(self.patch_id)),
1029            false => output_path.to_path_buf(),
1030        };
1031
1032        let file = File::create(output_file).map_err(PackageBuilderError::IoError)?;
1033        let mut writer = BufWriter::new(file);
1034        let result = self.build_internal(version, &mut writer);
1035        writer.flush()?;
1036        result
1037    }
1038
1039    #[deprecated(since = "1.1.1", note = "use `build_to_vec` instead")]
1040    pub fn build_in_memory(self, version: PackageVersion) -> Result<Vec<u8>, PackageBuilderError> {
1041        self.build_to_vec(version)
1042    }
1043
1044    /// Builds the package for the given version and returns it as a byte vector.
1045    ///
1046    /// # Arguments
1047    /// * `version` - The version of the package to build.
1048    pub fn build_to_vec(self, version: PackageVersion) -> Result<Vec<u8>, PackageBuilderError> {
1049        let mut writer = Cursor::new(vec![]);
1050
1051        self.build_internal(version, &mut writer)?;
1052        Ok(writer.into_inner())
1053    }
1054}