1use super::compression::BundleCompression;
7use super::header::BundleHeader;
8use super::types::{AssetBundle, BundleFileInfo, BundleLoadOptions, DirectoryNode};
9use crate::compression::CompressionType;
10use crate::data_view::DataView;
11use crate::error::{BinaryError, Result};
12use crate::reader::{BinaryReader, ByteOrder};
13use crate::shared_bytes::SharedBytes;
14use crate::unity_version::UnityVersion;
15use std::ops::Range;
16
/// Stateless entry point for parsing Unity asset bundles
/// (`UnityFS`, `UnityWeb`, and `UnityRaw` signatures).
pub struct BundleParser;
22
23impl BundleParser {
24 pub fn from_bytes(data: Vec<u8>) -> Result<AssetBundle> {
26 Self::from_bytes_with_options(data, BundleLoadOptions::default())
27 }
28
29 pub fn from_slice(data: &[u8]) -> Result<AssetBundle> {
33 Self::from_slice_with_options(data, BundleLoadOptions::default())
34 }
35
36 pub fn from_shared_range(data: SharedBytes, range: Range<usize>) -> Result<AssetBundle> {
38 Self::from_shared_range_with_options(data, range, BundleLoadOptions::default())
39 }
40
41 pub fn from_shared_range_with_options(
43 data: SharedBytes,
44 range: Range<usize>,
45 options: BundleLoadOptions,
46 ) -> Result<AssetBundle> {
47 let view = DataView::from_shared_range(data, range)?;
48 Self::from_view_with_options(view, options)
49 }
50
51 pub fn from_bytes_with_options(
53 data: Vec<u8>,
54 options: BundleLoadOptions,
55 ) -> Result<AssetBundle> {
56 let shared = SharedBytes::from_vec(data);
57 let len = shared.len();
58 Self::from_shared_range_with_options(shared, 0..len, options)
59 }
60
61 pub fn from_slice_with_options(data: &[u8], options: BundleLoadOptions) -> Result<AssetBundle> {
63 let shared = SharedBytes::from_vec(data.to_vec());
66 let len = shared.len();
67 Self::from_shared_range_with_options(shared, 0..len, options)
68 }
69
    /// Core parse routine: reads the header from `view` and dispatches on its
    /// signature (`UnityFS` vs. legacy `UnityWeb`/`UnityRaw`).
    ///
    /// When `options.validate` is set, the header is validated before parsing
    /// and the whole bundle is validated again after parsing.
    fn from_view_with_options(view: DataView, options: BundleLoadOptions) -> Result<AssetBundle> {
        let bytes = view.as_bytes();
        // Bundle data is big-endian throughout.
        let mut reader = BinaryReader::new(bytes, ByteOrder::Big);

        let header = BundleHeader::from_reader(&mut reader)?;

        if options.validate {
            header.validate()?;
            // Reject headers that claim more data than the view actually holds.
            if header.size > bytes.len() as u64 {
                return Err(BinaryError::invalid_data(format!(
                    "Bundle header size {} exceeds available bytes {}",
                    header.size,
                    bytes.len()
                )));
            }
        }

        let mut bundle = AssetBundle::new_empty(header);
        // Legacy bundles keep a handle to the raw source view so assets can be
        // read from it later (see `load_assets`).
        if bundle.header.is_legacy() {
            bundle.set_legacy_source(view.clone());
        }

        match bundle.header.signature.as_str() {
            "UnityFS" => {
                Self::parse_unity_fs(&mut bundle, &view, &mut reader, &options)?;
            }
            "UnityWeb" | "UnityRaw" => {
                Self::parse_legacy(&mut bundle, &mut reader, &options)?;
            }
            _ => {
                return Err(BinaryError::unsupported(format!(
                    "Unsupported bundle format: {}",
                    bundle.header.signature
                )));
            }
        }

        // Whole-bundle validation runs only after format-specific parsing
        // has populated blocks/nodes/files.
        if options.validate {
            bundle.validate()?;
        }

        Ok(bundle)
    }
114
    /// Parses a `UnityFS` bundle body.
    ///
    /// Always reads the blocks-info table (which also populates the directory
    /// nodes), then either eagerly decompresses all blocks or installs a lazy
    /// source for on-demand decompression, depending on `options`.
    fn parse_unity_fs(
        bundle: &mut AssetBundle,
        source: &DataView,
        reader: &mut BinaryReader,
        options: &BundleLoadOptions,
    ) -> Result<()> {
        // `read_blocks_info` leaves the reader at — and returns — the offset
        // where the block payload data begins.
        let block_data_start = Self::read_blocks_info(bundle, reader, options)?;

        if options.decompress_blocks || options.load_assets {
            // Eager path: decompress every block up front and build the file table.
            let blocks_data = Self::read_blocks(bundle, reader, options)?;
            Self::parse_files(bundle, blocks_data)?;

            if options.load_assets {
                Self::load_assets(bundle, options)?;
            }
        } else {
            // Lazy path: remember where the payload starts so blocks can be
            // decompressed later, subject to the configured memory limits.
            let start_usize = usize::try_from(block_data_start).map_err(|_| {
                BinaryError::ResourceLimitExceeded(
                    "UnityFS block data start does not fit in usize".to_string(),
                )
            })?;
            bundle.set_lazy_unityfs_source(
                source.clone(),
                start_usize,
                options.max_memory,
                options.max_unityfs_block_cache_memory,
                options.max_compressed_block_size,
            )?;
            Self::parse_directory_lazy(bundle, reader)?;
        }

        Ok(())
    }
153
    /// Parses a legacy (`UnityWeb`/`UnityRaw`) bundle body.
    ///
    /// The directory blob starts at `header_size`; for `UnityWeb` it is
    /// LZMA-compressed, for `UnityRaw` it is stored as-is.
    fn parse_legacy(
        bundle: &mut AssetBundle,
        reader: &mut BinaryReader,
        options: &BundleLoadOptions,
    ) -> Result<()> {
        let header_size = bundle.header.header_size() as usize;

        reader.set_position(header_size as u64)?;

        // Size fields read at the start of the directory area.
        let compressed_size = reader.read_u32()?;
        let uncompressed_size = reader.read_u32()?;
        if let Some(max_memory) = options.max_memory
            && (uncompressed_size as u64) > (max_memory as u64)
        {
            return Err(BinaryError::ResourceLimitExceeded(format!(
                "Legacy bundle directory uncompressed size {} exceeds max_memory {}",
                uncompressed_size, max_memory
            )));
        }

        // Version >= 2 headers carry 4 extra bytes here.
        // NOTE(review): the reader position is reset to `header_size` right
        // below, so this skip only acts as an EOF/bounds probe — confirm that
        // is intentional and not a leftover.
        let skip_bytes = if bundle.header.version >= 2 { 4 } else { 0 };
        if skip_bytes > 0 {
            reader.skip_bytes(skip_bytes)?;
        }

        // Rewind to the start of the directory area: the size fields read
        // above are included in the bytes handed to the decompressor.
        reader.set_position(header_size as u64)?;

        if let Some(max) = options.max_legacy_directory_compressed_size
            && (compressed_size as usize) > max
        {
            return Err(BinaryError::ResourceLimitExceeded(format!(
                "Legacy bundle directory compressed size {} exceeds limit {}",
                compressed_size, max
            )));
        }
        let compressed_data = reader.read_bytes(compressed_size as usize)?;
        let directory_data = if bundle.header.signature == "UnityWeb" {
            // First try the declared uncompressed size; if that fails, retry
            // with a generous 4x-compressed-size budget as a fallback.
            crate::compression::decompress(
                &compressed_data,
                CompressionType::Lzma,
                uncompressed_size as usize,
            )
            .or_else(|_| {
                crate::compression::decompress(
                    &compressed_data,
                    CompressionType::Lzma,
                    compressed_data.len().saturating_mul(4),
                )
            })?
        } else {
            // UnityRaw: data is stored uncompressed.
            compressed_data
        };

        Self::parse_legacy_directory(bundle, &directory_data, header_size, options)?;

        if options.load_assets {
            Self::load_assets(bundle, options)?;
        }

        Ok(())
    }
227
    /// Reads and decompresses the UnityFS blocks-info table, populating
    /// `bundle.blocks` and the directory nodes.
    ///
    /// Returns the reader position where the block payload data begins.
    fn read_blocks_info(
        bundle: &mut AssetBundle,
        reader: &mut BinaryReader,
        options: &BundleLoadOptions,
    ) -> Result<u64> {
        if bundle.header.version >= 7 {
            // Format v7+ always aligns the blocks info to a 16-byte boundary.
            reader.align_to(16)?;
        } else if Self::should_probe_legacy_alignment(&bundle.header) {
            // 2019.4+ bundles with an older format version may or may not be
            // aligned. Probe: consume the would-be padding, but rewind if it
            // contains any non-zero byte (then it was real data, not padding).
            let pre_align = reader.position();
            let pad = (16 - (pre_align % 16)) % 16;
            if pad != 0 {
                let align_bytes = reader.read_bytes(pad as usize)?;
                if align_bytes.iter().any(|&b| b != 0) {
                    reader.set_position(pre_align)?;
                }
            }
        }

        let start = reader.position();
        let compressed_size = bundle.header.compressed_blocks_info_size as usize;

        if let Some(max) = options.max_compressed_blocks_info_size
            && compressed_size > max
        {
            return Err(BinaryError::ResourceLimitExceeded(format!(
                "Blocks info compressed size {} exceeds limit {}",
                compressed_size, max
            )));
        }

        let blocks_info_data = if bundle.header.block_info_at_end() {
            // Blocks info is stored at the very end of the file: read it from
            // there, then restore the reader to where we left off.
            let len = reader.len();
            if compressed_size > len {
                return Err(BinaryError::not_enough_data(compressed_size, len));
            }
            let pos = (len - compressed_size) as u64;
            reader.set_position(pos)?;
            let bytes = reader.read_bytes(compressed_size)?;
            reader.set_position(start)?;
            bytes
        } else {
            reader.read_bytes(compressed_size)?
        };

        // Check the header-declared uncompressed size before decompressing so
        // a hostile header cannot force a huge allocation.
        if let Some(max_blocks_info_size) = options.max_blocks_info_size {
            let expected = bundle.header.uncompressed_blocks_info_size as usize;
            if expected > max_blocks_info_size {
                return Err(BinaryError::ResourceLimitExceeded(format!(
                    "Blocks info uncompressed size {} exceeds limit {}",
                    expected, max_blocks_info_size
                )));
            }
        }
        let uncompressed_data = BundleCompression::decompress_blocks_info_limited(
            &bundle.header,
            &blocks_info_data,
            options.max_blocks_info_size,
        )?;

        bundle.blocks =
            BundleCompression::parse_compression_blocks_limited(&uncompressed_data, options)?;

        BundleCompression::validate_blocks(&bundle.blocks)?;

        // Overflow-checked total of all block uncompressed sizes.
        let total_uncompressed = bundle.blocks.iter().try_fold(0u64, |acc, b| {
            acc.checked_add(b.uncompressed_size as u64).ok_or_else(|| {
                BinaryError::ResourceLimitExceeded(
                    "Total uncompressed bundle data size overflow".to_string(),
                )
            })
        })?;
        bundle.set_decompressed_len(total_uncompressed);

        Self::parse_directory_from_blocks_info(bundle, &uncompressed_data, options)?;

        // Some bundles pad before the block payload; honor the header flag.
        if (bundle.header.flags
            & crate::compression::ArchiveFlags::BLOCK_INFO_NEEDS_PADDING_AT_START)
            != 0
        {
            reader.align_to(16)?;
        }

        Ok(reader.position())
    }
322
323 fn should_probe_legacy_alignment(header: &BundleHeader) -> bool {
324 let parsed = match UnityVersion::parse_version(&header.unity_revision)
327 .or_else(|_| UnityVersion::parse_version(&header.unity_version))
328 {
329 Ok(v) => v,
330 Err(_) => return false,
331 };
332 let (major, minor) = (parsed.major, parsed.minor);
333
334 major > 2019 || (major == 2019 && minor >= 4)
336 }
337
338 fn read_blocks(
340 bundle: &AssetBundle,
341 reader: &mut BinaryReader,
342 options: &BundleLoadOptions,
343 ) -> Result<Vec<u8>> {
344 if let Some(limit) = options.max_compressed_block_size {
345 for block in &bundle.blocks {
346 if (block.compressed_size as u64) > (limit as u64) {
347 return Err(BinaryError::ResourceLimitExceeded(format!(
348 "Block compressed size {} exceeds max_compressed_block_size {}",
349 block.compressed_size, limit
350 )));
351 }
352 }
353 }
354 BundleCompression::decompress_data_blocks_limited(
355 &bundle.header,
356 &bundle.blocks,
357 reader,
358 options.max_memory,
359 )
360 }
361
362 fn parse_files(bundle: &mut AssetBundle, blocks_data: Vec<u8>) -> Result<()> {
364 bundle.set_decompressed_data(blocks_data);
366
367 for node in &bundle.nodes {
369 let file_info = BundleFileInfo::new(node.name.clone(), node.offset, node.size);
370 bundle.files.push(file_info);
371 }
372
373 Ok(())
374 }
375
    /// Lazy-path directory hook. Intentionally a no-op: by the time this is
    /// called from `parse_unity_fs`, the directory nodes have already been
    /// populated by `parse_directory_from_blocks_info` (via `read_blocks_info`).
    fn parse_directory_lazy(_bundle: &mut AssetBundle, _reader: &mut BinaryReader) -> Result<()> {
        Ok(())
    }
387
    /// Parses the directory-node table out of the already-decompressed
    /// blocks-info buffer, enforcing the configured count limits and
    /// validating every node's offset/size range.
    fn parse_directory_from_blocks_info(
        bundle: &mut AssetBundle,
        blocks_info_data: &[u8],
        options: &BundleLoadOptions,
    ) -> Result<()> {
        let mut reader = BinaryReader::new(blocks_info_data, ByteOrder::Big);

        // Skip the leading 16 bytes (presumably a data hash — not used here).
        reader.read_bytes(16)?;

        let block_count_i32 = reader.read_i32()?;
        if block_count_i32 < 0 {
            return Err(BinaryError::invalid_data(format!(
                "Negative compression block count: {}",
                block_count_i32
            )));
        }
        let block_count: usize = block_count_i32 as usize;
        if block_count > options.max_blocks {
            return Err(BinaryError::ResourceLimitExceeded(format!(
                "Compression block count {} exceeds limit {}",
                block_count, options.max_blocks
            )));
        }
        // Skip the block table itself (10 bytes per entry, overflow-checked);
        // it was already parsed by `parse_compression_blocks_limited`.
        let bytes_to_skip = block_count
            .checked_mul(10)
            .ok_or_else(|| BinaryError::invalid_data("Compression block table size overflow"))?;
        reader.skip_bytes(bytes_to_skip)?;

        let node_count_i32 = reader.read_i32()?;
        if node_count_i32 < 0 {
            return Err(BinaryError::invalid_data(format!(
                "Negative directory node count: {}",
                node_count_i32
            )));
        }
        let node_count: usize = node_count_i32 as usize;
        if node_count > options.max_nodes {
            return Err(BinaryError::ResourceLimitExceeded(format!(
                "Directory node count {} exceeds limit {}",
                node_count, options.max_nodes
            )));
        }

        // Upper bound for node ranges: the total decompressed payload size.
        let total_uncompressed: u64 = bundle
            .blocks
            .iter()
            .map(|b| b.uncompressed_size as u64)
            .sum();

        for _i in 0..node_count {
            let offset_i64 = reader.read_i64()?;
            if offset_i64 < 0 {
                return Err(BinaryError::invalid_data(format!(
                    "Negative directory node offset: {}",
                    offset_i64
                )));
            }
            let size_i64 = reader.read_i64()?;
            if size_i64 < 0 {
                return Err(BinaryError::invalid_data(format!(
                    "Negative directory node size: {}",
                    size_i64
                )));
            }
            let offset = offset_i64 as u64;
            let size = size_i64 as u64;
            // Reject nodes whose range overflows or extends past the payload.
            let end = offset
                .checked_add(size)
                .ok_or_else(|| BinaryError::invalid_data("Directory node offset+size overflow"))?;
            if end > total_uncompressed {
                return Err(BinaryError::invalid_data(format!(
                    "Directory node exceeds decompressed data: end {} > {}",
                    end, total_uncompressed
                )));
            }
            let flags = reader.read_u32()?;
            let name = reader.read_cstring()?;

            let node = DirectoryNode::new(name, offset, size, flags);
            bundle.nodes.push(node);
        }

        Ok(())
    }
477
    /// Alternative directory parser reading nodes (u64 offset, u64 size,
    /// u32 flags, cstring name) straight from `data`.
    ///
    /// NOTE(review): unlike `parse_directory_from_blocks_info`, this path has
    /// no `max_nodes` limit and no offset/size bounds validation — add those
    /// before wiring it into a live code path (it is currently dead code).
    #[allow(dead_code)]
    fn parse_directory_from_data(bundle: &mut AssetBundle, data: &[u8]) -> Result<()> {
        let mut reader = BinaryReader::new(data, ByteOrder::Big);

        reader.set_position(0)?;

        let node_count_i32 = reader.read_i32()?;
        if node_count_i32 < 0 {
            return Err(BinaryError::invalid_data(format!(
                "Negative directory node count: {}",
                node_count_i32
            )));
        }
        let node_count = node_count_i32 as usize;

        for _ in 0..node_count {
            let offset = reader.read_u64()?;
            let size = reader.read_u64()?;
            let flags = reader.read_u32()?;
            let name = reader.read_cstring()?;

            let node = DirectoryNode::new(name, offset, size, flags);
            bundle.nodes.push(node);
        }

        Ok(())
    }
510
    /// Parses the legacy directory table (cstring name, u32 offset, u32 size
    /// per entry) from the decompressed directory blob.
    fn parse_legacy_directory(
        bundle: &mut AssetBundle,
        directory_data: &[u8],
        header_size: usize,
        options: &BundleLoadOptions,
    ) -> Result<()> {
        let mut dir_reader = BinaryReader::new(directory_data, ByteOrder::Big);
        // The file table starts `header_size` bytes into the blob.
        dir_reader.set_position(header_size as u64)?;
        let file_count_i32 = dir_reader.read_i32()?;
        if file_count_i32 < 0 {
            return Err(BinaryError::invalid_data(format!(
                "Negative legacy bundle file count: {}",
                file_count_i32
            )));
        }
        let file_count: usize = file_count_i32 as usize;
        if file_count > options.max_nodes {
            return Err(BinaryError::ResourceLimitExceeded(format!(
                "Legacy bundle file count {} exceeds limit {}",
                file_count, options.max_nodes
            )));
        }

        for _ in 0..file_count {
            let name = dir_reader.read_cstring()?;
            let offset = dir_reader.read_u32()? as u64;
            let size = dir_reader.read_u32()? as u64;

            let file_info = BundleFileInfo::new(name.clone(), offset, size);
            bundle.files.push(file_info);

            // Flags fixed to 0x4 — presumably the "is file" flag consumed by
            // `DirectoryNode::is_file` in `load_assets`; confirm against the
            // DirectoryNode definition.
            let node = DirectoryNode::new(name, offset, size, 0x4);
            bundle.nodes.push(node);
        }

        Ok(())
    }
553
    /// Attempts to parse every file node as a serialized-asset file and
    /// appends the successes to `bundle.assets` / `bundle.asset_names`.
    ///
    /// The backing store is the decompressed payload for `UnityFS` bundles,
    /// or the original source view for legacy bundles. Nodes that fail to
    /// parse are skipped silently (best effort).
    fn load_assets(bundle: &mut AssetBundle, options: &BundleLoadOptions) -> Result<()> {
        let (backing, base_offset, visible_len) = if bundle.header.is_unity_fs() {
            // UnityFS: node offsets are relative to the decompressed payload.
            let backing = crate::shared_bytes::SharedBytes::from_arc(bundle.data_arc()?);
            let visible_len = backing.len() as u64;
            (backing, 0usize, visible_len)
        } else {
            // Legacy: node offsets are relative to the stored source view,
            // which is rebased via `base_offset` onto its backing buffer.
            let view = bundle.legacy_source().ok_or_else(|| {
                BinaryError::invalid_data("Legacy bundle source is not available")
            })?;
            let visible_len = view.len() as u64;
            (view.backing_shared(), view.base_offset(), visible_len)
        };

        // Clone the node list so we can push into `bundle.assets` while iterating.
        let nodes = bundle.nodes.clone();

        for node in &nodes {
            if !node.is_file() {
                continue;
            }

            // Resource blobs hold raw data, not serialized asset files.
            if node.name.ends_with(".resS") || node.name.ends_with(".resource") {
                continue;
            }

            // Saturating add: an overflowing range is caught by the bound check.
            let end = node.offset.saturating_add(node.size);
            if end > visible_len {
                return Err(BinaryError::invalid_data(format!(
                    "Bundle node '{}' exceeds decompressed data: end {} > {}",
                    node.name, end, visible_len
                )));
            }

            if let Some(max_memory) = options.max_memory
                && node.size > max_memory as u64
            {
                return Err(BinaryError::ResourceLimitExceeded(format!(
                    "Bundle node '{}' size {} exceeds max_memory {}",
                    node.name, node.size, max_memory
                )));
            }

            // Convert the node range to usize and rebase it onto the backing
            // buffer, rejecting any overflow along the way.
            let start = usize::try_from(node.offset).map_err(|_| {
                BinaryError::ResourceLimitExceeded(format!(
                    "Bundle node '{}' offset {} does not fit in usize",
                    node.name, node.offset
                ))
            })?;
            let end = usize::try_from(end).map_err(|_| {
                BinaryError::ResourceLimitExceeded(format!(
                    "Bundle node '{}' end {} does not fit in usize",
                    node.name, end
                ))
            })?;

            let abs_start = base_offset.checked_add(start).ok_or_else(|| {
                BinaryError::ResourceLimitExceeded(format!(
                    "Bundle node '{}' absolute start overflow",
                    node.name
                ))
            })?;
            let abs_end = base_offset.checked_add(end).ok_or_else(|| {
                BinaryError::ResourceLimitExceeded(format!(
                    "Bundle node '{}' absolute end overflow",
                    node.name
                ))
            })?;

            // Best effort: a node that is not a valid serialized file is
            // skipped rather than failing the whole bundle.
            if let Ok(serialized_file) = crate::asset::SerializedFileParser::from_shared_range(
                backing.clone(),
                abs_start..abs_end,
            ) {
                bundle.assets.push(serialized_file);
                bundle.asset_names.push(node.name.clone());
            }
        }

        Ok(())
    }
636
637 pub fn estimate_complexity(data: &[u8]) -> Result<ParsingComplexity> {
639 let mut reader = BinaryReader::new(data, ByteOrder::Big);
640 let header = BundleHeader::from_reader(&mut reader)?;
641
642 let complexity = match header.signature.as_str() {
643 "UnityFS" => {
644 let compression_type = header.compression_type()?;
645 let has_compression = compression_type != CompressionType::None;
646
647 ParsingComplexity {
648 format: "UnityFS".to_string(),
649 estimated_time: if has_compression { "Medium" } else { "Fast" }.to_string(),
650 memory_usage: header.size,
651 has_compression,
652 block_count: 0, }
654 }
655 "UnityWeb" | "UnityRaw" => ParsingComplexity {
656 format: header.signature.clone(),
657 estimated_time: "Fast".to_string(),
658 memory_usage: header.size,
659 has_compression: header.signature == "UnityWeb",
660 block_count: 1,
661 },
662 _ => {
663 return Err(BinaryError::unsupported(format!(
664 "Unknown bundle format: {}",
665 header.signature
666 )));
667 }
668 };
669
670 Ok(complexity)
671 }
672}
673
/// Rough cost estimate for parsing a bundle, produced by
/// `BundleParser::estimate_complexity` from the header alone.
#[derive(Debug, Clone)]
pub struct ParsingComplexity {
    /// Bundle signature ("UnityFS", "UnityWeb", or "UnityRaw").
    pub format: String,
    /// Coarse time bucket: "Fast" or "Medium".
    pub estimated_time: String,
    /// Header-declared total bundle size in bytes.
    pub memory_usage: u64,
    /// Whether the payload data is compressed.
    pub has_compression: bool,
    /// Block count: 0 for UnityFS (table not read at estimation time),
    /// 1 for legacy bundles.
    pub block_count: usize,
}
683
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke-test placeholder: `BundleParser` is a stateless unit struct, so
    // there is no construction to exercise here.
    #[test]
    fn test_parser_creation() {
        let _dummy = 1 + 1;
        assert_eq!(_dummy, 2);
    }

    // A node whose offset/size range lies outside the bundle's data must be
    // rejected with `InvalidData` rather than read out of bounds.
    #[test]
    fn load_assets_rejects_out_of_bounds_node() {
        let header = BundleHeader {
            signature: "UnityFS".to_string(),
            ..Default::default()
        };
        let mut bundle = AssetBundle::new(header, vec![0u8; 8]);
        bundle
            .nodes
            .push(DirectoryNode::new("a.assets".to_string(), 1024, 4, 0x4));

        let err =
            BundleParser::load_assets(&mut bundle, &BundleLoadOptions::default()).unwrap_err();
        assert!(matches!(err, BinaryError::InvalidData(_)));
    }
}