use super::header::BundleHeader;
use crate::asset::Asset;
use crate::compression::CompressionBlock;
use crate::data_view::DataView;
use crate::error::{BinaryError, Result};
use crate::reader::{BinaryReader, ByteOrder};
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::sync::{Arc, Mutex, OnceLock};

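/// Per-block LRU cache used by lazily loaded UnityFS bundles.
///
/// `compressed_starts` / `uncompressed_starts` hold prefix sums of the block
/// sizes, so a byte range in the decompressed stream can be mapped to the
/// blocks that cover it without rescanning the block list. Cached blocks are
/// evicted in LRU order; `lru` holds `(block_index, tick)` entries, and an
/// entry is only honored when its tick matches `last_tick[block_index]`,
/// which lets stale entries be skipped instead of being removed eagerly.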
#[derive(Debug)]
struct UnityFsBlockCache {
    source: DataView,
    block_data_start: usize,
    max_memory: Option<usize>,
    max_block_cache_memory: Option<usize>,
    max_compressed_block_size: Option<usize>,
    compressed_starts: Vec<u64>,
    uncompressed_starts: Vec<u64>,
    cached: Vec<Option<Arc<[u8]>>>,
    cached_bytes: usize,
    cached_blocks: usize,
    tick: u64,
    last_tick: Vec<u64>,
    lru: VecDeque<(usize, u64)>,
}

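/// Deferred whole-bundle decompression: everything needed to decompress the
/// UnityFS data blocks on first access via `data_checked`.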
#[derive(Debug, Clone)]
struct LazyDecompress {
    source: DataView,
    block_data_start: usize,
    max_memory: Option<usize>,
    max_compressed_block_size: Option<usize>,
}

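/// Metadata for a single file stored inside a bundle: its name plus its byte
/// offset and size within the decompressed bundle data.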
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct BundleFileInfo {
    pub offset: u64,
    pub size: u64,
    pub name: String,
}

impl BundleFileInfo {
    pub fn new(name: String, offset: u64, size: u64) -> Self {
        Self { name, offset, size }
    }

    pub fn is_valid(&self) -> bool {
        !self.name.is_empty() && self.size > 0
    }

    pub fn end_offset(&self) -> u64 {
        self.offset.saturating_add(self.size)
    }
}

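/// A directory entry from the bundle's node table. The `flags` bits are
/// interpreted as in the accessors below: bit 0x4 marks a file entry and bit
/// 0x2 marks a compressed entry.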
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct DirectoryNode {
    pub name: String,
    pub offset: u64,
    pub size: u64,
    pub flags: u32,
}

impl DirectoryNode {
    pub fn new(name: String, offset: u64, size: u64, flags: u32) -> Self {
        Self {
            name,
            offset,
            size,
            flags,
        }
    }

    pub fn is_file(&self) -> bool {
        (self.flags & 0x4) != 0
    }

    pub fn is_directory(&self) -> bool {
        !self.is_file()
    }

    pub fn is_compressed(&self) -> bool {
        (self.flags & 0x2) != 0
    }

    pub fn end_offset(&self) -> u64 {
        self.offset.saturating_add(self.size)
    }
}

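/// A parsed Unity asset bundle. Decompressed bytes are materialized at most
/// once behind a `OnceLock`; lazily loaded UnityFS bundles keep a
/// `LazyDecompress` source (for full decompression on demand) and a
/// `UnityFsBlockCache` (for per-block range extraction) until then.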
#[derive(Debug)]
pub struct AssetBundle {
    pub header: BundleHeader,
    pub blocks: Vec<CompressionBlock>,
    pub nodes: Vec<DirectoryNode>,
    pub files: Vec<BundleFileInfo>,
    pub assets: Vec<Asset>,
    pub asset_names: Vec<String>,
    legacy_source: Option<DataView>,
    decompressed: OnceLock<Arc<[u8]>>,
    decompress_lock: Mutex<()>,
    lazy: Mutex<Option<LazyDecompress>>,
    unityfs_cache: Mutex<Option<UnityFsBlockCache>>,
    decompressed_len: u64,
}

impl AssetBundle {
    pub fn new(header: BundleHeader, data: Vec<u8>) -> Self {
        let decompressed_len = data.len() as u64;
        let decompressed: Arc<[u8]> = data.into();
        let lock = OnceLock::new();
        let _ = lock.set(decompressed);
        Self {
            header,
            blocks: Vec::new(),
            nodes: Vec::new(),
            files: Vec::new(),
            assets: Vec::new(),
            asset_names: Vec::new(),
            legacy_source: None,
            decompressed: lock,
            decompress_lock: Mutex::new(()),
            lazy: Mutex::new(None),
            unityfs_cache: Mutex::new(None),
            decompressed_len,
        }
    }

    pub(crate) fn new_empty(header: BundleHeader) -> Self {
        Self {
            header,
            blocks: Vec::new(),
            nodes: Vec::new(),
            files: Vec::new(),
            assets: Vec::new(),
            asset_names: Vec::new(),
            legacy_source: None,
            decompressed: OnceLock::new(),
            decompress_lock: Mutex::new(()),
            lazy: Mutex::new(None),
            unityfs_cache: Mutex::new(None),
            decompressed_len: 0,
        }
    }

    pub(crate) fn set_decompressed_len(&mut self, len: u64) {
        self.decompressed_len = len;
    }

    pub(crate) fn set_legacy_source(&mut self, source: DataView) {
        self.legacy_source = Some(source);
    }

    pub(crate) fn legacy_source(&self) -> Option<&DataView> {
        self.legacy_source.as_ref()
    }

    pub(crate) fn set_lazy_unityfs_source(
        &mut self,
        source: DataView,
        block_data_start: usize,
        max_memory: Option<usize>,
        max_block_cache_memory: Option<usize>,
        max_compressed_block_size: Option<usize>,
    ) -> Result<()> {
        if block_data_start > source.len() {
            return Err(BinaryError::invalid_data(format!(
                "UnityFS block data start {} exceeds available bytes {}",
                block_data_start,
                source.len()
            )));
        }
        let available_compressed = (source.len() - block_data_start) as u64;

        let mut guard = self.lazy.lock().unwrap();
        *guard = Some(LazyDecompress {
            source,
            block_data_start,
            max_memory,
            max_compressed_block_size,
        });

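        // Precompute each block's starting offset in both the compressed and
        // the decompressed streams, so byte ranges can later be mapped onto
        // blocks without walking the block list from scratch.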
        let mut compressed_starts = Vec::with_capacity(self.blocks.len());
        let mut uncompressed_starts = Vec::with_capacity(self.blocks.len());
        let mut comp_cursor: u64 = 0;
        let mut uncomp_cursor: u64 = 0;
        for block in &self.blocks {
            if let Some(limit) = max_compressed_block_size
                && (block.compressed_size as u64) > (limit as u64)
            {
                return Err(BinaryError::ResourceLimitExceeded(format!(
                    "Block compressed size {} exceeds max_compressed_block_size {}",
                    block.compressed_size, limit
                )));
            }
            compressed_starts.push(comp_cursor);
            uncompressed_starts.push(uncomp_cursor);
            comp_cursor = comp_cursor
                .checked_add(block.compressed_size as u64)
                .ok_or_else(|| BinaryError::invalid_data("Total compressed size overflow"))?;
            uncomp_cursor = uncomp_cursor
                .checked_add(block.uncompressed_size as u64)
                .ok_or_else(|| BinaryError::invalid_data("Total uncompressed size overflow"))?;
        }
        if comp_cursor > available_compressed {
            return Err(BinaryError::invalid_data(format!(
                "Total compressed block bytes {} exceed available bytes {}",
                comp_cursor, available_compressed
            )));
        }

        let mut cache_guard = self.unityfs_cache.lock().unwrap();
        *cache_guard = Some(UnityFsBlockCache {
            source: guard.as_ref().unwrap().source.clone(),
            block_data_start,
            max_memory,
            max_block_cache_memory,
            max_compressed_block_size,
            compressed_starts,
            uncompressed_starts,
            cached: vec![None; self.blocks.len()],
            cached_bytes: 0,
            cached_blocks: 0,
            tick: 0,
            last_tick: vec![0; self.blocks.len()],
            lru: VecDeque::new(),
        });

        Ok(())
    }

    pub(crate) fn set_decompressed_data(&mut self, data: Vec<u8>) {
        self.decompressed_len = data.len() as u64;
        let arc: Arc<[u8]> = data.into();
        let _ = self.decompressed.set(arc);
        let mut guard = self.lazy.lock().unwrap();
        *guard = None;
        let mut cache_guard = self.unityfs_cache.lock().unwrap();
        *cache_guard = None;
    }

    fn extract_range_unityfs(&self, offset: u64, size: u64) -> Result<Vec<u8>> {
        let end = offset
            .checked_add(size)
            .ok_or_else(|| BinaryError::invalid_data("Range offset+size overflow"))?;
        if end > self.decompressed_len {
            return Err(BinaryError::invalid_data(
                "Requested range exceeds decompressed bundle data",
            ));
        }
        let len_usize = usize::try_from(size).map_err(|_| {
            BinaryError::ResourceLimitExceeded("Requested range does not fit in usize".to_string())
        })?;

        let mut cache_guard = self.unityfs_cache.lock().unwrap();
        let cache = cache_guard.as_mut().ok_or_else(|| {
            BinaryError::invalid_data("Bundle data is not available (no UnityFS lazy cache)")
        })?;

        if let Some(limit) = cache.max_memory
            && size > limit as u64
        {
            return Err(BinaryError::ResourceLimitExceeded(format!(
                "Requested range size {} exceeds max_memory {}",
                size, limit
            )));
        }

        let mut out = vec![0u8; len_usize];
        let mut copied = 0usize;

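        // Visit blocks in order; decompress and cache only those overlapping
        // the requested [offset, end) range, copying the overlap into `out`.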
        for (idx, block) in self.blocks.iter().enumerate() {
            let block_start = cache.uncompressed_starts[idx];
            let block_end = block_start
                .checked_add(block.uncompressed_size as u64)
                .ok_or_else(|| BinaryError::invalid_data("Block uncompressed range overflow"))?;

            if block_end <= offset || block_start >= end {
                continue;
            }

            if cache.cached[idx].is_none() {
                if let Some(limit) = cache.max_memory
                    && (block.uncompressed_size as usize) > limit
                {
                    return Err(BinaryError::ResourceLimitExceeded(format!(
                        "Block uncompressed size {} exceeds max_memory {}",
                        block.uncompressed_size, limit
                    )));
                }
                if let Some(limit) = cache.max_block_cache_memory
                    && (block.uncompressed_size as usize) > limit
                {
                    return Err(BinaryError::ResourceLimitExceeded(format!(
                        "Block uncompressed size {} exceeds max_unityfs_block_cache_memory {}",
                        block.uncompressed_size, limit
                    )));
                }
                if let Some(limit) = cache.max_compressed_block_size
                    && (block.compressed_size as usize) > limit
                {
                    return Err(BinaryError::ResourceLimitExceeded(format!(
                        "Block compressed size {} exceeds max_compressed_block_size {}",
                        block.compressed_size, limit
                    )));
                }

                let mut reader = BinaryReader::new(cache.source.as_bytes(), ByteOrder::Big);
                let comp_start = cache.compressed_starts[idx]
                    .checked_add(cache.block_data_start as u64)
                    .ok_or_else(|| BinaryError::invalid_data("Block compressed start overflow"))?;
                reader.set_position(comp_start)?;
                let compressed = reader.read_bytes(block.compressed_size as usize)?;
                let decompressed = block.decompress(&compressed)?;
                let arc: Arc<[u8]> = decompressed.into();
                let arc_len = arc.len();
                cache.cached[idx] = Some(arc);
                cache.cached_bytes = cache.cached_bytes.checked_add(arc_len).ok_or_else(|| {
                    BinaryError::ResourceLimitExceeded(
                        "UnityFS block cache size overflow".to_string(),
                    )
                })?;
                cache.cached_blocks = cache.cached_blocks.saturating_add(1);
            }

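            // Touch this block in the LRU: stamp it with a fresh tick and push
            // a new queue entry. Older entries for the same block become stale
            // and are skipped during eviction via the `last_tick` check.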
            cache.tick = cache.tick.wrapping_add(1);
            cache.last_tick[idx] = cache.tick;
            cache.lru.push_back((idx, cache.tick));

            if let Some(limit) = cache.max_block_cache_memory {
                while cache.cached_bytes > limit {
                    let Some((evict_idx, evict_tick)) = cache.lru.pop_front() else {
                        break;
                    };
                    if cache.last_tick[evict_idx] != evict_tick {
                        continue;
                    }
                    if let Some(data) = cache.cached[evict_idx].take() {
                        cache.cached_bytes = cache.cached_bytes.saturating_sub(data.len());
                        cache.cached_blocks = cache.cached_blocks.saturating_sub(1);
                    }
                }

                if cache.cached_bytes > limit {
                    return Err(BinaryError::ResourceLimitExceeded(format!(
                        "UnityFS block cache memory {} exceeds max_unityfs_block_cache_memory {}",
                        cache.cached_bytes, limit
                    )));
                }
            }

            let data = cache.cached[idx]
                .as_ref()
                .ok_or_else(|| BinaryError::generic("Failed to materialize block cache"))?;

            let copy_start = std::cmp::max(offset, block_start);
            let copy_end = std::cmp::min(end, block_end);
            let src_start = usize::try_from(copy_start - block_start).map_err(|_| {
                BinaryError::ResourceLimitExceeded(
                    "Block-relative start does not fit in usize".to_string(),
                )
            })?;
            let src_end = usize::try_from(copy_end - block_start).map_err(|_| {
                BinaryError::ResourceLimitExceeded(
                    "Block-relative end does not fit in usize".to_string(),
                )
            })?;
            let dst_start = usize::try_from(copy_start - offset).map_err(|_| {
                BinaryError::ResourceLimitExceeded(
                    "Output-relative start does not fit in usize".to_string(),
                )
            })?;
            let dst_end = dst_start + (src_end - src_start);

            out[dst_start..dst_end].copy_from_slice(&data[src_start..src_end]);
            copied += src_end - src_start;
            if copied == len_usize {
                break;
            }
        }

        if copied != len_usize {
            return Err(BinaryError::invalid_data(
                "Failed to extract full range from UnityFS blocks",
            ));
        }

        Ok(out)
    }

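    /// Returns the fully decompressed bundle bytes, performing whole-bundle
    /// decompression on first use for lazily loaded UnityFS bundles. Prefer
    /// `extract_file_data` / `extract_node_data` when only a sub-range is
    /// needed; those decompress only the blocks that overlap the range.
    ///
    /// A minimal sketch (assumes `bundle` was obtained from this crate's
    /// bundle loader; the surrounding setup is illustrative):
    ///
    /// ```ignore
    /// let data = bundle.data_checked()?; // forces full decompression if lazy
    /// assert_eq!(data.len() as u64, bundle.size());
    /// ```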
    pub fn data_checked(&self) -> Result<&[u8]> {
        if let Some(bytes) = self.decompressed.get() {
            return Ok(bytes.as_ref());
        }

        if self.header.is_legacy() {
            return self
                .legacy_source
                .as_ref()
                .map(|v| v.as_bytes())
                .ok_or_else(|| BinaryError::invalid_data("Legacy bundle source is not available"));
        }

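        // Double-checked locking: serialize decompression, then re-check the
        // OnceLock in case another thread finished while we waited.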
        let _guard = self.decompress_lock.lock().unwrap();
        if let Some(bytes) = self.decompressed.get() {
            return Ok(bytes.as_ref());
        }

        let lazy = self.lazy.lock().unwrap().clone().ok_or_else(|| {
            BinaryError::invalid_data(
                "Bundle data is not available (not decompressed and no source)",
            )
        })?;

        if let Some(limit) = lazy.max_compressed_block_size {
            for block in &self.blocks {
                if (block.compressed_size as u64) > (limit as u64) {
                    return Err(BinaryError::ResourceLimitExceeded(format!(
                        "Block compressed size {} exceeds max_compressed_block_size {}",
                        block.compressed_size, limit
                    )));
                }
            }
        }

        let mut reader = BinaryReader::new(lazy.source.as_bytes(), ByteOrder::Big);
        reader.set_position(lazy.block_data_start as u64)?;
        let data = super::compression::BundleCompression::decompress_data_blocks_limited(
            &self.header,
            &self.blocks,
            &mut reader,
            lazy.max_memory,
        )?;
        let arc: Arc<[u8]> = data.into();
        let _ = self.decompressed.set(arc);
        let mut cache_guard = self.unityfs_cache.lock().unwrap();
        *cache_guard = None;

        Ok(self
            .decompressed
            .get()
            .ok_or_else(|| BinaryError::generic("Failed to initialize decompressed bundle data"))?
            .as_ref())
    }

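    /// Returns the decompressed bytes if already materialized (or the legacy
    /// source bytes), and an empty slice otherwise. Use `data_checked` to
    /// force decompression instead of getting an empty slice.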
    pub fn data(&self) -> &[u8] {
        self.decompressed
            .get()
            .map(|v| v.as_ref())
            .or_else(|| self.legacy_source.as_ref().map(|v| v.as_bytes()))
            .unwrap_or(&[])
    }

    pub fn data_arc(&self) -> Result<Arc<[u8]>> {
        let _ = self.data_checked()?;
        self.decompressed
            .get()
            .cloned()
            .ok_or_else(|| BinaryError::generic("Decompressed bundle data missing"))
    }

    pub fn size(&self) -> u64 {
        if let Some(bytes) = self.decompressed.get() {
            bytes.len() as u64
        } else if self.header.is_legacy() {
            self.legacy_source
                .as_ref()
                .map(|v| v.len() as u64)
                .unwrap_or(0)
        } else {
            self.decompressed_len
        }
    }

    pub fn is_compressed(&self) -> bool {
        !self.blocks.is_empty()
            && self.blocks.iter().any(|block| {
                block
                    .compression_type()
                    .unwrap_or(crate::compression::CompressionType::None)
                    != crate::compression::CompressionType::None
            })
    }

    pub fn file_count(&self) -> usize {
        self.files.len()
    }

    pub fn asset_count(&self) -> usize {
        self.assets.len()
    }

    pub fn find_file(&self, name: &str) -> Option<&BundleFileInfo> {
        self.files.iter().find(|file| file.name == name)
    }

    pub fn find_node(&self, name: &str) -> Option<&DirectoryNode> {
        self.nodes.iter().find(|node| node.name == name)
    }

    pub fn file_names(&self) -> Vec<&str> {
        self.files.iter().map(|file| file.name.as_str()).collect()
    }

    pub fn node_names(&self) -> Vec<&str> {
        self.nodes.iter().map(|node| node.name.as_str()).collect()
    }

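    /// Extracts a file's bytes by copy. If the bundle is already decompressed
    /// (or legacy), this slices the in-memory data; otherwise it goes through
    /// the UnityFS block cache and decompresses only the blocks overlapping
    /// the file's range.
    ///
    /// A minimal sketch (the file name is illustrative):
    ///
    /// ```ignore
    /// if let Some(info) = bundle.find_file("CAB-1234567890abcdef") {
    ///     let bytes = bundle.extract_file_data(info)?;
    ///     assert_eq!(bytes.len() as u64, info.size);
    /// }
    /// ```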
    pub fn extract_file_data(&self, file: &BundleFileInfo) -> Result<Vec<u8>> {
        if self.decompressed.get().is_some() {
            let bytes = self.extract_file_slice(file)?;
            return Ok(bytes.to_vec());
        }

        if self.header.is_legacy() {
            let bytes = self.extract_file_slice(file)?;
            return Ok(bytes.to_vec());
        }

        self.extract_range_unityfs(file.offset, file.size)
    }

    pub fn extract_file_slice(&self, file: &BundleFileInfo) -> Result<&[u8]> {
        let end_u64 = file
            .offset
            .checked_add(file.size)
            .ok_or_else(|| BinaryError::invalid_data("File offset+size overflow"))?;
        let data = self.data_checked()?;
        if end_u64 > data.len() as u64 {
            return Err(BinaryError::invalid_data(
                "File offset/size exceeds bundle data",
            ));
        }

        let start = usize::try_from(file.offset).map_err(|_| {
            BinaryError::ResourceLimitExceeded("File offset does not fit in usize".to_string())
        })?;
        let end = usize::try_from(end_u64).map_err(|_| {
            BinaryError::ResourceLimitExceeded("File end offset does not fit in usize".to_string())
        })?;
        if start > end {
            return Err(BinaryError::invalid_data("File slice start exceeds end"));
        }
        Ok(&data[start..end])
    }

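    /// Extracts a directory node's bytes by copy; the lazy/legacy dispatch
    /// mirrors `extract_file_data`.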
    pub fn extract_node_data(&self, node: &DirectoryNode) -> Result<Vec<u8>> {
        if self.decompressed.get().is_some() {
            let bytes = self.extract_node_slice(node)?;
            return Ok(bytes.to_vec());
        }

        if self.header.is_legacy() {
            let bytes = self.extract_node_slice(node)?;
            return Ok(bytes.to_vec());
        }

        self.extract_range_unityfs(node.offset, node.size)
    }

    pub fn extract_node_slice(&self, node: &DirectoryNode) -> Result<&[u8]> {
        let end_u64 = node
            .offset
            .checked_add(node.size)
            .ok_or_else(|| BinaryError::invalid_data("Node offset+size overflow"))?;
        let data = self.data_checked()?;
        if end_u64 > data.len() as u64 {
            return Err(BinaryError::invalid_data(
                "Node offset/size exceeds bundle data",
            ));
        }

        let start = usize::try_from(node.offset).map_err(|_| {
            BinaryError::ResourceLimitExceeded("Node offset does not fit in usize".to_string())
        })?;
        let end = usize::try_from(end_u64).map_err(|_| {
            BinaryError::ResourceLimitExceeded("Node end offset does not fit in usize".to_string())
        })?;
        if start > end {
            return Err(BinaryError::invalid_data("Node slice start exceeds end"));
        }
        Ok(&data[start..end])
    }

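    /// Summarizes sizes and counts; `compression_ratio` is compressed over
    /// uncompressed bytes (1.0 when there is no uncompressed data).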
    pub fn statistics(&self) -> BundleStatistics {
        let total_compressed_size: u64 = self.blocks.iter().map(|b| b.compressed_size as u64).sum();
        let total_uncompressed_size: u64 =
            self.blocks.iter().map(|b| b.uncompressed_size as u64).sum();

        BundleStatistics {
            total_size: self.size(),
            header_size: self.header.header_size(),
            compressed_size: total_compressed_size,
            uncompressed_size: total_uncompressed_size,
            compression_ratio: if total_uncompressed_size > 0 {
                total_compressed_size as f64 / total_uncompressed_size as f64
            } else {
                1.0
            },
            file_count: self.file_count(),
            asset_count: self.asset_count(),
            block_count: self.blocks.len(),
            node_count: self.nodes.len(),
        }
    }

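    /// Validates the header and checks that every file and node range fits
    /// within the bundle without arithmetic overflow.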
    pub fn validate(&self) -> Result<()> {
        self.header.validate()?;

        for file in &self.files {
            if file.offset.checked_add(file.size).is_none() {
                return Err(BinaryError::invalid_data(format!(
                    "File '{}' offset+size overflow",
                    file.name
                )));
            }
            if file.end_offset() > self.size() {
                return Err(BinaryError::invalid_data(format!(
                    "File '{}' exceeds bundle size",
                    file.name
                )));
            }
        }

        for node in &self.nodes {
            if node.offset.checked_add(node.size).is_none() {
                return Err(BinaryError::invalid_data(format!(
                    "Node '{}' offset+size overflow",
                    node.name
                )));
            }
            if node.end_offset() > self.size() {
                return Err(BinaryError::invalid_data(format!(
                    "Node '{}' exceeds bundle size",
                    node.name
                )));
            }
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::compression::CompressionBlock;
    use crate::data_view::DataView;
    use crate::shared_bytes::SharedBytes;

    #[test]
    fn unityfs_extract_node_data_is_lazy_and_supports_cross_block_ranges() {
        let header = BundleHeader {
            signature: "UnityFS".to_string(),
            ..Default::default()
        };

        let mut bundle = AssetBundle::new_empty(header);
        bundle.blocks = vec![
            CompressionBlock::new(5, 5, 0),
            CompressionBlock::new(5, 5, 0),
        ];
        bundle.set_decompressed_len(10);

        let bytes: Vec<u8> = (0u8..10u8).collect();
        let view = DataView::from_shared(SharedBytes::from_vec(bytes));
        bundle
            .set_lazy_unityfs_source(view, 0, None, None, None)
            .unwrap();

        let node = DirectoryNode::new("test.bin".to_string(), 3, 6, 0x4);
        let out = bundle.extract_node_data(&node).unwrap();
        assert_eq!(out, vec![3, 4, 5, 6, 7, 8]);

        // The whole-bundle data must not have been materialized.
        assert!(bundle.decompressed.get().is_none());
        assert!(bundle.data().is_empty());
    }

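    // A sketch of the eviction path, assuming (as the test above does) that
    // CompressionBlock::new(5, 5, 0) describes a stored, uncompressed 5-byte
    // block: with a 5-byte cache budget, inserting the second block must
    // evict the first, and the extracted range must still be correct.
    #[test]
    fn unityfs_block_cache_evicts_lru_blocks_under_memory_budget() {
        let header = BundleHeader {
            signature: "UnityFS".to_string(),
            ..Default::default()
        };

        let mut bundle = AssetBundle::new_empty(header);
        bundle.blocks = vec![
            CompressionBlock::new(5, 5, 0),
            CompressionBlock::new(5, 5, 0),
        ];
        bundle.set_decompressed_len(10);

        let bytes: Vec<u8> = (0u8..10u8).collect();
        let view = DataView::from_shared(SharedBytes::from_vec(bytes));
        bundle
            .set_lazy_unityfs_source(view, 0, None, Some(5), None)
            .unwrap();

        // The range spans both blocks, but at most one block (5 bytes) may
        // remain cached at any time.
        let out = bundle.extract_range_unityfs(0, 10).unwrap();
        assert_eq!(out, (0u8..10u8).collect::<Vec<u8>>());

        let cache_guard = bundle.unityfs_cache.lock().unwrap();
        let cache = cache_guard.as_ref().unwrap();
        assert!(cache.cached_bytes <= 5);
        assert!(cache.cached_blocks <= 1);
    }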
}

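/// Aggregate size and count statistics for a bundle, as produced by
/// `AssetBundle::statistics`.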
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BundleStatistics {
    pub total_size: u64,
    pub header_size: u64,
    pub compressed_size: u64,
    pub uncompressed_size: u64,
    pub compression_ratio: f64,
    pub file_count: usize,
    pub asset_count: usize,
    pub block_count: usize,
    pub node_count: usize,
}

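/// Options controlling how a bundle is loaded. The `max_*` limits are
/// resource-exhaustion guards: `None` disables the corresponding check, while
/// `Some(n)` rejects inputs that would exceed `n` (bytes for the memory and
/// size limits, item counts for `max_blocks` / `max_nodes`).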
#[derive(Debug, Clone)]
pub struct BundleLoadOptions {
    pub load_assets: bool,
    pub decompress_blocks: bool,
    pub validate: bool,
    pub max_memory: Option<usize>,
    pub max_unityfs_block_cache_memory: Option<usize>,
    pub max_compressed_blocks_info_size: Option<usize>,
    pub max_blocks_info_size: Option<usize>,
    pub max_legacy_directory_compressed_size: Option<usize>,
    pub max_compressed_block_size: Option<usize>,
    pub max_blocks: usize,
    pub max_nodes: usize,
}

impl Default for BundleLoadOptions {
    fn default() -> Self {
        Self {
            load_assets: true,
            decompress_blocks: false,
            validate: true,
            max_memory: Some(1024 * 1024 * 1024),
            max_unityfs_block_cache_memory: Some(1024 * 1024 * 1024),
            max_compressed_blocks_info_size: Some(64 * 1024 * 1024),
            max_blocks_info_size: Some(64 * 1024 * 1024),
            max_legacy_directory_compressed_size: Some(64 * 1024 * 1024),
            max_compressed_block_size: Some(1024 * 1024 * 1024),
            max_blocks: 1_000_000,
            max_nodes: 1_000_000,
        }
    }
}

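/// Preset configurations. A minimal sketch of choosing one (only the option
/// values are defined in this file; how the loader consumes them is assumed):
///
/// ```ignore
/// let opts = BundleLoadOptions::lazy();     // defer decompression, keep limits
/// let opts = BundleLoadOptions::fast();     // skip validation, disable limits
/// let opts = BundleLoadOptions::complete(); // eager decompression, higher limits
/// ```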
impl BundleLoadOptions {
    pub fn lazy() -> Self {
        Self {
            load_assets: false,
            decompress_blocks: false,
            validate: true,
            ..Default::default()
        }
    }

    pub fn fast() -> Self {
        Self {
            load_assets: false,
            decompress_blocks: false,
            validate: false,
            max_memory: None,
            max_unityfs_block_cache_memory: None,
            max_compressed_blocks_info_size: None,
            max_blocks_info_size: None,
            max_legacy_directory_compressed_size: None,
            max_compressed_block_size: None,
            max_blocks: usize::MAX,
            max_nodes: usize::MAX,
        }
    }

    pub fn complete() -> Self {
        Self {
            load_assets: true,
            decompress_blocks: true,
            validate: true,
            max_memory: Some(2048 * 1024 * 1024),
            max_unityfs_block_cache_memory: Some(2048 * 1024 * 1024),
            max_compressed_blocks_info_size: Some(128 * 1024 * 1024),
            max_blocks_info_size: Some(128 * 1024 * 1024),
            max_legacy_directory_compressed_size: Some(128 * 1024 * 1024),
            max_compressed_block_size: Some(2048 * 1024 * 1024),
            max_blocks: 2_000_000,
            max_nodes: 2_000_000,
        }
    }
}