1use crate::decompress::rar5::Rar5Decoder;
48use crate::decompress::Rar29Decoder;
49use crate::error::{RarError, Result};
50use crate::file_media::ReadInterval;
51use crate::parsing::RarVersion;
52use crate::rar_file_chunk::RarFileChunk;
53use std::sync::{Arc, Mutex};
54
/// Maps one chunk's position in the logical (concatenated) packed byte
/// stream back to its index in `InnerFile::chunks`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ChunkMapEntry {
    /// Index of the chunk inside `InnerFile::chunks` (and `chunk_map`).
    pub index: usize,
    /// First logical byte offset covered by this chunk (inclusive).
    pub start: u64,
    /// Last logical byte offset covered by this chunk (inclusive).
    pub end: u64,
}
68
/// Per-file encryption parameters read from the archive headers.
/// Only compiled when the `crypto` feature is enabled.
#[cfg(feature = "crypto")]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum EncryptionInfo {
    /// RAR5 encryption parameters (see `Rar5Crypto::derive_key`).
    Rar5 {
        /// 16-byte key-derivation salt from the file header.
        salt: [u8; 16],
        /// 16-byte cipher initialization vector.
        init_v: [u8; 16],
        /// log2 of the key-derivation iteration count.
        lg2_count: u8,
    },
    /// Legacy RAR4 encryption parameters (see `Rar4Crypto::derive_key`).
    Rar4 {
        /// 8-byte key-derivation salt from the file header.
        salt: [u8; 8],
    },
}
91
/// A single file stored inside a (possibly multi-volume) RAR archive.
///
/// The packed payload may be split across several `RarFileChunk`s (one per
/// volume); `chunk_map` records where each chunk falls in the logical
/// concatenated stream so offsets can be resolved with a binary search.
#[derive(Debug)]
pub struct InnerFile {
    /// File name as stored in the archive.
    pub name: String,

    /// Logical length in bytes: the summed packed size for plain stored
    /// entries, the declared unpacked size otherwise (see constructors).
    pub length: u64,

    // Packed data split across volumes, in stream order.
    chunks: Vec<RarFileChunk>,
    // Parallel to `chunks`: logical [start, end] offsets for binary search.
    chunk_map: Vec<ChunkMapEntry>,
    // Compression method byte (RAR4: 0x30 = stored; RAR5: 0 = stored).
    method: u8,
    // log2 of the RAR5 dictionary size (22 is used as the default).
    dict_size_log: u8,
    // Total packed bytes (sum of all chunk lengths).
    packed_size: u64,
    // Unpacked size as declared by the archive header.
    unpacked_size: u64,
    // Archive format this entry came from (RAR4 vs RAR5).
    rar_version: RarVersion,
    // True when the entry is part of a solid block (depends on prior files).
    is_solid: bool,
    // Lazily-filled cache of the fully decompressed payload.
    decompressed_cache: Mutex<Option<Arc<Vec<u8>>>>,
    // Encryption parameters from the header, if the entry is encrypted.
    #[cfg(feature = "crypto")]
    encryption: Option<EncryptionInfo>,
    // Password supplied by the caller, needed to decrypt.
    #[cfg(feature = "crypto")]
    password: Option<String>,
}
150
151impl InnerFile {
152 pub fn new(
154 name: String,
155 chunks: Vec<RarFileChunk>,
156 method: u8,
157 unpacked_size: u64,
158 rar_version: RarVersion,
159 ) -> Self {
160 Self::new_with_solid_dict(name, chunks, method, 22, unpacked_size, rar_version, false)
161 }
162
163 pub fn new_with_solid(
165 name: String,
166 chunks: Vec<RarFileChunk>,
167 method: u8,
168 unpacked_size: u64,
169 rar_version: RarVersion,
170 is_solid: bool,
171 ) -> Self {
172 Self::new_with_solid_dict(
173 name,
174 chunks,
175 method,
176 22,
177 unpacked_size,
178 rar_version,
179 is_solid,
180 )
181 }
182
183 pub fn new_with_solid_dict(
185 name: String,
186 chunks: Vec<RarFileChunk>,
187 method: u8,
188 dict_size_log: u8,
189 unpacked_size: u64,
190 rar_version: RarVersion,
191 is_solid: bool,
192 ) -> Self {
193 let packed_size: u64 = chunks.iter().map(|c| c.length()).sum();
194 let chunk_map = Self::calculate_chunk_map(&chunks);
195
196 let length = if method == 0x30 || method == 0 {
200 packed_size
201 } else {
202 unpacked_size
203 };
204
205 Self {
206 name,
207 length,
208 chunks,
209 chunk_map,
210 method,
211 dict_size_log,
212 packed_size,
213 unpacked_size,
214 rar_version,
215 is_solid,
216 decompressed_cache: Mutex::new(None),
217 #[cfg(feature = "crypto")]
218 encryption: None,
219 #[cfg(feature = "crypto")]
220 password: None,
221 }
222 }
223
224 #[cfg(feature = "crypto")]
226 pub fn new_encrypted(
227 name: String,
228 chunks: Vec<RarFileChunk>,
229 method: u8,
230 unpacked_size: u64,
231 rar_version: RarVersion,
232 encryption: Option<EncryptionInfo>,
233 password: Option<String>,
234 ) -> Self {
235 Self::new_encrypted_with_solid_dict(
236 name,
237 chunks,
238 method,
239 22, unpacked_size,
241 rar_version,
242 encryption,
243 password,
244 false,
245 )
246 }
247
248 #[cfg(feature = "crypto")]
250 #[allow(clippy::too_many_arguments)]
251 pub fn new_encrypted_with_solid(
252 name: String,
253 chunks: Vec<RarFileChunk>,
254 method: u8,
255 unpacked_size: u64,
256 rar_version: RarVersion,
257 encryption: Option<EncryptionInfo>,
258 password: Option<String>,
259 is_solid: bool,
260 ) -> Self {
261 Self::new_encrypted_with_solid_dict(
262 name,
263 chunks,
264 method,
265 22,
266 unpacked_size,
267 rar_version,
268 encryption,
269 password,
270 is_solid,
271 )
272 }
273
274 #[cfg(feature = "crypto")]
276 #[allow(clippy::too_many_arguments)]
277 pub fn new_encrypted_with_solid_dict(
278 name: String,
279 chunks: Vec<RarFileChunk>,
280 method: u8,
281 dict_size_log: u8,
282 unpacked_size: u64,
283 rar_version: RarVersion,
284 encryption: Option<EncryptionInfo>,
285 password: Option<String>,
286 is_solid: bool,
287 ) -> Self {
288 let packed_size: u64 = chunks.iter().map(|c| c.length()).sum();
289 let chunk_map = Self::calculate_chunk_map(&chunks);
290
291 let length = if encryption.is_some() {
294 unpacked_size
295 } else if method == 0x30 || method == 0 {
296 packed_size
297 } else {
298 unpacked_size
299 };
300
301 Self {
302 name,
303 length,
304 chunks,
305 chunk_map,
306 method,
307 dict_size_log,
308 packed_size,
309 unpacked_size,
310 rar_version,
311 is_solid,
312 decompressed_cache: Mutex::new(None),
313 encryption,
314 password,
315 }
316 }
317
318 #[cfg(feature = "crypto")]
320 pub fn is_encrypted(&self) -> bool {
321 self.encryption.is_some()
322 }
323
324 pub fn is_solid(&self) -> bool {
326 self.is_solid
327 }
328
329 pub fn is_compressed(&self) -> bool {
331 match self.rar_version {
332 RarVersion::Rar4 => self.method != 0x30,
333 RarVersion::Rar5 => self.method != 0, }
335 }
336
337 fn calculate_chunk_map(chunks: &[RarFileChunk]) -> Vec<ChunkMapEntry> {
338 let mut map = Vec::with_capacity(chunks.len());
339 let mut offset = 0u64;
340
341 for (index, chunk) in chunks.iter().enumerate() {
342 let start = offset;
343 let len = chunk.length();
344 let end = if len > 0 { offset + len - 1 } else { offset };
345 map.push(ChunkMapEntry { index, start, end });
346 offset = end + 1;
347 }
348
349 map
350 }
351
352 #[inline]
355 pub fn find_chunk_index(&self, offset: u64) -> Option<usize> {
356 if offset >= self.length {
357 return None;
358 }
359
360 let idx = self.chunk_map.partition_point(|entry| entry.end < offset);
362
363 if idx < self.chunk_map.len() && self.chunk_map[idx].start <= offset {
364 Some(idx)
365 } else {
366 None
367 }
368 }
369
    /// Returns the chunk-map entry at `index`, if any.
    #[inline]
    pub fn get_chunk_entry(&self, index: usize) -> Option<&ChunkMapEntry> {
        self.chunk_map.get(index)
    }

    /// Returns the chunk at `index`, if any.
    #[inline]
    pub fn get_chunk(&self, index: usize) -> Option<&RarFileChunk> {
        self.chunks.get(index)
    }

    /// Number of chunks (volumes) this file is split across.
    #[inline]
    pub fn chunk_count(&self) -> usize {
        self.chunks.len()
    }
387
388 pub async fn read_to_end(&self) -> Result<Vec<u8>> {
390 if self.is_compressed() {
391 let data = self.read_decompressed().await?;
392 Ok((*data).clone())
393 } else {
394 self.read_raw_range(0, self.length.saturating_sub(1)).await
395 }
396 }
397
398 pub async fn read_to_end_shared(&self) -> Result<Arc<Vec<u8>>> {
404 if self.is_compressed() {
405 self.read_decompressed().await
406 } else {
407 let data = self
408 .read_raw_range(0, self.length.saturating_sub(1))
409 .await?;
410 Ok(Arc::new(data))
411 }
412 }
413
414 async fn read_raw_range(&self, start: u64, end: u64) -> Result<Vec<u8>> {
416 if start > end {
417 return Ok(Vec::new());
418 }
419
420 let packed_len = self.packed_size;
421 let actual_end = end.min(packed_len.saturating_sub(1));
422
423 if start >= packed_len {
424 return Ok(Vec::new());
425 }
426
427 let start_idx = self
428 .find_chunk_index(start)
429 .ok_or(RarError::InvalidOffset {
430 offset: start,
431 length: packed_len,
432 })?;
433 let end_idx = self
434 .find_chunk_index(actual_end)
435 .ok_or(RarError::InvalidOffset {
436 offset: actual_end,
437 length: packed_len,
438 })?;
439
440 let mut result = Vec::with_capacity((actual_end - start + 1) as usize);
441
442 for i in start_idx..=end_idx {
443 let entry = &self.chunk_map[i];
444 let chunk = &self.chunks[i];
445
446 let chunk_start = if i == start_idx {
447 start - entry.start
448 } else {
449 0
450 };
451 let chunk_end = if i == end_idx {
452 actual_end - entry.start
453 } else {
454 chunk.length().saturating_sub(1)
455 };
456
457 let data = chunk.read_range(chunk_start, chunk_end).await?;
458 result.extend_from_slice(&data);
459 }
460
461 Ok(result)
462 }
463
464 async fn read_all_raw(&self) -> Result<Vec<u8>> {
466 let capacity_hint = (self.packed_size as usize).min(64 * 1024 * 1024);
469 let mut result = Vec::with_capacity(capacity_hint);
470 for chunk in &self.chunks {
471 let data = chunk
472 .read_range(0, chunk.length().saturating_sub(1))
473 .await?;
474 result.extend_from_slice(&data);
475 }
476 Ok(result)
477 }
478
    /// Returns the fully decrypted and decompressed payload, computing it
    /// at most once and caching the result behind an `Arc`.
    pub async fn read_decompressed(&self) -> Result<Arc<Vec<u8>>> {
        // Fast path: a previous call already produced the payload.
        {
            let cache = self.decompressed_cache.lock().unwrap();
            if let Some(ref data) = *cache {
                return Ok(Arc::clone(data));
            }
        }

        // `mut` is only needed when the crypto feature decrypts in place.
        #[allow(unused_mut)]
        let mut packed = self.read_all_raw().await?;

        // Decrypt in place before any decompression.
        #[cfg(feature = "crypto")]
        if let Some(ref enc) = self.encryption {
            let password = self.password.as_ref().ok_or(RarError::PasswordRequired)?;

            match enc {
                EncryptionInfo::Rar5 {
                    salt,
                    init_v,
                    lg2_count,
                } => {
                    use crate::crypto::Rar5Crypto;
                    let crypto = Rar5Crypto::derive_key(password, salt, *lg2_count);
                    crypto
                        .decrypt(init_v, &mut packed)
                        .map_err(|e| RarError::DecryptionFailed(e.to_string()))?;
                }
                EncryptionInfo::Rar4 { salt } => {
                    use crate::crypto::Rar4Crypto;
                    let crypto = Rar4Crypto::derive_key(password, salt);
                    crypto
                        .decrypt(&mut packed)
                        .map_err(|e| RarError::DecryptionFailed(e.to_string()))?;
                }
            }
        }

        let decompressed = if !self.is_compressed() {
            // Stored data: decryption can leave padding past the real
            // payload, so trim to the declared unpacked size.
            #[cfg(feature = "crypto")]
            if self.encryption.is_some() {
                packed.truncate(self.unpacked_size as usize);
            }
            packed
        } else {
            match self.rar_version {
                RarVersion::Rar4 => {
                    let mut decoder = Rar29Decoder::new();
                    decoder.decompress(&packed, self.unpacked_size)?
                }
                RarVersion::Rar5 => {
                    let mut decoder = Rar5Decoder::with_dict_size(self.dict_size_log);
                    // Non-solid data with methods 1..=5 can use the
                    // parallel pipeline; solid data must stay serial.
                    #[cfg(feature = "parallel")]
                    if !self.is_solid && self.method >= 1 && self.method <= 5 {
                        decoder.decompress_pipeline(&packed, self.unpacked_size)?
                    } else {
                        decoder.decompress(
                            &packed,
                            self.unpacked_size,
                            self.method,
                            self.is_solid,
                        )?
                    }
                    #[cfg(not(feature = "parallel"))]
                    decoder.decompress(&packed, self.unpacked_size, self.method, self.is_solid)?
                }
            }
        };
        let decompressed = Arc::new(decompressed);

        // Publish to the cache. A concurrent caller may have raced us here;
        // last writer wins, and both values are identical.
        {
            let mut cache = self.decompressed_cache.lock().unwrap();
            *cache = Some(Arc::clone(&decompressed));
        }

        Ok(decompressed)
    }
564
565 pub async fn read_range(&self, interval: ReadInterval) -> Result<Vec<u8>> {
568 let start = interval.start;
569 let end = interval.end;
570
571 if start > end || end >= self.length {
572 return Err(RarError::InvalidOffset {
573 offset: end,
574 length: self.length,
575 });
576 }
577
578 if self.is_compressed() {
579 let decompressed = self.read_decompressed().await?;
581 let start_usize = start as usize;
582 let end_usize = (end + 1) as usize;
583 if end_usize > decompressed.len() {
584 return Err(RarError::InvalidOffset {
585 offset: end,
586 length: self.length,
587 });
588 }
589 return Ok(decompressed[start_usize..end_usize].to_vec());
590 }
591
592 let start_idx = self
593 .find_chunk_index(start)
594 .ok_or(RarError::InvalidOffset {
595 offset: start,
596 length: self.length,
597 })?;
598 let end_idx = self.find_chunk_index(end).ok_or(RarError::InvalidOffset {
599 offset: end,
600 length: self.length,
601 })?;
602
603 let mut result = Vec::with_capacity((end - start + 1) as usize);
605
606 for i in start_idx..=end_idx {
607 let entry = &self.chunk_map[i];
608 let chunk = &self.chunks[i];
609
610 let chunk_start = if i == start_idx {
612 start - entry.start
613 } else {
614 0
615 };
616 let chunk_end = if i == end_idx {
617 end - entry.start
618 } else {
619 chunk.length().saturating_sub(1)
620 };
621
622 let data = chunk.read_range(chunk_start, chunk_end).await?;
623 result.extend_from_slice(&data);
624 }
625
626 Ok(result)
627 }
628
    /// Creates a pull-based stream over logical bytes `start..=end`.
    pub fn stream_range(&self, start: u64, end: u64) -> InnerFileStream<'_> {
        InnerFileStream::new(self, start, end)
    }
634
635 pub fn get_chunk_ranges(&self, start: u64, end: u64) -> Vec<(usize, u64, u64)> {
638 let start_idx = match self.find_chunk_index(start) {
639 Some(i) => i,
640 None => return vec![],
641 };
642 let end_idx = match self.find_chunk_index(end) {
643 Some(i) => i,
644 None => return vec![],
645 };
646
647 let mut ranges = Vec::with_capacity(end_idx - start_idx + 1);
648
649 for i in start_idx..=end_idx {
650 let entry = &self.chunk_map[i];
651 let chunk = &self.chunks[i];
652
653 let chunk_start = if i == start_idx {
654 start - entry.start
655 } else {
656 0
657 };
658 let chunk_end = if i == end_idx {
659 end - entry.start
660 } else {
661 chunk.length().saturating_sub(1)
662 };
663
664 let abs_start = chunk.start_offset + chunk_start;
666 let abs_end = chunk.start_offset + chunk_end;
667
668 ranges.push((i, abs_start, abs_end));
669 }
670
671 ranges
672 }
673
674 pub fn translate_offset(&self, offset: u64) -> Option<(usize, u64)> {
677 let idx = self.find_chunk_index(offset)?;
678 let entry = &self.chunk_map[idx];
679 let chunk = &self.chunks[idx];
680
681 let offset_in_chunk = offset - entry.start;
682 let volume_offset = chunk.start_offset + offset_in_chunk;
683
684 Some((idx, volume_offset))
685 }
686}
687
/// Pull-based reader over a logical byte range of an `InnerFile`, yielding
/// (at most) one underlying chunk's worth of data per call.
pub struct InnerFileStream<'a> {
    // File being streamed.
    inner_file: &'a InnerFile,
    // Next logical offset to read (inclusive).
    current_offset: u64,
    // Last logical offset to read (inclusive, clamped to the file length).
    end_offset: u64,
    // Chunk containing `current_offset`, if any.
    current_chunk_idx: Option<usize>,
    // Set once the range is exhausted or a read failed.
    done: bool,
}
697
698impl<'a> InnerFileStream<'a> {
699 pub fn new(inner_file: &'a InnerFile, start: u64, end: u64) -> Self {
700 let current_chunk_idx = inner_file.find_chunk_index(start);
701 Self {
702 inner_file,
703 current_offset: start,
704 end_offset: end.min(inner_file.length.saturating_sub(1)),
705 current_chunk_idx,
706 done: start > end || start >= inner_file.length,
707 }
708 }
709
    /// Reads and returns the next slice of the range — the remainder of the
    /// current chunk, or up to the range's end within it.
    ///
    /// Returns `None` once exhausted; after an `Err` the stream marks
    /// itself done and yields nothing further.
    pub async fn next_chunk(&mut self) -> Option<Result<Vec<u8>>> {
        if self.done {
            return None;
        }

        let chunk_idx = self.current_chunk_idx?;
        let entry = self.inner_file.get_chunk_entry(chunk_idx)?;
        let chunk = self.inner_file.get_chunk(chunk_idx)?;

        // Translate logical offsets into offsets relative to this chunk.
        let chunk_start = self.current_offset - entry.start;
        let chunk_end = if self.end_offset <= entry.end {
            // The requested range ends inside this chunk.
            self.end_offset - entry.start
        } else {
            // Otherwise read through the end of the chunk.
            chunk.length().saturating_sub(1)
        };

        let result = chunk.read_range(chunk_start, chunk_end).await;

        match &result {
            Ok(data) => {
                // Advance by however many bytes were actually returned.
                self.current_offset += data.len() as u64;

                if self.current_offset > self.end_offset {
                    self.done = true;
                } else {
                    // Move to the next chunk; stop if there is none.
                    self.current_chunk_idx = Some(chunk_idx + 1);
                    if chunk_idx + 1 >= self.inner_file.chunk_count() {
                        self.done = true;
                    }
                }
            }
            Err(_) => {
                // A failed read poisons the stream.
                self.done = true;
            }
        }

        Some(result)
    }
753
754 pub fn remaining(&self) -> u64 {
756 if self.done {
757 0
758 } else {
759 self.end_offset.saturating_sub(self.current_offset) + 1
760 }
761 }
762
763 pub fn position(&self) -> u64 {
765 self.current_offset
766 }
767}
768
/// Precomputed description of one chunk's portion of a streamed range, in
/// both logical (in-file) and absolute (in-volume) coordinates.
#[derive(Debug, Clone)]
pub struct StreamChunkInfo {
    /// Index into the file's chunk list.
    pub chunk_index: usize,
    /// First logical file byte covered by this piece (inclusive).
    pub logical_start: u64,
    /// Last logical file byte covered by this piece (inclusive).
    pub logical_end: u64,
    /// Absolute start offset within the chunk's source volume.
    pub volume_start: u64,
    /// Absolute end offset within the chunk's source volume (inclusive).
    pub volume_end: u64,
    /// Number of bytes in this piece.
    pub size: u64,
}
779
780impl InnerFile {
781 pub fn get_stream_chunks(&self, start: u64, end: u64) -> Vec<StreamChunkInfo> {
784 let start_idx = match self.find_chunk_index(start) {
785 Some(i) => i,
786 None => return vec![],
787 };
788 let end_idx = match self.find_chunk_index(end) {
789 Some(i) => i,
790 None => return vec![],
791 };
792
793 let mut infos = Vec::with_capacity(end_idx - start_idx + 1);
794
795 for i in start_idx..=end_idx {
796 let entry = &self.chunk_map[i];
797 let chunk = &self.chunks[i];
798
799 let logical_start = if i == start_idx { start } else { entry.start };
800 let logical_end = if i == end_idx { end } else { entry.end };
801
802 let offset_in_chunk_start = logical_start - entry.start;
803 let offset_in_chunk_end = logical_end - entry.start;
804
805 infos.push(StreamChunkInfo {
806 chunk_index: i,
807 logical_start,
808 logical_end,
809 volume_start: chunk.start_offset + offset_in_chunk_start,
810 volume_end: chunk.start_offset + offset_in_chunk_end,
811 size: logical_end - logical_start + 1,
812 });
813 }
814
815 infos
816 }
817}
818
819#[cfg(test)]
820mod tests {
821 use super::*;
822 use crate::file_media::{FileMedia, ReadInterval};
823 use std::sync::Arc;
824
    /// In-memory stand-in for a RAR volume, backed by a byte vector.
    struct MockFileMedia {
        name: String,
        // Cached `data.len()` as u64.
        length: u64,
        data: Vec<u8>,
    }

    impl MockFileMedia {
        /// Wraps `data` as a named mock volume.
        fn new(name: &str, data: Vec<u8>) -> Self {
            Self {
                name: name.to_string(),
                length: data.len() as u64,
                data,
            }
        }
    }
841
    impl FileMedia for MockFileMedia {
        fn length(&self) -> u64 {
            self.length
        }

        fn name(&self) -> &str {
            &self.name
        }

        // Serves reads straight from the in-memory buffer. The end bound is
        // clamped to the buffer length; a start past the buffer would panic
        // the slice, which is acceptable for a test double.
        fn read_range(
            &self,
            interval: ReadInterval,
        ) -> std::pin::Pin<
            Box<dyn std::future::Future<Output = crate::error::Result<Vec<u8>>> + Send + '_>,
        > {
            let start = interval.start as usize;
            let end = (interval.end + 1) as usize;
            let data = self.data[start..end.min(self.data.len())].to_vec();
            Box::pin(async move { Ok(data) })
        }
    }
863
    /// Builds one chunk per requested size; chunk `i` is filled with byte
    /// value `i`. Sizes must be >= 1 — a zero would underflow `size - 1`.
    fn create_test_chunks(chunk_sizes: &[u64]) -> Vec<RarFileChunk> {
        let mut chunks = Vec::new();
        for (i, &size) in chunk_sizes.iter().enumerate() {
            let data = vec![i as u8; size as usize];
            let media = Arc::new(MockFileMedia::new(&format!("chunk{}.rar", i), data));
            chunks.push(RarFileChunk::new(media, 0, size - 1));
        }
        chunks
    }
874
    // A stored (method 0x30) file of one 1000-byte chunk: every in-range
    // offset resolves to chunk 0; offsets at/after the end return None.
    #[test]
    fn test_binary_search_single_chunk() {
        let chunks = create_test_chunks(&[1000]);
        let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);

        assert_eq!(file.length, 1000);
        assert_eq!(file.find_chunk_index(0), Some(0));
        assert_eq!(file.find_chunk_index(500), Some(0));
        assert_eq!(file.find_chunk_index(999), Some(0));
        assert_eq!(file.find_chunk_index(1000), None);
    }

    // Three 100-byte chunks: check first byte, interior and last byte of
    // each chunk, plus the one-past-the-end offset.
    #[test]
    fn test_binary_search_multiple_chunks() {
        let chunks = create_test_chunks(&[100, 100, 100]);
        let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);

        assert_eq!(file.length, 300);

        assert_eq!(file.find_chunk_index(0), Some(0));
        assert_eq!(file.find_chunk_index(50), Some(0));
        assert_eq!(file.find_chunk_index(99), Some(0));

        assert_eq!(file.find_chunk_index(100), Some(1));
        assert_eq!(file.find_chunk_index(150), Some(1));
        assert_eq!(file.find_chunk_index(199), Some(1));

        assert_eq!(file.find_chunk_index(200), Some(2));
        assert_eq!(file.find_chunk_index(250), Some(2));
        assert_eq!(file.find_chunk_index(299), Some(2));

        assert_eq!(file.find_chunk_index(300), None);
    }

    // 100 chunks of 1000 bytes: exercise the binary search at every
    // chunk's first, middle and last byte.
    #[test]
    fn test_binary_search_many_chunks() {
        let chunk_sizes: Vec<u64> = vec![1000; 100];
        let chunks = create_test_chunks(&chunk_sizes);
        let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);

        assert_eq!(file.length, 100_000);

        for i in 0..100 {
            let offset = i * 1000;
            assert_eq!(file.find_chunk_index(offset), Some(i as usize));
            assert_eq!(file.find_chunk_index(offset + 500), Some(i as usize));
            assert_eq!(file.find_chunk_index(offset + 999), Some(i as usize));
        }
    }
931
    // Each chunk's mock media starts at volume offset 0, so the volume
    // offset equals the offset within the owning chunk.
    #[test]
    fn test_translate_offset() {
        let chunks = create_test_chunks(&[100, 100, 100]);
        let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);

        let (idx, vol_offset) = file.translate_offset(0).unwrap();
        assert_eq!(idx, 0);
        assert_eq!(vol_offset, 0);

        let (idx, vol_offset) = file.translate_offset(150).unwrap();
        assert_eq!(idx, 1);
        assert_eq!(vol_offset, 50);

        let (idx, vol_offset) = file.translate_offset(250).unwrap();
        assert_eq!(idx, 2);
        assert_eq!(vol_offset, 50);
    }

    // Range 50..=250 over 3x100-byte chunks: partial first piece, full
    // middle chunk, partial last piece.
    #[test]
    fn test_get_stream_chunks() {
        let chunks = create_test_chunks(&[100, 100, 100]);
        let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);

        let infos = file.get_stream_chunks(50, 250);
        assert_eq!(infos.len(), 3);

        assert_eq!(infos[0].chunk_index, 0);
        assert_eq!(infos[0].logical_start, 50);
        assert_eq!(infos[0].logical_end, 99);
        assert_eq!(infos[0].size, 50);

        assert_eq!(infos[1].chunk_index, 1);
        assert_eq!(infos[1].logical_start, 100);
        assert_eq!(infos[1].logical_end, 199);
        assert_eq!(infos[1].size, 100);

        assert_eq!(infos[2].chunk_index, 2);
        assert_eq!(infos[2].logical_start, 200);
        assert_eq!(infos[2].logical_end, 250);
        assert_eq!(infos[2].size, 51);
    }
975
    // A range wholly inside chunk 1 returns only that chunk's fill byte.
    #[tokio::test]
    async fn test_read_range() {
        let chunks = create_test_chunks(&[100, 100, 100]);
        let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);

        let data = file
            .read_range(ReadInterval {
                start: 150,
                end: 160,
            })
            .await
            .unwrap();
        assert_eq!(data.len(), 11);
        assert!(data.iter().all(|&b| b == 1));
    }

    // A range crossing the chunk 0 / chunk 1 boundary stitches the two
    // fill patterns together: 10 bytes of 0 followed by 11 bytes of 1.
    #[tokio::test]
    async fn test_read_range_spanning_chunks() {
        let chunks = create_test_chunks(&[100, 100, 100]);
        let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);

        let data = file
            .read_range(ReadInterval {
                start: 90,
                end: 110,
            })
            .await
            .unwrap();
        assert_eq!(data.len(), 21);

        assert!(data[..10].iter().all(|&b| b == 0));
        assert!(data[10..].iter().all(|&b| b == 1));
    }
1014}