1use crate::decompress::rar5::Rar5Decoder;
48use crate::decompress::Rar29Decoder;
49use crate::error::{RarError, Result};
50use crate::file_media::ReadInterval;
51use crate::parsing::RarVersion;
52use crate::rar_file_chunk::RarFileChunk;
53use std::sync::{Arc, Mutex};
54
/// Maps one chunk to the inclusive logical byte range it covers within
/// the concatenated packed stream of an [`InnerFile`].
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ChunkMapEntry {
    /// Index into the owning file's `chunks` vector.
    pub index: usize,
    /// First logical byte covered by this chunk (inclusive).
    pub start: u64,
    /// Last logical byte covered by this chunk (inclusive).
    /// NOTE: for a zero-length chunk this degenerates to `end == start`
    /// (see `InnerFile::calculate_chunk_map`).
    pub end: u64,
}
68
#[cfg(feature = "crypto")]
/// Per-entry encryption parameters, split by archive format.
///
/// The fields are consumed by `InnerFile::read_decompressed`, which feeds
/// them to `crate::crypto::{Rar5Crypto, Rar4Crypto}`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum EncryptionInfo {
    Rar5 {
        /// Salt passed to `Rar5Crypto::derive_key`.
        salt: [u8; 16],
        /// Initialization vector passed to `Rar5Crypto::decrypt`.
        init_v: [u8; 16],
        /// Passed to `Rar5Crypto::derive_key`; presumably the log2 of the
        /// KDF iteration count — confirm against the RAR5 spec.
        lg2_count: u8,
    },
    Rar4 {
        /// Salt passed to `Rar4Crypto::derive_key`.
        salt: [u8; 8],
    },
}
91
/// A single file stored inside a RAR archive, possibly split across
/// multiple volume chunks.
#[derive(Debug)]
pub struct InnerFile {
    /// File name as recorded in the archive headers.
    pub name: String,

    /// Logical length exposed to readers: the packed size for stored
    /// (uncompressed, unencrypted) entries, the unpacked size otherwise.
    pub length: u64,

    // Underlying chunks (one per volume slice) and their logical ranges;
    // `chunk_map[i]` always describes `chunks[i]`.
    chunks: Vec<RarFileChunk>,
    chunk_map: Vec<ChunkMapEntry>,
    // Compression method byte (0x30 = stored for RAR4, 0 = stored for RAR5).
    method: u8,
    // log2 of the RAR5 dictionary size, forwarded to the decoder.
    dict_size_log: u8,
    packed_size: u64,
    unpacked_size: u64,
    rar_version: RarVersion,
    // Solid entries take the serial RAR5 decode path (no parallel pipeline).
    is_solid: bool,
    // Cache of the fully decoded payload, filled on first read.
    decompressed_cache: Mutex<Option<Arc<Vec<u8>>>>,
    #[cfg(feature = "crypto")]
    encryption: Option<EncryptionInfo>,
    #[cfg(feature = "crypto")]
    password: Option<String>,
}
150
151impl InnerFile {
    /// Creates a non-solid `InnerFile` with the default dictionary size
    /// log of 22 (presumably 4 MiB — confirm against the RAR5 spec).
    ///
    /// Delegates to [`Self::new_with_solid_dict`].
    pub fn new(
        name: String,
        chunks: Vec<RarFileChunk>,
        method: u8,
        unpacked_size: u64,
        rar_version: RarVersion,
    ) -> Self {
        Self::new_with_solid_dict(name, chunks, method, 22, unpacked_size, rar_version, false)
    }
161
    /// Creates an `InnerFile` with an explicit solid flag and the default
    /// dictionary size log of 22.
    ///
    /// Delegates to [`Self::new_with_solid_dict`].
    pub fn new_with_solid(
        name: String,
        chunks: Vec<RarFileChunk>,
        method: u8,
        unpacked_size: u64,
        rar_version: RarVersion,
        is_solid: bool,
    ) -> Self {
        Self::new_with_solid_dict(
            name,
            chunks,
            method,
            22,
            unpacked_size,
            rar_version,
            is_solid,
        )
    }
181
182 pub fn new_with_solid_dict(
184 name: String,
185 chunks: Vec<RarFileChunk>,
186 method: u8,
187 dict_size_log: u8,
188 unpacked_size: u64,
189 rar_version: RarVersion,
190 is_solid: bool,
191 ) -> Self {
192 let packed_size: u64 = chunks.iter().map(|c| c.length()).sum();
193 let chunk_map = Self::calculate_chunk_map(&chunks);
194
195 let length = if method == 0x30 || method == 0 {
199 packed_size
200 } else {
201 unpacked_size
202 };
203
204 Self {
205 name,
206 length,
207 chunks,
208 chunk_map,
209 method,
210 dict_size_log,
211 packed_size,
212 unpacked_size,
213 rar_version,
214 is_solid,
215 decompressed_cache: Mutex::new(None),
216 #[cfg(feature = "crypto")]
217 encryption: None,
218 #[cfg(feature = "crypto")]
219 password: None,
220 }
221 }
222
223 #[cfg(feature = "crypto")]
225 pub fn new_encrypted(
226 name: String,
227 chunks: Vec<RarFileChunk>,
228 method: u8,
229 unpacked_size: u64,
230 rar_version: RarVersion,
231 encryption: Option<EncryptionInfo>,
232 password: Option<String>,
233 ) -> Self {
234 Self::new_encrypted_with_solid_dict(
235 name,
236 chunks,
237 method,
238 22, unpacked_size,
240 rar_version,
241 encryption,
242 password,
243 false,
244 )
245 }
246
    /// Creates a possibly encrypted entry with an explicit solid flag and
    /// the default dictionary size log of 22.
    ///
    /// Delegates to [`Self::new_encrypted_with_solid_dict`].
    #[cfg(feature = "crypto")]
    #[allow(clippy::too_many_arguments)]
    pub fn new_encrypted_with_solid(
        name: String,
        chunks: Vec<RarFileChunk>,
        method: u8,
        unpacked_size: u64,
        rar_version: RarVersion,
        encryption: Option<EncryptionInfo>,
        password: Option<String>,
        is_solid: bool,
    ) -> Self {
        Self::new_encrypted_with_solid_dict(
            name,
            chunks,
            method,
            22,
            unpacked_size,
            rar_version,
            encryption,
            password,
            is_solid,
        )
    }
272
273 #[cfg(feature = "crypto")]
275 #[allow(clippy::too_many_arguments)]
276 pub fn new_encrypted_with_solid_dict(
277 name: String,
278 chunks: Vec<RarFileChunk>,
279 method: u8,
280 dict_size_log: u8,
281 unpacked_size: u64,
282 rar_version: RarVersion,
283 encryption: Option<EncryptionInfo>,
284 password: Option<String>,
285 is_solid: bool,
286 ) -> Self {
287 let packed_size: u64 = chunks.iter().map(|c| c.length()).sum();
288 let chunk_map = Self::calculate_chunk_map(&chunks);
289
290 let length = if encryption.is_some() {
293 unpacked_size
294 } else if method == 0x30 || method == 0 {
295 packed_size
296 } else {
297 unpacked_size
298 };
299
300 Self {
301 name,
302 length,
303 chunks,
304 chunk_map,
305 method,
306 dict_size_log,
307 packed_size,
308 unpacked_size,
309 rar_version,
310 is_solid,
311 decompressed_cache: Mutex::new(None),
312 encryption,
313 password,
314 }
315 }
316
    /// Whether this entry carries encryption parameters.
    #[cfg(feature = "crypto")]
    pub fn is_encrypted(&self) -> bool {
        self.encryption.is_some()
    }
322
    /// Whether this entry belongs to a solid block (affects the RAR5
    /// decode path in `read_decompressed`).
    pub fn is_solid(&self) -> bool {
        self.is_solid
    }
327
328 pub fn is_compressed(&self) -> bool {
330 match self.rar_version {
331 RarVersion::Rar4 => self.method != 0x30,
332 RarVersion::Rar5 => self.method != 0, }
334 }
335
336 fn calculate_chunk_map(chunks: &[RarFileChunk]) -> Vec<ChunkMapEntry> {
337 let mut map = Vec::with_capacity(chunks.len());
338 let mut offset = 0u64;
339
340 for (index, chunk) in chunks.iter().enumerate() {
341 let start = offset;
342 let len = chunk.length();
343 let end = if len > 0 { offset + len - 1 } else { offset };
344 map.push(ChunkMapEntry { index, start, end });
345 offset = end + 1;
346 }
347
348 map
349 }
350
351 #[inline]
354 pub fn find_chunk_index(&self, offset: u64) -> Option<usize> {
355 if offset >= self.length {
356 return None;
357 }
358
359 let idx = self.chunk_map.partition_point(|entry| entry.end < offset);
361
362 if idx < self.chunk_map.len() && self.chunk_map[idx].start <= offset {
363 Some(idx)
364 } else {
365 None
366 }
367 }
368
    /// Returns the chunk-map entry at `index`, if any.
    #[inline]
    pub fn get_chunk_entry(&self, index: usize) -> Option<&ChunkMapEntry> {
        self.chunk_map.get(index)
    }
374
    /// Returns the underlying chunk at `index`, if any.
    #[inline]
    pub fn get_chunk(&self, index: usize) -> Option<&RarFileChunk> {
        self.chunks.get(index)
    }
380
    /// Number of underlying chunks (equals the chunk-map length).
    #[inline]
    pub fn chunk_count(&self) -> usize {
        self.chunks.len()
    }
386
387 pub async fn read_to_end(&self) -> Result<Vec<u8>> {
389 if self.is_compressed() {
390 let data = self.read_decompressed().await?;
391 Ok((*data).clone())
392 } else {
393 self.read_raw_range(0, self.length.saturating_sub(1)).await
394 }
395 }
396
    /// Reads logical bytes `start..=end` of the *packed* stream, stitching
    /// the result together across chunk boundaries.
    ///
    /// Out-of-range requests are clamped rather than rejected: `end` is
    /// capped at the last packed byte, and a `start` at or past the packed
    /// size yields an empty buffer.
    ///
    /// NOTE(review): `find_chunk_index` bounds offsets against
    /// `self.length`; this is only sound while this method is called for
    /// stored entries, where `length == packed_size` — confirm callers.
    async fn read_raw_range(&self, start: u64, end: u64) -> Result<Vec<u8>> {
        if start > end {
            return Ok(Vec::new());
        }

        let packed_len = self.packed_size;
        let actual_end = end.min(packed_len.saturating_sub(1));

        if start >= packed_len {
            return Ok(Vec::new());
        }

        // Locate the first and last chunks touched by the range.
        let start_idx = self
            .find_chunk_index(start)
            .ok_or(RarError::InvalidOffset {
                offset: start,
                length: packed_len,
            })?;
        let end_idx = self
            .find_chunk_index(actual_end)
            .ok_or(RarError::InvalidOffset {
                offset: actual_end,
                length: packed_len,
            })?;

        let mut result = Vec::with_capacity((actual_end - start + 1) as usize);

        for i in start_idx..=end_idx {
            let entry = &self.chunk_map[i];
            let chunk = &self.chunks[i];

            // The first and last chunks may be read only partially; any
            // chunk in between is read in full.
            let chunk_start = if i == start_idx {
                start - entry.start
            } else {
                0
            };
            let chunk_end = if i == end_idx {
                actual_end - entry.start
            } else {
                chunk.length().saturating_sub(1)
            };

            let data = chunk.read_range(chunk_start, chunk_end).await?;
            result.extend_from_slice(&data);
        }

        Ok(result)
    }
446
447 async fn read_all_raw(&self) -> Result<Vec<u8>> {
449 let mut result = Vec::with_capacity(self.packed_size as usize);
450 for chunk in &self.chunks {
451 let data = chunk
452 .read_range(0, chunk.length().saturating_sub(1))
453 .await?;
454 result.extend_from_slice(&data);
455 }
456 Ok(result)
457 }
458
    /// Returns the fully decrypted and decompressed payload, caching the
    /// result so repeated calls share one `Arc` and decode only once.
    ///
    /// # Errors
    /// - `RarError::PasswordRequired` if the entry is encrypted and no
    ///   password was supplied.
    /// - `RarError::DecryptionFailed` if decryption fails, or whatever the
    ///   decoder returns on malformed input.
    pub async fn read_decompressed(&self) -> Result<Arc<Vec<u8>>> {
        // Fast path: serve from the cache. The guard is dropped before any
        // await, so the std Mutex is never held across a suspension point.
        {
            let cache = self.decompressed_cache.lock().unwrap();
            if let Some(ref data) = *cache {
                return Ok(Arc::clone(data));
            }
        }

        // `mut` is only needed when the crypto feature decrypts in place.
        #[allow(unused_mut)]
        let mut packed = self.read_all_raw().await?;

        // Decrypt in place before decompression when the entry is encrypted.
        #[cfg(feature = "crypto")]
        if let Some(ref enc) = self.encryption {
            let password = self.password.as_ref().ok_or(RarError::PasswordRequired)?;

            match enc {
                EncryptionInfo::Rar5 {
                    salt,
                    init_v,
                    lg2_count,
                } => {
                    use crate::crypto::Rar5Crypto;
                    let crypto = Rar5Crypto::derive_key(password, salt, *lg2_count);
                    crypto
                        .decrypt(init_v, &mut packed)
                        .map_err(|e| RarError::DecryptionFailed(e.to_string()))?;
                }
                EncryptionInfo::Rar4 { salt } => {
                    use crate::crypto::Rar4Crypto;
                    let crypto = Rar4Crypto::derive_key(password, salt);
                    crypto
                        .decrypt(&mut packed)
                        .map_err(|e| RarError::DecryptionFailed(e.to_string()))?;
                }
            }
        }

        let decompressed = if !self.is_compressed() {
            // Stored-but-encrypted payloads can be longer than the real
            // content (presumably cipher padding — confirm); trim back to
            // the declared unpacked size.
            #[cfg(feature = "crypto")]
            if self.encryption.is_some() {
                packed.truncate(self.unpacked_size as usize);
            }
            packed
        } else {
            match self.rar_version {
                RarVersion::Rar4 => {
                    let mut decoder = Rar29Decoder::new();
                    decoder.decompress(&packed, self.unpacked_size)?
                }
                RarVersion::Rar5 => {
                    let mut decoder = Rar5Decoder::with_dict_size(self.dict_size_log);
                    // Non-solid entries with method 1..=5 may use the
                    // parallel pipeline; solid entries decode serially.
                    #[cfg(feature = "parallel")]
                    if !self.is_solid && self.method >= 1 && self.method <= 5 {
                        decoder.decompress_pipeline(&packed, self.unpacked_size)?
                    } else {
                        decoder.decompress(
                            &packed,
                            self.unpacked_size,
                            self.method,
                            self.is_solid,
                        )?
                    }
                    #[cfg(not(feature = "parallel"))]
                    decoder.decompress(&packed, self.unpacked_size, self.method, self.is_solid)?
                }
            }
        };
        let decompressed = Arc::new(decompressed);

        // Publish to the cache. If two tasks raced past the fast path,
        // the last writer wins; both results are equivalent.
        {
            let mut cache = self.decompressed_cache.lock().unwrap();
            *cache = Some(Arc::clone(&decompressed));
        }

        Ok(decompressed)
    }
544
    /// Reads logical bytes `interval.start..=interval.end` of the file's
    /// content.
    ///
    /// Compressed entries are fully decoded (and cached) first, then the
    /// requested slice is copied out; stored entries are read directly
    /// from the underlying chunks.
    ///
    /// # Errors
    /// `RarError::InvalidOffset` when the interval is inverted or extends
    /// past the logical length.
    pub async fn read_range(&self, interval: ReadInterval) -> Result<Vec<u8>> {
        let start = interval.start;
        let end = interval.end;

        if start > end || end >= self.length {
            return Err(RarError::InvalidOffset {
                offset: end,
                length: self.length,
            });
        }

        if self.is_compressed() {
            // Random access into compressed data needs the whole payload.
            let decompressed = self.read_decompressed().await?;
            let start_usize = start as usize;
            let end_usize = (end + 1) as usize;
            // Guard against a decoder that produced fewer than `length` bytes.
            if end_usize > decompressed.len() {
                return Err(RarError::InvalidOffset {
                    offset: end,
                    length: self.length,
                });
            }
            return Ok(decompressed[start_usize..end_usize].to_vec());
        }

        let start_idx = self
            .find_chunk_index(start)
            .ok_or(RarError::InvalidOffset {
                offset: start,
                length: self.length,
            })?;
        let end_idx = self.find_chunk_index(end).ok_or(RarError::InvalidOffset {
            offset: end,
            length: self.length,
        })?;

        let mut result = Vec::with_capacity((end - start + 1) as usize);

        for i in start_idx..=end_idx {
            let entry = &self.chunk_map[i];
            let chunk = &self.chunks[i];

            // Partial reads at the edge chunks, full chunks in between.
            let chunk_start = if i == start_idx {
                start - entry.start
            } else {
                0
            };
            let chunk_end = if i == end_idx {
                end - entry.start
            } else {
                chunk.length().saturating_sub(1)
            };

            let data = chunk.read_range(chunk_start, chunk_end).await?;
            result.extend_from_slice(&data);
        }

        Ok(result)
    }
608
    /// Creates a chunk-by-chunk streaming reader over logical bytes
    /// `start..=end`.
    ///
    /// NOTE(review): the stream reads raw chunk bytes; for compressed
    /// entries it would yield packed data, not decoded output — confirm
    /// callers only use it on stored entries.
    pub fn stream_range(&self, start: u64, end: u64) -> InnerFileStream<'_> {
        InnerFileStream::new(self, start, end)
    }
614
    /// Computes the volume-absolute byte ranges backing logical bytes
    /// `start..=end`, as `(chunk_index, abs_start, abs_end)` triples.
    ///
    /// Performs no I/O. Returns an empty vector when either bound falls
    /// outside the file's logical length.
    pub fn get_chunk_ranges(&self, start: u64, end: u64) -> Vec<(usize, u64, u64)> {
        let start_idx = match self.find_chunk_index(start) {
            Some(i) => i,
            None => return vec![],
        };
        let end_idx = match self.find_chunk_index(end) {
            Some(i) => i,
            None => return vec![],
        };

        let mut ranges = Vec::with_capacity(end_idx - start_idx + 1);

        for i in start_idx..=end_idx {
            let entry = &self.chunk_map[i];
            let chunk = &self.chunks[i];

            // Edge chunks may be covered only partially.
            let chunk_start = if i == start_idx {
                start - entry.start
            } else {
                0
            };
            let chunk_end = if i == end_idx {
                end - entry.start
            } else {
                chunk.length().saturating_sub(1)
            };

            // Translate chunk-relative offsets into volume-absolute ones.
            let abs_start = chunk.start_offset + chunk_start;
            let abs_end = chunk.start_offset + chunk_end;

            ranges.push((i, abs_start, abs_end));
        }

        ranges
    }
653
654 pub fn translate_offset(&self, offset: u64) -> Option<(usize, u64)> {
657 let idx = self.find_chunk_index(offset)?;
658 let entry = &self.chunk_map[idx];
659 let chunk = &self.chunks[idx];
660
661 let offset_in_chunk = offset - entry.start;
662 let volume_offset = chunk.start_offset + offset_in_chunk;
663
664 Some((idx, volume_offset))
665 }
666}
667
/// Borrowing, chunk-at-a-time reader over a logical byte range of an
/// [`InnerFile`]. Produced by [`InnerFile::stream_range`].
pub struct InnerFileStream<'a> {
    inner_file: &'a InnerFile,
    // Next logical byte to read.
    current_offset: u64,
    // Last logical byte to read (inclusive, clamped to the file length).
    end_offset: u64,
    // Chunk containing `current_offset`, if any.
    current_chunk_idx: Option<usize>,
    // Set once the range is exhausted or a read has failed.
    done: bool,
}
677
678impl<'a> InnerFileStream<'a> {
    /// Starts a stream over logical bytes `start..=end` of `inner_file`.
    ///
    /// `end` is clamped to the file's last byte; an inverted or fully
    /// out-of-range request yields a stream that is immediately done.
    pub fn new(inner_file: &'a InnerFile, start: u64, end: u64) -> Self {
        let current_chunk_idx = inner_file.find_chunk_index(start);
        Self {
            inner_file,
            current_offset: start,
            end_offset: end.min(inner_file.length.saturating_sub(1)),
            current_chunk_idx,
            done: start > end || start >= inner_file.length,
        }
    }
689
    /// Reads and returns the next piece of the requested range, one
    /// underlying chunk at a time. Returns `None` once the stream is
    /// exhausted.
    ///
    /// On a read error the stream marks itself done and yields the error
    /// once; subsequent calls return `None`.
    pub async fn next_chunk(&mut self) -> Option<Result<Vec<u8>>> {
        if self.done {
            return None;
        }

        let chunk_idx = self.current_chunk_idx?;
        let entry = self.inner_file.get_chunk_entry(chunk_idx)?;
        let chunk = self.inner_file.get_chunk(chunk_idx)?;

        // Clip the read to this chunk: from the current position up to
        // either the requested end (if it falls inside this chunk) or the
        // chunk's last byte.
        let chunk_start = self.current_offset - entry.start;
        let chunk_end = if self.end_offset <= entry.end {
            self.end_offset - entry.start
        } else {
            chunk.length().saturating_sub(1)
        };

        let result = chunk.read_range(chunk_start, chunk_end).await;

        match &result {
            Ok(data) => {
                self.current_offset += data.len() as u64;

                if self.current_offset > self.end_offset {
                    self.done = true;
                } else {
                    // Advance to the next chunk; stop if there is none.
                    self.current_chunk_idx = Some(chunk_idx + 1);
                    if chunk_idx + 1 >= self.inner_file.chunk_count() {
                        self.done = true;
                    }
                }
            }
            Err(_) => {
                self.done = true;
            }
        }

        Some(result)
    }
733
734 pub fn remaining(&self) -> u64 {
736 if self.done {
737 0
738 } else {
739 self.end_offset.saturating_sub(self.current_offset) + 1
740 }
741 }
742
    /// Current logical read position (the next byte to be streamed).
    pub fn position(&self) -> u64 {
        self.current_offset
    }
747}
748
/// Describes how one underlying chunk intersects a requested logical
/// range; see [`InnerFile::get_stream_chunks`].
#[derive(Debug, Clone)]
pub struct StreamChunkInfo {
    /// Index of the chunk within the owning file.
    pub chunk_index: usize,
    /// First logical byte of the intersection (inclusive).
    pub logical_start: u64,
    /// Last logical byte of the intersection (inclusive).
    pub logical_end: u64,
    /// Volume-absolute offset of `logical_start`.
    pub volume_start: u64,
    /// Volume-absolute offset of `logical_end`.
    pub volume_end: u64,
    /// Intersection size in bytes.
    pub size: u64,
}
759
impl InnerFile {
    /// Computes, without any I/O, how the logical range `start..=end`
    /// maps onto the underlying chunks, including volume-absolute byte
    /// offsets for each intersection.
    ///
    /// Returns an empty vector when either bound falls outside the file's
    /// logical length.
    pub fn get_stream_chunks(&self, start: u64, end: u64) -> Vec<StreamChunkInfo> {
        let start_idx = match self.find_chunk_index(start) {
            Some(i) => i,
            None => return vec![],
        };
        let end_idx = match self.find_chunk_index(end) {
            Some(i) => i,
            None => return vec![],
        };

        let mut infos = Vec::with_capacity(end_idx - start_idx + 1);

        for i in start_idx..=end_idx {
            let entry = &self.chunk_map[i];
            let chunk = &self.chunks[i];

            // Clip the requested range to this chunk's logical span.
            let logical_start = if i == start_idx { start } else { entry.start };
            let logical_end = if i == end_idx { end } else { entry.end };

            let offset_in_chunk_start = logical_start - entry.start;
            let offset_in_chunk_end = logical_end - entry.start;

            infos.push(StreamChunkInfo {
                chunk_index: i,
                logical_start,
                logical_end,
                volume_start: chunk.start_offset + offset_in_chunk_start,
                volume_end: chunk.start_offset + offset_in_chunk_end,
                size: logical_end - logical_start + 1,
            });
        }

        infos
    }
}
798
799#[cfg(test)]
800mod tests {
801 use super::*;
802 use crate::file_media::{FileMedia, ReadInterval};
803 use std::sync::Arc;
804
    /// In-memory `FileMedia` test double backed by a byte vector.
    struct MockFileMedia {
        name: String,
        length: u64,
        data: Vec<u8>,
    }
811
    impl MockFileMedia {
        /// Wraps `data` as a named media whose length matches the buffer.
        fn new(name: &str, data: Vec<u8>) -> Self {
            Self {
                name: name.to_string(),
                length: data.len() as u64,
                data,
            }
        }
    }
821
    impl FileMedia for MockFileMedia {
        fn length(&self) -> u64 {
            self.length
        }

        fn name(&self) -> &str {
            &self.name
        }

        // Serves reads straight from the in-memory buffer. The end bound
        // is clamped to the data length; a start past the end would panic
        // the slice, which is acceptable for a test double.
        fn read_range(
            &self,
            interval: ReadInterval,
        ) -> std::pin::Pin<
            Box<dyn std::future::Future<Output = crate::error::Result<Vec<u8>>> + Send + '_>,
        > {
            let start = interval.start as usize;
            let end = (interval.end + 1) as usize;
            let data = self.data[start..end.min(self.data.len())].to_vec();
            Box::pin(async move { Ok(data) })
        }
    }
843
    /// Builds one single-volume chunk per requested size; chunk `i` is
    /// filled with byte value `i`, so reads can be attributed to a chunk.
    ///
    /// NOTE(review): panics on a zero size (`size - 1` underflows) — all
    /// current callers pass non-zero sizes.
    fn create_test_chunks(chunk_sizes: &[u64]) -> Vec<RarFileChunk> {
        let mut chunks = Vec::new();
        for (i, &size) in chunk_sizes.iter().enumerate() {
            let data = vec![i as u8; size as usize];
            let media = Arc::new(MockFileMedia::new(&format!("chunk{}.rar", i), data));
            chunks.push(RarFileChunk::new(media, 0, size - 1));
        }
        chunks
    }
854
855 #[test]
856 fn test_binary_search_single_chunk() {
857 let chunks = create_test_chunks(&[1000]);
858 let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);
859
860 assert_eq!(file.length, 1000);
861 assert_eq!(file.find_chunk_index(0), Some(0));
862 assert_eq!(file.find_chunk_index(500), Some(0));
863 assert_eq!(file.find_chunk_index(999), Some(0));
864 assert_eq!(file.find_chunk_index(1000), None); }
866
867 #[test]
868 fn test_binary_search_multiple_chunks() {
869 let chunks = create_test_chunks(&[100, 100, 100]);
871 let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);
872
873 assert_eq!(file.length, 300);
874
875 assert_eq!(file.find_chunk_index(0), Some(0));
877 assert_eq!(file.find_chunk_index(50), Some(0));
878 assert_eq!(file.find_chunk_index(99), Some(0));
879
880 assert_eq!(file.find_chunk_index(100), Some(1));
882 assert_eq!(file.find_chunk_index(150), Some(1));
883 assert_eq!(file.find_chunk_index(199), Some(1));
884
885 assert_eq!(file.find_chunk_index(200), Some(2));
887 assert_eq!(file.find_chunk_index(250), Some(2));
888 assert_eq!(file.find_chunk_index(299), Some(2));
889
890 assert_eq!(file.find_chunk_index(300), None);
892 }
893
    #[test]
    fn test_binary_search_many_chunks() {
        // 100 chunks of 1000 bytes each.
        let chunk_sizes: Vec<u64> = vec![1000; 100];
        let chunks = create_test_chunks(&chunk_sizes);
        let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);

        assert_eq!(file.length, 100_000);

        // First, middle, and last byte of every chunk resolve to that chunk.
        for i in 0..100 {
            let offset = i * 1000;
            assert_eq!(file.find_chunk_index(offset), Some(i as usize));
            assert_eq!(file.find_chunk_index(offset + 500), Some(i as usize));
            assert_eq!(file.find_chunk_index(offset + 999), Some(i as usize));
        }
    }
911
912 #[test]
913 fn test_translate_offset() {
914 let chunks = create_test_chunks(&[100, 100, 100]);
915 let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);
916
917 let (idx, vol_offset) = file.translate_offset(0).unwrap();
919 assert_eq!(idx, 0);
920 assert_eq!(vol_offset, 0);
921
922 let (idx, vol_offset) = file.translate_offset(150).unwrap();
923 assert_eq!(idx, 1);
924 assert_eq!(vol_offset, 50); let (idx, vol_offset) = file.translate_offset(250).unwrap();
927 assert_eq!(idx, 2);
928 assert_eq!(vol_offset, 50); }
930
    #[test]
    fn test_get_stream_chunks() {
        let chunks = create_test_chunks(&[100, 100, 100]);
        let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);

        // Range 50..=250 touches all three 100-byte chunks.
        let infos = file.get_stream_chunks(50, 250);
        assert_eq!(infos.len(), 3);

        // First chunk: clipped at the front.
        assert_eq!(infos[0].chunk_index, 0);
        assert_eq!(infos[0].logical_start, 50);
        assert_eq!(infos[0].logical_end, 99);
        assert_eq!(infos[0].size, 50);

        // Middle chunk: fully covered.
        assert_eq!(infos[1].chunk_index, 1);
        assert_eq!(infos[1].logical_start, 100);
        assert_eq!(infos[1].logical_end, 199);
        assert_eq!(infos[1].size, 100);

        // Last chunk: clipped at the back.
        assert_eq!(infos[2].chunk_index, 2);
        assert_eq!(infos[2].logical_start, 200);
        assert_eq!(infos[2].logical_end, 250);
        assert_eq!(infos[2].size, 51);
    }
955
    #[tokio::test]
    async fn test_read_range() {
        let chunks = create_test_chunks(&[100, 100, 100]);
        let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);

        // 150..=160 lies entirely inside chunk 1, which is filled with 1s.
        let data = file
            .read_range(ReadInterval {
                start: 150,
                end: 160,
            })
            .await
            .unwrap();
        assert_eq!(data.len(), 11);
        assert!(data.iter().all(|&b| b == 1));
    }
973
    #[tokio::test]
    async fn test_read_range_spanning_chunks() {
        let chunks = create_test_chunks(&[100, 100, 100]);
        let file = InnerFile::new("test.mkv".to_string(), chunks, 0x30, 0, RarVersion::Rar4);

        // 90..=110 crosses the boundary between chunk 0 (0s) and chunk 1 (1s).
        let data = file
            .read_range(ReadInterval {
                start: 90,
                end: 110,
            })
            .await
            .unwrap();
        assert_eq!(data.len(), 21);

        // Bytes 90..=99 come from chunk 0, bytes 100..=110 from chunk 1.
        assert!(data[..10].iter().all(|&b| b == 0));
        assert!(data[10..].iter().all(|&b| b == 1));
    }
994}