1use crate::decompress::rar5::Rar5Decoder;
48use crate::decompress::Rar29Decoder;
49use crate::error::{RarError, Result};
50use crate::file_media::ReadInterval;
51use crate::parsing::RarVersion;
52use crate::rar_file_chunk::RarFileChunk;
53use std::sync::{Arc, Mutex};
54
/// Precomputed, inclusive logical byte range covered by a single chunk.
///
/// Entries are stored contiguously and in order, which lets
/// `InnerFile::find_chunk_index` binary-search an offset to its chunk.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ChunkMapEntry {
    /// Position of the corresponding chunk in `InnerFile::chunks`.
    pub index: usize,
    /// First logical byte covered by this chunk (inclusive).
    pub start: u64,
    /// Last logical byte covered by this chunk (inclusive).
    pub end: u64,
}
68
/// Per-entry encryption parameters read from the archive headers.
///
/// The fields are fed to the key-derivation and decryption routines in
/// `crate::crypto` (see `InnerFile::read_decompressed`).
#[cfg(feature = "crypto")]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum EncryptionInfo {
    /// RAR5-format encryption parameters.
    Rar5 {
        /// Salt passed to `Rar5Crypto::derive_key`.
        salt: [u8; 16],
        /// Initialization vector passed to `Rar5Crypto::decrypt`.
        init_v: [u8; 16],
        /// log2 of the key-derivation iteration count.
        lg2_count: u8,
    },
    /// RAR4-format encryption parameters.
    Rar4 {
        /// Salt passed to `Rar4Crypto::derive_key`.
        salt: [u8; 8],
    },
}
91
/// A single file stored inside a (possibly multi-volume) RAR archive,
/// addressed as one contiguous logical byte stream across its chunks.
#[derive(Debug)]
pub struct InnerFile {
    /// File name as recorded in the archive.
    pub name: String,

    /// Logical length in bytes: the packed size for stored entries,
    /// the unpacked size for compressed (or encrypted) entries.
    pub length: u64,

    // Backing chunks, in stream order.
    chunks: Vec<RarFileChunk>,
    // Per-chunk inclusive [start, end] logical ranges, parallel to `chunks`.
    chunk_map: Vec<ChunkMapEntry>,
    // Compression method id (0x30 = stored for RAR4, 0 = stored for RAR5).
    method: u8,
    // log2 of the decoder dictionary size (used by the RAR5 decoder).
    dict_size_log: u8,
    // Total byte count across all chunks (packed form).
    packed_size: u64,
    // Size after decompression, as recorded in the headers.
    unpacked_size: u64,
    // Archive format this entry was parsed from.
    rar_version: RarVersion,
    // Whether the entry is flagged as part of a solid block.
    is_solid: bool,
    // Lazily-populated cache of the fully decompressed payload.
    decompressed_cache: Mutex<Option<Arc<Vec<u8>>>>,
    // Encryption parameters, present only for encrypted entries.
    #[cfg(feature = "crypto")]
    encryption: Option<EncryptionInfo>,
    // Password supplied for key derivation, if any.
    #[cfg(feature = "crypto")]
    password: Option<String>,
}
150
151impl InnerFile {
152 pub fn new(
154 name: String,
155 chunks: Vec<RarFileChunk>,
156 method: u8,
157 unpacked_size: u64,
158 rar_version: RarVersion,
159 ) -> Self {
160 Self::new_with_solid_dict(name, chunks, method, 22, unpacked_size, rar_version, false)
161 }
162
163 pub fn new_with_solid(
165 name: String,
166 chunks: Vec<RarFileChunk>,
167 method: u8,
168 unpacked_size: u64,
169 rar_version: RarVersion,
170 is_solid: bool,
171 ) -> Self {
172 Self::new_with_solid_dict(
173 name,
174 chunks,
175 method,
176 22,
177 unpacked_size,
178 rar_version,
179 is_solid,
180 )
181 }
182
183 pub fn new_with_solid_dict(
185 name: String,
186 chunks: Vec<RarFileChunk>,
187 method: u8,
188 dict_size_log: u8,
189 unpacked_size: u64,
190 rar_version: RarVersion,
191 is_solid: bool,
192 ) -> Self {
193 let packed_size: u64 = chunks.iter().map(|c| c.length()).sum();
194 let chunk_map = Self::calculate_chunk_map(&chunks);
195
196 let length = if method == 0x30 || method == 0 {
200 packed_size
201 } else {
202 unpacked_size
203 };
204
205 Self {
206 name,
207 length,
208 chunks,
209 chunk_map,
210 method,
211 dict_size_log,
212 packed_size,
213 unpacked_size,
214 rar_version,
215 is_solid,
216 decompressed_cache: Mutex::new(None),
217 #[cfg(feature = "crypto")]
218 encryption: None,
219 #[cfg(feature = "crypto")]
220 password: None,
221 }
222 }
223
224 #[cfg(feature = "crypto")]
226 pub fn new_encrypted(
227 name: String,
228 chunks: Vec<RarFileChunk>,
229 method: u8,
230 unpacked_size: u64,
231 rar_version: RarVersion,
232 encryption: Option<EncryptionInfo>,
233 password: Option<String>,
234 ) -> Self {
235 Self::new_encrypted_with_solid_dict(
236 name,
237 chunks,
238 method,
239 22, unpacked_size,
241 rar_version,
242 encryption,
243 password,
244 false,
245 )
246 }
247
248 #[cfg(feature = "crypto")]
250 #[allow(clippy::too_many_arguments)]
251 pub fn new_encrypted_with_solid(
252 name: String,
253 chunks: Vec<RarFileChunk>,
254 method: u8,
255 unpacked_size: u64,
256 rar_version: RarVersion,
257 encryption: Option<EncryptionInfo>,
258 password: Option<String>,
259 is_solid: bool,
260 ) -> Self {
261 Self::new_encrypted_with_solid_dict(
262 name,
263 chunks,
264 method,
265 22,
266 unpacked_size,
267 rar_version,
268 encryption,
269 password,
270 is_solid,
271 )
272 }
273
274 #[cfg(feature = "crypto")]
276 #[allow(clippy::too_many_arguments)]
277 pub fn new_encrypted_with_solid_dict(
278 name: String,
279 chunks: Vec<RarFileChunk>,
280 method: u8,
281 dict_size_log: u8,
282 unpacked_size: u64,
283 rar_version: RarVersion,
284 encryption: Option<EncryptionInfo>,
285 password: Option<String>,
286 is_solid: bool,
287 ) -> Self {
288 let packed_size: u64 = chunks.iter().map(|c| c.length()).sum();
289 let chunk_map = Self::calculate_chunk_map(&chunks);
290
291 let length = if encryption.is_some() {
294 unpacked_size
295 } else if method == 0x30 || method == 0 {
296 packed_size
297 } else {
298 unpacked_size
299 };
300
301 Self {
302 name,
303 length,
304 chunks,
305 chunk_map,
306 method,
307 dict_size_log,
308 packed_size,
309 unpacked_size,
310 rar_version,
311 is_solid,
312 decompressed_cache: Mutex::new(None),
313 encryption,
314 password,
315 }
316 }
317
    /// Returns `true` when this entry carries encryption metadata and must
    /// be decrypted before its contents can be read.
    #[cfg(feature = "crypto")]
    pub fn is_encrypted(&self) -> bool {
        self.encryption.is_some()
    }

    /// Returns `true` when this entry was flagged as part of a solid block
    /// in the archive headers.
    pub fn is_solid(&self) -> bool {
        self.is_solid
    }
328
329 pub fn is_compressed(&self) -> bool {
331 match self.rar_version {
332 RarVersion::Rar4 => self.method != 0x30,
333 RarVersion::Rar5 => self.method != 0, }
335 }
336
337 fn calculate_chunk_map(chunks: &[RarFileChunk]) -> Vec<ChunkMapEntry> {
338 let mut map = Vec::with_capacity(chunks.len());
339 let mut offset = 0u64;
340
341 for (index, chunk) in chunks.iter().enumerate() {
342 let start = offset;
343 let len = chunk.length();
344 let end = if len > 0 { offset + len - 1 } else { offset };
345 map.push(ChunkMapEntry { index, start, end });
346 offset = end + 1;
347 }
348
349 map
350 }
351
352 #[inline]
355 pub fn find_chunk_index(&self, offset: u64) -> Option<usize> {
356 if offset >= self.length {
357 return None;
358 }
359
360 let idx = self.chunk_map.partition_point(|entry| entry.end < offset);
362
363 if idx < self.chunk_map.len() && self.chunk_map[idx].start <= offset {
364 Some(idx)
365 } else {
366 None
367 }
368 }
369
    /// Returns the precomputed map entry for chunk `index`, if it exists.
    #[inline]
    pub fn get_chunk_entry(&self, index: usize) -> Option<&ChunkMapEntry> {
        self.chunk_map.get(index)
    }

    /// Returns the backing chunk at `index`, if it exists.
    #[inline]
    pub fn get_chunk(&self, index: usize) -> Option<&RarFileChunk> {
        self.chunks.get(index)
    }

    /// Number of chunks backing this file.
    #[inline]
    pub fn chunk_count(&self) -> usize {
        self.chunks.len()
    }
387
388 pub async fn read_to_end(&self) -> Result<Vec<u8>> {
390 if self.is_compressed() {
391 let data = self.read_decompressed().await?;
392 Ok((*data).clone())
393 } else {
394 self.read_raw_range(0, self.length.saturating_sub(1)).await
395 }
396 }
397
398 async fn read_raw_range(&self, start: u64, end: u64) -> Result<Vec<u8>> {
400 if start > end {
401 return Ok(Vec::new());
402 }
403
404 let packed_len = self.packed_size;
405 let actual_end = end.min(packed_len.saturating_sub(1));
406
407 if start >= packed_len {
408 return Ok(Vec::new());
409 }
410
411 let start_idx = self
412 .find_chunk_index(start)
413 .ok_or(RarError::InvalidOffset {
414 offset: start,
415 length: packed_len,
416 })?;
417 let end_idx = self
418 .find_chunk_index(actual_end)
419 .ok_or(RarError::InvalidOffset {
420 offset: actual_end,
421 length: packed_len,
422 })?;
423
424 let mut result = Vec::with_capacity((actual_end - start + 1) as usize);
425
426 for i in start_idx..=end_idx {
427 let entry = &self.chunk_map[i];
428 let chunk = &self.chunks[i];
429
430 let chunk_start = if i == start_idx {
431 start - entry.start
432 } else {
433 0
434 };
435 let chunk_end = if i == end_idx {
436 actual_end - entry.start
437 } else {
438 chunk.length().saturating_sub(1)
439 };
440
441 let data = chunk.read_range(chunk_start, chunk_end).await?;
442 result.extend_from_slice(&data);
443 }
444
445 Ok(result)
446 }
447
448 async fn read_all_raw(&self) -> Result<Vec<u8>> {
450 let mut result = Vec::with_capacity(self.packed_size as usize);
451 for chunk in &self.chunks {
452 let data = chunk
453 .read_range(0, chunk.length().saturating_sub(1))
454 .await?;
455 result.extend_from_slice(&data);
456 }
457 Ok(result)
458 }
459
    /// Returns the fully decompressed (and, when applicable, decrypted)
    /// payload, caching the result so repeated calls are cheap `Arc` clones.
    ///
    /// # Errors
    /// Returns [`RarError::PasswordRequired`] for encrypted entries without
    /// a password, [`RarError::DecryptionFailed`] on crypto errors, and
    /// propagates decoder/read failures.
    pub async fn read_decompressed(&self) -> Result<Arc<Vec<u8>>> {
        // Fast path: hand out the cached buffer if a previous call filled it.
        // The guard is dropped before any await point below.
        {
            let cache = self.decompressed_cache.lock().unwrap();
            if let Some(ref data) = *cache {
                return Ok(Arc::clone(data));
            }
        }

        // `mut` is only needed when the crypto feature decrypts in place.
        #[allow(unused_mut)]
        let mut packed = self.read_all_raw().await?;

        // Decrypt in place before decompression, selecting the scheme from
        // the header-derived parameters.
        #[cfg(feature = "crypto")]
        if let Some(ref enc) = self.encryption {
            let password = self.password.as_ref().ok_or(RarError::PasswordRequired)?;

            match enc {
                EncryptionInfo::Rar5 {
                    salt,
                    init_v,
                    lg2_count,
                } => {
                    use crate::crypto::Rar5Crypto;
                    let crypto = Rar5Crypto::derive_key(password, salt, *lg2_count);
                    crypto
                        .decrypt(init_v, &mut packed)
                        .map_err(|e| RarError::DecryptionFailed(e.to_string()))?;
                }
                EncryptionInfo::Rar4 { salt } => {
                    use crate::crypto::Rar4Crypto;
                    let crypto = Rar4Crypto::derive_key(password, salt);
                    crypto
                        .decrypt(&mut packed)
                        .map_err(|e| RarError::DecryptionFailed(e.to_string()))?;
                }
            }
        }

        let decompressed = if !self.is_compressed() {
            // Stored entry: the packed bytes are already the payload. For
            // encrypted stored data, drop any cipher-block padding past the
            // advertised unpacked size.
            #[cfg(feature = "crypto")]
            if self.encryption.is_some() {
                packed.truncate(self.unpacked_size as usize);
            }
            packed
        } else {
            match self.rar_version {
                RarVersion::Rar4 => {
                    let mut decoder = Rar29Decoder::new();
                    decoder.decompress(&packed, self.unpacked_size)?
                }
                RarVersion::Rar5 => {
                    let mut decoder = Rar5Decoder::with_dict_size(self.dict_size_log);
                    // With the parallel feature, non-solid entries using
                    // methods 1..=5 go through the pipelined decoder.
                    #[cfg(feature = "parallel")]
                    if !self.is_solid && self.method >= 1 && self.method <= 5 {
                        decoder.decompress_pipeline(&packed, self.unpacked_size)?
                    } else {
                        decoder.decompress(
                            &packed,
                            self.unpacked_size,
                            self.method,
                            self.is_solid,
                        )?
                    }
                    #[cfg(not(feature = "parallel"))]
                    decoder.decompress(&packed, self.unpacked_size, self.method, self.is_solid)?
                }
            }
        };
        let decompressed = Arc::new(decompressed);

        // Publish the result for subsequent callers.
        {
            let mut cache = self.decompressed_cache.lock().unwrap();
            *cache = Some(Arc::clone(&decompressed));
        }

        Ok(decompressed)
    }
545
546 pub async fn read_range(&self, interval: ReadInterval) -> Result<Vec<u8>> {
549 let start = interval.start;
550 let end = interval.end;
551
552 if start > end || end >= self.length {
553 return Err(RarError::InvalidOffset {
554 offset: end,
555 length: self.length,
556 });
557 }
558
559 if self.is_compressed() {
560 let decompressed = self.read_decompressed().await?;
562 let start_usize = start as usize;
563 let end_usize = (end + 1) as usize;
564 if end_usize > decompressed.len() {
565 return Err(RarError::InvalidOffset {
566 offset: end,
567 length: self.length,
568 });
569 }
570 return Ok(decompressed[start_usize..end_usize].to_vec());
571 }
572
573 let start_idx = self
574 .find_chunk_index(start)
575 .ok_or(RarError::InvalidOffset {
576 offset: start,
577 length: self.length,
578 })?;
579 let end_idx = self.find_chunk_index(end).ok_or(RarError::InvalidOffset {
580 offset: end,
581 length: self.length,
582 })?;
583
584 let mut result = Vec::with_capacity((end - start + 1) as usize);
586
587 for i in start_idx..=end_idx {
588 let entry = &self.chunk_map[i];
589 let chunk = &self.chunks[i];
590
591 let chunk_start = if i == start_idx {
593 start - entry.start
594 } else {
595 0
596 };
597 let chunk_end = if i == end_idx {
598 end - entry.start
599 } else {
600 chunk.length().saturating_sub(1)
601 };
602
603 let data = chunk.read_range(chunk_start, chunk_end).await?;
604 result.extend_from_slice(&data);
605 }
606
607 Ok(result)
608 }
609
    /// Creates a chunk-by-chunk stream over the inclusive logical range
    /// `[start, end]`; see [`InnerFileStream`].
    pub fn stream_range(&self, start: u64, end: u64) -> InnerFileStream<'_> {
        InnerFileStream::new(self, start, end)
    }
615
616 pub fn get_chunk_ranges(&self, start: u64, end: u64) -> Vec<(usize, u64, u64)> {
619 let start_idx = match self.find_chunk_index(start) {
620 Some(i) => i,
621 None => return vec![],
622 };
623 let end_idx = match self.find_chunk_index(end) {
624 Some(i) => i,
625 None => return vec![],
626 };
627
628 let mut ranges = Vec::with_capacity(end_idx - start_idx + 1);
629
630 for i in start_idx..=end_idx {
631 let entry = &self.chunk_map[i];
632 let chunk = &self.chunks[i];
633
634 let chunk_start = if i == start_idx {
635 start - entry.start
636 } else {
637 0
638 };
639 let chunk_end = if i == end_idx {
640 end - entry.start
641 } else {
642 chunk.length().saturating_sub(1)
643 };
644
645 let abs_start = chunk.start_offset + chunk_start;
647 let abs_end = chunk.start_offset + chunk_end;
648
649 ranges.push((i, abs_start, abs_end));
650 }
651
652 ranges
653 }
654
655 pub fn translate_offset(&self, offset: u64) -> Option<(usize, u64)> {
658 let idx = self.find_chunk_index(offset)?;
659 let entry = &self.chunk_map[idx];
660 let chunk = &self.chunks[idx];
661
662 let offset_in_chunk = offset - entry.start;
663 let volume_offset = chunk.start_offset + offset_in_chunk;
664
665 Some((idx, volume_offset))
666 }
667}
668
/// Pull-based, chunk-at-a-time reader over a logical byte range of an
/// [`InnerFile`]. Created via [`InnerFile::stream_range`]; drive it with
/// `next_chunk`.
pub struct InnerFileStream<'a> {
    // File being streamed.
    inner_file: &'a InnerFile,
    // Next logical offset to read (inclusive).
    current_offset: u64,
    // Last logical offset to read (inclusive, clamped to the file length).
    end_offset: u64,
    // Index of the chunk containing `current_offset`, if any.
    current_chunk_idx: Option<usize>,
    // Set once the range is exhausted or a read has failed.
    done: bool,
}
678
679impl<'a> InnerFileStream<'a> {
680 pub fn new(inner_file: &'a InnerFile, start: u64, end: u64) -> Self {
681 let current_chunk_idx = inner_file.find_chunk_index(start);
682 Self {
683 inner_file,
684 current_offset: start,
685 end_offset: end.min(inner_file.length.saturating_sub(1)),
686 current_chunk_idx,
687 done: start > end || start >= inner_file.length,
688 }
689 }
690
    /// Reads the next contiguous piece of the requested range (at most one
    /// chunk's worth of bytes) and advances the stream.
    ///
    /// Returns `None` once the range is exhausted. A read error is yielded
    /// to the caller and also terminates the stream.
    pub async fn next_chunk(&mut self) -> Option<Result<Vec<u8>>> {
        if self.done {
            return None;
        }

        // `?` here ends the stream if the chunk index is missing or stale.
        let chunk_idx = self.current_chunk_idx?;
        let entry = self.inner_file.get_chunk_entry(chunk_idx)?;
        let chunk = self.inner_file.get_chunk(chunk_idx)?;

        // Chunk-local inclusive bounds: start at the current position,
        // stop at either the requested end or the end of this chunk.
        let chunk_start = self.current_offset - entry.start;
        let chunk_end = if self.end_offset <= entry.end {
            self.end_offset - entry.start
        } else {
            chunk.length().saturating_sub(1)
        };

        let result = chunk.read_range(chunk_start, chunk_end).await;

        match &result {
            Ok(data) => {
                // Advance by what was actually read, then either finish or
                // move on to the next chunk.
                self.current_offset += data.len() as u64;

                if self.current_offset > self.end_offset {
                    self.done = true;
                } else {
                    self.current_chunk_idx = Some(chunk_idx + 1);
                    // No more chunks: stop even if the range wasn't covered.
                    if chunk_idx + 1 >= self.inner_file.chunk_count() {
                        self.done = true;
                    }
                }
            }
            Err(_) => {
                // A failed read poisons the stream; the error is still
                // returned below for the caller to inspect.
                self.done = true;
            }
        }

        Some(result)
    }
734
735 pub fn remaining(&self) -> u64 {
737 if self.done {
738 0
739 } else {
740 self.end_offset.saturating_sub(self.current_offset) + 1
741 }
742 }
743
744 pub fn position(&self) -> u64 {
746 self.current_offset
747 }
748}
749
/// Description of one chunk's contribution to a requested logical range,
/// as produced by [`InnerFile::get_stream_chunks`].
#[derive(Debug, Clone)]
pub struct StreamChunkInfo {
    /// Index into the file's chunk list.
    pub chunk_index: usize,
    /// First logical byte served from this chunk (inclusive).
    pub logical_start: u64,
    /// Last logical byte served from this chunk (inclusive).
    pub logical_end: u64,
    /// Absolute start offset within the chunk's volume.
    pub volume_start: u64,
    /// Absolute end offset within the chunk's volume (inclusive).
    pub volume_end: u64,
    /// Byte count (`logical_end - logical_start + 1`).
    pub size: u64,
}
760
761impl InnerFile {
762 pub fn get_stream_chunks(&self, start: u64, end: u64) -> Vec<StreamChunkInfo> {
765 let start_idx = match self.find_chunk_index(start) {
766 Some(i) => i,
767 None => return vec![],
768 };
769 let end_idx = match self.find_chunk_index(end) {
770 Some(i) => i,
771 None => return vec![],
772 };
773
774 let mut infos = Vec::with_capacity(end_idx - start_idx + 1);
775
776 for i in start_idx..=end_idx {
777 let entry = &self.chunk_map[i];
778 let chunk = &self.chunks[i];
779
780 let logical_start = if i == start_idx { start } else { entry.start };
781 let logical_end = if i == end_idx { end } else { entry.end };
782
783 let offset_in_chunk_start = logical_start - entry.start;
784 let offset_in_chunk_end = logical_end - entry.start;
785
786 infos.push(StreamChunkInfo {
787 chunk_index: i,
788 logical_start,
789 logical_end,
790 volume_start: chunk.start_offset + offset_in_chunk_start,
791 volume_end: chunk.start_offset + offset_in_chunk_end,
792 size: logical_end - logical_start + 1,
793 });
794 }
795
796 infos
797 }
798}
799
#[cfg(test)]
mod tests {
    use super::*;
    use crate::file_media::{FileMedia, ReadInterval};
    use std::sync::Arc;

    /// In-memory `FileMedia` stand-in that serves bytes from a `Vec<u8>`.
    struct MockFileMedia {
        name: String,
        length: u64,
        data: Vec<u8>,
    }

    impl MockFileMedia {
        fn new(name: &str, data: Vec<u8>) -> Self {
            Self {
                name: name.to_string(),
                length: data.len() as u64,
                data,
            }
        }
    }

    impl FileMedia for MockFileMedia {
        fn length(&self) -> u64 {
            self.length
        }

        fn name(&self) -> &str {
            &self.name
        }

        fn read_range(
            &self,
            interval: ReadInterval,
        ) -> std::pin::Pin<
            Box<dyn std::future::Future<Output = crate::error::Result<Vec<u8>>> + Send + '_>,
        > {
            // `ReadInterval::end` is inclusive, hence the +1 before slicing.
            let lo = interval.start as usize;
            let hi = ((interval.end + 1) as usize).min(self.data.len());
            let bytes = self.data[lo..hi].to_vec();
            Box::pin(async move { Ok(bytes) })
        }
    }

    /// Builds one chunk per entry of `chunk_sizes`; chunk `i` holds `size`
    /// copies of the byte value `i`.
    fn create_test_chunks(chunk_sizes: &[u64]) -> Vec<RarFileChunk> {
        chunk_sizes
            .iter()
            .enumerate()
            .map(|(i, &size)| {
                let payload = vec![i as u8; size as usize];
                let media = Arc::new(MockFileMedia::new(&format!("chunk{}.rar", i), payload));
                RarFileChunk::new(media, 0, size - 1)
            })
            .collect()
    }

    #[test]
    fn test_binary_search_single_chunk() {
        let file = InnerFile::new(
            "test.mkv".to_string(),
            create_test_chunks(&[1000]),
            0x30,
            0,
            RarVersion::Rar4,
        );

        assert_eq!(file.length, 1000);
        assert_eq!(file.find_chunk_index(0), Some(0));
        assert_eq!(file.find_chunk_index(500), Some(0));
        assert_eq!(file.find_chunk_index(999), Some(0));
        // One past the end is out of range.
        assert_eq!(file.find_chunk_index(1000), None);
    }

    #[test]
    fn test_binary_search_multiple_chunks() {
        let file = InnerFile::new(
            "test.mkv".to_string(),
            create_test_chunks(&[100, 100, 100]),
            0x30,
            0,
            RarVersion::Rar4,
        );

        assert_eq!(file.length, 300);

        // First chunk: logical [0, 99].
        assert_eq!(file.find_chunk_index(0), Some(0));
        assert_eq!(file.find_chunk_index(50), Some(0));
        assert_eq!(file.find_chunk_index(99), Some(0));

        // Second chunk: logical [100, 199].
        assert_eq!(file.find_chunk_index(100), Some(1));
        assert_eq!(file.find_chunk_index(150), Some(1));
        assert_eq!(file.find_chunk_index(199), Some(1));

        // Third chunk: logical [200, 299].
        assert_eq!(file.find_chunk_index(200), Some(2));
        assert_eq!(file.find_chunk_index(250), Some(2));
        assert_eq!(file.find_chunk_index(299), Some(2));

        assert_eq!(file.find_chunk_index(300), None);
    }

    #[test]
    fn test_binary_search_many_chunks() {
        let chunk_sizes = vec![1000u64; 100];
        let file = InnerFile::new(
            "test.mkv".to_string(),
            create_test_chunks(&chunk_sizes),
            0x30,
            0,
            RarVersion::Rar4,
        );

        assert_eq!(file.length, 100_000);

        // Probe the first, middle, and last byte of every chunk.
        for i in 0..100 {
            let offset = i * 1000;
            assert_eq!(file.find_chunk_index(offset), Some(i as usize));
            assert_eq!(file.find_chunk_index(offset + 500), Some(i as usize));
            assert_eq!(file.find_chunk_index(offset + 999), Some(i as usize));
        }
    }

    #[test]
    fn test_translate_offset() {
        let file = InnerFile::new(
            "test.mkv".to_string(),
            create_test_chunks(&[100, 100, 100]),
            0x30,
            0,
            RarVersion::Rar4,
        );

        // Every mock chunk starts at volume offset 0, so the volume offset
        // equals the offset within the owning chunk.
        let (idx, vol_offset) = file.translate_offset(0).unwrap();
        assert_eq!(idx, 0);
        assert_eq!(vol_offset, 0);

        let (idx, vol_offset) = file.translate_offset(150).unwrap();
        assert_eq!(idx, 1);
        assert_eq!(vol_offset, 50);

        let (idx, vol_offset) = file.translate_offset(250).unwrap();
        assert_eq!(idx, 2);
        assert_eq!(vol_offset, 50);
    }

    #[test]
    fn test_get_stream_chunks() {
        let file = InnerFile::new(
            "test.mkv".to_string(),
            create_test_chunks(&[100, 100, 100]),
            0x30,
            0,
            RarVersion::Rar4,
        );

        let infos = file.get_stream_chunks(50, 250);
        assert_eq!(infos.len(), 3);

        // Partial head chunk.
        assert_eq!(infos[0].chunk_index, 0);
        assert_eq!(infos[0].logical_start, 50);
        assert_eq!(infos[0].logical_end, 99);
        assert_eq!(infos[0].size, 50);

        // Fully-covered middle chunk.
        assert_eq!(infos[1].chunk_index, 1);
        assert_eq!(infos[1].logical_start, 100);
        assert_eq!(infos[1].logical_end, 199);
        assert_eq!(infos[1].size, 100);

        // Partial tail chunk.
        assert_eq!(infos[2].chunk_index, 2);
        assert_eq!(infos[2].logical_start, 200);
        assert_eq!(infos[2].logical_end, 250);
        assert_eq!(infos[2].size, 51);
    }

    #[tokio::test]
    async fn test_read_range() {
        let file = InnerFile::new(
            "test.mkv".to_string(),
            create_test_chunks(&[100, 100, 100]),
            0x30,
            0,
            RarVersion::Rar4,
        );

        // Range lies entirely inside the second chunk, which holds 1s.
        let data = file
            .read_range(ReadInterval {
                start: 150,
                end: 160,
            })
            .await
            .unwrap();
        assert_eq!(data.len(), 11);
        assert!(data.iter().all(|&b| b == 1));
    }

    #[tokio::test]
    async fn test_read_range_spanning_chunks() {
        let file = InnerFile::new(
            "test.mkv".to_string(),
            create_test_chunks(&[100, 100, 100]),
            0x30,
            0,
            RarVersion::Rar4,
        );

        // Range crosses the boundary between chunk 0 (0s) and chunk 1 (1s).
        let data = file
            .read_range(ReadInterval { start: 90, end: 110 })
            .await
            .unwrap();
        assert_eq!(data.len(), 21);

        assert!(data[..10].iter().all(|&b| b == 0));
        assert!(data[10..].iter().all(|&b| b == 1));
    }
}
995}