1use super::{
12 dicts,
13 format::{
14 CARGO_METADATA_JSON_PATH, OutputDict, RECORD_OPTS_JSON_PATH, RUN_LOG_FILE_NAME,
15 STDERR_DICT_PATH, STDOUT_DICT_PATH, STORE_ZIP_FILE_NAME, TEST_LIST_JSON_PATH,
16 stored_file_options, zstd_file_options,
17 },
18 summary::{
19 OutputEventKind, OutputFileName, OutputKind, RecordOpts, TestEventKindSummary,
20 TestEventSummary, ZipStoreOutput, ZipStoreOutputDescription,
21 },
22};
23use crate::{
24 errors::{RunStoreError, StoreWriterError},
25 output_spec::{LiveSpec, RecordingSpec},
26 record::format::{RERUN_INFO_JSON_PATH, RerunInfo},
27 reporter::events::{
28 ChildExecutionOutputDescription, ChildOutputDescription, ExecuteStatus, ExecutionStatuses,
29 SetupScriptExecuteStatus,
30 },
31 test_output::ChildSingleOutput,
32};
33use camino::{Utf8Path, Utf8PathBuf};
34use countio::Counter;
35use debug_ignore::DebugIgnore;
36use eazip::ArchiveWriter;
37use nextest_metadata::TestListSummary;
38use std::{
39 borrow::Cow,
40 collections::HashSet,
41 fs::File,
42 io::{self, Write},
43};
44
/// Writer for the zstd-compressed run log.
///
/// Two byte counters are layered: the outer `Counter` (around the encoder)
/// counts uncompressed bytes written into the encoder, while the inner
/// `Counter<File>` counts compressed bytes that reach the file.
///
/// `inner` is an `Option` so `finish` can move the encoder out while `Drop`
/// retains a best-effort fallback for the not-finished case.
struct LogEncoder {
    inner: Option<Counter<zstd::stream::Encoder<'static, Counter<File>>>>,
}
60
// Manual `Debug` impl: the zstd encoder is not `Debug`, so only the type name
// is shown. `finish_non_exhaustive` keeps `{:#?}` output well-formed.
impl std::fmt::Debug for LogEncoder {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("LogEncoder").finish_non_exhaustive()
    }
}
66
67impl LogEncoder {
68 fn new(encoder: zstd::stream::Encoder<'static, Counter<File>>) -> Self {
69 Self {
70 inner: Some(Counter::new(encoder)),
71 }
72 }
73
74 fn finish(mut self, entries: u64) -> io::Result<ComponentSizes> {
78 let counter = self.inner.take().expect("encoder already finished");
79 let uncompressed = counter.writer_bytes() as u64;
80 let file_counter = counter.into_inner().finish()?;
81 let compressed = file_counter.writer_bytes() as u64;
82 Ok(ComponentSizes {
83 compressed,
84 uncompressed,
85 entries,
86 })
87 }
88}
89
90impl Write for LogEncoder {
91 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
92 self.inner
93 .as_mut()
94 .expect("encoder already finished")
95 .write(buf)
96 }
97
98 fn flush(&mut self) -> io::Result<()> {
99 self.inner
100 .as_mut()
101 .expect("encoder already finished")
102 .flush()
103 }
104}
105
106impl Drop for LogEncoder {
107 fn drop(&mut self) {
108 if let Some(counter) = self.inner.take() {
109 let _ = counter.into_inner().finish();
114 }
115 }
116}
117
/// Records a test run to disk: a zip store of captured outputs plus a
/// zstd-compressed JSON-lines event log.
#[derive(Debug)]
pub struct RunRecorder {
    // Path to the store zip file; kept for error reporting.
    store_path: Utf8PathBuf,
    store_writer: StoreWriter,
    // Path to the run log; kept for error reporting.
    log_path: Utf8PathBuf,
    log: DebugIgnore<LogEncoder>,
    // Number of events written to the log so far.
    log_entries: u64,
    // Captured outputs larger than this are truncated before storage.
    max_output_size: usize,
}
132
impl RunRecorder {
    /// Creates a recorder rooted at `run_dir`, creating the directory itself,
    /// the store zip file, and the zstd-compressed run log.
    ///
    /// `max_output_size` bounds how much of each captured output is stored;
    /// larger outputs are truncated at write time.
    pub(super) fn new(
        run_dir: Utf8PathBuf,
        max_output_size: bytesize::ByteSize,
    ) -> Result<Self, RunStoreError> {
        std::fs::create_dir_all(&run_dir).map_err(|error| RunStoreError::RunDirCreate {
            run_dir: run_dir.clone(),
            error,
        })?;

        let store_path = run_dir.join(STORE_ZIP_FILE_NAME);
        let store_writer =
            StoreWriter::new(&store_path).map_err(|error| RunStoreError::StoreWrite {
                store_path: store_path.clone(),
                error,
            })?;

        let log_path = run_dir.join(RUN_LOG_FILE_NAME);
        let file = std::fs::OpenOptions::new()
            .create(true)
            .truncate(true)
            .write(true)
            .open(&log_path)
            .map_err(|error| RunStoreError::RunLogCreate {
                path: log_path.clone(),
                error,
            })?;

        // zstd level 3: a reasonable speed/ratio tradeoff for a streaming
        // log. Matches the level used in `compress_with_dict`.
        let encoder = zstd::stream::Encoder::new(Counter::new(file), 3).map_err(|error| {
            RunStoreError::RunLogCreate {
                path: log_path.clone(),
                error,
            }
        })?;
        let log = LogEncoder::new(encoder);

        Ok(Self {
            store_path,
            store_writer,
            log_path,
            log: DebugIgnore(log),
            log_entries: 0,
            // Saturate on 32-bit targets rather than failing the conversion.
            max_output_size: usize::try_from(max_output_size.as_u64()).unwrap_or(usize::MAX),
        })
    }

    /// Writes run-level metadata into the store: the test list, Cargo
    /// metadata, the record options, and the zstd dictionaries used for
    /// output compression (so the store is self-describing on replay).
    pub(crate) fn write_meta(
        &mut self,
        cargo_metadata_json: &str,
        test_list: &TestListSummary,
        opts: &RecordOpts,
    ) -> Result<(), RunStoreError> {
        let test_list_json = serde_json::to_string(test_list)
            .map_err(|error| RunStoreError::TestListSerialize { error })?;

        let opts_json = serde_json::to_string(opts)
            .map_err(|error| RunStoreError::RecordOptionsSerialize { error })?;

        self.write_archive_file(TEST_LIST_JSON_PATH, test_list_json.as_bytes())?;
        self.write_archive_file(CARGO_METADATA_JSON_PATH, cargo_metadata_json.as_bytes())?;
        self.write_archive_file(RECORD_OPTS_JSON_PATH, opts_json.as_bytes())?;

        self.write_archive_file(STDOUT_DICT_PATH, dicts::STDOUT)?;
        self.write_archive_file(STDERR_DICT_PATH, dicts::STDERR)?;

        Ok(())
    }

    /// Serializes `rerun_info` to JSON and writes it into the store.
    pub(crate) fn write_rerun_info(&mut self, rerun_info: &RerunInfo) -> Result<(), RunStoreError> {
        let rerun_info_json = serde_json::to_string(rerun_info)
            .map_err(|error| RunStoreError::RerunInfoSerialize { error })?;

        self.write_archive_file(RERUN_INFO_JSON_PATH, rerun_info_json.as_bytes())?;

        Ok(())
    }

    /// Adds a single file to the store archive, mapping failures to
    /// `RunStoreError::StoreWrite` with this store's path attached.
    fn write_archive_file(&mut self, path: &str, bytes: &[u8]) -> Result<(), RunStoreError> {
        self.store_writer
            .add_file(Utf8PathBuf::from(path), bytes)
            .map_err(|error| RunStoreError::StoreWrite {
                store_path: self.store_path.clone(),
                error,
            })
    }

    /// Converts a live test event to its recorded form (writing any captured
    /// output into the store as a side effect) and appends it to the run log
    /// as a single JSON line.
    pub(crate) fn write_event(
        &mut self,
        event: TestEventSummary<LiveSpec>,
    ) -> Result<(), RunStoreError> {
        let mut cx = SerializeTestEventContext {
            store_writer: &mut self.store_writer,
            max_output_size: self.max_output_size,
        };

        let event = cx
            .convert_event(event)
            .map_err(|error| RunStoreError::StoreWrite {
                store_path: self.store_path.clone(),
                error,
            })?;

        let json = serde_json::to_string(&event)
            .map_err(|error| RunStoreError::TestEventSerialize { error })?;
        self.write_log_impl(json.as_bytes())?;
        self.write_log_impl(b"\n")?;

        self.log_entries += 1;

        Ok(())
    }

    /// Writes raw bytes to the compressed run log.
    fn write_log_impl(&mut self, bytes: &[u8]) -> Result<(), RunStoreError> {
        self.log
            .write_all(bytes)
            .map_err(|error| RunStoreError::RunLogWrite {
                path: self.log_path.clone(),
                error,
            })
    }

    /// Finalizes the run log and then the store, returning size accounting
    /// for both. The log's entry count is the number of events written.
    pub(crate) fn finish(self) -> Result<StoreSizes, RunStoreError> {
        let log_sizes =
            self.log
                .0
                .finish(self.log_entries)
                .map_err(|error| RunStoreError::RunLogFlush {
                    path: self.log_path.clone(),
                    error,
                })?;

        let store_sizes =
            self.store_writer
                .finish()
                .map_err(|error| RunStoreError::StoreWrite {
                    store_path: self.store_path.clone(),
                    error,
                })?;

        Ok(StoreSizes {
            log: log_sizes,
            store: store_sizes,
        })
    }
}
305
/// Writes files into the run's store zip archive, deduplicating by path.
#[derive(Debug)]
pub(crate) struct StoreWriter {
    writer: DebugIgnore<ArchiveWriter<Counter<File>>>,
    // Paths already added; repeated adds of the same path are silently
    // skipped (first write wins).
    added_files: HashSet<Utf8PathBuf>,
    // Running sum of input sizes before compression.
    uncompressed_size: u64,
}
314
315impl StoreWriter {
316 fn new(store_path: &Utf8Path) -> Result<Self, StoreWriterError> {
318 let zip_file = std::fs::OpenOptions::new()
319 .create(true)
320 .truncate(true)
321 .write(true)
322 .open(store_path)
323 .map_err(|error| StoreWriterError::Create { error })?;
324 let writer = ArchiveWriter::new(Counter::new(zip_file));
325
326 Ok(Self {
327 writer: DebugIgnore(writer),
328 added_files: HashSet::new(),
329 uncompressed_size: 0,
330 })
331 }
332
333 fn add_file(&mut self, path: Utf8PathBuf, contents: &[u8]) -> Result<(), StoreWriterError> {
340 if self.added_files.contains(&path) {
341 return Ok(());
342 }
343
344 self.uncompressed_size += contents.len() as u64;
346
347 let dict = OutputDict::for_path(&path);
348 match dict.dict_bytes() {
349 Some(dict_bytes) => {
350 let compressed = compress_with_dict(contents, dict_bytes)
353 .map_err(|error| StoreWriterError::Compress { error })?;
354
355 let options = stored_file_options();
356 self.writer
357 .add_file(path.as_str(), &compressed[..], &options)
358 .map_err(|error| StoreWriterError::Write {
359 path: path.clone(),
360 error,
361 })?;
362 }
363 None => {
364 let options = zstd_file_options();
366 self.writer
367 .add_file(path.as_str(), contents, &options)
368 .map_err(|error| StoreWriterError::Write {
369 path: path.clone(),
370 error,
371 })?;
372 }
373 }
374
375 self.added_files.insert(path);
376
377 Ok(())
378 }
379
380 fn finish(self) -> Result<ComponentSizes, StoreWriterError> {
384 let entries = self.added_files.len() as u64;
385 let mut counter = self
386 .writer
387 .0
388 .finish()
389 .map_err(|error| StoreWriterError::Finish { error })?;
390
391 counter
392 .flush()
393 .map_err(|error| StoreWriterError::Flush { error })?;
394
395 Ok(ComponentSizes {
396 compressed: counter.writer_bytes() as u64,
397 uncompressed: self.uncompressed_size,
398 entries,
399 })
400 }
401}
402
/// Size accounting for one component of a recorded run (the log or the
/// store).
#[derive(Clone, Copy, Debug, Default)]
pub struct ComponentSizes {
    /// Bytes written to disk after compression.
    pub compressed: u64,
    /// Bytes before compression.
    pub uncompressed: u64,
    /// Number of entries (log lines or archive files).
    pub entries: u64,
}
413
/// Combined size accounting for a recorded run: the event log plus the zip
/// store.
#[derive(Clone, Copy, Debug, Default)]
pub struct StoreSizes {
    /// Sizes for the run log component.
    pub log: ComponentSizes,
    /// Sizes for the zip store component.
    pub store: ComponentSizes,
}
422
423impl StoreSizes {
424 pub fn total_compressed(&self) -> u64 {
426 self.log.compressed + self.store.compressed
427 }
428
429 pub fn total_uncompressed(&self) -> u64 {
431 self.log.uncompressed + self.store.uncompressed
432 }
433}
434
/// Compresses `data` with zstd using the given prepared dictionary.
///
/// Level 3 matches the level used for the run log encoder in
/// `RunRecorder::new`, keeping compression behavior consistent across the
/// store.
fn compress_with_dict(data: &[u8], dict_bytes: &[u8]) -> io::Result<Vec<u8>> {
    let dict = zstd::dict::EncoderDictionary::copy(dict_bytes, 3);
    let mut encoder = zstd::stream::Encoder::with_prepared_dictionary(Vec::new(), &dict)?;
    encoder.write_all(data)?;
    encoder.finish()
}
444
/// Context for converting live test events into their recorded form, writing
/// captured child output into the store as a side effect.
struct SerializeTestEventContext<'a> {
    store_writer: &'a mut StoreWriter,
    // Captured outputs larger than this are truncated before being stored.
    max_output_size: usize,
}
453
454impl SerializeTestEventContext<'_> {
455 fn convert_event(
457 &mut self,
458 event: TestEventSummary<LiveSpec>,
459 ) -> Result<TestEventSummary<RecordingSpec>, StoreWriterError> {
460 Ok(TestEventSummary {
461 timestamp: event.timestamp,
462 elapsed: event.elapsed,
463 kind: self.convert_event_kind(event.kind)?,
464 })
465 }
466
467 fn convert_event_kind(
468 &mut self,
469 kind: TestEventKindSummary<LiveSpec>,
470 ) -> Result<TestEventKindSummary<RecordingSpec>, StoreWriterError> {
471 match kind {
472 TestEventKindSummary::Core(core) => Ok(TestEventKindSummary::Core(core)),
473 TestEventKindSummary::Output(output) => Ok(TestEventKindSummary::Output(
474 self.convert_output_event(output)?,
475 )),
476 }
477 }
478
479 fn convert_output_event(
480 &mut self,
481 event: OutputEventKind<LiveSpec>,
482 ) -> Result<OutputEventKind<RecordingSpec>, StoreWriterError> {
483 match event {
484 OutputEventKind::SetupScriptFinished {
485 stress_index,
486 index,
487 total,
488 script_id,
489 program,
490 args,
491 no_capture,
492 run_status,
493 } => {
494 let run_status = self.convert_setup_script_status(&run_status)?;
495 Ok(OutputEventKind::SetupScriptFinished {
496 stress_index,
497 index,
498 total,
499 script_id,
500 program,
501 args,
502 no_capture,
503 run_status,
504 })
505 }
506 OutputEventKind::TestAttemptFailedWillRetry {
507 stress_index,
508 test_instance,
509 run_status,
510 delay_before_next_attempt,
511 failure_output,
512 running,
513 } => {
514 let run_status = self.convert_execute_status(run_status)?;
515 Ok(OutputEventKind::TestAttemptFailedWillRetry {
516 stress_index,
517 test_instance,
518 run_status,
519 delay_before_next_attempt,
520 failure_output,
521 running,
522 })
523 }
524 OutputEventKind::TestFinished {
525 stress_index,
526 test_instance,
527 success_output,
528 failure_output,
529 junit_store_success_output,
530 junit_store_failure_output,
531 run_statuses,
532 current_stats,
533 running,
534 } => {
535 let run_statuses = self.convert_execution_statuses(run_statuses)?;
536 Ok(OutputEventKind::TestFinished {
537 stress_index,
538 test_instance,
539 success_output,
540 failure_output,
541 junit_store_success_output,
542 junit_store_failure_output,
543 run_statuses,
544 current_stats,
545 running,
546 })
547 }
548 }
549 }
550
551 fn convert_setup_script_status(
552 &mut self,
553 status: &SetupScriptExecuteStatus<LiveSpec>,
554 ) -> Result<SetupScriptExecuteStatus<RecordingSpec>, StoreWriterError> {
555 Ok(SetupScriptExecuteStatus {
556 output: self.convert_child_execution_output(&status.output)?,
557 result: status.result.clone(),
558 start_time: status.start_time,
559 time_taken: status.time_taken,
560 is_slow: status.is_slow,
561 env_map: status.env_map.clone(),
562 error_summary: status.error_summary.clone(),
563 })
564 }
565
566 fn convert_execution_statuses(
567 &mut self,
568 statuses: ExecutionStatuses<LiveSpec>,
569 ) -> Result<ExecutionStatuses<RecordingSpec>, StoreWriterError> {
570 let statuses = statuses
571 .into_iter()
572 .map(|status| self.convert_execute_status(status))
573 .collect::<Result<Vec<_>, _>>()?;
574 Ok(ExecutionStatuses::new(statuses))
575 }
576
577 fn convert_execute_status(
578 &mut self,
579 status: ExecuteStatus<LiveSpec>,
580 ) -> Result<ExecuteStatus<RecordingSpec>, StoreWriterError> {
581 let output = self.convert_child_execution_output(&status.output)?;
582
583 Ok(ExecuteStatus {
584 retry_data: status.retry_data,
585 output,
586 result: status.result,
587 start_time: status.start_time,
588 time_taken: status.time_taken,
589 is_slow: status.is_slow,
590 delay_before_start: status.delay_before_start,
591 error_summary: status.error_summary,
592 output_error_slice: status.output_error_slice,
593 })
594 }
595
596 fn convert_child_execution_output(
597 &mut self,
598 output: &ChildExecutionOutputDescription<LiveSpec>,
599 ) -> Result<ChildExecutionOutputDescription<RecordingSpec>, StoreWriterError> {
600 match output {
601 ChildExecutionOutputDescription::Output {
602 result,
603 output,
604 errors,
605 } => {
606 let output = self.convert_child_output(output)?;
607 Ok(ChildExecutionOutputDescription::Output {
608 result: result.clone(),
609 output,
610 errors: errors.clone(),
611 })
612 }
613 ChildExecutionOutputDescription::StartError(err) => {
614 Ok(ChildExecutionOutputDescription::StartError(err.clone()))
615 }
616 }
617 }
618
619 fn convert_child_output(
620 &mut self,
621 output: &ChildOutputDescription,
622 ) -> Result<ZipStoreOutputDescription, StoreWriterError> {
623 match output {
624 ChildOutputDescription::Split { stdout, stderr } => {
625 Ok(ZipStoreOutputDescription::Split {
626 stdout: stdout
628 .as_ref()
629 .map(|o| self.write_single_output(Some(o), OutputKind::Stdout))
630 .transpose()?,
631 stderr: stderr
632 .as_ref()
633 .map(|o| self.write_single_output(Some(o), OutputKind::Stderr))
634 .transpose()?,
635 })
636 }
637 ChildOutputDescription::Combined { output } => {
638 Ok(ZipStoreOutputDescription::Combined {
639 output: self.write_single_output(Some(output), OutputKind::Combined)?,
640 })
641 }
642 ChildOutputDescription::NotLoaded => {
643 unreachable!(
644 "NotLoaded output should never be present during recording \
645 (NotLoaded is only produced during replay conversion)"
646 );
647 }
648 }
649 }
650
651 fn write_single_output(
656 &mut self,
657 output: Option<&ChildSingleOutput>,
658 kind: OutputKind,
659 ) -> Result<ZipStoreOutput, StoreWriterError> {
660 let Some(output) = output else {
661 return Ok(ZipStoreOutput::Empty);
662 };
663
664 if output.buf.is_empty() {
665 return Ok(ZipStoreOutput::Empty);
666 }
667
668 let original_len = output.buf.len();
669 let (data, truncated): (Cow<'_, [u8]>, bool) = if original_len <= self.max_output_size {
670 (Cow::Borrowed(&output.buf), false)
671 } else {
672 (truncate_output(&output.buf, self.max_output_size), true)
673 };
674
675 let file_name = OutputFileName::from_content(&data, kind);
676 let file_path = Utf8PathBuf::from(format!("out/{file_name}"));
677
678 self.store_writer.add_file(file_path, &data)?;
679
680 if truncated {
681 Ok(ZipStoreOutput::Truncated {
682 file_name,
683 original_size: original_len as u64,
684 })
685 } else {
686 Ok(ZipStoreOutput::Full { file_name })
687 }
688 }
689}
690
691fn truncate_output(buf: &[u8], max_size: usize) -> Cow<'_, [u8]> {
698 if buf.len() <= max_size {
699 return Cow::Borrowed(buf);
700 }
701
702 let truncated_bytes = buf.len() - max_size;
703 let marker = format!("\n\n... [truncated {truncated_bytes} bytes] ...\n\n");
704 let marker_bytes = marker.as_bytes();
705
706 let content_space = max_size.saturating_sub(marker_bytes.len());
707 let head_size = content_space / 2;
708 let tail_size = content_space - head_size;
709
710 let mut result = Vec::with_capacity(max_size);
711 result.extend_from_slice(&buf[..head_size]);
712 result.extend_from_slice(marker_bytes);
713 result.extend_from_slice(&buf[buf.len() - tail_size..]);
714
715 Cow::Owned(result)
716}
717
#[cfg(test)]
mod tests {
    use super::*;
    use crate::record::dicts;

    // Input shorter than max_size: returned unchanged as a borrow.
    #[test]
    fn test_truncate_output_no_truncation_needed() {
        let input = b"hello world";
        let result = truncate_output(input, 100);
        assert_eq!(&*result, input);
        assert!(matches!(result, Cow::Borrowed(_)), "should be borrowed");
    }

    // Input exactly max_size: still no truncation.
    #[test]
    fn test_truncate_output_exact_size() {
        let input = b"exactly100bytes";
        let result = truncate_output(input, input.len());
        assert_eq!(&*result, input);
        assert!(matches!(result, Cow::Borrowed(_)), "should be borrowed");
    }

    // Truncation keeps the head and tail with a marker in between, and the
    // result stays within max_size.
    #[test]
    fn test_truncate_output_basic() {
        let input: Vec<u8> = (0..200).collect();
        let max_size = 100;

        let result = truncate_output(&input, max_size);

        assert!(matches!(result, Cow::Owned(_)), "should be owned");

        assert!(
            result.len() <= max_size,
            "result len {} should be <= max_size {}",
            result.len(),
            max_size
        );

        let result_str = String::from_utf8_lossy(&result);
        assert!(
            result_str.contains("[truncated"),
            "should contain truncation marker: {result_str:?}"
        );
        assert!(
            result_str.contains("bytes]"),
            "should contain 'bytes]': {result_str:?}"
        );

        assert!(
            result.starts_with(&[0, 1, 2]),
            "should start with beginning of input"
        );

        assert!(
            result.ends_with(&[197, 198, 199]),
            "should end with end of input"
        );
    }

    // Recognizable head/tail content survives truncation of a long middle.
    #[test]
    fn test_truncate_output_preserves_head_and_tail() {
        let head = b"HEAD_CONTENT_";
        let middle = vec![b'x'; 1000];
        let tail = b"_TAIL_CONTENT";

        let mut input = Vec::new();
        input.extend_from_slice(head);
        input.extend_from_slice(&middle);
        input.extend_from_slice(tail);

        let max_size = 200;
        let result = truncate_output(&input, max_size);

        assert!(result.len() <= max_size);

        assert!(
            result.starts_with(b"HEAD"),
            "should preserve head: {:?}",
            String::from_utf8_lossy(&result[..20])
        );

        assert!(
            result.ends_with(b"CONTENT"),
            "should preserve tail: {:?}",
            String::from_utf8_lossy(&result[result.len() - 20..])
        );
    }

    // The marker reports the overage relative to max_size: 1000 - 100 = 900.
    #[test]
    fn test_truncate_output_marker_shows_correct_count() {
        let input: Vec<u8> = vec![b'a'; 1000];
        let max_size = 100;

        let result = truncate_output(&input, max_size);
        let result_str = String::from_utf8_lossy(&result);

        assert!(
            result_str.contains("[truncated 900 bytes]"),
            "should show correct truncation count: {result_str:?}"
        );
    }

    // Sanity check on large (multi-MiB) inputs.
    #[test]
    fn test_truncate_output_large_input() {
        let input: Vec<u8> = vec![b'x'; 20 * 1024 * 1024];
        let max_size = 10 * 1024 * 1024;
        let result = truncate_output(&input, max_size);

        assert!(
            result.len() <= max_size,
            "result {} should be <= max_size {}",
            result.len(),
            max_size
        );

        let result_str = String::from_utf8_lossy(&result);
        assert!(
            result_str.contains("[truncated"),
            "should contain truncation marker"
        );
    }

    // When max_size is smaller than the marker itself, the result is the
    // marker alone (and may exceed max_size) — documented behavior.
    #[test]
    fn test_truncate_output_max_size_smaller_than_marker() {
        let input: Vec<u8> = vec![b'x'; 100];
        let max_size = 10;
        let result = truncate_output(&input, max_size);

        let result_str = String::from_utf8_lossy(&result);
        assert!(
            result_str.contains("[truncated"),
            "should still contain truncation marker: {result_str:?}"
        );

        assert!(
            result_str.starts_with("\n\n..."),
            "should start with marker prefix"
        );
        assert!(
            result_str.ends_with("...\n\n"),
            "should end with marker suffix"
        );
    }

    // Degenerate max_size of zero: everything counts as truncated.
    #[test]
    fn test_truncate_output_max_size_zero() {
        let input: Vec<u8> = vec![b'x'; 50];
        let max_size = 0;

        let result = truncate_output(&input, max_size);

        let result_str = String::from_utf8_lossy(&result);
        assert!(
            result_str.contains("[truncated 50 bytes]"),
            "should show correct truncation count: {result_str:?}"
        );
    }

    // Round-trip: dictionary-compressed output must decompress to the input
    // using the matching decoder dictionary.
    #[test]
    fn test_compress_with_dict_stdout() {
        let test_output = b"running 1 test\ntest tests::my_test ... ok\n\ntest result: ok. 1 passed; 0 failed; 0 ignored\n";

        let compressed =
            compress_with_dict(test_output, dicts::STDOUT).expect("compression failed");

        let dict = zstd::dict::DecoderDictionary::copy(dicts::STDOUT);
        let mut decoder = zstd::stream::Decoder::with_prepared_dictionary(&compressed[..], &dict)
            .expect("decoder creation failed");
        let mut decompressed = Vec::new();
        io::Read::read_to_end(&mut decoder, &mut decompressed).expect("decompression failed");

        assert_eq!(decompressed, test_output, "round-trip should preserve data");
    }
}