lepton_jpeg 0.5.8

Rust port of the Lepton lossless JPEG compression library
//! Implements a multiplexer that reads and writes blocks to a stream from multiple partitions. Each
//! partition can run on its own thread to allow for increased parallelism when processing large images.
//!
//! The write implementation identifies the blocks by partition_id and tries to write in 64K blocks. The file
//! ends up with an interleaved stream of blocks from each partition.
//!
//! The read implementation reads the blocks from the file and sends them to the appropriate worker thread
//! for the partition.
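//!
//! Wire format of each block (see the read and write paths below): a marker byte whose
//! low nibble is the partition_id (limiting a stream to 16 partitions). If the high
//! nibble is zero, the marker is followed by (length - 1) as a little-endian u16;
//! otherwise bits 4-5 hold a flags value of 1, 2 or 3 selecting a special block size
//! of 1024 << (2 * flags) bytes (4096, 16384 or 65536) with no explicit length field.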

use std::cmp;
use std::collections::VecDeque;
use std::io::{Cursor, Read, Write};
use std::mem::swap;
use std::sync::mpsc::{Receiver, Sender, TryRecvError, channel};
use std::sync::{Arc, Mutex};

use byteorder::WriteBytesExt;

use super::simple_threadpool::LeptonThreadPool;

use crate::lepton_error::{AddContext, ExitCode, Result};
use crate::{LeptonError, Metrics};
use crate::{helpers::*, lepton_error::err_exit_code, structs::partial_buffer::PartialBuffer};

/// The message that is sent between the threads
enum Message {
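    /// the partition with this id has finished writing all of its data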
    Eof(usize),
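    /// a buffered block of output from the given partition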
    WriteBlock(usize, Vec<u8>),
}

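/// Write half handed to each partition's processor: buffers output locally and sends
/// it as WriteBlock messages to the single thread that owns the underlying stream.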
pub struct MultiplexWriter {
    partition_id: usize,
    sender: Sender<Message>,
    buffer: Vec<u8>,
}

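/// how much data we buffer before sending a WriteBlock message (also the largest
/// block the wire format can carry, since explicit lengths are stored as a u16 minus one)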
const WRITE_BUFFER_SIZE: usize = 65536;

impl Write for MultiplexWriter {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let mut copy_start = 0;
        while copy_start < buf.len() {
            let amount_to_copy = cmp::min(
                WRITE_BUFFER_SIZE - self.buffer.len(),
                buf.len() - copy_start,
            );
            self.buffer
                .extend_from_slice(&buf[copy_start..copy_start + amount_to_copy]);

            if self.buffer.len() == WRITE_BUFFER_SIZE {
                self.flush()?;
            }

            copy_start += amount_to_copy;
        }

        Ok(buf.len())
    }

    fn flush(&mut self) -> std::io::Result<()> {
        if !self.buffer.is_empty() {
            let mut new_buffer = Vec::with_capacity(WRITE_BUFFER_SIZE);
            swap(&mut new_buffer, &mut self.buffer);

            self.sender
                .send(Message::WriteBlock(self.partition_id, new_buffer))
                .unwrap();
        }
        Ok(())
    }
}

/// Collects the thread results and errors and returns them as a vector
struct ThreadResults<RESULT> {
    results: Vec<Receiver<Result<RESULT>>>,
}

impl<RESULT> ThreadResults<RESULT> {
    fn new() -> Self {
        ThreadResults {
            results: Vec::new(),
        }
    }
    /// creates a closure that wraps the passed-in closure, catches any panics,
    /// and sends the returned result to a channel for receive_results to collect.
    fn send_results<T: FnOnce() -> Result<RESULT> + Send + 'static>(
        &mut self,
        f: T,
    ) -> impl FnOnce() + use<RESULT, T> {
        let (tx, rx) = channel();

        self.results.push(rx);

        move || {
            let r = catch_unwind_result(f);
            let _ = tx.send(r);
        }
    }

    /// extracts the results from all the receivers and returns them as a vector, or returns an
    /// error if any of the threads errored out.
    fn receive_results(&mut self) -> Result<Vec<RESULT>> {
        let mut final_results = Vec::new();

        let mut error_found = None;
        for r in self.results.drain(..) {
            match r.recv() {
                Ok(Ok(r)) => final_results.push(r),
                Ok(Err(e)) => {
                    error_found = Some(e);
                }
                Err(e) => {
                    // prefer real errors over broken channel errors
                    if error_found.is_none() {
                        error_found = Some(e.into());
                    }
                }
            }
        }

        if let Some(error) = error_found {
            Err(error)
        } else {
            Ok(final_results)
        }
    }
}

/// Given an arbitrary writer, this function will launch the given number of partitions, call the processor function
/// on each of them, and collect the output written by each partition to the writer in blocks identified by the partition_id.
///
/// This output stream can be processed by multiplex_read to get the data back, using the same number of partitions.
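///
/// A minimal sketch of the writer side (mirroring the end-to-end test at the bottom
/// of this file; `pool` is assumed to be some available LeptonThreadPool):
///
/// ```ignore
/// let mut output = Vec::new();
/// let results = multiplex_write(&mut output, 4, 4, &pool, |writer, partition_id| {
///     // everything written here is chunked and interleaved into `output`
///     writer.write_all(&[partition_id as u8; 100])?;
///     Ok(partition_id)
/// })?;
/// assert_eq!(results, vec![0, 1, 2, 3]);
/// ```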
pub fn multiplex_write<WRITE, FN, RESULT>(
    writer: &mut WRITE,
    num_partitions: usize,
    max_processor_threads: usize,
    thread_pool: &dyn LeptonThreadPool,
    processor: FN,
) -> Result<Vec<RESULT>>
where
    WRITE: Write,
    FN: Fn(&mut MultiplexWriter, usize) -> Result<RESULT> + Send + Sync + 'static,
    RESULT: Send + 'static,
{
    let mut thread_results = ThreadResults::new();

    // receives packets from threads as they are generated
    let mut packet_receivers = Vec::new();

    let arc_processor = Arc::new(Box::new(processor));

    let mut work: VecDeque<Box<dyn FnOnce() + Send>> = VecDeque::new();

    for partition_id in 0..num_partitions {
        let (tx, rx) = channel();

        let mut thread_writer = MultiplexWriter {
            partition_id,
            sender: tx,
            buffer: Vec::with_capacity(WRITE_BUFFER_SIZE),
        };

        let processor_clone = arc_processor.clone();

        let f = Box::new(thread_results.send_results(move || {
            let r = processor_clone(&mut thread_writer, partition_id)?;

            thread_writer.flush().context()?;

            thread_writer
                .sender
                .send(Message::Eof(partition_id))
                .context()?;
            Ok(r)
        }));
        work.push_back(f);

        packet_receivers.push(rx);
    }

    drop(arc_processor);

    if thread_pool.max_parallelism() > 1 {
        spawn_processor_threads(thread_pool, max_processor_threads, work);
    } else {
        // single threaded, just run all the work inline, which will
        // fill up the receiver queues before we write the image below
        for f in work.drain(..) {
            f();
        }
    }

    // now that all the threads are running, carousel through their receivers and
    // copy the blocks to the writer so that they get written in a deterministic order.
    let mut current_thread_writer = 0;
    loop {
        match packet_receivers[current_thread_writer].recv() {
            Ok(Message::WriteBlock(partition_id, b)) => {
                // block length and partition header
                let tid = partition_id as u8;
                let l = b.len() - 1;
                if l == 4095 || l == 16383 || l == 65535 {
                    // length is one of the special power-of-two block sizes, so it can be
                    // encoded entirely in the upper nibble of the marker byte: flags 1, 2
                    // and 3 decode to 1024 << (2 * flags) = 4096, 16384 or 65536 bytes
                    writer.write_u8(tid | ((l.ilog2() as u8 >> 1) - 4) << 4)?;
                } else {
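                    // otherwise write the marker byte (high nibble zero) followed by
                    // (length - 1) as a little-endian u16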
                    writer.write_u8(tid)?;
                    writer.write_u8((l & 0xff) as u8)?;
                    writer.write_u8(((l >> 8) & 0xff) as u8)?;
                }
                // block itself
                writer.write_all(&b[..])?;

                // go to next thread
                current_thread_writer = (current_thread_writer + 1) % packet_receivers.len();
            }
            Ok(Message::Eof(_)) | Err(_) => {
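                // either the partition signalled that it is done, or its thread died and
                // dropped the sender; in both cases stop polling this receiver (a real
                // error will surface from receive_results below)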
                packet_receivers.remove(current_thread_writer);
                if packet_receivers.is_empty() {
                    break;
                }

                current_thread_writer = current_thread_writer % packet_receivers.len();
            }
        }
    }

    thread_results.receive_results()
}

/// Used by the processor thread to read data in a blocking way.
/// The partition_id is used only to assert that we receive only
/// the data we are expecting.
pub struct MultiplexReader {
    /// the id of the partition whose multiplexed stream we are processing
    partition_id: usize,

    /// the receiver part of the channel to get more buffers
    receiver: Receiver<Message>,

    /// what we are reading. When this returns zero, we try to
    /// refill the buffer if we haven't reached the end of the stream
    current_buffer: Cursor<Vec<u8>>,

    /// once we get told we are at the end of the stream, we just
    /// always return 0 bytes
    end_of_file: bool,
}

impl Read for MultiplexReader {
    /// fast path for reads. If we run out of data, take the slow path
    #[inline(always)]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        let amount_read = self.current_buffer.read(buf)?;
        if amount_read > 0 {
            return Ok(amount_read);
        }

        self.read_slow(buf)
    }
}

impl MultiplexReader {
    /// slow path for reads, try to get a new buffer or
    /// return zero if at the end of the stream
    #[cold]
    #[inline(never)]
    fn read_slow(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        while !self.end_of_file {
            let amount_read = self.current_buffer.read(buf)?;
            if amount_read > 0 {
                return Ok(amount_read);
            }

            match self.receiver.recv() {
                Ok(r) => match r {
                    Message::Eof(_tid) => {
                        self.end_of_file = true;
                    }
                    Message::WriteBlock(tid, block) => {
                        debug_assert_eq!(
                            tid, self.partition_id,
                            "incoming partition must match the partition being processed"
                        );
                        self.current_buffer = Cursor::new(block);
                    }
                },
                Err(e) => {
                    return Err(std::io::Error::new(std::io::ErrorKind::Other, e));
                }
            }
        }

        // once we reach the end of the stream, always return zero bytes
        Ok(0)
    }
}

/// Reads data in multiplexed format and sends it to the appropriate processor, each
/// running on its own thread. The processor function is called with the partition_id and
/// a blocking reader that it can use to read its own data.
///
/// Once the multiplexed data is finished reading, we break the channel to the worker threads,
/// causing any processor still trying to read from the channel to error out and exit. After all
/// the readers have exited, we collect the results/errors from all the processors and return a vector
/// of the results back to the caller.
pub struct MultiplexReaderState<RESULT> {
    sender_channels: Vec<Sender<Message>>,
    receiver_channels: Vec<Receiver<MultiplexReadResult<RESULT>>>,
    retention_bytes: usize,
    current_state: State,
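    /// work items deferred until retrieve_result when no worker threads were spawned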
    single_thread_work: Option<VecDeque<Box<dyn FnOnce() + Send>>>,
    merged_metrics: Metrics,
}

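/// Parser state for the incoming multiplexed stream: at the start of a block header,
/// waiting for the two explicit length bytes, or inside a block body of known length.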
enum State {
    StartBlock,
    U16Length(u8),
    Block(u8, usize),
}

pub enum MultiplexReadResult<RESULT> {
    Result(RESULT),
    Error(LeptonError),
    Complete(Metrics),
}

/// Given a number of partitions, this function will create a multiplexed reader state that
/// can be used to process incoming multiplexed data. The processor function is called
/// for each partition with the partition_id and a blocking reader that it can use to read its own data.
///
/// Each processor is also given a sender channel that it can use to send back results or errors.
/// Partial results can be sent back by sending multiple results before the end of file is reached.
///
/// The state object returned can be used to process incoming data and retrieve results/errors
/// from the threads.
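///
/// A minimal sketch of driving the reader state machine (see the tests at the bottom
/// of this file for a full round trip; `pool`, `data` and `processor` are assumed):
///
/// ```ignore
/// let mut state = multiplex_read(4, 4, &pool, 0, processor);
/// let mut extra = Vec::new();
/// // feed in the multiplexed bytes as they arrive, in chunks of any size
/// state.process_buffer(&mut PartialBuffer::new(&data, &mut extra))?;
/// // then block until every partition has reported its result
/// while let Some(result) = state.retrieve_result(true)? {
///     println!("partition finished with {result:?}");
/// }
/// ```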
pub fn multiplex_read<FN, RESULT>(
    num_partitions: usize,
    max_processor_threads: usize,
    thread_pool: &dyn LeptonThreadPool,
    retention_bytes: usize,
    processor: FN,
) -> MultiplexReaderState<RESULT>
where
    FN: Fn(usize, &mut MultiplexReader, &Sender<MultiplexReadResult<RESULT>>) -> Result<()>
        + Send
        + Sync
        + 'static,
    RESULT: Send + 'static,
{
    let arc_processor = Arc::new(Box::new(processor));

    let mut channel_to_sender = Vec::new();

    // collect the worker threads in a queue so we can spawn them
    let mut work = VecDeque::new();
    let mut result_receiver = Vec::new();

    for partition_id in 0..num_partitions {
        let (tx, rx) = channel::<Message>();
        channel_to_sender.push(tx);

        let cloned_processor = arc_processor.clone();

        let (result_tx, result_rx) = channel::<MultiplexReadResult<RESULT>>();
        result_receiver.push(result_rx);

        let f: Box<dyn FnOnce() + Send> = Box::new(move || {
            // get the appropriate receiver so we can read out data from it
            let mut proc_reader = MultiplexReader {
                partition_id,
                current_buffer: Cursor::new(Vec::new()),
                receiver: rx,
                end_of_file: false,
            };

            if let Err(e) =
                catch_unwind_result(|| cloned_processor(partition_id, &mut proc_reader, &result_tx))
            {
                let _ = result_tx.send(MultiplexReadResult::Error(e));
            }
        });

        work.push_back(f);
    }

    let single_thread_work = if thread_pool.max_parallelism() > 1 {
        spawn_processor_threads(thread_pool, max_processor_threads, work);
        None
    } else {
        Some(work)
    };

    MultiplexReaderState {
        sender_channels: channel_to_sender,
        receiver_channels: result_receiver,
        current_state: State::StartBlock,
        retention_bytes,
        single_thread_work,
        merged_metrics: Metrics::default(),
    }
}

/// spawns the processor threads to handle the work items in the queue. There may be fewer workers
/// than work items.
fn spawn_processor_threads(
    thread_pool: &dyn LeptonThreadPool,
    max_processor_threads: usize,
    work: VecDeque<Box<dyn FnOnce() + Send>>,
) {
    let work_threads = work.len().min(max_processor_threads);
    let shared_queue = Arc::new(Mutex::new(work));

    // spawn the worker threads to process all the items
    // (there may be fewer processor threads than partitions in the image)
    for _i in 0..work_threads {
        let q = shared_queue.clone();

        thread_pool.run(Box::new(move || {
            loop {
                // pop inside a temporary so the lock is released before
                // the (potentially long-running) work item executes
                let w = q.lock().unwrap().pop_front();

                if let Some(f) = w {
                    f();
                } else {
                    break;
                }
            }
        }));
    }
}

impl<RESULT> MultiplexReaderState<RESULT> {
    /// process as much incoming data as we can and send it to the appropriate thread
    pub fn process_buffer(&mut self, source: &mut PartialBuffer<'_>) -> Result<()> {
        while source.continue_processing() {
            match self.current_state {
                State::StartBlock => {
                    if let Some(a) = source.take_n::<1>(self.retention_bytes) {
                        let thread_marker = a[0];

                        let partition_id = thread_marker & 0xf;

                        if usize::from(partition_id) >= self.sender_channels.len() {
                            return err_exit_code(
                                ExitCode::BadLeptonFile,
                                format!("invalid partition_id {0}", partition_id),
                            );
                        }

                        if thread_marker < 16 {
                            self.current_state = State::U16Length(partition_id);
                        } else {
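                            // the high nibble holds flags selecting one of the
                            // special power-of-two block sizes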
                            let flags = (thread_marker >> 4) & 3;
                            self.current_state = State::Block(partition_id, 1024 << (2 * flags));
                        }
                    } else {
                        break;
                    }
                }
                State::U16Length(partition_id) => {
                    if let Some(a) = source.take_n::<2>(self.retention_bytes) {
                        let b0 = usize::from(a[0]);
                        let b1 = usize::from(a[1]);

                        // the explicit length is stored minus one, so the full range
                        // 1..=65536 fits in a u16
                        self.current_state = State::Block(partition_id, (b1 << 8) + b0 + 1);
                    } else {
                        break;
                    }
                }
                State::Block(partition_id, data_length) => {
                    if let Some(a) = source.take(data_length, self.retention_bytes) {
                        // ignore send errors from a dead channel since we will collect the
                        // error later; we don't want to interrupt the other partitions that
                        // are still processing, so only the thread that errored out reports it.
                        let tid = usize::from(partition_id);
                        let _ = self.sender_channels[tid].send(Message::WriteBlock(tid, a));
                        self.current_state = State::StartBlock;
                    } else {
                        break;
                    }
                }
            }
        }

        Ok(())
    }

    /// retrieves the next available result from the threads. If complete is true, this function
    /// will block until all threads are complete and return the first result or error it finds.
    /// If complete is false, this function will return immediately if no results are available.
    pub fn retrieve_result(&mut self, complete: bool) -> Result<Option<RESULT>> {
        if let Some(value) =
            Self::try_get_result(&mut self.receiver_channels, &mut self.merged_metrics)?
        {
            return Ok(Some(value));
        }

        if complete {
            // if we are complete, send eof to all threads
            for partition_id in 0..self.sender_channels.len() {
                // send eof to all threads (ignore results since they might be dead already)
                let _ = self.sender_channels[partition_id].send(Message::Eof(partition_id));
            }
            self.sender_channels.clear();

            // if we are running single threaded, now do all the work since we've buffered up everything
            // and broken the sender channels, so there's no danger of deadlock
            if let Some(single_thread_work) = &mut self.single_thread_work {
                while let Some(f) = single_thread_work.pop_front() {
                    f();

                    if let Some(value) =
                        Self::try_get_result(&mut self.receiver_channels, &mut self.merged_metrics)?
                    {
                        return Ok(Some(value));
                    }
                }
            }

            // if we are complete, then walk through all the channels to get the first result by blocking
            while let Some(r) = self.receiver_channels.get_mut(0) {
                match r.recv() {
                    Ok(v) => match v {
                        MultiplexReadResult::Result(v) => return Ok(Some(v)),
                        MultiplexReadResult::Error(e) => return Err(e),
                        MultiplexReadResult::Complete(m) => {
                            // finished, so remove it and try the next one
                            self.merged_metrics.merge_from(m);
                            self.receiver_channels.remove(0);
                        }
                    },
                    Err(e) => {
                        // channel is closed unexpectedly, clear out all channels and return error
                        self.receiver_channels.clear();
                        return Err(e.into());
                    }
                }
            }
        }
        // nothing left to read
        Ok(None)
    }

    /// tries to get a result from the receiver channels without blocking
    fn try_get_result(
        receiver_channels: &mut Vec<Receiver<MultiplexReadResult<RESULT>>>,
        metrics: &mut Metrics,
    ) -> Result<Option<RESULT>> {
        // if we aren't complete, use non-blocking to try to get some results
        // from the first thread
        while let Some(r) = receiver_channels.get_mut(0) {
            match r.try_recv() {
                Ok(v) => match v {
                    MultiplexReadResult::Result(v) => return Ok(Some(v)),
                    MultiplexReadResult::Error(e) => return Err(e),
                    MultiplexReadResult::Complete(m) => {
                        // finished, so remove it and try the next one
                        metrics.merge_from(m);
                        receiver_channels.remove(0);
                    }
                },
                Err(TryRecvError::Disconnected) => {
                    // the worker hung up without sending Complete; treat it as an error
                    return Err(LeptonError::new(
                        ExitCode::AssertionFailure,
                        "multiplexed reader channel disconnected unexpectedly",
                    ));
                }
                Err(TryRecvError::Empty) => {
                    // no result yet, exit loop without result
                    break;
                }
            }
        }
        Ok(None)
    }

    /// takes the merged metrics from all the threads
    pub fn take_metrics(&mut self) -> Metrics {
        std::mem::take(&mut self.merged_metrics)
    }
}

#[cfg(test)]
mod tests {
    use std::time::Duration;

    use byteorder::ReadBytesExt;

    use super::*;
    use crate::lepton_error::{ExitCode, LeptonError};
    use crate::{DEFAULT_THREAD_POOL, SingleThreadPool};

    /// simple end to end test that writes the partition id and reads it back
    #[test]
    fn test_multiplex_end_to_end() {
        let mut output = Vec::new();

        let w = multiplex_write(
            &mut output,
            10,
            10,
            &DEFAULT_THREAD_POOL,
            |writer, partition_id| -> Result<usize> {
                for i in partition_id as u32..10000 {
                    writer.write_u32::<byteorder::LittleEndian>(i)?;
                }

                Ok(partition_id)
            },
        )
        .unwrap();

        assert_eq!(w[..], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);

        for max_processor_threads in 1..=10 {
            test_read(&output, &w, max_processor_threads);
        }
    }

    fn test_read(output: &[u8], w: &[usize], max_processor_threads: usize) {
        let mut extra = Vec::new();
        let single = SingleThreadPool::default();

        let mut multiplex_state = multiplex_read(
            10,
            max_processor_threads,
            if max_processor_threads == 1 {
                // for a single thread we shouldn't spawn any threads
                &single
            } else {
                &DEFAULT_THREAD_POOL
            },
            0,
            |partition_id, reader, result_tx: &Sender<MultiplexReadResult<usize>>| {
                for i in partition_id as u32..10000 {
                    let read_partition_id = reader.read_u32::<byteorder::LittleEndian>()?;
                    assert_eq!(read_partition_id, i);
                }
                result_tx.send(MultiplexReadResult::Result(partition_id))?;

                let mut metrics = Metrics::default();
                metrics.record_cpu_worker_time(Duration::new(1, 0));

                result_tx.send(MultiplexReadResult::Complete(metrics))?;
                Ok(())
            },
        );

        // worst case: feed the parser just one byte at a time
        let mut r = Vec::new();

        for i in 0..output.len() {
            let mut i = PartialBuffer::new(&output[i..=i], &mut extra);
            multiplex_state.process_buffer(&mut i).unwrap();

            if let Some(res) = multiplex_state.retrieve_result(false).unwrap() {
                r.push(res);
            }
        }

        while let Some(res) = multiplex_state.retrieve_result(true).unwrap() {
            r.push(res);
        }

        let metrics = multiplex_state.take_metrics();
        assert_eq!(metrics.get_cpu_time_worker_time(), Duration::new(10, 0));

        assert_eq!(r[..], w[..]);
    }

    #[test]
    fn test_multiplex_read_error() {
        let mut multiplex_state = multiplex_read(
            10,
            10,
            &DEFAULT_THREAD_POOL,
            0,
            |_, _, _: &Sender<MultiplexReadResult<()>>| -> Result<()> {
                Err(LeptonError::new(ExitCode::FileNotFound, "test error"))?
            },
        );

        let e: LeptonError = multiplex_state.retrieve_result(true).unwrap_err().into();
        assert_eq!(e.exit_code(), ExitCode::FileNotFound);
        assert!(e.message().starts_with("test error"));
    }

    #[test]
    fn test_multiplex_read_panic() {
        let mut multiplex_state = multiplex_read(
            10,
            10,
            &DEFAULT_THREAD_POOL,
            0,
            |_, _, _: &Sender<MultiplexReadResult<()>>| -> Result<()> {
                panic!();
            },
        );

        let e: LeptonError = multiplex_state.retrieve_result(true).unwrap_err().into();
        assert_eq!(e.exit_code(), ExitCode::AssertionFailure);
    }

    // test catching errors in the multiplex_write function
    #[test]
    fn test_multiplex_write_error() {
        let mut output = Vec::new();

        let e: LeptonError = multiplex_write(
            &mut output,
            10,
            10,
            &DEFAULT_THREAD_POOL,
            |_, partition_id| -> Result<usize> {
                if partition_id == 3 {
                    // have one partition fail
                    Err(LeptonError::new(ExitCode::FileNotFound, "test error"))?
                } else {
                    Ok(0)
                }
            },
        )
        .unwrap_err()
        .into();

        assert_eq!(e.exit_code(), ExitCode::FileNotFound);
        assert!(e.message().starts_with("test error"));
    }

    // test catching panics in the multiplex_write function
    #[test]
    fn test_multiplex_write_panic() {
        let mut output = Vec::new();

        let e: LeptonError = multiplex_write(
            &mut output,
            10,
            10,
            &DEFAULT_THREAD_POOL,
            |_, partition_id| -> Result<usize> {
                if partition_id == 5 {
                    panic!();
                }
                Ok(0)
            },
        )
        .unwrap_err()
        .into();

        assert_eq!(e.exit_code(), ExitCode::AssertionFailure);
    }
}