use std::collections::VecDeque;
use std::io::{BufRead, Cursor, Read, Write};
use std::mem;
use std::sync::mpsc::Sender;
use default_boxed::DefaultBoxed;
#[cfg(feature = "detailed_tracing")]
use log::info;
use log::warn;
use crate::enabled_features::EnabledFeatures;
use crate::jpeg::block_based_image::BlockBasedImage;
use crate::jpeg::jpeg_code;
use crate::jpeg::jpeg_header::{JpegHeader, ReconstructionInfo, RestartSegmentCodingInfo};
use crate::jpeg::jpeg_write::{JpegIncrementalWriter, jpeg_write_entire_scan};
use crate::lepton_error::{AddContext, ExitCode, Result, err_exit_code};
use crate::metrics::{CpuTimeMeasure, Metrics};
use crate::structs::lepton_decoder::lepton_decode_row_range;
use crate::structs::lepton_header::{FIXED_HEADER_SIZE, LeptonHeader};
use crate::structs::multiplexer::{
MultiplexReadResult, MultiplexReader, MultiplexReaderState, multiplex_read,
};
use crate::structs::partial_buffer::PartialBuffer;
use crate::structs::quantization_tables::QuantizationTables;
use crate::structs::simple_threadpool::ThreadPoolHolder;
use crate::structs::thread_handoff::ThreadHandoff;
use crate::{LeptonThreadPool, consts::*};
/// Decodes a complete Lepton stream from `reader`, writing the reconstructed
/// JPEG bytes to `writer`.
///
/// Pulls input in `fill_buf`-sized chunks and pushes them into a
/// [`LeptonFileReader`]; an empty chunk from `fill_buf` is the EOF signal and
/// is forwarded via the `input_complete` flag.
///
/// Returns the accumulated decode [`Metrics`] on success.
pub fn decode_lepton<R: BufRead, W: Write>(
    reader: &mut R,
    writer: &mut W,
    enabled_features: &EnabledFeatures,
    thread_pool: &dyn LeptonThreadPool,
) -> Result<Metrics> {
    let mut decoder =
        LeptonFileReader::new(enabled_features.clone(), ThreadPoolHolder::Dyn(thread_pool));

    loop {
        let buffer = reader.fill_buf().context()?;
        // Capture the length once instead of re-querying it three times;
        // zero length means end of input.
        let amt = buffer.len();

        decoder.process_buffer(buffer, amt == 0, writer).context()?;

        if amt == 0 {
            break;
        }
        reader.consume(amt);
    }

    Ok(decoder.take_metrics())
}
/// Decodes a Lepton stream and returns the parsed header together with the
/// decoded DCT coefficient images (one [`BlockBasedImage`] per component),
/// rather than reconstructing the JPEG byte stream.
#[allow(dead_code)]
pub fn decode_lepton_file_image<R: BufRead>(
    reader: &mut R,
    enabled_features: &EnabledFeatures,
    thread_pool: &dyn LeptonThreadPool,
) -> Result<(Box<LeptonHeader>, Vec<BlockBasedImage>)> {
    let mut lh = LeptonHeader::default_boxed();
    let mut enabled_features = enabled_features.clone();

    // The fixed-size preamble tells us how long the compressed header is.
    let mut fixed_header_buffer = [0; FIXED_HEADER_SIZE];
    reader.read_exact(&mut fixed_header_buffer).context()?;

    let compressed_header_size = lh
        .read_lepton_fixed_header(&fixed_header_buffer, &mut enabled_features)
        .context()?;

    lh.read_compressed_lepton_header(reader, &mut enabled_features, compressed_header_size)
        .context()?;

    // Headers are followed by a 3-byte completion ("CMP") marker.
    let mut buf = [0; 3];
    reader.read_exact(&mut buf).context()?;

    if buf != LEPTON_HEADER_COMPLETION_MARKER {
        return err_exit_code(ExitCode::BadLeptonFile, "CMP marker not found");
    }

    // Spawn one decode task per thread handoff; each yields its decoded row
    // range as a Vec<BlockBasedImage> (one entry per component).
    let mut state = LeptonFileReader::run_lepton_decoder_threads(
        &lh,
        &enabled_features,
        4,
        thread_pool,
        progressive_decoding_thread,
    )
    .context()?;

    let mut results = Vec::new();
    let mut extra_buffer = Vec::new();

    // Pump the remaining compressed data through the multiplexer, collecting
    // any segments that finish along the way (non-blocking retrieve).
    loop {
        let b = reader.fill_buf().context()?;
        let b_len = b.len();
        if b_len == 0 {
            break;
        }

        state.process_buffer(&mut PartialBuffer::new(b, &mut extra_buffer))?;
        reader.consume(b_len);

        if let Some(r) = state.retrieve_result(false)? {
            results.push(r);
        }
    }

    // Input exhausted: block until every remaining segment is delivered.
    while let Some(r) = state.retrieve_result(true)? {
        results.push(r);
    }

    // NOTE(review): indexing results[0] assumes at least one thread handoff
    // produced a result for any file that got this far — confirm this
    // invariant; an empty results vector would panic here.
    let num_components = results[0].len();

    // Merge the per-thread vertical slices into one image per component.
    let mut block_image = Vec::new();
    for i in 0..num_components {
        block_image.push(BlockBasedImage::merge(&mut results, i).context()?);
    }

    Ok((lh, block_image))
}
/// State machine for [`LeptonFileReader`]; the decoder advances through these
/// states as bytes arrive in `process_buffer`.
enum DecoderState {
    /// Waiting for the `FIXED_HEADER_SIZE`-byte fixed header.
    FixedHeader(),
    /// Waiting for the compressed header; payload is its length in bytes.
    CompressedHeader(usize),
    /// Waiting for the 3-byte header completion ("CMP") marker.
    CMP(),
    /// Decoding a multi-scan (progressive) file; worker threads yield decoded
    /// coefficient images that are merged and re-encoded once input ends.
    ScanProgressive(MultiplexReaderState<Vec<BlockBasedImage>>),
    /// Decoding a single-scan (baseline) file; worker threads yield recoded
    /// JPEG bytes that can be streamed to the output as they arrive.
    ScanBaseline(MultiplexReaderState<Vec<u8>>),
    /// Decoding finished; remaining input (trailer) is ignored.
    EOI,
}
/// Write adapter that forwards at most `amount_left` bytes to `inner`;
/// anything past that cap is reported as written but silently dropped.
struct LimitedOutputWriter<'a, W: Write> {
    inner: &'a mut W,
    amount_left: &'a mut u64,
}

impl<W: Write> Write for LimitedOutputWriter<'_, W> {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        // Never hand the inner writer more than the remaining budget.
        let allowed = std::cmp::min(*self.amount_left, buf.len() as u64) as usize;
        let written = self.inner.write(&buf[..allowed])?;
        *self.amount_left -= written as u64;

        if written < allowed {
            // The inner writer made partial progress within the budget;
            // report it honestly so the caller retries the remainder.
            Ok(written)
        } else {
            // Everything within the budget was accepted; claim the whole
            // buffer so callers never retry bytes we deliberately drop.
            Ok(buf.len())
        }
    }

    fn flush(&mut self) -> std::io::Result<()> {
        self.inner.flush()
    }
}
/// Write adapter backed by a caller-supplied fixed slice: bytes land in
/// `output_buffer` first, and anything that does not fit spills into
/// `extra_queue` for a later call to drain.
struct FixedBufferOuputWriter<'a> {
    // number of bytes already placed into `output_buffer`
    amount_written: usize,
    // fixed-size destination that receives bytes first
    output_buffer: &'a mut [u8],
    // overflow storage for bytes that did not fit
    extra_queue: &'a mut VecDeque<u8>,
}

impl Write for FixedBufferOuputWriter<'_> {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let free = self.output_buffer.len() - self.amount_written;
        // Split the input into what fits in the fixed buffer and what spills.
        let (direct, overflow) = buf.split_at(buf.len().min(free));

        self.output_buffer[self.amount_written..self.amount_written + direct.len()]
            .copy_from_slice(direct);
        self.amount_written += direct.len();

        // Extending with an empty slice is a no-op, so no guard is needed.
        self.extra_queue.extend(overflow);

        // Always claim the full buffer: nothing is ever lost, only queued.
        Ok(buf.len())
    }

    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}
/// Incremental (push-style) Lepton decoder: callers feed input chunks via
/// `process_buffer` and receive the reconstructed JPEG through the writer
/// they supply on each call.
pub struct LeptonFileReader<'a> {
    // current position in the decoding state machine
    state: DecoderState,
    // parsed Lepton/JPEG header data
    lh: Box<LeptonHeader>,
    // feature switches controlling decode behavior/compatibility
    enabled_features: EnabledFeatures,
    // carry-over bytes between process_buffer calls (backing for PartialBuffer)
    extra_buffer: Vec<u8>,
    // accumulated performance counters merged from the worker threads
    metrics: Metrics,
    // total input bytes seen so far; verified against the 4-byte trailer
    total_read_size: u64,
    // output budget: JPEG bytes still allowed to be written
    jpeg_file_size_left: u64,
    // set once the caller has signalled end of input
    input_complete: bool,
    // thread pool used for the per-segment decode workers
    thread_pool: ThreadPoolHolder<'a>,
}
impl<'a> LeptonFileReader<'a> {
    /// Creates a reader in the initial `FixedHeader` state with empty
    /// buffers, zeroed counters and default metrics.
    pub fn new(features: EnabledFeatures, thread_pool: ThreadPoolHolder<'a>) -> Self {
        LeptonFileReader {
            state: DecoderState::FixedHeader(),
            lh: LeptonHeader::default_boxed(),
            enabled_features: features,
            extra_buffer: Vec::new(),
            metrics: Metrics::default(),
            total_read_size: 0,
            input_complete: false,
            jpeg_file_size_left: 0,
            thread_pool,
        }
    }

    /// Feeds the next chunk of the Lepton stream into the decoder.
    ///
    /// `input_complete` must be true on the final call (typically an empty
    /// `in_buffer` with the flag set); once set, supplying more data is a
    /// syntax error. Decoded JPEG bytes are written to `output` as they
    /// become available, capped by `jpeg_file_size_left`.
    pub fn process_buffer(
        &mut self,
        in_buffer: &[u8],
        input_complete: bool,
        output: &mut impl Write,
    ) -> Result<()> {
        if self.input_complete && in_buffer.len() > 0 {
            return err_exit_code(
                ExitCode::SyntaxError,
                "ERROR: input was marked as complete but more data was provided",
            );
        }

        self.total_read_size += in_buffer.len() as u64;

        // PartialBuffer stitches this chunk together with any bytes carried
        // over from previous calls (stored in extra_buffer).
        let mut in_buffer = PartialBuffer::new(in_buffer, &mut self.extra_buffer);

        while in_buffer.continue_processing() {
            match &mut self.state {
                DecoderState::FixedHeader() => {
                    // take() returns None until enough bytes have accumulated,
                    // in which case we simply wait for the next chunk.
                    if let Some(v) = in_buffer.take(FIXED_HEADER_SIZE, 0) {
                        let compressed_header_size = self
                            .lh
                            .read_lepton_fixed_header(
                                &v.try_into().unwrap(),
                                &mut self.enabled_features,
                            )
                            .context()?;
                        self.state = DecoderState::CompressedHeader(compressed_header_size);
                        // Output budget: never emit more bytes than the
                        // original JPEG contained.
                        self.jpeg_file_size_left = u64::from(self.lh.jpeg_file_size);
                    }
                }
                DecoderState::CompressedHeader(compressed_length) => {
                    if let Some(v) = in_buffer.take(*compressed_length, 0) {
                        self.lh
                            .read_compressed_lepton_header(
                                &mut Cursor::new(v),
                                &mut self.enabled_features,
                                *compressed_length,
                            )
                            .context()?;

                        if !self.lh.bad_truncation_version() {
                            // Garbage data bypasses the cap when written later
                            // (see write_garbage_data), so reserve its length
                            // out of the budget now.
                            self.jpeg_file_size_left -= self.lh.rinfo.garbage_data.len() as u64;
                        }

                        self.state = DecoderState::CMP();
                    }
                }
                DecoderState::CMP() => {
                    if let Some(v) = in_buffer.take(3, 0) {
                        let mut limited_output = LimitedOutputWriter {
                            inner: output,
                            amount_left: &mut self.jpeg_file_size_left,
                        };
                        // Verifies the marker and spawns the decoder threads,
                        // transitioning to ScanProgressive or ScanBaseline.
                        self.state = Self::process_cmp(
                            v,
                            &self.lh,
                            &self.enabled_features,
                            &self.thread_pool,
                            &mut limited_output,
                        )?;
                    }
                }
                DecoderState::ScanProgressive(state) => {
                    // Keep feeding compressed scan data to the worker threads.
                    state.process_buffer(&mut in_buffer)?;

                    if input_complete {
                        // The stream ends with a 4-byte length trailer.
                        Self::verify_eof_file_size(self.total_read_size, &mut in_buffer)?;

                        // Progressive output can only be generated once all
                        // coefficient data is decoded, so collect everything
                        // (blocking retrieve).
                        let mut results = Vec::new();
                        while let Some(r) = state.retrieve_result(true)? {
                            results.push(r);
                        }

                        let mut limited_output = LimitedOutputWriter {
                            inner: output,
                            amount_left: &mut self.jpeg_file_size_left,
                        };

                        Self::process_progressive(
                            &mut self.lh,
                            &self.enabled_features,
                            results,
                            &mut limited_output,
                        )?;

                        write_tail(&mut self.lh, &mut limited_output)?;
                        write_garbage_data(&self.lh, limited_output)?;

                        self.metrics.merge_from(state.take_metrics());
                        self.state = DecoderState::EOI;
                    }
                }
                DecoderState::ScanBaseline(state) => {
                    state.process_buffer(&mut in_buffer)?;

                    let mut limited_output = LimitedOutputWriter {
                        inner: output,
                        amount_left: &mut self.jpeg_file_size_left,
                    };

                    // Baseline segments are already JPEG bytes: stream each
                    // finished segment straight to the output (non-blocking).
                    while let Some(r) = state.retrieve_result(false)? {
                        limited_output.write_all(&r)?;
                    }

                    if input_complete {
                        Self::verify_eof_file_size(self.total_read_size, &mut in_buffer)?;

                        // Drain the remaining segments, blocking this time.
                        while let Some(r) = state.retrieve_result(true)? {
                            limited_output.write_all(&r)?;
                        }

                        // Re-emit restart markers recorded in rst_err that
                        // followed the last MCU in the original file.
                        if self.lh.rinfo.rst_err.len() > 0 {
                            let cumulative_reset_markers = if self.lh.jpeg_header.rsti != 0 {
                                (self.lh.jpeg_header.mcuc - 1) / self.lh.jpeg_header.rsti
                            } else {
                                0
                            } as u8;
                            for i in 0..self.lh.rinfo.rst_err[0] {
                                // Marker codes cycle through RST0..RST7.
                                let rst = jpeg_code::RST0 + ((cumulative_reset_markers + i) & 7);
                                limited_output.write_all(&[0xff, rst])?;
                            }
                        }

                        write_tail(&mut self.lh, &mut limited_output)?;
                        write_garbage_data(&self.lh, limited_output)?;

                        self.metrics.merge_from(state.take_metrics());
                        self.state = DecoderState::EOI;
                    }
                }
                DecoderState::EOI => {
                    // Done: ignore whatever is left of this chunk.
                    break;
                }
            }
        }

        if input_complete {
            self.input_complete = true;
            match self.state {
                DecoderState::EOI => {
                    // Decoder finished cleanly; nothing more to do.
                }
                _ => {
                    // Still mid-state at EOF means the file was truncated.
                    return err_exit_code(ExitCode::SyntaxError,
                        format!("ERROR: input was marked as complete, but the decoder in state {:?} still needs more data",
                        std::mem::discriminant(&self.state)).as_str());
                }
            }
        }
        Ok(())
    }

    /// Variant of `process_buffer` for callers with a fixed-size output
    /// buffer: first drains any previously overflowed bytes from
    /// `output_extra` into `output_buffer`, then decodes into the remaining
    /// space, overflowing back into `output_extra`.
    ///
    /// Returns `(done, bytes_written)` where `done` is true once the input
    /// is complete and no overflow remains queued.
    pub fn process_limited_buffer(
        &mut self,
        input: &[u8],
        input_complete: bool,
        output_buffer: &mut [u8],
        output_extra: &mut VecDeque<u8>,
    ) -> std::io::Result<(bool, usize)> {
        let mut amount_written = 0;

        // Copy out anything left over from the previous call first.
        while amount_written < output_buffer.len() && output_extra.len() > 0 {
            amount_written += output_extra
                .read(&mut output_buffer[amount_written..])
                .unwrap();
        }

        let mut w = FixedBufferOuputWriter {
            amount_written,
            output_buffer,
            extra_queue: output_extra,
        };

        self.process_buffer(input, input_complete, &mut w)?;

        Ok((input_complete && w.extra_queue.len() == 0, w.amount_written))
    }

    /// Takes ownership of the accumulated metrics, leaving defaults behind.
    pub fn take_metrics(&mut self) -> Metrics {
        mem::take(&mut self.metrics)
    }

    /// Borrows the accumulated metrics without resetting them.
    pub fn metrics(&self) -> &Metrics {
        &self.metrics
    }

    /// Reconstructs a progressive JPEG: merges the per-thread coefficient
    /// images, then emits SOI, the header up to the first scan, and each
    /// re-encoded scan interleaved with its following raw header segment.
    fn process_progressive(
        lh: &mut LeptonHeader,
        enabled_features: &EnabledFeatures,
        mut image_segments: Vec<Vec<BlockBasedImage>>,
        output: &mut impl Write,
    ) -> Result<()> {
        // Merge the vertical slices produced by each thread into whole
        // per-component images.
        let num_components = image_segments[0].len();
        let mut merged = Vec::new();
        for i in 0..num_components {
            merged.push(BlockBasedImage::merge(&mut image_segments, i).context()?);
        }

        output.write_all(&SOI)?;

        // Header segments up to (not including) the first scan.
        output
            .write_all(&lh.rinfo.raw_jpeg_header[0..lh.raw_jpeg_header_read_index])
            .context()?;

        let mut scnc = 0; // scan counter
        loop {
            // Re-encode the current scan from the merged coefficients.
            let scan =
                jpeg_write_entire_scan(&merged[..], &lh.jpeg_header, &lh.rinfo, scnc).context()?;
            output.write_all(&scan).context()?;

            // Emit the raw header bytes between this scan and the next;
            // advance_next_header_segment returns false after the last one.
            let old_pos = lh.raw_jpeg_header_read_index;
            let result = lh.advance_next_header_segment(enabled_features).context()?;
            output
                .write_all(&lh.rinfo.raw_jpeg_header[old_pos..lh.raw_jpeg_header_read_index])
                .context()?;
            if !result {
                break;
            }
            scnc += 1;
        }
        Ok(())
    }

    /// Validates the 3-byte completion marker and transitions into the
    /// matching scan-decoding state. For baseline (single-scan) files the
    /// SOI and raw header are written immediately so output can stream.
    fn process_cmp(
        v: Vec<u8>,
        lh: &LeptonHeader,
        enabled_features: &EnabledFeatures,
        thread_pool: &dyn LeptonThreadPool,
        output: &mut impl Write,
    ) -> Result<DecoderState> {
        if v[..] != LEPTON_HEADER_COMPLETION_MARKER {
            return err_exit_code(ExitCode::BadLeptonFile, "CMP marker not found");
        }

        Ok(if !lh.jpeg_header.is_single_scan() {
            let mux = Self::run_lepton_decoder_threads(
                lh,
                enabled_features,
                4,
                thread_pool,
                progressive_decoding_thread,
            )
            .context()?;

            DecoderState::ScanProgressive(mux)
        } else {
            output.write_all(&SOI)?;
            output
                .write_all(&lh.rinfo.raw_jpeg_header[0..lh.raw_jpeg_header_read_index])
                .context()?;

            let mux = Self::run_lepton_decoder_threads(
                &lh,
                &enabled_features,
                4,
                thread_pool,
                baseline_decoding_thread,
            )?;

            DecoderState::ScanBaseline(mux)
        })
    }

    /// Checks the 4-byte little-endian trailer against the number of input
    /// bytes actually consumed; a mismatch (or a missing trailer) indicates
    /// a truncated or corrupt file.
    fn verify_eof_file_size(total_read_size: u64, in_buffer: &mut PartialBuffer<'_>) -> Result<()> {
        if let Some(bytes) = in_buffer.take_n::<4>(0) {
            let size = u32::from_le_bytes(bytes);
            if u64::from(size) != total_read_size {
                return err_exit_code(
                    ExitCode::VerificationLengthMismatch,
                    format!(
                        "ERROR mismatch input_len = {0}, decoded_len = {1}",
                        size, total_read_size
                    ),
                );
            }
            Ok(())
        } else {
            err_exit_code(
                ExitCode::VerificationLengthMismatch,
                "Missing EOF file size",
            )
        }
    }

    /// Starts one multiplexed decode task per thread handoff.
    ///
    /// Clones the header data the workers need so the closure is
    /// self-contained; `process` is the per-thread body (progressive or
    /// baseline) and is told whether it handles the final handoff
    /// (thread_id == last index).
    fn run_lepton_decoder_threads<P: Send + 'static>(
        lh: &LeptonHeader,
        features: &EnabledFeatures,
        retention_bytes: usize,
        thread_pool: &dyn LeptonThreadPool,
        process: fn(
            reader: &mut MultiplexReader,
            features: &EnabledFeatures,
            qt: &[QuantizationTables],
            thread_handoff: &ThreadHandoff,
            jpeg_header: &JpegHeader,
            rinfo: &ReconstructionInfo,
            is_last_thread: bool,
            sender: &Sender<MultiplexReadResult<P>>,
        ) -> Result<()>,
    ) -> Result<MultiplexReaderState<P>> {
        let qt = QuantizationTables::construct_quantization_tables(&lh.jpeg_header)?;

        // Clone everything the worker closure captures so it owns its data.
        let features = features.clone();
        let thread_handoff = lh.thread_handoff.clone();
        let jpeg_header = lh.jpeg_header.clone();
        let rinfo = lh.rinfo.clone();

        let multiplex_reader_state = multiplex_read(
            thread_handoff.len(),
            features.max_processor_threads as usize,
            thread_pool,
            retention_bytes,
            move |thread_id, reader, result_tx| {
                process(
                    reader,
                    &features,
                    &qt,
                    &thread_handoff[thread_id],
                    &jpeg_header,
                    &rinfo,
                    thread_id == thread_handoff.len() - 1,
                    result_tx,
                )
            },
        );

        Ok(multiplex_reader_state)
    }
}
fn write_tail(lh: &mut LeptonHeader, output: &mut impl Write) -> Result<()> {
output
.write_all(&lh.rinfo.raw_jpeg_header[lh.raw_jpeg_header_read_index..])
.context()?;
Ok(())
}
/// Worker body for progressive files: decodes this thread's row range
/// (`luma_y_start..luma_y_end`) into coefficient images and sends the images
/// back over `sender`, followed by a completion message with the metrics.
fn progressive_decoding_thread(
    reader: &mut MultiplexReader,
    features: &EnabledFeatures,
    qt: &[QuantizationTables],
    thread_handoff: &ThreadHandoff,
    jpeg_header: &JpegHeader,
    rinfo: &ReconstructionInfo,
    is_last_thread: bool,
    sender: &Sender<MultiplexReadResult<Vec<BlockBasedImage>>>,
) -> Result<()> {
    let cpu_time: CpuTimeMeasure = CpuTimeMeasure::new();

    let (mut metrics, image_data) = lepton_decode_row_range(
        qt,
        jpeg_header,
        &rinfo.truncate_components,
        reader,
        thread_handoff.luma_y_start,
        thread_handoff.luma_y_end,
        is_last_thread,
        true,
        features,
        |_, _| Ok(()), // no incremental output; the images are returned whole
    )?;

    metrics.record_cpu_worker_time(cpu_time.elapsed());

    // Result first, then the completion message carrying the metrics.
    sender.send(MultiplexReadResult::Result(image_data))?;
    sender.send(MultiplexReadResult::Complete(metrics))?;
    Ok(())
}
/// Caps `buf` at `amount_left` bytes, logging a warning if anything had to
/// be dropped. Enforces the segment size recorded in the thread handoff.
fn truncate_to_segment_size(buf: &mut Vec<u8>, amount_left: usize) {
    if buf.len() > amount_left {
        warn!(
            "Truncating output buffer from {} to {}",
            buf.len(),
            amount_left
        );
        buf.truncate(amount_left);
    }
}

/// Worker body for baseline (single-scan) files: decodes this thread's row
/// range and incrementally re-encodes it to JPEG bytes, sending buffered
/// chunks over `sender` as they fill up, followed by the final chunk and a
/// completion message with the metrics.
///
/// Output is clamped to `thread_handoff.segment_size` so a thread can never
/// emit more bytes than its segment occupied in the original file.
fn baseline_decoding_thread(
    reader: &mut MultiplexReader,
    features: &EnabledFeatures,
    qt: &[QuantizationTables],
    thread_handoff: &ThreadHandoff,
    jpeg_header: &JpegHeader,
    rinfo: &ReconstructionInfo,
    is_last_thread: bool,
    sender: &Sender<MultiplexReadResult<Vec<u8>>>,
) -> Result<()> {
    let cpu_time: CpuTimeMeasure = CpuTimeMeasure::new();

    // Seed the incremental writer with the bit position / DC state this
    // segment starts at (carried over from the previous segment).
    let restart_info = RestartSegmentCodingInfo {
        overhang_byte: thread_handoff.overhang_byte,
        num_overhang_bits: thread_handoff.num_overhang_bits,
        luma_y_start: thread_handoff.luma_y_start,
        luma_y_end: thread_handoff.luma_y_end,
        last_dc: thread_handoff.last_dc,
    };

    const BUFFER_SIZE: usize = 128 * 1024;

    let mut amount_left = thread_handoff.segment_size as usize;

    let mut inc_writer =
        JpegIncrementalWriter::new(BUFFER_SIZE, rinfo, Some(&restart_info), jpeg_header, 0);

    let (mut metrics, _image_data) = lepton_decode_row_range(
        qt,
        jpeg_header,
        &rinfo.truncate_components,
        reader,
        thread_handoff.luma_y_start,
        thread_handoff.luma_y_end,
        is_last_thread,
        true,
        features,
        |row_spec, image_data| {
            inc_writer.process_row(row_spec, image_data).context()?;

            // Ship a chunk whenever the buffer fills so the consumer can
            // stream output instead of holding the whole segment in memory.
            if inc_writer.amount_buffered() >= BUFFER_SIZE {
                let mut buf = inc_writer.detach_buffer();
                truncate_to_segment_size(&mut buf, amount_left);
                amount_left -= buf.len();
                sender.send(MultiplexReadResult::Result(buf))?;
            }
            Ok(())
        },
    )?;

    metrics.record_cpu_worker_time(cpu_time.elapsed());

    // Flush whatever is left, clamped to the remaining budget.
    let mut buf = inc_writer.detach_buffer();
    truncate_to_segment_size(&mut buf, amount_left);

    sender.send(MultiplexReadResult::Result(buf))?;
    sender.send(MultiplexReadResult::Complete(metrics))?;
    Ok(())
}
/// Appends the trailing garbage data (bytes the original file carried after
/// the JPEG payload).
///
/// For files without the bad truncation version, the garbage length was
/// already deducted from the output budget when the compressed header was
/// parsed, so the data bypasses the limiter and goes straight to the inner
/// writer; otherwise it is written through the capped writer.
fn write_garbage_data(
    lh: &LeptonHeader,
    mut limited_output: LimitedOutputWriter<'_, impl Write>,
) -> Result<()> {
    let garbage = &lh.rinfo.garbage_data;
    if lh.bad_truncation_version() {
        // Known-bad truncation versions: respect the output cap.
        limited_output.write_all(garbage).context()?;
    } else {
        // Normal case: write uncapped (budget was reserved up front).
        limited_output.inner.write_all(garbage).context()?;
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::{BufWriter, Cursor};
    use default_boxed::DefaultBoxed;
    use crate::{
        DEFAULT_THREAD_POOL, EnabledFeatures, SingleThreadPool, decode_lepton,
        helpers::read_file,
        structs::{
            lepton_header::{FIXED_HEADER_SIZE, LeptonHeader},
            thread_handoff::ThreadHandoff,
        },
    };

    /// Serializes a LeptonHeader built from a real JPEG and reads it back,
    /// verifying the uncompressed header size survives the round trip.
    #[test]
    fn parse_and_write_header() {
        use crate::jpeg::jpeg_read::read_jpeg_file;
        use std::io::Read;

        let min_jpeg = read_file("tiny", ".jpg");

        let mut lh = LeptonHeader::default_boxed();
        let enabled_features = EnabledFeatures::compat_lepton_vector_read();
        lh.jpeg_file_size = min_jpeg.len() as u32;
        lh.uncompressed_lepton_header_size = Some(752);

        let (_image_data, _partitions, _end_scan) = read_jpeg_file(
            &mut Cursor::new(min_jpeg),
            &mut lh.jpeg_header,
            &mut lh.rinfo,
            &enabled_features,
            |_, _| {},
        )
        .unwrap();

        // A single synthetic handoff is enough to exercise serialization.
        lh.thread_handoff.push(ThreadHandoff {
            luma_y_start: 0,
            luma_y_end: 1,
            segment_offset_in_file: 0,
            segment_size: 1000,
            overhang_byte: 0,
            num_overhang_bits: 1,
            last_dc: [1, 2, 3, 4],
        });

        let mut serialized = Vec::new();
        lh.write_lepton_header(&mut Cursor::new(&mut serialized), &enabled_features)
            .unwrap();

        // Parse the serialized bytes back into a fresh header.
        let mut other = LeptonHeader::default_boxed();
        let mut other_reader = Cursor::new(&serialized);
        let mut fixed_buffer = [0; FIXED_HEADER_SIZE];
        other_reader.read_exact(&mut fixed_buffer).unwrap();

        let mut other_enabled_features = EnabledFeatures::compat_lepton_vector_read();
        let compressed_header_size = other
            .read_lepton_fixed_header(&fixed_buffer, &mut other_enabled_features)
            .unwrap();
        other
            .read_compressed_lepton_header(
                &mut other_reader,
                &mut other_enabled_features,
                compressed_header_size,
            )
            .unwrap();

        assert_eq!(
            lh.uncompressed_lepton_header_size,
            other.uncompressed_lepton_header_size
        );
    }

    // Each test below decodes a fixture .lep file both as coefficient images
    // and as a full JPEG, comparing against the original .jpg.

    #[test]
    fn test_simple_parse_progressive() {
        test_file("androidprogressive")
    }

    #[test]
    fn test_simple_parse_baseline() {
        test_file("android")
    }

    #[test]
    fn test_simple_parse_trailing() {
        test_file("androidtrail")
    }

    #[test]
    fn test_zero_dqt() {
        test_file("zeros_in_dqt_tables")
    }

    #[test]
    fn test_pixelated() {
        test_file("pixelated")
    }

    #[test]
    fn test_truncate4() {
        test_file("truncate4")
    }

    /// Full decode using the single-threaded pool implementation.
    #[test]
    fn test_decode_single_threaded() {
        let filename = "iphone";

        let file = read_file(filename, ".lep");
        let original = read_file(filename, ".jpg");

        let enabled_features = EnabledFeatures::compat_lepton_vector_read();

        let mut output = Vec::new();

        decode_lepton(
            &mut Cursor::new(&file),
            &mut output,
            &enabled_features,
            &SingleThreadPool::default(),
        )
        .unwrap();

        assert_eq!(output.len(), original.len());
        assert!(output == original);
    }

    /// Smoke test that encoding also works on the single-threaded pool.
    #[test]
    fn test_encode_single_threaded() {
        let filename = "iphone";

        let file = read_file(filename, ".jpg");

        let enabled_features = EnabledFeatures::compat_lepton_vector_read();

        let mut output = Vec::new();

        crate::encode_lepton(
            &mut Cursor::new(&file),
            &mut Cursor::new(&mut output),
            &enabled_features,
            &SingleThreadPool::default(),
        )
        .unwrap();
    }

    /// Shared helper: decodes `<filename>.lep` via both public entry points
    /// and checks the byte-for-byte JPEG output against `<filename>.jpg`.
    fn test_file(filename: &str) {
        let file = read_file(filename, ".lep");
        let original = read_file(filename, ".jpg");

        let enabled_features = EnabledFeatures::compat_lepton_vector_read();

        let _ = decode_lepton_file_image(
            &mut Cursor::new(&file),
            &enabled_features,
            &DEFAULT_THREAD_POOL,
        )
        .unwrap();

        let mut output = Vec::new();

        decode_lepton(
            &mut Cursor::new(&file),
            &mut output,
            &enabled_features,
            &DEFAULT_THREAD_POOL,
        )
        .unwrap();

        assert_eq!(output.len(), original.len());
        assert!(output == original);
    }

    /// Writer that deliberately accepts only one byte per call, to exercise
    /// the decoder's partial-write handling.
    struct RecordStreamPosition<W: Write> {
        writer: W,
        position: u64,
    }

    impl<W: Write> Write for RecordStreamPosition<W> {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            if buf.len() == 0 {
                return Ok(0);
            }

            // Write exactly one byte so callers must loop via write_all.
            let n = self.writer.write(&[buf[0]])?;
            self.position += n as u64;
            Ok(n)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            self.writer.flush()
        }
    }

    /// Decodes through the one-byte-at-a-time writer and verifies the output
    /// still matches the original exactly.
    #[test]
    fn test_streaming_results() {
        let data = read_file("hq", ".lep");
        let original_data = read_file("hq", ".jpg");

        let mut cursor = Cursor::new(&data);

        let mut output_vector = Vec::new();
        let mut output = RecordStreamPosition {
            writer: BufWriter::new(&mut output_vector),
            position: 0,
        };

        let enabled_features = EnabledFeatures::compat_lepton_vector_read();

        let thread_pool = &DEFAULT_THREAD_POOL;

        decode_lepton(&mut cursor, &mut output, &enabled_features, thread_pool).unwrap();
        drop(output);

        assert_eq!(output_vector.len(), original_data.len());
        assert!(output_vector == original_data);
    }

    /// An output sink half the required size must produce an OsError
    /// (write_all fails once the fixed slice is exhausted).
    #[test]
    fn test_too_small_output() {
        let original = read_file("slrcity", ".lep");

        let mut output = Vec::new();
        output.resize(original.len() / 2, 0u8);

        let r = decode_lepton(
            &mut Cursor::new(&original),
            &mut Cursor::new(&mut output[..]),
            &EnabledFeatures::compat_lepton_vector_read(),
            &DEFAULT_THREAD_POOL,
        );

        assert!(r.is_err() && r.err().unwrap().exit_code() == ExitCode::OsError);
    }

    /// Shared helper for the truncation tests: decode and compare exactly.
    fn verifydecode(filename: &str) {
        let original = read_file(filename, ".lep");

        let mut output = Vec::new();

        let _ = decode_lepton(
            &mut Cursor::new(&original),
            &mut Cursor::new(&mut output),
            &EnabledFeatures::compat_lepton_vector_read(),
            &DEFAULT_THREAD_POOL,
        )
        .unwrap();

        let jpg = read_file(filename, ".jpg");

        assert_eq!(jpg.len(), output.len());
        assert!(output == jpg);
    }

    #[test]
    fn test_truncated_with_bad_truncation_version() {
        verifydecode("half_scan_rust55");
    }

    #[test]
    fn test_truncated_with_ok_truncation_version() {
        verifydecode("half_scan");
    }

    #[test]
    fn test_truncated_with_bad_garbage_data() {
        verifydecode("truncbad");
    }
}