makepad_zune_png/decoder.rs
/*
 * Copyright (c) 2023.
 *
 * This software is free software; You can redistribute it or modify it under terms of the MIT, Apache License or Zlib license
 */

use alloc::vec::Vec;
use alloc::{format, vec};
use core::cmp::min;

use zune_core::bit_depth::{BitDepth, ByteEndian};
use zune_core::bytestream::{ZByteReader, ZReaderTrait};
use zune_core::colorspace::ColorSpace;
use zune_core::log::trace;
use zune_core::options::DecoderOptions;
use zune_core::result::DecodingResult;
use zune_inflate::DeflateOptions;

use crate::apng::{ActlChunk, FrameInfo, SingleFrame};
use crate::constants::PNG_SIGNATURE;
use crate::enums::{FilterMethod, InterlaceMethod, PngChunkType, PngColor};
use crate::error::PngDecodeErrors;
use crate::error::PngDecodeErrors::GenericStatic;
use crate::filters::de_filter::{
    handle_avg, handle_avg_first, handle_paeth, handle_paeth_first, handle_sub, handle_up
};
use crate::options::default_chunk_handler;
use crate::utils::{
    add_alpha, convert_be_to_target_endian_u16, convert_u16_to_u8_slice, expand_bits_to_byte,
    expand_palette, expand_trns, is_le
};

/// A palette entry.
///
/// The alpha field is used if the image has a tRNS
/// chunk and a PLTE chunk.
#[derive(Copy, Clone, Debug)]
pub(crate) struct PLTEEntry {
    pub red: u8,
    pub green: u8,
    pub blue: u8,
    pub alpha: u8
}

impl Default for PLTEEntry {
    fn default() -> Self {
        // A tRNS chunk may contain fewer values than there are palette entries;
        // in that case, the alpha value for all remaining palette entries is assumed to be 255.
        PLTEEntry {
            red: 0,
            green: 0,
            blue: 0,
            alpha: 255
        }
    }
}

#[derive(Copy, Clone)]
pub(crate) struct PngChunk {
    pub length: usize,
    pub chunk_type: PngChunkType,
    pub chunk: [u8; 4],
    pub crc: u32
}

/// Time information data
///
/// Extracted from tIME chunk
#[derive(Debug, Default, Copy, Clone)]
pub struct TimeInfo {
    pub year: u16,
    pub month: u8,
    pub day: u8,
    pub hour: u8,
    pub minute: u8,
    pub second: u8
}

/// iTXt details
///
/// UTF-8 encoded text
///
/// Extracted from the iTXt chunk where present
#[derive(Clone)]
pub struct ItxtChunk {
    pub keyword: Vec<u8>,
    pub text: Vec<u8>
}

/// tEXt chunk details
///
/// Latin-1 character set
///
/// Extracted from the tEXt chunk where present
#[derive(Clone)]
pub struct TextChunk {
    pub keyword: Vec<u8>,
    pub text: Vec<u8>
}

/// zTXt details
///
/// Extracted from the zTXt chunk where present
#[derive(Clone)]
pub struct ZtxtChunk {
    pub keyword: Vec<u8>,
    /// Uncompressed text
    pub text: Vec<u8>
}

/// Represents PNG information that can be extracted
/// from a PNG file.
#[derive(Default, Clone)]
pub struct PngInfo {
    /// Image width
    pub width: usize,
    /// Image height
    pub height: usize,
    /// Image gamma
    pub gamma: Option<f32>,
    /// Image interlace method
    pub interlace_method: InterlaceMethod,
    /// Image time info
    pub time_info: Option<TimeInfo>,
    /// Image exif data
    pub exif: Option<Vec<u8>>,
    /// Icc profile
    pub icc_profile: Option<Vec<u8>>,
    /// UTF-8 encoded text chunk
    pub itxt_chunk: Vec<ItxtChunk>,
    /// ztxt chunk
    pub ztxt_chunk: Vec<ZtxtChunk>,
    /// tEXt chunk
    pub text_chunk: Vec<TextChunk>,
    // no need to expose these ones
    pub(crate) depth: u8,
    // use bit_depth
    pub(crate) color: PngColor,
    // use get_colorspace
    pub(crate) component: u8,
    // use get_colorspace().num_components()
    pub(crate) filter_method: FilterMethod // for internal use, no need to expose
}

/// A PNG decoder instance.
///
/// This is the main decoder for png image decoding.
///
/// Instantiate the decoder with either [new](PngDecoder::new)
/// or [new_with_options](PngDecoder::new_with_options); calling either
/// [`decode_raw`](PngDecoder::decode_raw) or [`decode`](PngDecoder::decode)
/// will return the pixels present in that image.
///
/// # Note
/// The decoder currently expands images with less than 8 bits per pixel to 8 bits per pixel;
/// if this is not desired, consider another png decoder.
///
/// To get extra details such as exif data and the ICC profile if present, use [`get_info`](PngDecoder::get_info)
/// and access the relevant fields exposed.
pub struct PngDecoder<T>
where
    T: ZReaderTrait
{
    pub(crate) stream: ZByteReader<T>,
    pub(crate) options: DecoderOptions,
    pub(crate) png_info: PngInfo,
    pub(crate) palette: Vec<PLTEEntry>,
    pub(crate) frames: Vec<SingleFrame>,
    pub(crate) actl_info: Option<ActlChunk>,
    pub(crate) previous_stride: Vec<u8>,
    pub(crate) trns_bytes: [u16; 4],
    pub(crate) seen_hdr: bool,
    pub(crate) seen_ptle: bool,
    pub(crate) seen_headers: bool,
    pub(crate) seen_trns: bool,
    pub(crate) seen_iend: bool,
    pub(crate) current_frame: usize,
    pub(crate) called_from_decode_into: bool
}

impl<T: ZReaderTrait> PngDecoder<T> {
    /// Create a new PNG decoder
    ///
    /// # Arguments
    ///
    /// * `data`: The raw bytes of a png encoded file
    ///
    /// returns: PngDecoder
    ///
    /// The decoder settings are set to the defaults, which is
    /// strict mode + intrinsics
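    ///
    /// # Example
    /// A minimal sketch of typical usage; the empty slice below only stands in
    /// for the raw bytes of a real PNG file.
    /// ```no_run
    /// use zune_png::PngDecoder;
    ///
    /// let mut decoder = PngDecoder::new(&[]);
    /// // headers must be decoded before image information can be queried
    /// decoder.decode_headers().unwrap();
    /// let pixels = decoder.decode_raw().unwrap();
    /// ```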
    pub fn new(data: T) -> PngDecoder<T> {
        let default_opt = DecoderOptions::default();

        PngDecoder::new_with_options(data, default_opt)
    }
    /// Create a new decoder with the specified options
    ///
    /// # Arguments
    ///
    /// * `data`: Raw encoded png file contents
    /// * `options`: The custom options for this decoder
    ///
    /// returns: PngDecoder
    ///
    #[allow(unused_mut, clippy::redundant_field_names)]
    pub fn new_with_options(data: T, options: DecoderOptions) -> PngDecoder<T> {
        PngDecoder {
            seen_hdr: false,
            stream: ZByteReader::new(data),
            options: options,
            palette: Vec::new(),
            png_info: PngInfo::default(),
            actl_info: None,
            previous_stride: vec![],
            frames: vec![],
            seen_ptle: false,
            seen_trns: false,
            seen_headers: false,
            seen_iend: false,
            trns_bytes: [0; 4],
            current_frame: 0,
            called_from_decode_into: true
        }
    }

    /// Get image dimensions, or `None` if they haven't been decoded
    ///
    /// If the image is animated, this doesn't return the current frame's dimensions
    /// but rather the whole image's dimensions; for the former use `frame_info()` and
    /// access the relevant struct fields to get the dimensions
    ///
    /// # Returns
    /// - `Some((width,height))`
    /// - `None`: The image headers haven't been decoded
    ///   or there was an error decoding them
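    ///
    /// # Example
    /// A small sketch (the empty slice stands in for real PNG bytes):
    /// ```no_run
    /// use zune_png::PngDecoder;
    ///
    /// let mut decoder = PngDecoder::new(&[]);
    /// decoder.decode_headers().unwrap();
    /// // dimensions are only available once the headers have been decoded
    /// let (width, height) = decoder.get_dimensions().unwrap();
    /// ```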
    pub fn get_dimensions(&self) -> Option<(usize, usize)> {
        if !self.seen_hdr {
            return None;
        }

        Some((self.png_info.width, self.png_info.height))
    }
    /// Return the depth of the image
    ///
    /// Bit depths less than 8 will be returned as [`BitDepth::Eight`](zune_core::bit_depth::BitDepth::Eight)
    ///
    /// # Returns
    /// - `Some(depth)`: The bit depth of the image.
    /// - `None`: The header wasn't decoded, hence the depth wasn't discovered.
    pub const fn get_depth(&self) -> Option<BitDepth> {
        if !self.seen_hdr {
            return None;
        }
        match self.png_info.depth {
            1 | 2 | 4 | 8 => Some(BitDepth::Eight),
            16 => Some(BitDepth::Sixteen),
            _ => unreachable!()
        }
    }
    /// Get image colorspace
    ///
    /// If the image is a palette type, the colorspace is
    /// either RGB or RGBA depending on the existence of a transparency chunk
    ///
    /// If an image has a transparency chunk, the colorspace
    /// will include an alpha channel
    ///
    /// # Returns
    /// - `Some(colorspace)`: The colorspace which the decoded bytes will be in
    /// - `None`: If the image headers haven't been decoded, or there was an error
    ///   during decoding
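    ///
    /// # Example
    /// A small sketch showing how the output colorspace might be inspected
    /// before decoding (the empty slice stands in for real PNG bytes):
    /// ```no_run
    /// use zune_png::PngDecoder;
    ///
    /// let mut decoder = PngDecoder::new(&[]);
    /// decoder.decode_headers().unwrap();
    /// // the decoded output will be laid out according to this colorspace
    /// let colorspace = decoder.get_colorspace().unwrap();
    /// let components = colorspace.num_components();
    /// ```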
    pub const fn get_colorspace(&self) -> Option<ColorSpace> {
        if !self.seen_hdr {
            return None;
        }
        if self.options.png_get_add_alpha_channel() {
            return match self.png_info.color {
                PngColor::Luma | PngColor::LumaA => Some(ColorSpace::LumaA),
                PngColor::Palette | PngColor::RGB | PngColor::RGBA => Some(ColorSpace::RGBA),
                PngColor::Unknown => unreachable!()
            };
        }
        if !self.seen_trns {
            match self.png_info.color {
                PngColor::Palette => Some(ColorSpace::RGB),
                PngColor::Luma => Some(ColorSpace::Luma),
                PngColor::LumaA => Some(ColorSpace::LumaA),
                PngColor::RGB => Some(ColorSpace::RGB),
                PngColor::RGBA => Some(ColorSpace::RGBA),
                PngColor::Unknown => unreachable!()
            }
        } else {
            // for tRNS chunks, RGB=>RGBA
            // Luma=>LumaA, but if we are already in RGB and RGBA, just return
            // them
            match self.png_info.color {
                PngColor::Palette | PngColor::RGB => Some(ColorSpace::RGBA),
                PngColor::Luma => Some(ColorSpace::LumaA),
                PngColor::LumaA => Some(ColorSpace::LumaA),
                PngColor::RGBA => Some(ColorSpace::RGBA),
                _ => unreachable!()
            }
        }
    }
    /// Returns true if the image is animated
    ///
    /// # Note
    /// PNG has an unofficial specification extension that allows it to
    /// support animated files, otherwise known as
    /// APNG (with extension .apng), supported in various capacities
    /// in software.
    ///
    /// Such animated files can be decoded by this decoder, returning individual frames.
    /// There are functions provided that allow you to further process
    /// such chunks to get the animated frames
    pub fn is_animated(&self) -> bool {
        self.actl_info.is_some() && self.frames.len() > self.current_frame
    }

    /// Return true if the image has more frames available
    pub fn more_frames(&self) -> bool {
        self.actl_info.is_some() && self.frames.len() > self.current_frame
    }

    /// Return the acTL chunk information if present
    pub fn actl_info(&self) -> Option<ActlChunk> {
        self.actl_info.clone()
    }

    pub(crate) fn read_chunk_header(&mut self) -> Result<PngChunk, PngDecodeErrors> {
        // Format is length - chunk type - [data] - crc chunk, load crc chunk now
        let chunk_length = self.stream.get_u32_be_err()? as usize;
        let chunk_type_int = self.stream.get_u32_be_err()?.to_be_bytes();

        let mut crc_bytes = [0; 4];

        let crc_ref = self.stream.peek_at(chunk_length, 4)?;

        crc_bytes.copy_from_slice(crc_ref);

        let crc = u32::from_be_bytes(crc_bytes);

        let chunk_type = match &chunk_type_int {
            b"IHDR" => PngChunkType::IHDR,
            b"tRNS" => PngChunkType::tRNS,
            b"PLTE" => PngChunkType::PLTE,
            b"IDAT" => PngChunkType::IDAT,
            b"IEND" => PngChunkType::IEND,
            b"pHYs" => PngChunkType::pHYs,
            b"tIME" => PngChunkType::tIME,
            b"gAMA" => PngChunkType::gAMA,
            b"acTL" => PngChunkType::acTL,
            b"fcTL" => PngChunkType::fcTL,
            b"iCCP" => PngChunkType::iCCP,
            b"iTXt" => PngChunkType::iTXt,
            b"eXIf" => PngChunkType::eXIf,
            b"zTXt" => PngChunkType::zTXt,
            b"tEXt" => PngChunkType::tEXt,
            b"fdAT" => PngChunkType::fdAT,
            _ => PngChunkType::unkn
        };

        if !self.stream.has(chunk_length + 4 /*crc stream*/) {
            let err = format!(
                "Not enough bytes for chunk {:?}, bytes requested are {}, but bytes present are {}",
                chunk_type,
                chunk_length + 4,
                self.stream.remaining()
            );

            return Err(PngDecodeErrors::Generic(err));
        }
        // Confirm the CRC here.

        if self.options.png_get_confirm_crc() {
            use crate::crc::crc32_slice8;

            // go back and point to the chunk type.
            self.stream.rewind(4);
            // read chunk type + chunk data
            let bytes = self.stream.peek_at(0, chunk_length + 4).unwrap();

            // calculate the crc
            let calc_crc = !crc32_slice8(bytes, u32::MAX);

            if crc != calc_crc {
                return Err(PngDecodeErrors::BadCrc(crc, calc_crc));
            }
            // go to the point after the chunk type.
            // The other parts expect the bit-reader to point to the
            // start of the chunk data.
            self.stream.skip(4);
        }

        Ok(PngChunk {
            length: chunk_length,
            chunk: chunk_type_int,
            chunk_type,
            crc
        })
    }

    /// Decode headers from the png stream and store information
    /// in the internal structure
    ///
    /// After calling this, header information can
    /// be accessed via the public getters
    pub fn decode_headers(&mut self) -> Result<(), PngDecodeErrors> {
        if self.seen_headers && self.seen_iend {
            return Ok(());
        }
        if !self.seen_hdr {
            // READ PNG signature
            let signature = self.stream.get_u64_be_err()?;

            if signature != PNG_SIGNATURE {
                return Err(PngDecodeErrors::BadSignature);
            }
            // check if the first chunk is IHDR here
            if self.stream.peek_at(4, 4)? != b"IHDR" {
                return Err(PngDecodeErrors::GenericStatic(
                    "First chunk not IHDR, Corrupt PNG"
                ));
            }
        }
        loop {
            let header = self.read_chunk_header()?;

            self.parse_header(header)?;

            if header.chunk_type == PngChunkType::IEND {
                break;
            }
            // break here, we already have content for one
            // frame, subsequent calls will fetch the next frames
            if header.chunk_type == PngChunkType::fcTL {
                break;
            }
        }
        self.seen_headers = true;
        Ok(())
    }

    pub(crate) fn parse_header(&mut self, header: PngChunk) -> Result<(), PngDecodeErrors> {
        match header.chunk_type {
            PngChunkType::IHDR => {
                self.parse_ihdr(header)?;
            }
            PngChunkType::PLTE => {
                self.parse_plte(header)?;
            }
            PngChunkType::IDAT => {
                self.parse_idat(header)?;
            }
            PngChunkType::tRNS => {
                self.parse_trns(header)?;
            }
            PngChunkType::gAMA => {
                self.parse_gama(header)?;
            }
            PngChunkType::acTL => {
                self.parse_actl(header)?;
            }
            PngChunkType::tIME => {
                self.parse_time(header)?;
            }
            PngChunkType::eXIf => {
                self.parse_exif(header)?;
            }
            PngChunkType::iCCP => {
                self.parse_iccp(header);
            }
            PngChunkType::iTXt => {
                self.parse_itxt(header);
            }
            PngChunkType::zTXt => {
                self.parse_ztxt(header);
            }
            PngChunkType::tEXt => {
                self.parse_text(header);
            }
            PngChunkType::fcTL => {
                // may read more headers internally
                self.parse_fctl(header)?;
            }
            PngChunkType::IEND => self.seen_iend = true,
            _ => default_chunk_handler(header.length, header.chunk, &mut self.stream, header.crc)?
        }

        if !self.seen_hdr {
            return Err(GenericStatic("IHDR chunk not encountered, corrupt PNG"));
        }

        Ok(())
    }
    /// Return the configured image byte endian which the pixels
    /// will be in if the image is 16 bit
    ///
    /// If the image depth is less than 16 bits, the endianness has
    /// no effect
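    ///
    /// # Example
    /// A small sketch; the check below assumes a 16-bit PNG (the empty slice
    /// stands in for real PNG bytes):
    /// ```no_run
    /// use zune_core::bit_depth::BitDepth;
    /// use zune_png::PngDecoder;
    ///
    /// let mut decoder = PngDecoder::new(&[]);
    /// decoder.decode_headers().unwrap();
    /// if let Some(BitDepth::Sixteen) = decoder.get_depth() {
    ///     // tells us how to interpret each pair of bytes in the 16-bit output
    ///     let endian = decoder.byte_endian();
    /// }
    /// ```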
    pub const fn byte_endian(&self) -> ByteEndian {
        self.options.get_byte_endian()
    }

    /// Return the number of bytes required to hold a decoded image frame
    /// decoded using the given input transformations
    ///
    /// # Returns
    /// - `Some(usize)`: Minimum size for a buffer needed to decode the image
    /// - `None`: Indicates the image headers were not decoded.
    ///
    /// # Panics
    /// In case the `width * height * colorspace` calculation overflows a `usize`
    pub fn output_buffer_size(&self) -> Option<usize> {
        if !self.seen_hdr {
            return None;
        }

        let info = &self.png_info;
        let bytes = if info.depth == 16 && !self.options.png_get_strip_to_8bit() { 2 } else { 1 };

        let out_n = self.get_colorspace()?.num_components();
        let dims = self.get_dimensions().unwrap();

        dims.0
            .checked_mul(dims.1)?
            .checked_mul(out_n)?
            .checked_mul(bytes)
    }
    /// Return the number of bytes required to hold a decoded image frame
    /// decoded without regard to the given input transformations
    ///
    /// # Returns
    /// - `Some(usize)`: Minimum size for a buffer needed to decode the image
    /// - `None`: Indicates the image headers were not decoded.
    ///
    /// # Panics
    /// In case the `width * height * colorspace` calculation overflows a `usize`
    fn inner_buffer_size(&self) -> Option<usize> {
        if !self.seen_hdr {
            return None;
        }

        let info = self.frame_info()?;
        let p_info = &self.png_info;
        // only difference with output is here we don't care about
        // stripping 16 bit to 8 bit
        let bytes = if p_info.depth == 16 { 2 } else { 1 };

        let out_n = self.get_colorspace()?.num_components();

        info.width
            .checked_mul(info.height)?
            .checked_mul(out_n)?
            .checked_mul(bytes)
    }

    /// Get png information which was extracted from the headers
    ///
    ///
    /// # Returns
    /// - `Some(info)` : The information present in the header
    /// - `None` : Indicates headers were not decoded
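    ///
    /// # Example
    /// A small sketch of pulling ancillary data out of the decoded header
    /// information (the empty slice stands in for real PNG bytes):
    /// ```no_run
    /// use zune_png::PngDecoder;
    ///
    /// let mut decoder = PngDecoder::new(&[]);
    /// decoder.decode_headers().unwrap();
    /// let info = decoder.get_info().unwrap();
    /// // exif and ICC data come from optional chunks, hence the Option fields
    /// let has_exif = info.exif.is_some();
    /// let has_icc = info.icc_profile.is_some();
    /// ```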
    pub const fn get_info(&self) -> Option<&PngInfo> {
        if self.seen_headers {
            Some(&self.png_info)
        } else {
            None
        }
    }
    /// Get a reference to the decoder options
    /// for the decoder instance
    ///
    /// Can be used to inspect options before actual decoding but after initial
    /// creation
    pub const fn get_options(&self) -> &DecoderOptions {
        &self.options
    }

    /// Overwrite decoder options with the new options
    ///
    /// Can be used to modify decoding after initialization but before
    /// decoding; it does not do anything after an image has been decoded
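    ///
    /// # Example
    /// A small sketch, adjusting one option before decoding; `set_byte_endian`
    /// here only affects how 16-bit samples are laid out (the empty slice
    /// stands in for real PNG bytes):
    /// ```no_run
    /// use zune_core::bit_depth::ByteEndian;
    /// use zune_core::options::DecoderOptions;
    /// use zune_png::PngDecoder;
    ///
    /// let mut decoder = PngDecoder::new(&[]);
    /// let options = DecoderOptions::default().set_byte_endian(ByteEndian::BE);
    /// decoder.set_options(options);
    /// ```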
    pub fn set_options(&mut self, options: DecoderOptions) {
        self.options = options;
    }

    /// Decode a PNG encoded image and write the raw pixels into `out`
    ///
    /// # Arguments
    /// - `out`: The slice which we will write our values into.
    ///   If the slice length is smaller than [`output_buffer_size`](Self::output_buffer_size), it's an error
    ///
    /// # Converting 16 bit to 8 bit images
    /// When indicated by [`DecoderOptions::png_set_strip_to_8bit`](zune_core::options::DecoderOptions::png_get_strip_to_8bit)
    /// the library will implicitly convert 16 bit to 8 bit by discarding the lower 8 bits
    ///
    /// # Endianness
    ///
    /// - In case the image is a 16 bit PNG, the endianness of the samples may be retrieved
    ///   via the [`byte_endian`](Self::byte_endian) method, which returns the configured byte
    ///   endian of the samples.
    /// - PNG uses big endian while most machines today are little endian (x86 and mainstream Arm),
    ///   hence if the configured endianness is little endian the library will implicitly convert
    ///   samples to little endian
    ///
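    /// # Example
    /// A minimal sketch of decoding into a caller-provided buffer sized with
    /// [`output_buffer_size`](Self::output_buffer_size) (the empty slice stands
    /// in for real PNG bytes):
    /// ```no_run
    /// use zune_png::PngDecoder;
    ///
    /// let mut decoder = PngDecoder::new(&[]);
    /// decoder.decode_headers().unwrap();
    /// // allocate a buffer large enough for the decoded pixels
    /// let mut pixels = vec![0_u8; decoder.output_buffer_size().unwrap()];
    /// decoder.decode_into(&mut pixels).unwrap();
    /// ```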
    pub fn decode_into(&mut self, out: &mut [u8]) -> Result<(), PngDecodeErrors> {
        // decode headers
        self.decode_headers()?;

        // in case we are to strip 16 bit to 8 bit, decode into a separate buffer first
        if self.called_from_decode_into
            && self.png_info.depth == 16
            && self.options.png_get_strip_to_8bit()
        {
            let image_len = self.output_buffer_size().unwrap();

            if out.len() < image_len {
                return Err(PngDecodeErrors::TooSmallOutput(image_len, out.len()));
            }
            // allocate new size
            let mut temp_alloc = vec![0; self.inner_buffer_size().unwrap()];
            self.decode_into_inner(&mut temp_alloc)?;

            let out = &mut out[..image_len];
            // then convert it to 8 bit by taking the top byte of each sample
            for (input, output) in temp_alloc.chunks_exact(2).zip(out) {
                *output = input[0];
            }
            return Ok(());
        }
        self.decode_into_inner(out)
    }
    fn decode_into_inner(&mut self, out: &mut [u8]) -> Result<(), PngDecodeErrors> {
        // decode headers
        self.decode_headers()?;

        trace!("Input Colorspace: {:?} ", self.png_info.color);
        trace!("Output Colorspace: {:?} ", self.get_colorspace().unwrap());

        if self.frames.get(self.current_frame).is_none() {
            return Err(PngDecodeErrors::GenericStatic("No more frames"));
        }
        if self.frames[self.current_frame].fctl_info.is_none() {
            return Err(PngDecodeErrors::GenericStatic("Unimplemented frame info"));
        }
        let info = self.frames[self.current_frame].fctl_info.unwrap();

        let png_info = self.png_info.clone();

        let image_len = self.inner_buffer_size().unwrap();

        if out.len() < image_len {
            return Err(PngDecodeErrors::TooSmallOutput(image_len, out.len()));
        }

        let out = &mut out[..image_len];

        // go parse the IDAT chunks, returning the inflated data
        let deflate_data = self.inflate()?;

        // then release it, we no longer need it
        self.frames[self.current_frame].fdat = vec![];
        // remove idat chunks from memory
        // we are already done with them.

        if png_info.interlace_method == InterlaceMethod::Standard {
            // allocate out to be enough to hold raw decoded bytes
            let dims = self.frame_info().unwrap();

            self.create_png_image_raw(&deflate_data, dims.width, dims.height, out, &png_info)?;
        } else if png_info.interlace_method == InterlaceMethod::Adam7 {
            self.decode_interlaced(&deflate_data, out, &png_info, &info)?;
        }

        // convert to the set endian if need be
        if self.get_depth().unwrap() == BitDepth::Sixteen {
            convert_be_to_target_endian_u16(out, self.byte_endian(), self.options.use_sse41());
        }
        // one more frame decoded
        self.current_frame += 1;
        Ok(())
    }

    /// Decode the image, returning the raw pixels in a `Vec<u8>`.
    ///
    /// In case the image is 16 bits and the decoder is
    /// not converting 16 bit images to 8 bit images, the endianness of the
    /// returned bytes is given by the
    /// [`byte_endian()`](Self::byte_endian) method
    ///
    /// # Converting 16 bit to 8 bit images
    /// When indicated by [`DecoderOptions::png_set_strip_to_8bit`](zune_core::options::DecoderOptions::png_get_strip_to_8bit)
    /// the library will implicitly convert 16 bit to 8 bit by discarding the lower 8 bits
    ///
    /// returns: `Result<Vec<u8>, PngDecodeErrors>`
    ///
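    /// # Example
    /// A minimal sketch (the empty slice stands in for real PNG bytes):
    /// ```no_run
    /// use zune_png::PngDecoder;
    ///
    /// let mut decoder = PngDecoder::new(&[]);
    /// let pixels = decoder.decode_raw().unwrap();
    /// // the pixel layout follows get_colorspace(), e.g. RGB is [r,g,b, r,g,b, ...]
    /// let colorspace = decoder.get_colorspace().unwrap();
    /// ```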
    pub fn decode_raw(&mut self) -> Result<Vec<u8>, PngDecodeErrors> {
        self.decode_headers()?;
        self.called_from_decode_into = false;

        // allocate
        let new_len = self.output_buffer_size().unwrap();
        let t = self.inner_buffer_size().unwrap();
        let mut out: Vec<u8> = vec![0; t];
        //decode
        self.decode_into(&mut out)?;
        if self.options.png_get_strip_to_8bit() && self.png_info.depth == 16 {
            // in case we are to convert from 16 bit to 8 bit, we can do it here
            // we optimize it by using the same buffer the 16 bit data is stored in
            // and implicitly converting it to 8 bit.
            //
            // Do note that to convert, we only take the top 8 bits of each 16 bit sample,
            // so to go from [a,a, b,b, c,c, d,d] => [a,b,c,d] the write never catches up
            // with the read, hence no overwrite, which works for us.
            //
            // then convert to 8 bit in place
            let mut i = 0;
            let mut j = 0;
            while j < out.len() {
                out[i] = out[j];
                i += 1;
                j += 2;
            }
            out.truncate(new_len);
        }

        Ok(out)
    }

    /// Return the **yet to be decoded** frame's frame information
    ///
    /// This contains information about the yet to be decoded frame after
    /// reading the headers
    ///
    /// Once any function that decodes raw pixels is called (`decode`,`decode_raw`,`decode_into`)
    /// this will point to the next frame to be decoded.
    /// # Example
    ///
    /// This example gets frame information of an animated image
    /// ```no_run
    /// use zune_png::PngDecoder;
    /// let mut decoder = PngDecoder::new(&[]);
    ///
    /// // decode the headers to get the information
    /// decoder.decode_headers().unwrap();
    ///
    /// if decoder.is_animated(){
    ///     while decoder.more_frames(){
    ///         // multiple calls are okay, the library will handle it correctly
    ///         decoder.decode_headers().unwrap();
    ///         // get information, MUST be called before (decode,decode_headers,decode_raw)
    ///         let info = decoder.frame_info().unwrap();
    ///         // decode the frame
    ///         let data = decoder.decode().unwrap();
    ///     }
    /// }
    /// ```
    pub fn frame_info(&self) -> Option<FrameInfo> {
        if let Some(frame) = self.frames.get(self.current_frame) {
            return frame.fctl_info;
        }
        None
    }

    fn decode_interlaced(
        &mut self, deflate_data: &[u8], out: &mut [u8], info: &PngInfo, frame_info: &FrameInfo
    ) -> Result<(), PngDecodeErrors> {
        const XORIG: [usize; 7] = [0, 4, 0, 2, 0, 1, 0];
        const YORIG: [usize; 7] = [0, 0, 4, 0, 2, 0, 1];

        const XSPC: [usize; 7] = [8, 8, 4, 4, 2, 2, 1];
        const YSPC: [usize; 7] = [8, 8, 8, 4, 4, 2, 2];

        let bytes = if info.depth == 16 { 2 } else { 1 };

        let out_n = self.get_colorspace().unwrap().num_components();

        let new_len = frame_info.width * frame_info.height * out_n * bytes;

        // A mad idea would be to make this multithreaded :)
        // They called me a mad man - Thanos
        let out_bytes = out_n * bytes;

        // temporary space for holding interlaced images
        let mut final_out = vec![0_u8; new_len];

        let mut image_offset = 0;

        // get the maximum height and width for the whole interlace part
        for p in 0..7 {
            let x = (frame_info
                .width
                .saturating_sub(XORIG[p])
                .saturating_add(XSPC[p])
                .saturating_sub(1))
                / XSPC[p];

            let y = (frame_info
                .height
                .saturating_sub(YORIG[p])
                .saturating_add(YSPC[p])
                .saturating_sub(1))
                / YSPC[p];

            if x != 0 && y != 0 {
                let mut image_len = usize::from(info.color.num_components()) * x;

                image_len *= usize::from(info.depth);
                image_len += 7;
                image_len /= 8;
                image_len += 1; // filter byte
                image_len *= y;

                if image_offset + image_len > deflate_data.len() {
                    return Err(PngDecodeErrors::GenericStatic("Too short data"));
                }

                let deflate_slice = &deflate_data[image_offset..image_offset + image_len];

                self.create_png_image_raw(deflate_slice, x, y, &mut final_out, info)?;

                for j in 0..y {
                    for i in 0..x {
                        let out_y = j * YSPC[p] + YORIG[p];
                        let out_x = i * XSPC[p] + XORIG[p];

                        let final_start = out_y * info.width * out_bytes + out_x * out_bytes;
                        let out_start = (j * x + i) * out_bytes;

                        out[final_start..final_start + out_bytes]
                            .copy_from_slice(&final_out[out_start..out_start + out_bytes]);
                    }
                }
                image_offset += image_len;
            }
        }
        Ok(())
    }

    /// Decode PNG encoded images and return the vector of raw pixels; for 16-bit images
    /// the pixels are returned in a `Vec<u16>` if [`DecoderOptions::png_set_strip_to_8bit`](zune_core::options::DecoderOptions::png_get_strip_to_8bit)
    /// returns false
    ///
    ///
    /// This returns an enum type [`DecodingResult`](zune_core::result::DecodingResult) which
    /// one can de-sugar to extract the actual values.
    ///
    /// # Converting 16 bit to 8 bit images
    /// When indicated by [`DecoderOptions::png_set_strip_to_8bit`](zune_core::options::DecoderOptions::png_get_strip_to_8bit)
    /// the library will implicitly convert 16 bit to 8 bit by discarding the lower 8 bits
    ///
    /// If that is specified, this routine will always return [`DecodingResult::U8`](zune_core::result::DecodingResult::U8)
    ///
    /// # Example
    ///
    /// ```no_run
    /// use zune_core::result::DecodingResult;
    /// use zune_png::PngDecoder;
    /// let mut decoder = PngDecoder::new(&[]);
    ///
    /// match decoder.decode().unwrap(){
    ///     DecodingResult::U16(value)=>{
    ///         // deal with 16 bit images
    ///     }
    ///     DecodingResult::U8(value)=>{
    ///         // deal with 8 bit images
    ///     }
    ///     _=>{}
    /// }
    /// ```
    #[rustfmt::skip]
    pub fn decode(&mut self) -> Result<DecodingResult, PngDecodeErrors>
    {
        // Here we want to either return a `u8` or a `u16` depending on the
        // headers, so we pull a few tricks
        // 1 - We either allocate u8 or u16 depending on the output.
        // We actually allocate both, but one of the vectors ends up being
        // zero length, and creating an empty vec allocates nothing on the heap
        // 2 - We convert samples to native endian, so that transmuting is a no-op in case of
        // 16 bit images in the next step
        // 3 - We use bytemuck to safely align, hence keeping the no-unsafe mantra except
        // for platform specific intrinsics

        if !self.seen_headers || !self.seen_iend {
            self.decode_headers()?;
        }
        // in case we are to strip 16 bit to 8 bit, use decode_raw which does that for us
        if self.options.png_get_strip_to_8bit() && self.png_info.depth == 16 {
            let bytes = self.decode_raw()?;
            return Ok(DecodingResult::U8(bytes));
        }
        // configure the decoder to convert samples to native endian
        if is_le()
        {
            self.options = self.options.set_byte_endian(ByteEndian::LE);
        } else {
            self.options = self.options.set_byte_endian(ByteEndian::BE);
        }

        let info = &self.png_info;
        let bytes = if info.depth == 16 { 2 } else { 1 };

        let out_n = self.get_colorspace().unwrap().num_components();
        let new_len = info.width * info.height * out_n;

        let mut out_u8: Vec<u8> = vec![0; new_len * usize::from(info.depth != 16)];
        let mut out_u16: Vec<u16> = vec![0; new_len * usize::from(info.depth == 16)];

        // use either out_u8 or out_u16 depending on the expected type for the output
        let out = if bytes == 1
        {
            &mut out_u8
        } else {
            let b = convert_u16_to_u8_slice(&mut out_u16);

            assert_eq!(b.len(), new_len * 2); // length should be twice that of u8
            b
        };
        self.decode_into(out)?;

        if self.png_info.depth <= 8
        {
            return Ok(DecodingResult::U8(out_u8));
        }

        if self.png_info.depth == 16
        {
            return Ok(DecodingResult::U16(out_u16));
        }

        Err(PngDecodeErrors::GenericStatic("Not implemented"))
    }
    /// Create the png data from post-deflated data
    ///
    /// `out` needs to have enough space to hold the data, otherwise
    /// this will panic
    ///
    /// This is to allow reuse, e.g. interlaced images use one big allocation,
    /// and since decoding them ends up calling this multiple times, the allocation was
    /// moved away from this method to its caller
    #[allow(clippy::manual_memcpy, clippy::comparison_chain)]
    fn create_png_image_raw(
        &mut self, deflate_data: &[u8], width: usize, height: usize, out: &mut [u8], info: &PngInfo
    ) -> Result<(), PngDecodeErrors> {
        let use_sse4 = self.options.use_sse41();
        let use_sse2 = self.options.use_sse2();

        let bytes = if info.depth == 16 { 2 } else { 1 };

        let out_colorspace = self.get_colorspace().unwrap();

        let mut img_width_bytes;

        img_width_bytes = usize::from(info.component) * width;
        img_width_bytes *= usize::from(info.depth);
        img_width_bytes += 7;
        img_width_bytes /= 8;

        let out_n = usize::from(info.color.num_components());

        let image_len = img_width_bytes * height;

        if deflate_data.len() < image_len + height
        // account for filter bytes
        {
            let msg = format!(
                "Not enough pixels, expected {} but found {}",
                image_len,
                deflate_data.len()
            );
            return Err(PngDecodeErrors::Generic(msg));
        }
        // do png un-filtering
        let mut chunk_size;
        let mut components = usize::from(info.color.num_components()) * bytes;

        if info.depth < 8 {
            // if the bit depth is less than 8, the spec says the filter
            // operates on bytes, so use a single byte offset
            components = 1;
        }

        // width times colour components gives us the number of bytes per scanline
        chunk_size = width * out_n;
        chunk_size *= usize::from(info.depth);
        chunk_size += 7;
        chunk_size /= 8;
        // filter type
        chunk_size += 1;

        let out_chunk_size = width * out_colorspace.num_components() * bytes;

        // each chunk is a width stride of unfiltered data
        let chunks = deflate_data.chunks_exact(chunk_size);

        // Begin doing loop un-filtering.
        let width_stride = chunk_size - 1;

        let mut prev_row_start = 0;
        let mut first_row = true;
        let mut out_position = 0;

        let mut will_post_process = self.seen_trns | self.seen_ptle | (info.depth < 8);

        let add_alpha_channel =
            self.options.png_get_add_alpha_channel() && (!self.png_info.color.has_alpha());

        will_post_process |= add_alpha_channel;

        if will_post_process && self.previous_stride.len() < out_chunk_size {
            self.previous_stride.resize(out_chunk_size, 0);
        }
        let n_components = usize::from(info.color.num_components());

        for (i, in_stride) in chunks.take(height).enumerate() {
            // Split output into current and previous
            // current points to the start of the row where we are writing de-filtered output to
            // prev is all rows we already wrote output to.

            let (prev, mut current) = out.split_at_mut(out_position);

            current = &mut current[0..out_chunk_size];

            // get the previous row.
            // Set this to a dummy to handle the special case of the first row; if we aren't in the first
            // row, we actually take the real slice a line down
            let mut prev_row: &[u8] = &[0_u8];

            if !first_row {
                // normal bit depth, use the previous row as normal
                prev_row = &prev[prev_row_start..prev_row_start + out_chunk_size];
                prev_row_start += out_chunk_size;
            }

            out_position += out_chunk_size;

            // take the filter byte
            let filter_byte = in_stride[0];
            // raw image bytes
            let raw = &in_stride[1..];

            // get its type
            let mut filter = FilterMethod::from_int(filter_byte)
                .ok_or_else(|| PngDecodeErrors::Generic(format!("Unknown filter {filter_byte}")))?;

            if first_row {
                // match our filters to special filters for the first row
                // these special filters do not need the previous scanline and treat it
                // as zero

                if filter == FilterMethod::Paeth {
                    filter = FilterMethod::PaethFirst;
                }
                if filter == FilterMethod::Up {
                    // up for the first row becomes a memcpy
                    filter = FilterMethod::None;
                }
                if filter == FilterMethod::Average {
                    filter = FilterMethod::AvgFirst;
                }

                first_row = false;
            }

            match filter {
                FilterMethod::None => current[0..width_stride].copy_from_slice(raw),

                FilterMethod::Average => handle_avg(prev_row, raw, current, components, use_sse4),

                FilterMethod::Sub => handle_sub(raw, current, components, use_sse2),

                FilterMethod::Up => handle_up(prev_row, raw, current),

                FilterMethod::Paeth => handle_paeth(prev_row, raw, current, components, use_sse4),

                FilterMethod::PaethFirst => handle_paeth_first(raw, current, components),

                FilterMethod::AvgFirst => handle_avg_first(raw, current, components),

                FilterMethod::Unknown => unreachable!()
            }

            if will_post_process && i > 0 {
                // run the post processor two scanlines behind so that we
                // don't mess with any filters that require previous row

                // read the row we are about to filter
                let to_filter_row = &mut prev[(i - 1) * out_chunk_size..(i) * out_chunk_size];

                if info.depth < 8 {
                    // check if we will run any other transform
                    let extra_transform = self.seen_ptle | self.seen_trns | add_alpha_channel;

                    if extra_transform {
                        // input data is in to_filter_row,
                        // we write output to previous_stride
                        // since other parts use previous_stride
                        expand_bits_to_byte(
                            width,
                            usize::from(info.depth),
                            n_components,
                            self.seen_ptle,
                            to_filter_row,
                            &mut self.previous_stride
                        )
                    } else {
                        // no extra transform, just depth upscaling, so let's
                        // do that,

                        // copy the row to a temporary space
                        self.previous_stride[..width_stride]
                            .copy_from_slice(&to_filter_row[..width_stride]);

                        expand_bits_to_byte(
                            width,
                            usize::from(info.depth),
                            n_components,
                            self.seen_ptle,
                            &self.previous_stride,
                            to_filter_row
                        )
                    }
                } else {
                    // copy the row to a temporary space
                    self.previous_stride[..width_stride]
                        .copy_from_slice(&to_filter_row[..width_stride]);
                }

                if self.seen_trns && self.png_info.color != PngColor::Palette {
                    // the expansion is a trns expansion
                    // bytes are already in position, so finish the business

                    if info.depth <= 8 {
                        expand_trns::<false>(
                            &self.previous_stride,
                            to_filter_row,
                            info.color,
                            self.trns_bytes,
                            info.depth
                        );
                    } else if info.depth == 16 {
                        // Tested by test_palette_trns_16bit.
                        expand_trns::<true>(
                            &self.previous_stride,
                            to_filter_row,
                            info.color,
                            self.trns_bytes,
                            info.depth
                        );
                    }
                }

                if self.seen_ptle && self.png_info.color == PngColor::Palette {
                    if self.palette.is_empty() {
                        return Err(PngDecodeErrors::EmptyPalette);
                    }
                    let plte_entry: &[PLTEEntry; 256] = self.palette[..256].try_into().unwrap();

                    // so now we have two things:
                    // the palette entries stored in self.previous_stride and
                    // the row to fill with the palette stored in to_filter_row,
                    // so we can finally expand the entries

                    if self.seen_trns | add_alpha_channel {
                        // if a tRNS chunk is present in paletted images, it contains
                        // alpha byte values, so that means we create alpha data from
                        // raw bytes

                        // if we are to add an alpha channel for palette images, we simply just
                        // read four entries from the palette.
                        //
                        // The palette is set up so that the alpha channel is initialized as 255
                        // for non-alpha images,
                        expand_palette(&self.previous_stride, to_filter_row, plte_entry, 4);
                    } else {
                        // Normal expansion
                        expand_palette(&self.previous_stride, to_filter_row, plte_entry, 3);
                    }
                } else if add_alpha_channel {
                    // the image is a normal RGB/Luma image, to which we need to add the alpha channel;
                    // do it here
                    add_alpha(
                        &self.previous_stride,
                        to_filter_row,
                        self.png_info.color,
                        self.get_depth().unwrap()
                    );
                }
            }
        }

        if will_post_process {
            for i in height..height + min(height, 1) {
                let to_filter_row = &mut out[(i - 1) * out_chunk_size..i * out_chunk_size];

                // check if we will run any other transform
                let extra_transform = self.seen_ptle | self.seen_trns;

                if info.depth < 8 {
                    if extra_transform {
                        // input data is in to_filter_row,
                        // we write output to previous_stride
                        // since other parts use previous_stride
                        expand_bits_to_byte(
                            width,
                            usize::from(info.depth),
                            n_components,
                            self.seen_ptle,
                            to_filter_row,
                            &mut self.previous_stride
                        )
                    } else {
                        // no extra transform, just depth upscaling, so let's
                        // do that,

                        // copy the row to a temporary space
                        self.previous_stride[..width_stride]
                            .copy_from_slice(&to_filter_row[..width_stride]);

                        expand_bits_to_byte(
                            width,
                            usize::from(info.depth),
                            n_components,
                            self.seen_ptle,
                            &self.previous_stride,
                            to_filter_row
                        )
                    }
                } else {
                    // copy the row to a temporary space
                    self.previous_stride[..width_stride]
                        .copy_from_slice(&to_filter_row[..width_stride]);
                }
                if self.seen_trns && self.png_info.color != PngColor::Palette {
                    // the expansion is a trns expansion
                    // bytes are already in position, so finish the business

                    if info.depth <= 8 {
                        expand_trns::<false>(
                            &self.previous_stride,
                            to_filter_row,
                            info.color,
                            self.trns_bytes,
                            info.depth
                        );
                    } else if info.depth == 16 {
                        // Tested by test_palette_trns_16bit.
                        expand_trns::<true>(
                            &self.previous_stride,
                            to_filter_row,
                            info.color,
                            self.trns_bytes,
                            info.depth
                        );
                    }
                }
                if self.seen_ptle && self.png_info.color == PngColor::Palette {
                    if self.palette.is_empty() {
                        return Err(PngDecodeErrors::EmptyPalette);
                    }

                    let plte_entry: &[PLTEEntry; 256] = self.palette[..256].try_into().unwrap();

                    if self.seen_trns | add_alpha_channel {
                        expand_palette(&self.previous_stride, to_filter_row, plte_entry, 4);
                    } else {
                        expand_palette(&self.previous_stride, to_filter_row, plte_entry, 3);
                    }
                } else if add_alpha_channel {
                    add_alpha(
                        &self.previous_stride,
                        to_filter_row,
                        self.png_info.color,
                        self.get_depth().unwrap()
                    );
                }
            }
        }
        Ok(())
    }

    /// Undo deflate encoding
    #[allow(clippy::manual_memcpy)]
    fn inflate(&mut self) -> Result<Vec<u8>, PngDecodeErrors> {
        let flat_data = &self.frames[self.current_frame];

        // An annoying thing is that deflate doesn't
        // store its uncompressed size,
        // so we can't pre-allocate storage and pass that willy nilly
        //
        // Meaning we are left with some design choices
        // 1. Have deflate resize at will
        // 2. Have deflate return incomplete, to indicate we need to extend
        //    the vec, extend and go back to inflate.
        //
        //
        // so we choose point 1.
        //
        // This allows the zlib decoder to optimize its own paths (which it does)
        // because it controls the allocation and doesn't have to check for near EOB
        // runs.
        //
        let depth_scale = if self.png_info.depth == 16 { 2 } else { 1 };

        let size_hint = (self.png_info.width + 1)
            * self.png_info.height
            * depth_scale
            * usize::from(self.png_info.color.num_components());

        let option = DeflateOptions::default()
            .set_size_hint(size_hint)
            .set_limit(size_hint + 4 * (self.png_info.height))
            .set_confirm_checksum(self.options.inflate_get_confirm_adler());

        let mut decoder = zune_inflate::DeflateDecoder::new_with_options(&flat_data.fdat, option);

        decoder
            .decode_zlib()
            .map_err(PngDecodeErrors::ZlibDecodeErrors)
    }
}
1323}