1use crate::error::{Error, Result};
2
// Scale-type codes carried in client-data word 0.
const SCALE_FLOAT_DSCALE: u32 = 0;
const SCALE_FLOAT_ESCALE: u32 = 1;
const SCALE_INT: u32 = 2;

// Datatype class codes (client-data word 3).
const CLASS_INTEGER: u32 = 0;
const CLASS_FLOAT: u32 = 1;

// Integer signedness codes (client-data word 5).
const SIGN_UNSIGNED: u32 = 0;
const SIGN_SIGNED: u32 = 1;

// Byte-order codes (client-data word 6 and `native_order()`).
const ORDER_LE: u32 = 0;
const ORDER_BE: u32 = 1;

// Fill-value availability flag (client-data word 7); 0 means no fill value.
const FILL_UNDEFINED: u32 = 0;
// Filter header preceding the packed payload: 4 bytes minbits (LE) +
// 1 byte minval length + space for the minimum value (payload starts at 21).
const HEADER_SIZE: usize = 21;
// Required number of u32 client-data words.
const TOTAL_PARAMS: usize = 20;
19
/// Decoded view of the filter's client-data words.
#[derive(Clone, Copy)]
struct ScaleOffsetParams<'a> {
    /// Full client-data slice; words 8.. hold the optional fill value bytes.
    raw: &'a [u32],
    /// Number of elements in the chunk.
    element_count: usize,
    /// Datatype class (`CLASS_INTEGER` or `CLASS_FLOAT`).
    class: u32,
    /// Size of one element in bytes.
    size: usize,
    /// Integer signedness code (`SIGN_*`).
    sign: u32,
    /// Stored byte order of the data (`ORDER_*`).
    order: u32,
    /// Non-zero when a fill value is present in `raw`.
    fill_available: u32,
    /// Scale type (`SCALE_*`).
    scale_type: u32,
    /// Scale factor; used as the power-of-ten exponent for float D-scale.
    scale_factor: i32,
}
32
/// Parameters needed to expand the bit-packed payload.
#[derive(Clone, Copy)]
struct PackedParams {
    /// Element size in bytes.
    size: usize,
    /// Number of significant bits stored per element.
    minbits: usize,
    /// This machine's byte order (`ORDER_LE` / `ORDER_BE`).
    native_order: u32,
}
39
/// MSB-first bit cursor over a byte slice.
struct BitReader<'a> {
    data: &'a [u8],
    /// Index of the byte currently being consumed.
    byte_idx: usize,
    /// Unread bits remaining in the current byte (1..=8).
    bits_left: u8,
}
45
46impl<'a> BitReader<'a> {
47 fn new(data: &'a [u8]) -> Self {
48 Self {
49 data,
50 byte_idx: 0,
51 bits_left: 8,
52 }
53 }
54
55 fn read_bits(&mut self, count: usize) -> Result<u8> {
56 if count > 8 {
57 return Err(filter_error(
58 "scaleoffset attempted to read more than one byte of packed bits",
59 ));
60 }
61 if count == 0 {
62 return Ok(0);
63 }
64
65 let mut remaining = count;
66 let mut value = 0u16;
67
68 while remaining > 0 {
69 let current = self
70 .data
71 .get(self.byte_idx)
72 .copied()
73 .ok_or_else(|| filter_error("scaleoffset packed stream ended early"))?;
74 let take = remaining.min(self.bits_left as usize);
75 let shift = self.bits_left as usize - take;
76 let chunk = (current >> shift) & low_mask(take);
77 value = (value << take) | u16::from(chunk);
78 self.bits_left -= take as u8;
79 remaining -= take;
80
81 if self.bits_left == 0 {
82 self.byte_idx += 1;
83 self.bits_left = 8;
84 }
85 }
86
87 Ok(value as u8)
88 }
89}
90
/// Reverses the scale-offset filter for a single chunk.
///
/// `data` starts with a `HEADER_SIZE`-byte header — little-endian
/// `minbits` in bytes 0..4, the stored minimum-value length in byte 4,
/// and the minimum value itself from byte 5 — followed by the bit-packed
/// element payload. `client_data` supplies the filter parameters (see
/// `parse_params`). The decoded bytes are returned in the byte order
/// recorded in the parameters (`params.order`).
pub fn decompress(data: &[u8], client_data: &[u32]) -> Result<Vec<u8>> {
    let params = parse_params(client_data)?;
    // Width of one element in bits; several special cases compare against it.
    let full_width_bits = params
        .size
        .checked_mul(8)
        .ok_or_else(|| filter_error("scaleoffset datatype width overflowed"))?;

    // The float E-scale variant is not implemented.
    if params.class == CLASS_FLOAT && params.scale_type == SCALE_FLOAT_ESCALE {
        return Err(Error::UnsupportedFilter("scaleoffset float E-scale".into()));
    }

    // A scale factor equal to the element width means the chunk was stored
    // unmodified: pass it through unchanged.
    if params.scale_type != SCALE_FLOAT_DSCALE && params.scale_factor as usize == full_width_bits {
        return Ok(data.to_vec());
    }

    // Reject class/scale-type combinations the decoder cannot handle.
    if params.class == CLASS_FLOAT {
        if params.scale_type != SCALE_FLOAT_DSCALE && params.scale_type != SCALE_FLOAT_ESCALE {
            return Err(filter_error(
                "scaleoffset float filter has an invalid scale type",
            ));
        }
    } else if params.class == CLASS_INTEGER {
        if params.scale_type != SCALE_INT {
            return Err(filter_error(
                "scaleoffset integer filter has an invalid scale type",
            ));
        }
    } else {
        return Err(filter_error("scaleoffset datatype class is not supported"));
    }

    if data.len() < HEADER_SIZE {
        return Err(filter_error(
            "scaleoffset payload is shorter than the filter header",
        ));
    }

    // Number of significant bits actually stored per element.
    let minbits = u32::from_le_bytes([data[0], data[1], data[2], data[3]]) as usize;
    if minbits > full_width_bits {
        return Err(filter_error(
            "scaleoffset header encodes more bits than the datatype can hold",
        ));
    }

    // Byte 4 records how many minimum-value bytes follow; clamp to u64 width.
    let minval_size = usize::min(std::mem::size_of::<u64>(), data[4] as usize);
    // NOTE(review): both conditions are already implied by the length check
    // above (5 + minval_size <= 13 < HEADER_SIZE), so this guard is
    // defensive only.
    if HEADER_SIZE > data.len() || 5 + minval_size > data.len() {
        return Err(filter_error("scaleoffset header is truncated"));
    }

    // Minimum value, stored little-endian and zero-padded to 8 bytes.
    let mut minval_bytes = [0u8; 8];
    minval_bytes[..minval_size].copy_from_slice(&data[5..5 + minval_size]);
    let minval = u64::from_le_bytes(minval_bytes);

    let total_size = params
        .element_count
        .checked_mul(params.size)
        .ok_or_else(|| filter_error("scaleoffset output size overflowed"))?;
    let mut out = vec![0u8; total_size];

    if minbits == full_width_bits {
        // Full precision: the payload is the raw data, no offset to undo.
        let payload_end = HEADER_SIZE
            .checked_add(total_size)
            .ok_or_else(|| filter_error("scaleoffset payload size overflowed"))?;
        let payload = data.get(HEADER_SIZE..payload_end).ok_or_else(|| {
            filter_error("scaleoffset payload is shorter than the expected output")
        })?;
        out.copy_from_slice(payload);

        if params.order != native_order() {
            swap_endian_in_place(&mut out, params.size);
        }

        return Ok(out);
    } else if minbits != 0 {
        // Expand the bit-packed payload. When minbits == 0 this is skipped
        // and `out` stays zeroed (every element equals the minimum).
        let packed = PackedParams {
            size: params.size,
            minbits,
            native_order: native_order(),
        };
        unpack_packed(&mut out, params.element_count, &data[HEADER_SIZE..], packed)?;
    }

    // Undo the offset (and, for floats, the decimal scaling).
    match params.class {
        CLASS_INTEGER => postprocess_integers(&mut out, params, minbits, minval)?,
        CLASS_FLOAT => postprocess_floats(&mut out, params, minbits, minval)?,
        _ => unreachable!(),
    }

    // Postprocessing works in native order; convert to the stored order.
    if params.order != native_order() {
        swap_endian_in_place(&mut out, params.size);
    }

    Ok(out)
}
185
186fn parse_params(client_data: &[u32]) -> Result<ScaleOffsetParams<'_>> {
187 if client_data.len() != TOTAL_PARAMS {
188 return Err(filter_error(
189 "scaleoffset client data length does not match the required parameter count",
190 ));
191 }
192
193 Ok(ScaleOffsetParams {
194 raw: client_data,
195 element_count: usize_from_u32(client_data[2], "scaleoffset element count")?,
196 class: client_data[3],
197 size: usize_from_u32(client_data[4], "scaleoffset datatype size")?,
198 sign: client_data[5],
199 order: client_data[6],
200 fill_available: client_data[7],
201 scale_type: client_data[0],
202 scale_factor: client_data[1] as i32,
203 })
204}
205
/// Expands the bit-packed stream `data` into `out`.
///
/// Each element contributes `minbits` significant bits, MSB-first, to the
/// stream; the remaining high-order bits of every element stay zero
/// (callers pre-zero `out`). Byte placement depends on the native byte
/// order so the significant bits land in the low-order end of each element.
fn unpack_packed(
    out: &mut [u8],
    element_count: usize,
    data: &[u8],
    packed: PackedParams,
) -> Result<()> {
    let mut bits = BitReader::new(data);
    // Element width in bits.
    let dtype_len = packed
        .size
        .checked_mul(8)
        .ok_or_else(|| filter_error("scaleoffset datatype width overflowed"))?;

    for i in 0..element_count {
        let offset = i
            .checked_mul(packed.size)
            .ok_or_else(|| filter_error("scaleoffset output offset overflowed"))?;
        let slice = out
            .get_mut(offset..offset + packed.size)
            .ok_or_else(|| filter_error("scaleoffset output range is out of bounds"))?;

        match packed.native_order {
            ORDER_LE => {
                // Highest byte index holding packed bits; on LE the most
                // significant byte has the highest index, so walk from
                // `begin` down to byte 0 as the stream delivers MSB first.
                let begin = packed.size - 1 - (dtype_len - packed.minbits) / 8;
                for k in (0..=begin).rev() {
                    // The topmost byte may be only partially occupied.
                    let bit_count = if k == begin {
                        let remainder = (dtype_len - packed.minbits) % 8;
                        if remainder == 0 {
                            8
                        } else {
                            8 - remainder
                        }
                    } else {
                        8
                    };
                    slice[k] = bits.read_bits(bit_count)?;
                }
            }
            ORDER_BE => {
                // On BE the significant bytes sit at the tail of the
                // element, so walk forward from the first occupied byte.
                let begin = (dtype_len - packed.minbits) / 8;
                for (k, byte) in slice.iter_mut().enumerate().take(packed.size).skip(begin) {
                    // The first occupied byte may be only partially filled.
                    let bit_count = if k == begin {
                        let remainder = (dtype_len - packed.minbits) % 8;
                        if remainder == 0 {
                            8
                        } else {
                            8 - remainder
                        }
                    } else {
                        8
                    };
                    *byte = bits.read_bits(bit_count)?;
                }
            }
            _ => {
                return Err(filter_error(
                    "scaleoffset encountered an unknown byte order",
                ))
            }
        }
    }

    Ok(())
}
269
/// Undoes the integer offset in native byte order: adds the stored
/// minimum back to every element using wrapping arithmetic.
///
/// When a fill value is defined, an element whose stored bits are all
/// ones over `minbits` (see `bit_marker`) is a placeholder and is
/// replaced by the dataset fill value instead of being offset.
fn postprocess_integers(
    out: &mut [u8],
    params: ScaleOffsetParams<'_>,
    minbits: usize,
    minval: u64,
) -> Result<()> {
    // Fill-value bytes from the client data, when the dataset has one.
    let fill = if params.fill_available != FILL_UNDEFINED {
        Some(extract_fill_bytes(params.raw, params.size)?)
    } else {
        None
    };
    // All-ones pattern in `minbits` bits marks "element was the fill value".
    let fill_marker = bit_marker(minbits);

    match params.sign {
        SIGN_UNSIGNED => {
            let min = truncate_unsigned(minval, params.size)?;
            for chunk in out.chunks_exact_mut(params.size) {
                let raw = read_unsigned_native(chunk)?;
                let value = if let Some(fill_bytes) = &fill {
                    if raw == fill_marker {
                        read_unsigned_native(fill_bytes)?
                    } else {
                        raw.wrapping_add(min)
                    }
                } else {
                    raw.wrapping_add(min)
                };
                write_unsigned_native(chunk, value)?;
            }
        }
        SIGN_SIGNED => {
            // The marker is compared against the raw unsigned bit pattern,
            // but the offset itself is applied in signed arithmetic.
            let min = truncate_signed(minval, params.size)?;
            for chunk in out.chunks_exact_mut(params.size) {
                let raw_unsigned = read_unsigned_native(chunk)?;
                let value = if let Some(fill_bytes) = &fill {
                    if raw_unsigned == fill_marker {
                        read_signed_native(fill_bytes)?
                    } else {
                        read_signed_native(chunk)?.wrapping_add(min)
                    }
                } else {
                    read_signed_native(chunk)?.wrapping_add(min)
                };
                write_signed_native(chunk, value)?;
            }
        }
        _ => {
            return Err(filter_error(
                "scaleoffset integer sign code is not supported",
            ))
        }
    }

    Ok(())
}
325
/// Undoes the float D-scale transform in native byte order: each packed
/// integer becomes `raw / 10^scale_factor + min`.
///
/// Elements equal to the all-ones `minbits` marker are restored to the
/// dataset fill value when one is defined. Only 4- and 8-byte floats are
/// supported; the E-scale variant is rejected up front.
fn postprocess_floats(
    out: &mut [u8],
    params: ScaleOffsetParams<'_>,
    minbits: usize,
    minval: u64,
) -> Result<()> {
    if params.scale_type != SCALE_FLOAT_DSCALE {
        return Err(Error::UnsupportedFilter("scaleoffset float E-scale".into()));
    }

    // Decimal scale applied by the encoder.
    let scale = 10f64.powi(params.scale_factor);
    let fill_marker = bit_marker(minbits);
    let fill = if params.fill_available != FILL_UNDEFINED {
        Some(extract_fill_bytes(params.raw, params.size)?)
    } else {
        None
    };

    match params.size {
        4 => {
            // Minimum value re-interpreted as a native f32.
            let min_bytes = truncate_minval_bytes(minval, 4)?;
            let min = f32::from_ne_bytes(min_bytes[..4].try_into().unwrap());
            for chunk in out.chunks_exact_mut(4) {
                let raw = i32::from_ne_bytes(chunk.try_into().unwrap());
                let value = if let Some(fill_bytes) = &fill {
                    // Zero-extend the bit pattern before comparing with
                    // the 64-bit marker.
                    if raw as u32 as u64 == fill_marker {
                        f32::from_ne_bytes(fill_bytes.clone().try_into().unwrap())
                    } else {
                        (raw as f64 / scale) as f32 + min
                    }
                } else {
                    (raw as f64 / scale) as f32 + min
                };
                chunk.copy_from_slice(&value.to_ne_bytes());
            }
        }
        8 => {
            // Minimum value re-interpreted as a native f64.
            let min_bytes = truncate_minval_bytes(minval, 8)?;
            let min = f64::from_ne_bytes(min_bytes[..8].try_into().unwrap());
            for chunk in out.chunks_exact_mut(8) {
                let raw = i64::from_ne_bytes(chunk.try_into().unwrap());
                let value = if let Some(fill_bytes) = &fill {
                    if raw as u64 == fill_marker {
                        f64::from_ne_bytes(fill_bytes.clone().try_into().unwrap())
                    } else {
                        raw as f64 / scale + min
                    }
                } else {
                    raw as f64 / scale + min
                };
                chunk.copy_from_slice(&value.to_ne_bytes());
            }
        }
        _ => {
            return Err(filter_error(
                "scaleoffset floating-point decode only supports 4-byte and 8-byte values",
            ))
        }
    }

    Ok(())
}
388
/// Reassembles the dataset fill value from client-data words 8 onward
/// into a `size`-byte buffer laid out in native byte order.
fn extract_fill_bytes(params: &[u32], size: usize) -> Result<Vec<u8>> {
    let mut out = vec![0u8; size];
    // Fill-value words start at client-data index 8.
    let mut idx = 8usize;
    if native_order() == ORDER_LE {
        // LE: copy word bytes front-to-back, up to 4 bytes per word.
        let mut pos = 0usize;
        while pos < size {
            let word = *params
                .get(idx)
                .ok_or_else(|| filter_error("scaleoffset fill value is truncated"))?;
            let bytes = word.to_ne_bytes();
            let take = (size - pos).min(4);
            out[pos..pos + take].copy_from_slice(&bytes[..take]);
            pos += take;
            idx += 1;
        }
    } else {
        // BE: whole words are placed back-to-front so the first word fills
        // the tail of the buffer. NOTE(review): this branch cannot be
        // exercised on little-endian hosts — confirm the resulting layout
        // against a big-endian target before relying on it.
        let mut remaining = size;
        let mut pos = size.saturating_sub(remaining.min(4));
        while remaining >= 4 {
            let word = *params
                .get(idx)
                .ok_or_else(|| filter_error("scaleoffset fill value is truncated"))?;
            out[pos..pos + 4].copy_from_slice(&word.to_ne_bytes());
            idx += 1;
            remaining -= 4;
            if remaining >= 4 {
                pos -= 4;
            } else if remaining > 0 {
                pos -= remaining;
            }
        }
        // A final partial word contributes its low-order bytes to the
        // front of the buffer.
        if remaining > 0 {
            let word = *params
                .get(idx)
                .ok_or_else(|| filter_error("scaleoffset fill value is truncated"))?;
            let bytes = word.to_ne_bytes();
            out[..remaining].copy_from_slice(&bytes[4 - remaining..]);
        }
    }
    Ok(out)
}
430
431fn truncate_minval_bytes(minval: u64, size: usize) -> Result<Vec<u8>> {
432 match size {
433 4 | 8 => {
434 let bytes = minval.to_ne_bytes();
435 if native_order() == ORDER_LE {
436 Ok(bytes[..size].to_vec())
437 } else {
438 Ok(bytes[8 - size..].to_vec())
439 }
440 }
441 _ => Err(filter_error(
442 "scaleoffset floating-point size is not supported",
443 )),
444 }
445}
446
447fn truncate_unsigned(minval: u64, size: usize) -> Result<u64> {
448 Ok(match size {
449 1 => minval & 0xFF,
450 2 => minval & 0xFFFF,
451 4 => minval & 0xFFFF_FFFF,
452 8 => minval,
453 _ => {
454 return Err(filter_error(
455 "scaleoffset integer decode only supports 1-, 2-, 4-, and 8-byte values",
456 ))
457 }
458 })
459}
460
461fn truncate_signed(minval: u64, size: usize) -> Result<i64> {
462 Ok(match size {
463 1 => i8::from_ne_bytes([minval.to_ne_bytes()[0]]) as i64,
464 2 => i16::from_ne_bytes(minval.to_ne_bytes()[..2].try_into().unwrap()) as i64,
465 4 => i32::from_ne_bytes(minval.to_ne_bytes()[..4].try_into().unwrap()) as i64,
466 8 => i64::from_ne_bytes(minval.to_ne_bytes()),
467 _ => {
468 return Err(filter_error(
469 "scaleoffset integer decode only supports 1-, 2-, 4-, and 8-byte values",
470 ))
471 }
472 })
473}
474
475fn read_unsigned_native(bytes: &[u8]) -> Result<u64> {
476 Ok(match bytes.len() {
477 1 => bytes[0] as u64,
478 2 => u16::from_ne_bytes(bytes.try_into().unwrap()) as u64,
479 4 => u32::from_ne_bytes(bytes.try_into().unwrap()) as u64,
480 8 => u64::from_ne_bytes(bytes.try_into().unwrap()),
481 _ => {
482 return Err(filter_error(
483 "scaleoffset integer decode only supports 1-, 2-, 4-, and 8-byte values",
484 ))
485 }
486 })
487}
488
489fn write_unsigned_native(bytes: &mut [u8], value: u64) -> Result<()> {
490 match bytes.len() {
491 1 => bytes[0] = value as u8,
492 2 => bytes.copy_from_slice(&(value as u16).to_ne_bytes()),
493 4 => bytes.copy_from_slice(&(value as u32).to_ne_bytes()),
494 8 => bytes.copy_from_slice(&value.to_ne_bytes()),
495 _ => {
496 return Err(filter_error(
497 "scaleoffset integer decode only supports 1-, 2-, 4-, and 8-byte values",
498 ))
499 }
500 }
501 Ok(())
502}
503
504fn read_signed_native(bytes: &[u8]) -> Result<i64> {
505 Ok(match bytes.len() {
506 1 => i8::from_ne_bytes([bytes[0]]) as i64,
507 2 => i16::from_ne_bytes(bytes.try_into().unwrap()) as i64,
508 4 => i32::from_ne_bytes(bytes.try_into().unwrap()) as i64,
509 8 => i64::from_ne_bytes(bytes.try_into().unwrap()),
510 _ => {
511 return Err(filter_error(
512 "scaleoffset integer decode only supports 1-, 2-, 4-, and 8-byte values",
513 ))
514 }
515 })
516}
517
518fn write_signed_native(bytes: &mut [u8], value: i64) -> Result<()> {
519 match bytes.len() {
520 1 => bytes[0] = value as i8 as u8,
521 2 => bytes.copy_from_slice(&(value as i16).to_ne_bytes()),
522 4 => bytes.copy_from_slice(&(value as i32).to_ne_bytes()),
523 8 => bytes.copy_from_slice(&value.to_ne_bytes()),
524 _ => {
525 return Err(filter_error(
526 "scaleoffset integer decode only supports 1-, 2-, 4-, and 8-byte values",
527 ))
528 }
529 }
530 Ok(())
531}
532
/// Reverses the bytes of each `element_size`-wide chunk in place,
/// converting between little- and big-endian layouts. Element sizes of
/// 0 or 1 are no-ops; a trailing partial chunk is left untouched.
fn swap_endian_in_place(bytes: &mut [u8], element_size: usize) {
    if element_size <= 1 {
        return;
    }
    bytes
        .chunks_exact_mut(element_size)
        .for_each(|chunk| chunk.reverse());
}
541
/// Returns the all-ones bit pattern spanning the low `minbits` bits —
/// the sentinel used to mark fill-value elements in the packed stream.
/// Saturates to `u64::MAX` for widths of 64 bits or more.
fn bit_marker(minbits: usize) -> u64 {
    match minbits {
        0 => 0,
        1..=63 => (1u64 << minbits) - 1,
        _ => u64::MAX,
    }
}
551
552fn native_order() -> u32 {
553 if cfg!(target_endian = "little") {
554 ORDER_LE
555 } else {
556 ORDER_BE
557 }
558}
559
560fn usize_from_u32(value: u32, what: &str) -> Result<usize> {
561 usize::try_from(value).map_err(|_| filter_error(&format!("{what} does not fit in usize")))
562}
563
/// Byte mask covering the low `bits` bits; saturates to 0xFF for widths
/// of 8 or more.
fn low_mask(bits: usize) -> u8 {
    match bits {
        0..=7 => ((1u16 << bits) - 1) as u8,
        _ => u8::MAX,
    }
}
571
/// Wraps a message string in the crate's generic filter error variant.
fn filter_error(message: &str) -> Error {
    Error::FilterError(message.into())
}
575
#[cfg(test)]
mod tests {
    use super::*;

    /// MSB-first bit packer mirroring `BitReader`'s consumption order.
    struct BitWriter {
        data: Vec<u8>,
        bits_left: u8,
    }

    impl BitWriter {
        fn new() -> Self {
            Self {
                data: vec![0],
                bits_left: 8,
            }
        }

        /// Appends the low `count` bits of `value`, MSB-first.
        fn write_bits(&mut self, value: u8, count: usize) {
            if count == 0 {
                return;
            }

            let mut remaining = count;
            while remaining > 0 {
                let take = remaining.min(self.bits_left as usize);
                let shift = remaining - take;
                let chunk = (value >> shift) & low_mask(take);
                let idx = self.data.len() - 1;
                self.data[idx] |= chunk << (self.bits_left as usize - take);
                self.bits_left -= take as u8;
                remaining -= take;
                if self.bits_left == 0 {
                    self.data.push(0);
                    self.bits_left = 8;
                }
            }
        }

        /// Returns the packed bytes, dropping a trailing fully-unused byte.
        fn finish(mut self) -> Vec<u8> {
            if self.bits_left == 8 {
                self.data.pop();
            }
            self.data
        }
    }

    /// Packs each element's low `minbits` bits the way the encoder would,
    /// honoring the native byte order (inverse of `unpack_packed`).
    fn pack_values(elements: &[Vec<u8>], size: usize, minbits: usize) -> Vec<u8> {
        let mut writer = BitWriter::new();
        let dtype_len = size * 8;
        match native_order() {
            ORDER_LE => {
                let begin = size - 1 - (dtype_len - minbits) / 8;
                for value in elements {
                    for k in (0..=begin).rev() {
                        let bit_count = if k == begin {
                            let remainder = (dtype_len - minbits) % 8;
                            if remainder == 0 {
                                8
                            } else {
                                8 - remainder
                            }
                        } else {
                            8
                        };
                        writer.write_bits(value[k] & low_mask(bit_count), bit_count);
                    }
                }
            }
            ORDER_BE => {
                let begin = (dtype_len - minbits) / 8;
                for value in elements {
                    for (k, byte) in value.iter().enumerate().take(size).skip(begin) {
                        let bit_count = if k == begin {
                            let remainder = (dtype_len - minbits) % 8;
                            if remainder == 0 {
                                8
                            } else {
                                8 - remainder
                            }
                        } else {
                            8
                        };
                        writer.write_bits(*byte & low_mask(bit_count), bit_count);
                    }
                }
            }
            _ => unreachable!(),
        }
        writer.finish()
    }

    /// Builds a filter header (LE `minbits`, 8-byte minval) followed by
    /// `payload`.
    fn header_with_minval(minbits: u32, minval: u64, payload: &[u8]) -> Vec<u8> {
        let mut data = vec![0u8; HEADER_SIZE];
        data[..4].copy_from_slice(&minbits.to_le_bytes());
        // Declare the full 8-byte minimum value length.
        data[4] = 8;
        data[5..13].copy_from_slice(&minval.to_le_bytes());
        data.extend_from_slice(payload);
        data
    }

    #[test]
    fn decompresses_unsigned_integer_scaleoffset() {
        // Offsets relative to a minimum of 100, packed at 4 bits each.
        let encoded_values = vec![
            0u16.to_ne_bytes().to_vec(),
            1u16.to_ne_bytes().to_vec(),
            7u16.to_ne_bytes().to_vec(),
            10u16.to_ne_bytes().to_vec(),
        ];
        let packed = pack_values(&encoded_values, 2, 4);
        let input = header_with_minval(4, 100, &packed);
        let client_data = vec![
            SCALE_INT,
            0,
            4,
            CLASS_INTEGER,
            2,
            SIGN_UNSIGNED,
            native_order(),
            FILL_UNDEFINED,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
        ];

        let decoded = decompress(&input, &client_data).unwrap();
        let values: Vec<u16> = decoded
            .chunks_exact(2)
            .map(|chunk| u16::from_ne_bytes(chunk.try_into().unwrap()))
            .collect();
        assert_eq!(values, vec![100, 101, 107, 110]);
    }

    #[test]
    fn decompresses_float_dscale_values() {
        // Scaled deltas (x100 via scale factor 2) from a minimum of 1.25.
        let encoded_values = vec![
            0i32.to_ne_bytes().to_vec(),
            25i32.to_ne_bytes().to_vec(),
            75i32.to_ne_bytes().to_vec(),
        ];
        let packed = pack_values(&encoded_values, 4, 7);
        let min = 1.25f32;
        let minval = u64::from_ne_bytes({
            let mut bytes = [0u8; 8];
            bytes[..4].copy_from_slice(&min.to_ne_bytes());
            bytes
        });
        let input = header_with_minval(7, minval, &packed);
        let client_data = vec![
            SCALE_FLOAT_DSCALE,
            2,
            3,
            CLASS_FLOAT,
            4,
            SIGN_SIGNED,
            native_order(),
            FILL_UNDEFINED,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
        ];

        let decoded = decompress(&input, &client_data).unwrap();
        let values: Vec<f32> = decoded
            .chunks_exact(4)
            .map(|chunk| f32::from_ne_bytes(chunk.try_into().unwrap()))
            .collect();
        assert_eq!(values, vec![1.25, 1.5, 2.0]);
    }

    #[test]
    fn full_precision_integer_payload_skips_postprocess() {
        // minbits == datatype width: payload is raw data, minval is ignored.
        let raw_values = [300u16.to_ne_bytes(), 511u16.to_ne_bytes()];
        let payload: Vec<u8> = raw_values.iter().flat_map(|v| v.iter().copied()).collect();
        let input = header_with_minval(16, 700, &payload);
        let client_data = vec![
            SCALE_INT,
            0,
            2,
            CLASS_INTEGER,
            2,
            SIGN_UNSIGNED,
            native_order(),
            FILL_UNDEFINED,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
        ];

        let decoded = decompress(&input, &client_data).unwrap();
        let values: Vec<u16> = decoded
            .chunks_exact(2)
            .map(|chunk| u16::from_ne_bytes(chunk.try_into().unwrap()))
            .collect();
        assert_eq!(values, vec![300, 511]);
    }
}