pub trait LasNIR {
    fn nir(&self) -> u16;
    fn set_nir(&mut self, new_val: u16);
}

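/// A 16-bit near-infrared (NIR) value, the extra channel carried by the LAS
/// point formats that include NIR (formats 8 and 10).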
#[derive(Default, Copy, Clone, PartialOrd, PartialEq, Debug)]
pub struct Nir(u16);

impl LasNIR for Nir {
    fn nir(&self) -> u16 {
        self.0
    }

    fn set_nir(&mut self, new_val: u16) {
        self.0 = new_val;
    }
}

impl Nir {
    pub const SIZE: usize = 2;
}

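/// "Layered" (version 3) compression of the NIR field.
///
/// Each chunk stores the compressed NIR values in their own byte layer,
/// which is what makes selective decompression of the field possible.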
pub mod v3 {
    use std::io::{Cursor, Read, Seek};

    use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};

    use crate::decoders::ArithmeticDecoder;
    use crate::encoders::ArithmeticEncoder;
    use crate::las::selective::DecompressionSelection;
    use crate::las::utils::copy_bytes_into_decoder;
    use crate::las::utils::{
        copy_encoder_content_to, lower_byte, lower_byte_changed, read_and_unpack, upper_byte,
        upper_byte_changed,
    };
    use crate::models::{ArithmeticModel, ArithmeticModelBuilder};
    use crate::packers::Packable;
    use crate::record::{LayeredFieldCompressor, LayeredFieldDecompressor};

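    /// Models for one compression context.
    ///
    /// Up to four independent contexts are kept (selected through the `context`
    /// argument of the (de)compression methods). Each context owns a 4-symbol
    /// model describing which bytes of the value changed, plus one 256-symbol
    /// model for the lower-byte difference and one for the upper-byte difference.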
    struct NirContext {
        bytes_used_model: ArithmeticModel,
        lower_byte_diff_model: ArithmeticModel,
        upper_byte_diff_model: ArithmeticModel,
        unused: bool,
    }

    impl Default for NirContext {
        fn default() -> Self {
            Self {
                bytes_used_model: ArithmeticModelBuilder::new(4).build(),
                lower_byte_diff_model: ArithmeticModelBuilder::new(256).build(),
                upper_byte_diff_model: ArithmeticModelBuilder::new(256).build(),
                unused: false,
            }
        }
    }

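    /// Decompresses NIR values from their dedicated layer.
    ///
    /// The whole layer is first copied into the in-memory `decoder` by
    /// `read_layers`; `decompress_field_with` then decodes one value per call.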
    pub struct LasNIRDecompressor {
        decoder: ArithmeticDecoder<Cursor<Vec<u8>>>,
        is_requested: bool,
        should_decompress: bool,
        layer_size: u32,
        last_context_used: usize,
        contexts: [NirContext; 4],
        last_nirs: [u16; 4],
    }

    impl Default for LasNIRDecompressor {
        fn default() -> Self {
            Self {
                decoder: ArithmeticDecoder::new(Cursor::new(Vec::<u8>::new())),
                contexts: [
                    NirContext::default(),
                    NirContext::default(),
                    NirContext::default(),
                    NirContext::default(),
                ],
                should_decompress: false,
                layer_size: 0,
                last_context_used: 0,
                last_nirs: [0u16; 4],
                is_requested: true,
            }
        }
    }

    impl<R: Read + Seek> LayeredFieldDecompressor<R> for LasNIRDecompressor {
        fn size_of_field(&self) -> usize {
            std::mem::size_of::<u16>()
        }

        fn set_selection(&mut self, selection: DecompressionSelection) {
            self.is_requested = selection.should_decompress_nir();
        }

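        // The first point of a chunk is stored uncompressed: read it back as-is
        // and use it to seed the "last value" of the context it belongs to.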
        fn init_first_point(
            &mut self,
            src: &mut R,
            first_point: &mut [u8],
            context: &mut usize,
        ) -> std::io::Result<()> {
            for ctx in &mut self.contexts {
                ctx.unused = true;
            }

            self.last_nirs[*context] = read_and_unpack::<_, u16>(src, first_point)?;
            self.contexts[*context].unused = false;
            self.last_context_used = *context;
            Ok(())
        }

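        // Decode one NIR value. On a context switch, a not-yet-used context
        // inherits the last value of the previously active context.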
        fn decompress_field_with(
            &mut self,
            current_point: &mut [u8],
            context: &mut usize,
        ) -> std::io::Result<()> {
            let mut last_nir = &mut self.last_nirs[self.last_context_used];
            if self.last_context_used != *context {
                self.last_context_used = *context;
                if self.contexts[*context].unused {
                    self.last_nirs[*context] = *last_nir;
                    self.contexts[*context].unused = false;
                    last_nir = &mut self.last_nirs[*context];
                }
            }

            let the_context = &mut self.contexts[self.last_context_used];
            if self.should_decompress {
                let mut new_nir: u16;
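                // The first symbol tells which bytes differ from the last value:
                // bit 0 -> the lower byte changed, bit 1 -> the upper byte changed.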
                let sym = self
                    .decoder
                    .decode_symbol(&mut the_context.bytes_used_model)?;

                if is_nth_bit_set!(sym, 0) {
                    let diff = self
                        .decoder
                        .decode_symbol(&mut the_context.lower_byte_diff_model)?
                        as u8;
                    new_nir = u16::from(diff.wrapping_add(lower_byte(*last_nir)));
                } else {
                    new_nir = *last_nir & 0x00FF;
                }

                if is_nth_bit_set!(sym, 1) {
                    let diff = self
                        .decoder
                        .decode_symbol(&mut the_context.upper_byte_diff_model)?
                        as u8;
                    let upper_byte = u16::from(diff.wrapping_add(upper_byte(*last_nir)));
                    new_nir |= (upper_byte << 8) & 0xFF00;
                } else {
                    new_nir |= *last_nir & 0xFF00;
                }
                *last_nir = new_nir;
            }
            last_nir.pack_into(current_point);
            Ok(())
        }

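        // The size of the NIR layer is read up front, so that `read_layers`
        // knows how many bytes belong to this field.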
        fn read_layers_sizes(&mut self, src: &mut R) -> std::io::Result<()> {
            self.layer_size = src.read_u32::<LittleEndian>()?;
            Ok(())
        }

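        // The layer bytes are only pulled into the internal decoder when NIR was
        // requested; `should_decompress` ends up true only if there is actually
        // something to decode.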
        fn read_layers(&mut self, src: &mut R) -> std::io::Result<()> {
            self.should_decompress = copy_bytes_into_decoder(
                self.is_requested,
                self.layer_size as usize,
                &mut self.decoder,
                src,
            )?;
            Ok(())
        }
    }

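    /// Compresses NIR values into their own layer.
    ///
    /// Apart from the raw first point, encoded bytes are buffered in the
    /// in-memory `encoder` and only written out by `write_layers_sizes` /
    /// `write_layers`.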
    pub struct LasNIRCompressor {
        encoder: ArithmeticEncoder<Cursor<Vec<u8>>>,
        has_nir_changed: bool,
        last_context_used: usize,
        contexts: [NirContext; 4],
        last_nirs: [u16; 4],
    }

    impl Default for LasNIRCompressor {
        fn default() -> Self {
            Self {
                encoder: ArithmeticEncoder::new(Cursor::new(Vec::<u8>::new())),
                contexts: [
                    NirContext::default(),
                    NirContext::default(),
                    NirContext::default(),
                    NirContext::default(),
                ],
                has_nir_changed: false,
                last_context_used: 0,
                last_nirs: [0u16; 4],
            }
        }
    }

    impl<R: std::io::Write> LayeredFieldCompressor<R> for LasNIRCompressor {
        fn size_of_field(&self) -> usize {
            std::mem::size_of::<u16>()
        }

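        // Mirror of the decompressor: the first point is written uncompressed
        // and seeds the "last value" of the context it belongs to.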
        fn init_first_point(
            &mut self,
            dst: &mut R,
            first_point: &[u8],
            context: &mut usize,
        ) -> std::io::Result<()> {
            for ctx in &mut self.contexts {
                ctx.unused = true;
            }

            dst.write_all(first_point)?;
            self.last_nirs[*context] = u16::unpack_from(first_point);
            self.contexts[*context].unused = false;
            self.last_context_used = *context;
            Ok(())
        }

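        // Encode one NIR value as per-byte differences from the context's last
        // value; `has_nir_changed` tracks whether the layer has any content.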
        fn compress_field_with(
            &mut self,
            current_point: &[u8],
            context: &mut usize,
        ) -> std::io::Result<()> {
            let mut last_nir = &mut self.last_nirs[self.last_context_used];
            if self.last_context_used != *context {
                self.last_context_used = *context;
                if self.contexts[*context].unused {
                    self.last_nirs[*context] = *last_nir;
                    self.contexts[*context].unused = false;
                    last_nir = &mut self.last_nirs[*context];
                }
            }
            let the_context = &mut self.contexts[self.last_context_used];

            let current_nir = u16::unpack_from(current_point);
            if current_nir != *last_nir {
                self.has_nir_changed = true;
            }

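            // Bit 0 of the symbol flags a change in the lower byte, bit 1 a change
            // in the upper byte; a difference is encoded only for the bytes that
            // changed.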
            let sym = lower_byte_changed(current_nir, *last_nir) as u8
                | (upper_byte_changed(current_nir, *last_nir) as u8) << 1;
            self.encoder
                .encode_symbol(&mut the_context.bytes_used_model, u32::from(sym))?;
            if is_nth_bit_set!(sym, 0) {
                let corr = lower_byte(current_nir).wrapping_sub(lower_byte(*last_nir));
                self.encoder
                    .encode_symbol(&mut the_context.lower_byte_diff_model, u32::from(corr))?;
            }

            if is_nth_bit_set!(sym, 1) {
                let corr = upper_byte(current_nir).wrapping_sub(upper_byte(*last_nir));
                self.encoder
                    .encode_symbol(&mut the_context.upper_byte_diff_model, u32::from(corr))?;
            }
            *last_nir = current_nir;
            Ok(())
        }

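        // If no point ever changed the NIR value, a layer size of 0 is written
        // and `write_layers` emits nothing.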
        fn write_layers_sizes(&mut self, dst: &mut R) -> std::io::Result<()> {
            let num_bytes = if self.has_nir_changed {
                self.encoder.done()?;
                self.encoder.get_mut().get_ref().len() as u32
            } else {
                0
            };
            dst.write_u32::<LittleEndian>(num_bytes)?;

            Ok(())
        }

        fn write_layers(&mut self, dst: &mut R) -> std::io::Result<()> {
            if self.has_nir_changed {
                copy_encoder_content_to(&mut self.encoder, dst)?;
            }
            Ok(())
        }
    }
}