1use std::cmp;
2use std::time::SystemTime;
3
4use crate::ParseHardError;
5use bit::BitIndex;
6
// Number of bits in one byte; all bit-cursor arithmetic below is based on this.
const BITS_PER_BYTE: usize = 8;
// Maximum bit width accepted by read_bits/write_bits, which buffer through a u32.
const MAX_U32_BIT_WIDTH: usize = 32;
9
/// Returns the current Unix time in whole seconds, saturating at `u32::MAX`.
///
/// A clock error (e.g. a system clock set before the Unix epoch) is mapped
/// to 0 rather than propagated to the caller.
pub fn get_sys_time_in_secs() -> u32 {
    let seconds_since_epoch = SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .map_or(0, |duration| duration.as_secs());
    // Saturate instead of truncating: values past u32::MAX clamp to u32::MAX.
    u32::try_from(seconds_since_epoch).unwrap_or(u32::MAX)
}
17
18fn parse_fixed_array<const ARRAY_LENGTH: usize>(
19 slice: &[u8],
20 field_name: &str,
21) -> Result<[u8; ARRAY_LENGTH], ParseHardError> {
22 slice.try_into().map_err(|_| ParseHardError {
23 message: format!(
24 "Expected {ARRAY_LENGTH} bytes for {field_name}, found {} bytes.",
25 slice.len()
26 ),
27 })
28}
29
30fn normalize_position(byte_position: &mut BytePosition) -> Result<(), ParseHardError> {
31 if byte_position.current_bit < BITS_PER_BYTE {
32 return Ok(());
33 }
34
35 let extra_full_bytes = byte_position.current_bit / BITS_PER_BYTE;
36 byte_position.current_byte =
37 byte_position.current_byte.checked_add(extra_full_bytes).ok_or_else(|| ParseHardError {
38 message: "Byte position overflow while normalizing cursor.".to_string(),
39 })?;
40 byte_position.current_bit %= BITS_PER_BYTE;
41 Ok(())
42}
43
44fn absolute_bit_offset(byte_position: &BytePosition) -> Result<usize, ParseHardError> {
45 byte_position
46 .current_byte
47 .checked_mul(BITS_PER_BYTE)
48 .and_then(|offset| offset.checked_add(byte_position.current_bit))
49 .ok_or_else(|| ParseHardError {
50 message: "Bit offset overflow while computing cursor position.".to_string(),
51 })
52}
53
54fn validate_u32_bit_width(bits_count: usize, context: &str) -> Result<(), ParseHardError> {
55 if bits_count > MAX_U32_BIT_WIDTH {
56 return Err(ParseHardError {
57 message: format!(
58 "{context} supports at most {MAX_U32_BIT_WIDTH} bits, received {bits_count}."
59 ),
60 });
61 }
62 Ok(())
63}
64
65fn advance_bits(byte_position: &mut BytePosition, bits_count: usize) -> Result<(), ParseHardError> {
66 let combined_bits = byte_position.current_bit.checked_add(bits_count).ok_or_else(|| {
67 ParseHardError { message: "Bit offset overflow while advancing cursor.".to_string() }
68 })?;
69
70 let full_bytes = combined_bits / BITS_PER_BYTE;
71 byte_position.current_byte =
72 byte_position.current_byte.checked_add(full_bytes).ok_or_else(|| ParseHardError {
73 message: "Byte offset overflow while advancing cursor.".to_string(),
74 })?;
75 byte_position.current_bit = combined_bits % BITS_PER_BYTE;
76 Ok(())
77}
78
79fn ensure_bits_available(
80 byte_slice: &[u8],
81 byte_position: &BytePosition,
82 bits_needed: usize,
83 context: &str,
84) -> Result<(), ParseHardError> {
85 let total_bits_available =
86 byte_slice.len().checked_mul(BITS_PER_BYTE).ok_or_else(|| ParseHardError {
87 message: "Bit capacity overflow while checking available input bits.".to_string(),
88 })?;
89 let current_bit_offset = absolute_bit_offset(byte_position)?;
90 if current_bit_offset > total_bits_available {
91 return Err(ParseHardError {
92 message: format!(
93 "{context} cursor is out of bounds: bit offset {current_bit_offset}, total bits {total_bits_available}."
94 ),
95 });
96 }
97 let remaining_bits = total_bits_available - current_bit_offset;
98 if bits_needed > remaining_bits {
99 return Err(ParseHardError {
100 message: format!(
101 "{context} needs {bits_needed} bits but only {remaining_bits} remain from bit offset {current_bit_offset}."
102 ),
103 });
104 }
105 Ok(())
106}
107
108fn ensure_writable_byte(
109 byte_vector: &mut Vec<u8>,
110 byte_index: usize,
111) -> Result<(), ParseHardError> {
112 if byte_index < byte_vector.len() {
113 return Ok(());
114 }
115
116 let required_length = byte_index.checked_add(1).ok_or_else(|| ParseHardError {
117 message: "Byte vector length overflow while expanding output buffer.".to_string(),
118 })?;
119 byte_vector.resize(required_length, 0);
120 Ok(())
121}
122
123pub fn u32_from(slice: &[u8], name: &'static str) -> Result<u32, ParseHardError> {
124 let parsed_bytes = parse_fixed_array::<4>(slice, name)?;
125 Ok(u32::from_le_bytes(parsed_bytes))
126}
127
128pub fn u16_from(slice: &[u8], name: &'static str) -> Result<u16, ParseHardError> {
129 let parsed_bytes = parse_fixed_array::<2>(slice, name)?;
130 Ok(u16::from_le_bytes(parsed_bytes))
131}
132
133pub fn u8_from(slice: &[u8], name: &'static str) -> Result<u8, ParseHardError> {
134 slice.first().copied().ok_or_else(|| ParseHardError {
135 message: format!("Expected 1 byte for {name}, found 0 bytes."),
136 })
137}
138
/// Cursor into a byte buffer with sub-byte (bit) resolution.
///
/// `current_byte` indexes the byte, `current_bit` the bit within that byte
/// (0..8 once normalized). `Default` starts at the beginning of the buffer.
#[derive(Default, PartialEq, Eq, Debug)]
pub struct BytePosition {
    pub current_byte: usize,
    pub current_bit: usize,
}

impl BytePosition {
    /// Index of the first byte not yet (fully) consumed: the current byte
    /// when bit-aligned, otherwise the one after it.
    pub fn next_byte_offset(&self) -> usize {
        if self.current_bit > 0 {
            self.current_byte + 1
        } else {
            self.current_byte
        }
    }
}
151
152pub fn write_byte(
154 byte_vector: &mut Vec<u8>,
155 byte_position: &mut BytePosition,
156 bits_source: u8,
157 bits_count: usize,
158) -> Result<(), ParseHardError> {
159 if bits_count > BITS_PER_BYTE {
160 return Err(ParseHardError {
161 message: format!(
162 "write_byte supports at most {BITS_PER_BYTE} bits, received {bits_count}."
163 ),
164 });
165 }
166 if bits_count == 0 {
167 return Ok(());
168 }
169 if bits_count < BITS_PER_BYTE && (bits_source >> bits_count) != 0 {
170 return Err(ParseHardError {
171 message: format!(
172 "write_byte source {bits_source:#010b} does not fit in {bits_count} bits."
173 ),
174 });
175 }
176
177 normalize_position(byte_position)?;
178 let mut bits_left_to_write: usize = bits_count;
179 let mut source_bit_index = 0;
180 loop {
181 if bits_left_to_write == 0 {
182 return Ok(());
183 }
184
185 normalize_position(byte_position)?;
186 ensure_writable_byte(byte_vector, byte_position.current_byte)?;
187
188 let bits_can_write_in_byte =
189 cmp::min(bits_left_to_write, BITS_PER_BYTE - byte_position.current_bit);
190 let bits_from_source =
191 bits_source.bit_range(source_bit_index..(source_bit_index + bits_can_write_in_byte));
192
193 if bits_can_write_in_byte == BITS_PER_BYTE && byte_position.current_bit == 0 {
194 byte_vector[byte_position.current_byte] = bits_from_source;
195 } else {
196 byte_vector[byte_position.current_byte].set_bit_range(
197 byte_position.current_bit..(byte_position.current_bit + bits_can_write_in_byte),
198 bits_from_source,
199 );
200 }
201 source_bit_index += bits_can_write_in_byte;
202 advance_bits(byte_position, bits_can_write_in_byte)?;
203 bits_left_to_write -= bits_can_write_in_byte;
204 }
205}
206
207pub fn write_bits<T: Into<u32>>(
209 byte_vector: &mut Vec<u8>,
210 byte_position: &mut BytePosition,
211 bits_source: T,
212 bits_count: usize,
213) -> Result<(), ParseHardError> {
214 validate_u32_bit_width(bits_count, "write_bits")?;
215 if bits_count == 0 {
216 return Ok(());
217 }
218
219 let source_value = bits_source.into();
220 if bits_count < MAX_U32_BIT_WIDTH && (source_value >> bits_count) != 0 {
221 return Err(ParseHardError {
222 message: format!("Value {source_value} does not fit in {bits_count} bits."),
223 });
224 }
225
226 let mut bits_left_to_write: usize = bits_count;
227 let mut bits_written = 0;
228 loop {
229 if bits_left_to_write == 0 {
230 return Ok(());
231 }
232
233 let bits_can_write = cmp::min(bits_left_to_write, BITS_PER_BYTE);
234 let source_byte = ((source_value >> bits_written) & 0xFF) as u8;
235 write_byte(byte_vector, byte_position, source_byte, bits_can_write)?;
236 bits_left_to_write -= bits_can_write;
237 bits_written += bits_can_write;
238 }
239}
240
241pub fn read_bits(
245 byte_slice: &[u8],
246 byte_position: &mut BytePosition,
247 bits_to_read: usize,
248) -> Result<u32, ParseHardError> {
249 validate_u32_bit_width(bits_to_read, "read_bits")?;
250 if bits_to_read == 0 {
251 return Ok(0);
252 }
253
254 normalize_position(byte_position)?;
255 ensure_bits_available(byte_slice, byte_position, bits_to_read, "read_bits")?;
256
257 let mut bits_left_to_read: usize = bits_to_read;
258 let mut buffer: u32 = 0;
259 let mut buffer_bit_position: usize = 0;
260 loop {
261 if bits_left_to_read == 0 {
262 return Ok(buffer);
263 }
264
265 let bits_parsing_count =
266 cmp::min(BITS_PER_BYTE - byte_position.current_bit, bits_left_to_read);
267 let bits_parsed: u8 = byte_slice[byte_position.current_byte]
268 .bit_range(byte_position.current_bit..(byte_position.current_bit + bits_parsing_count));
269
270 buffer.set_bit_range(
271 buffer_bit_position..(buffer_bit_position + bits_parsing_count),
272 u32::from_le_bytes([bits_parsed, 0x00, 0x00, 0x00]),
273 );
274 buffer_bit_position += bits_parsing_count;
275 bits_left_to_read -= bits_parsing_count;
276 advance_bits(byte_position, bits_parsing_count)?;
277 }
278}
279
#[cfg(test)]
mod tests {
    use super::BytePosition;

    // A cursor sitting exactly on a byte boundary stays on that byte.
    #[test]
    fn next_byte_offset_keeps_aligned_position() {
        let aligned = BytePosition { current_byte: 9, current_bit: 0 };
        assert_eq!(aligned.next_byte_offset(), 9);
    }

    // A cursor mid-byte rounds up to the following byte.
    #[test]
    fn next_byte_offset_advances_unaligned_position() {
        let unaligned = BytePosition { current_byte: 9, current_bit: 3 };
        assert_eq!(unaligned.next_byte_offset(), 10);
    }
}