// bits_io/buf/bit_buf_impls.rs

1use bitvec::view::BitView;
2
3use crate::prelude::*;
4
5impl<T: BitBuf + ?Sized> BitBuf for &mut T {
6    fn advance_bits(&mut self, count: usize) {
7        (**self).advance_bits(count);
8    }
9
10    fn remaining_bits(&self) -> usize {
11        (**self).remaining_bits()
12    }
13
14    fn chunk_bits(&self) -> &BitSlice {
15        (**self).chunk_bits()
16    }
17
18    fn chunk_bytes(&self) -> &[u8] {
19        (**self).chunk_bytes()
20    }
21
22    fn byte_aligned(&self) -> bool {
23        (**self).byte_aligned()
24    }
25}
26
27impl BitBuf for Bits {
28    fn advance_bits(&mut self, count: usize) {
29        assert!(count <= self.remaining_bits(), "advance past end of Bits");
30        self.inc_start_bits(count);
31    }
32
33    fn remaining_bits(&self) -> usize {
34        self.bit_len
35    }
36
37    fn chunk_bits(&self) -> &BitSlice {
38        &BitSlice::from_slice(&self.inner)[self.bit_start..self.bit_start + self.bit_len]
39    }
40
41    fn chunk_bytes(&self) -> &[u8] {
42        assert!(self.bit_start % 8 == 0);
43        assert!(self.bit_len % 8 == 0);
44
45        let byte_start = self.bit_start / 8;
46        let size_bytes = self.bit_len / 8;
47
48        &self.inner[byte_start..byte_start + size_bytes]
49    }
50
51    fn byte_aligned(&self) -> bool {
52        self.bit_start % 8 == 0 && self.bit_len % 8 == 0
53    }
54}
55
56impl BitBuf for BitsMut {
57    fn advance_bits(&mut self, count: usize) {
58        assert!(
59            count <= self.remaining_bits(),
60            "advance past end of BitsMut"
61        );
62        self.bit_start += count;
63        self.bit_len -= count;
64        self.capacity -= count;
65    }
66
67    fn remaining_bits(&self) -> usize {
68        self.len_bits()
69    }
70
71    fn chunk_bits(&self) -> &BitSlice {
72        &BitSlice::from_slice(&self.inner)[self.bit_start..self.bit_start + self.bit_len]
73    }
74
75    fn chunk_bytes(&self) -> &[u8] {
76        assert!(self.byte_aligned());
77
78        let byte_start = self.bit_start / 8;
79
80        &self.inner[byte_start..]
81    }
82
83    fn byte_aligned(&self) -> bool {
84        self.bit_start % 8 == 0 && self.bit_len % 8 == 0
85    }
86}
87
88impl BitBuf for &[u8] {
89    fn advance_bits(&mut self, count: usize) {
90        if self.len() < count {
91            panic!("Can't advance past the end of slice");
92        }
93        *self = &self[count..];
94    }
95
96    fn remaining_bits(&self) -> usize {
97        self.len() * 8
98    }
99
100    fn chunk_bits(&self) -> &BitSlice {
101        self[..].view_bits()
102    }
103
104    fn chunk_bytes(&self) -> &[u8] {
105        self
106    }
107
108    fn byte_aligned(&self) -> bool {
109        true
110    }
111}
112
// TODO: I think we're gonna get bit by not supporting BitSlice<O> here, but come back to that
// later--hopefully we don't need a generic on the trait
116impl BitBuf for &BitSlice {
117    fn advance_bits(&mut self, count: usize) {
118        if self.len() < count {
119            panic!("Can't advance past end of BitSlice");
120        }
121        *self = &self[count..];
122    }
123
124    fn remaining_bits(&self) -> usize {
125        self.len()
126    }
127
128    fn chunk_bits(&self) -> &BitSlice {
129        self
130    }
131
132    fn chunk_bytes(&self) -> &[u8] {
133        assert!(self.byte_aligned());
134        let bitvec::domain::Domain::Region { body, .. } = self.domain() else {
135            unreachable!("Verified by the assert above");
136        };
137
138        body
139    }
140
141    fn byte_aligned(&self) -> bool {
142        matches!(
143            self.domain(),
144            bitvec::domain::Domain::Region {
145                head: None,
146                tail: None,
147                ..
148            }
149        )
150    }
151}
152
153impl BitBuf for BitCursor<&[u8]> {
154    fn advance_bits(&mut self, count: usize) {
155        let byte_len = self.get_ref().len();
156        let bit_len = byte_len * 8;
157        let bit_pos = self.position();
158
159        let max_count = bit_len.saturating_sub(bit_pos as usize);
160        if count > max_count {
161            panic!("Can't advance beyond end of buffer");
162        }
163        self.set_position(bit_pos + count as u64);
164    }
165
166    fn remaining_bits(&self) -> usize {
167        let byte_len = self.get_ref().len();
168        let bit_len = byte_len * 8;
169
170        bit_len.saturating_sub(self.position() as usize)
171    }
172
173    fn chunk_bits(&self) -> &BitSlice {
174        let slice = self.get_ref().view_bits();
175        let start = slice.len().min(self.position() as usize);
176        &slice[start..]
177    }
178
179    fn chunk_bytes(&self) -> &[u8] {
180        assert!(self.byte_aligned());
181        let byte_start_position = (self.position() / 8) as usize;
182
183        &self.get_ref()[byte_start_position..]
184    }
185
186    fn byte_aligned(&self) -> bool {
187        // We know the underlying storage (a &[u8]) is inherently byte-aligned, so we just need to
188        // make sure the position is also byte-aligned
189        self.position() % 8 == 0
190    }
191}
192
193impl BitBuf for BitCursor<&BitSlice> {
194    fn advance_bits(&mut self, count: usize) {
195        let len = self.get_ref().len();
196        let pos = self.position();
197
198        let max_count = len.saturating_sub(pos as usize);
199        if count > max_count {
200            panic!("Can't advance beyond end of buffer");
201        }
202        self.set_position(pos + count as u64);
203    }
204
205    fn remaining_bits(&self) -> usize {
206        self.get_ref()
207            .len()
208            .saturating_sub(self.position() as usize)
209    }
210
211    fn chunk_bits(&self) -> &BitSlice {
212        let slice = self.get_ref();
213        let start = slice.len().min(self.position() as usize);
214        &slice[start..]
215    }
216
217    fn chunk_bytes(&self) -> &[u8] {
218        assert!(self.byte_aligned());
219        let bitslice = self.get_ref();
220        let bitvec::domain::Domain::Region { body, .. } = bitslice.domain() else {
221            unreachable!("Verified by the assert above");
222        };
223        // TODO: if/when 'byte_aligned' changes to account for position and underlying storage
224        // alinging on a boundary we'll need to change the way we calculate the starting byte
225        // position into 'body'
226        let start_byte_position = (self.position() / 8) as usize;
227
228        &body[start_byte_position..]
229    }
230
231    fn byte_aligned(&self) -> bool {
232        // Need to ensure that both the underlying storage is byte-aligned and the position is on a
233        // byte boundary.
234        // TODO: technically these two could 'cancel eachother out': the position may be on a
235        // byte-boundary on the underlying storage.  Should handle that case here as well.
236        matches!(
237            self.get_ref().domain(),
238            bitvec::domain::Domain::Region {
239                head: None,
240                tail: None,
241                ..
242            }
243        ) && self.position() % 8 == 0
244    }
245}
246
#[cfg(test)]
mod tests {
    use super::*;

    // TODO: write a set of common tests that take B: BitBuf and then run them with different
    // types that impl BitBuf.

    // `byte_aligned` on a &BitSlice: aligned only when the slice covers whole
    // storage elements (no partial head or tail).
    #[test]
    fn test_byte_aligned() {
        // Exactly one byte worth of bits
        let bits = bits![0; 8];
        assert!(bits.byte_aligned());
        // Bits within one byte but not the entire byte shouldn't be considered byte-aligned
        let bits = bits![1, 1, 1];
        assert!(!bits.byte_aligned());
        // 2 bytes worth of bits should be considered byte-aligned
        let bits = bits![0; 16];
        assert!(bits.byte_aligned());
        // 1 byte's worth but not at the start shouldn't be considered byte-aligned
        let bits = bits![0; 9];
        let slice = &bits[1..];
        assert_eq!(8, slice.len());
        assert!(!slice.byte_aligned());
    }

    // Advancing a `Bits` moves the readable window forward and shrinks it.
    #[test]
    fn test_bit_buf_bits_advance() {
        let mut bits = Bits::copy_from_bit_slice(bits![1, 1, 1, 1, 0, 0, 0, 0]);

        bits.advance_bits(4);
        assert_eq!(bits.len_bits(), 4);
        assert_eq!(bits.chunk_bits(), bits![0, 0, 0, 0]);
    }

    // Advancing a `BitsMut` shrinks its readable length the same way.
    #[test]
    fn test_bit_buf_bits_mut_advance() {
        let mut bits_mut = BitsMut::zeroed_bits(16);
        bits_mut.advance_bits(8);
        assert_eq!(bits_mut.len_bits(), 8);
    }

    // `copy_to_bit_slice` consumes from the front: successive calls yield
    // successive windows of the source.
    #[test]
    fn test_bits_copy_to_slice() {
        let mut bits = Bits::copy_from_bit_slice(bits![1, 1, 1, 1, 0, 0, 0, 0]);

        let dest = bits![mut 0; 4];
        bits.copy_to_bit_slice(dest);
        assert_eq!(dest, bits![1, 1, 1, 1,]);

        bits.copy_to_bit_slice(dest);
        assert_eq!(dest, bits![0, 0, 0, 0]);
    }

    // `chunk_bytes` reflects the current window, including after an advance.
    #[test]
    fn test_chunk_bytes() {
        {
            let bits = Bits::copy_from_bit_slice(bits![1, 1, 1, 1, 0, 0, 0, 0]);

            let chunk_bytes = bits.chunk_bytes();
            assert_eq!(chunk_bytes.len(), 1);
            assert_eq!(chunk_bytes[0], 0b11110000);
        }
        {
            let mut bits = Bits::copy_from_bit_slice(bits![
                0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0
            ]);
            // Skip the first (all-zero) byte; the remaining two bytes should
            // be what chunk_bytes exposes.
            bits.advance_bits(8);
            let chunk_bytes = bits.chunk_bytes();
            assert_eq!(chunk_bytes.len(), 2);
            assert_eq!(chunk_bytes, [0b11111111, 0b10101010]);
        }
    }

    #[test]
    fn test_chunk_after_split() {
        // Make sure that a call to chunk after some kind of split respects the new limit
        let mut bits = Bits::from_static_bytes(&[1, 2, 3, 4, 5]);

        let start = bits.split_to_bytes(2);
        let start_chunk = start.chunk_bytes();
        assert_eq!(start_chunk.len(), 2);
    }

    // Byte-level copy out of a `Bits` built from an owned byte array.
    #[test]
    fn test_copy_to_slice_bytes() {
        let mut dest = [0; 4];

        let mut bits = Bits::from_owner_bytes([42, 43, 44, 45]);

        bits.copy_to_slice_bytes(&mut dest);
        assert_eq!(dest, [42, 43, 44, 45]);
    }

    // A bare &BitSlice implements BitBuf: advancing shrinks it from the front.
    #[test]
    fn test_bitslice_bitbuf() {
        let mut bits = bits![1, 0, 1, 0, 1, 0];
        assert_eq!(6, bits.remaining_bits());
        bits.advance_bits(3);
        assert_eq!(3, bits.remaining_bits());
    }

    // `take_bits` on &mut Bits yields a limited view that errors once drained.
    #[test]
    fn test_take() {
        let mut bits = Bits::from_static_bytes(&[1, 2, 3, 4]);

        let mut head = (&mut bits).take_bits(16);
        let value = head.get_u16::<NetworkOrder>().unwrap();
        assert!(head.get_bool().is_err());
        assert_eq!(value, 0x0102);
        let mut tail = (&mut bits).take_bits(16);
        let value = tail.get_u16::<NetworkOrder>().unwrap();
        assert!(tail.get_bool().is_err());
        assert_eq!(value, 0x0304);
    }

    // Cursor over a BitSlice: alignment tracks the bit position, and
    // chunk_bytes starts at the current (aligned) position.
    #[test]
    fn test_cursor_bit_slice() {
        #[rustfmt::skip]
        let bits = bits![
            1, 0, 1, 0, 1, 0, 1, 0,
            1, 1, 1, 1, 0, 0, 0, 0,
            0, 0, 0, 0, 1, 1, 1, 1,
        ];
        let mut cursor = BitCursor::new(bits);

        cursor.get_u1().unwrap();
        assert!(!cursor.byte_aligned());
        cursor.get_u7().unwrap();
        assert!(cursor.byte_aligned());
        let chunk = cursor.chunk_bytes();
        assert_eq!(chunk, &[0b11110000, 0b00001111]);
    }

    // Cursor over a &[u8]: same alignment behavior, byte-backed storage.
    #[test]
    fn test_cursor_u8_slice() {
        let data = &[0b11110000u8, 2, 3, 4][..];
        let mut cursor = BitCursor::new(data);

        cursor.get_u4().unwrap();
        assert!(!cursor.byte_aligned());
        cursor.get_u4().unwrap();
        assert!(cursor.byte_aligned());

        let chunk = cursor.chunk_bytes();
        assert_eq!(chunk, &[2, 3, 4]);
    }
}