hkalbasi_rustc_ap_rustc_serialize/
opaque.rs

use crate::leb128;
use crate::serialize::{Decodable, Decoder, Encodable, Encoder};
use std::fs::File;
use std::io::{self, Write};
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ops::Range;
use std::path::Path;
use std::ptr;

// -----------------------------------------------------------------------------
// Encoder
// -----------------------------------------------------------------------------

pub type FileEncodeResult = Result<usize, io::Error>;

/// The size of the buffer in `FileEncoder`.
const BUF_SIZE: usize = 8192;

/// `FileEncoder` encodes data to a file via a fixed-size buffer.
///
/// There used to be a `MemEncoder` type that encoded all the data into a
/// `Vec`. `FileEncoder` is better because its memory use is determined by the
/// size of the buffer, rather than the full length of the encoded data, and
/// because it doesn't need to reallocate memory along the way.
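///
/// # Example
///
/// A minimal usage sketch (added for illustration, not part of the original
/// source; the file name is hypothetical, and the `Encoder` trait must be in
/// scope for the `emit_*` methods):
///
/// ```ignore
/// let mut encoder = FileEncoder::new("data.bin")?;
/// encoder.emit_u32(42); // LEB128-encoded, one byte for small values
/// encoder.emit_raw_bytes(b"hello"); // raw bytes, no length prefix
/// let bytes_written = encoder.finish()?;
/// ```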
pub struct FileEncoder {
    /// The input buffer. For adequate performance, we need more control over
    /// buffering than `BufWriter` offers. If `BufWriter` ever offers a raw
    /// buffer access API, we can use it, and remove `buf` and `buffered`.
    buf: Box<[MaybeUninit<u8>]>,
    buffered: usize,
    flushed: usize,
    file: File,
    // This is used to implement delayed error handling, as described in the
    // comment on `trait Encoder`.
    res: Result<(), io::Error>,
}

impl FileEncoder {
    pub fn new<P: AsRef<Path>>(path: P) -> io::Result<Self> {
        // Create the file for reading and writing, because some encoders do both
        // (e.g. the metadata encoder when -Zmeta-stats is enabled)
        let file = File::options().read(true).write(true).create(true).truncate(true).open(path)?;

        Ok(FileEncoder {
            buf: Box::new_uninit_slice(BUF_SIZE),
            buffered: 0,
            flushed: 0,
            file,
            res: Ok(()),
        })
    }

    #[inline]
    pub fn position(&self) -> usize {
        // Tracking position this way instead of having a `self.position` field
        // means that we don't have to update the position on every write call.
        self.flushed + self.buffered
    }

    pub fn flush(&mut self) {
        // This is basically a copy of `BufWriter::flush`. If `BufWriter` ever
        // offers a raw buffer access API, we can use it, and remove this.

        /// Helper struct to ensure the buffer is updated after all the writes
        /// are complete. It tracks the number of written bytes and drains them
        /// all from the front of the buffer when dropped.
        struct BufGuard<'a> {
            buffer: &'a mut [u8],
            encoder_buffered: &'a mut usize,
            encoder_flushed: &'a mut usize,
            flushed: usize,
        }

        impl<'a> BufGuard<'a> {
            fn new(
                buffer: &'a mut [u8],
                encoder_buffered: &'a mut usize,
                encoder_flushed: &'a mut usize,
            ) -> Self {
                assert_eq!(buffer.len(), *encoder_buffered);
                Self { buffer, encoder_buffered, encoder_flushed, flushed: 0 }
            }

            /// The unwritten part of the buffer
            fn remaining(&self) -> &[u8] {
                &self.buffer[self.flushed..]
            }

            /// Flag some bytes as removed from the front of the buffer
            fn consume(&mut self, amt: usize) {
                self.flushed += amt;
            }

            /// true if all of the bytes have been written
            fn done(&self) -> bool {
                self.flushed >= *self.encoder_buffered
            }
        }

        impl Drop for BufGuard<'_> {
            fn drop(&mut self) {
                if self.flushed > 0 {
                    if self.done() {
                        *self.encoder_flushed += *self.encoder_buffered;
                        *self.encoder_buffered = 0;
                    } else {
                        self.buffer.copy_within(self.flushed.., 0);
                        *self.encoder_flushed += self.flushed;
                        *self.encoder_buffered -= self.flushed;
                    }
                }
            }
        }

        // If we've already had an error, do nothing. It'll get reported after
        // `finish` is called.
        if self.res.is_err() {
            return;
        }

        let mut guard = BufGuard::new(
            unsafe { MaybeUninit::slice_assume_init_mut(&mut self.buf[..self.buffered]) },
            &mut self.buffered,
            &mut self.flushed,
        );

        while !guard.done() {
            match self.file.write(guard.remaining()) {
                Ok(0) => {
                    self.res = Err(io::Error::new(
                        io::ErrorKind::WriteZero,
                        "failed to write the buffered data",
                    ));
                    return;
                }
                Ok(n) => guard.consume(n),
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
                Err(e) => {
                    self.res = Err(e);
                    return;
                }
            }
        }
    }

    pub fn file(&self) -> &File {
        &self.file
    }

    #[inline]
    fn write_one(&mut self, value: u8) {
        let mut buffered = self.buffered;

        if std::intrinsics::unlikely(buffered + 1 > BUF_SIZE) {
            self.flush();
            buffered = 0;
        }

        // SAFETY: The above check and `flush` ensure that there is enough
        // room to write the input to the buffer.
        unsafe {
            *MaybeUninit::slice_as_mut_ptr(&mut self.buf).add(buffered) = value;
        }

        self.buffered = buffered + 1;
    }

    #[inline]
    fn write_all(&mut self, buf: &[u8]) {
        let buf_len = buf.len();

        if std::intrinsics::likely(buf_len <= BUF_SIZE) {
            let mut buffered = self.buffered;

            if std::intrinsics::unlikely(buffered + buf_len > BUF_SIZE) {
                self.flush();
                buffered = 0;
            }

            // SAFETY: The above check and `flush` ensure that there is enough
            // room to write the input to the buffer.
            unsafe {
                let src = buf.as_ptr();
                let dst = MaybeUninit::slice_as_mut_ptr(&mut self.buf).add(buffered);
                ptr::copy_nonoverlapping(src, dst, buf_len);
            }

            self.buffered = buffered + buf_len;
        } else {
            self.write_all_unbuffered(buf);
        }
    }

    fn write_all_unbuffered(&mut self, mut buf: &[u8]) {
        // If we've already had an error, do nothing. It'll get reported after
        // `finish` is called.
        if self.res.is_err() {
            return;
        }

        if self.buffered > 0 {
            self.flush();
        }

        // This is basically a copy of `Write::write_all` but also updates our
        // `self.flushed`. It's necessary because `Write::write_all` does not
        // return the number of bytes written when an error is encountered, and
        // without that, we cannot accurately update `self.flushed` on error.
        while !buf.is_empty() {
            match self.file.write(buf) {
                Ok(0) => {
                    self.res = Err(io::Error::new(
                        io::ErrorKind::WriteZero,
                        "failed to write whole buffer",
                    ));
                    return;
                }
                Ok(n) => {
                    buf = &buf[n..];
                    self.flushed += n;
                }
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
                Err(e) => {
                    self.res = Err(e);
                    return;
                }
            }
        }
    }

    pub fn finish(mut self) -> FileEncodeResult {
        self.flush();

        let res = std::mem::replace(&mut self.res, Ok(()));
        res.map(|()| self.position())
    }
}

impl Drop for FileEncoder {
    fn drop(&mut self) {
        // Likely to be a no-op, because `finish` should have been called and
        // it also flushes. But do it just in case.
        self.flush();
    }
}

macro_rules! write_leb128 {
    ($this_fn:ident, $int_ty:ty, $write_leb_fn:ident) => {
        #[inline]
        fn $this_fn(&mut self, v: $int_ty) {
            const MAX_ENCODED_LEN: usize = $crate::leb128::max_leb128_len::<$int_ty>();

            let mut buffered = self.buffered;

            // This can't overflow because BUF_SIZE and MAX_ENCODED_LEN are both
            // quite small.
            if std::intrinsics::unlikely(buffered + MAX_ENCODED_LEN > BUF_SIZE) {
                self.flush();
                buffered = 0;
            }

            // SAFETY: The above check and flush ensure that there is enough
            // room to write the encoded value to the buffer.
            let buf = unsafe {
                &mut *(self.buf.as_mut_ptr().add(buffered)
                    as *mut [MaybeUninit<u8>; MAX_ENCODED_LEN])
            };

            let encoded = leb128::$write_leb_fn(buf, v);
            self.buffered = buffered + encoded.len();
        }
    };
}
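
// Added note (not in the original file): the `leb128` functions used above
// emit integers in LEB128 form, a variable-length encoding that writes 7 bits
// per byte, least-significant group first, setting the high bit of every byte
// except the last. As a worked example, 300 (binary 1_0010_1100) encodes as
// unsigned LEB128 like this:
//
//     low 7 bits:  0101100 -> 0xAC (0x2C with the continuation bit 0x80 set)
//     next 7 bits: 0000010 -> 0x02 (high bit clear: final byte)
//
// So 300 becomes the two bytes [0xAC, 0x02], and values below 128 take a
// single byte, which is why the integer emit_* methods below use LEB128
// rather than fixed-width little-endian encoding.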

impl Encoder for FileEncoder {
    write_leb128!(emit_usize, usize, write_usize_leb128);
    write_leb128!(emit_u128, u128, write_u128_leb128);
    write_leb128!(emit_u64, u64, write_u64_leb128);
    write_leb128!(emit_u32, u32, write_u32_leb128);

    #[inline]
    fn emit_u16(&mut self, v: u16) {
        self.write_all(&v.to_le_bytes());
    }

    #[inline]
    fn emit_u8(&mut self, v: u8) {
        self.write_one(v);
    }

    write_leb128!(emit_isize, isize, write_isize_leb128);
    write_leb128!(emit_i128, i128, write_i128_leb128);
    write_leb128!(emit_i64, i64, write_i64_leb128);
    write_leb128!(emit_i32, i32, write_i32_leb128);

    #[inline]
    fn emit_i16(&mut self, v: i16) {
        self.write_all(&v.to_le_bytes());
    }

    #[inline]
    fn emit_raw_bytes(&mut self, s: &[u8]) {
        self.write_all(s);
    }
}

// -----------------------------------------------------------------------------
// Decoder
// -----------------------------------------------------------------------------

// Conceptually, `MemDecoder` wraps a `&[u8]` with a cursor into it that is always valid.
// This is implemented with three pointers, two of which represent the original slice and a
// third that is our cursor.
// It is an invariant of this type that start <= current <= end.
// Additionally, the implementation of this type never modifies start and end.
pub struct MemDecoder<'a> {
    start: *const u8,
    current: *const u8,
    end: *const u8,
    _marker: PhantomData<&'a u8>,
}

impl<'a> MemDecoder<'a> {
    #[inline]
    pub fn new(data: &'a [u8], position: usize) -> MemDecoder<'a> {
        let Range { start, end } = data.as_ptr_range();
        MemDecoder { start, current: data[position..].as_ptr(), end, _marker: PhantomData }
    }

    #[inline]
    pub fn data(&self) -> &'a [u8] {
        // SAFETY: This recovers the original slice, only using members we never modify.
        unsafe { std::slice::from_raw_parts(self.start, self.len()) }
    }

    #[inline]
    pub fn len(&self) -> usize {
        // SAFETY: This recovers the length of the original slice, only using members we never modify.
        unsafe { self.end.sub_ptr(self.start) }
    }

    #[inline]
    pub fn remaining(&self) -> usize {
        // SAFETY: This type guarantees current <= end.
        unsafe { self.end.sub_ptr(self.current) }
    }

    #[cold]
    #[inline(never)]
    fn decoder_exhausted() -> ! {
        panic!("MemDecoder exhausted")
    }

    #[inline]
    fn read_array<const N: usize>(&mut self) -> [u8; N] {
        self.read_raw_bytes(N).try_into().unwrap()
    }

    /// While we could manually expose manipulation of the decoder position,
    /// all current users of that method would need to reset the position later,
    /// incurring the bounds check of set_position twice.
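    ///
    /// # Example
    ///
    /// An illustrative sketch (added for this edit, not part of the original
    /// source; assumes the `Decoder` trait is in scope for `read_u8`):
    ///
    /// ```ignore
    /// let data = [1u8, 2, 3, 4];
    /// let mut d = MemDecoder::new(&data, 2);
    /// let first = d.with_position(0, |d| d.read_u8());
    /// assert_eq!(first, 1);
    /// assert_eq!(d.position(), 2); // the cursor is restored on return
    /// ```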
    #[inline]
    pub fn with_position<F, T>(&mut self, pos: usize, func: F) -> T
    where
        F: Fn(&mut MemDecoder<'a>) -> T,
    {
        struct SetOnDrop<'a, 'guarded> {
            decoder: &'guarded mut MemDecoder<'a>,
            current: *const u8,
        }
        impl Drop for SetOnDrop<'_, '_> {
            fn drop(&mut self) {
                self.decoder.current = self.current;
            }
        }

        if pos >= self.len() {
            Self::decoder_exhausted();
        }
        let previous = self.current;
        // SAFETY: We just checked if this add is in-bounds above.
        unsafe {
            self.current = self.start.add(pos);
        }
        let guard = SetOnDrop { current: previous, decoder: self };
        func(guard.decoder)
    }
}

macro_rules! read_leb128 {
    ($this_fn:ident, $int_ty:ty, $read_leb_fn:ident) => {
        #[inline]
        fn $this_fn(&mut self) -> $int_ty {
            leb128::$read_leb_fn(self)
        }
    };
}

impl<'a> Decoder for MemDecoder<'a> {
    read_leb128!(read_usize, usize, read_usize_leb128);
    read_leb128!(read_u128, u128, read_u128_leb128);
    read_leb128!(read_u64, u64, read_u64_leb128);
    read_leb128!(read_u32, u32, read_u32_leb128);

    #[inline]
    fn read_u16(&mut self) -> u16 {
        u16::from_le_bytes(self.read_array())
    }

    #[inline]
    fn read_u8(&mut self) -> u8 {
        if self.current == self.end {
            Self::decoder_exhausted();
        }
        // SAFETY: This type guarantees current <= end, and we just checked
        // that current != end.
        unsafe {
            let byte = *self.current;
            self.current = self.current.add(1);
            byte
        }
    }

    read_leb128!(read_isize, isize, read_isize_leb128);
    read_leb128!(read_i128, i128, read_i128_leb128);
    read_leb128!(read_i64, i64, read_i64_leb128);
    read_leb128!(read_i32, i32, read_i32_leb128);

    #[inline]
    fn read_i16(&mut self) -> i16 {
        i16::from_le_bytes(self.read_array())
    }

    #[inline]
    fn read_raw_bytes(&mut self, bytes: usize) -> &'a [u8] {
        if bytes > self.remaining() {
            Self::decoder_exhausted();
        }
        // SAFETY: We just checked if this range is in-bounds above.
        unsafe {
            let slice = std::slice::from_raw_parts(self.current, bytes);
            self.current = self.current.add(bytes);
            slice
        }
    }

    #[inline]
    fn peek_byte(&self) -> u8 {
        if self.current == self.end {
            Self::decoder_exhausted();
        }
        // SAFETY: This type guarantees current is in bounds or one-past-the-end, which is end.
        // Since we just checked that current != end, the current pointer must be in bounds.
        unsafe { *self.current }
    }

    #[inline]
    fn position(&self) -> usize {
        // SAFETY: This type guarantees start <= current
        unsafe { self.current.sub_ptr(self.start) }
    }
}

// Specializations for contiguous byte sequences follow. The default implementations for slices
// encode and decode each element individually. This isn't necessary for `u8` slices when using
// opaque encoders and decoders, because each `u8` is unchanged by encoding and decoding.
// Therefore, we can use more efficient implementations that process the entire sequence at once.

// Specialize encoding byte slices. This specialization also applies to encoding `Vec<u8>`s, etc.,
// since the default implementations call `encode` on their slices internally.
impl Encodable<FileEncoder> for [u8] {
    fn encode(&self, e: &mut FileEncoder) {
        Encoder::emit_usize(e, self.len());
        e.emit_raw_bytes(self);
    }
}

// Specialize decoding `Vec<u8>`. This specialization also applies to decoding `Box<[u8]>`s, etc.,
// since the default implementations call `decode` to produce a `Vec<u8>` internally.
impl<'a> Decodable<MemDecoder<'a>> for Vec<u8> {
    fn decode(d: &mut MemDecoder<'a>) -> Self {
        let len = Decoder::read_usize(d);
        d.read_raw_bytes(len).to_owned()
    }
}

/// An integer that will always encode to 8 bytes.
pub struct IntEncodedWithFixedSize(pub u64);

impl IntEncodedWithFixedSize {
    pub const ENCODED_SIZE: usize = 8;
}

impl Encodable<FileEncoder> for IntEncodedWithFixedSize {
    #[inline]
    fn encode(&self, e: &mut FileEncoder) {
        let _start_pos = e.position();
        e.emit_raw_bytes(&self.0.to_le_bytes());
        let _end_pos = e.position();
        debug_assert_eq!((_end_pos - _start_pos), IntEncodedWithFixedSize::ENCODED_SIZE);
    }
}

impl<'a> Decodable<MemDecoder<'a>> for IntEncodedWithFixedSize {
    #[inline]
    fn decode(decoder: &mut MemDecoder<'a>) -> IntEncodedWithFixedSize {
        let bytes = decoder.read_array::<{ IntEncodedWithFixedSize::ENCODED_SIZE }>();
        IntEncodedWithFixedSize(u64::from_le_bytes(bytes))
    }
}
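
// An illustrative round-trip sketch (added for this edit, not part of the
// original file): it encodes a few values with `FileEncoder`, reads the file
// back into memory, and decodes them with `MemDecoder`. The temp-file name is
// arbitrary.
#[cfg(test)]
mod round_trip_sketch {
    use super::*;

    #[test]
    fn encode_then_decode() {
        let path = std::env::temp_dir().join("opaque_round_trip_sketch.bin");

        let mut encoder = FileEncoder::new(&path).unwrap();
        encoder.emit_u32(300); // LEB128: the two bytes [0xAC, 0x02]
        encoder.emit_raw_bytes(b"hello");
        IntEncodedWithFixedSize(42).encode(&mut encoder); // always 8 bytes
        let written = encoder.finish().unwrap();

        // The file contents and the reported position must agree.
        let data = std::fs::read(&path).unwrap();
        assert_eq!(data.len(), written);

        let mut decoder = MemDecoder::new(&data, 0);
        assert_eq!(decoder.read_u32(), 300);
        assert_eq!(decoder.read_raw_bytes(5), b"hello");
        assert_eq!(IntEncodedWithFixedSize::decode(&mut decoder).0, 42);
        assert_eq!(decoder.position(), written);
    }
}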