feldera_storage/fbuf.rs
//! A custom buffer type that works with our read/write APIs and the
//! buffer-cache.

// The code in this file is originally derived from the rkyv `AlignedVec`
// type, and adapted for use within the Feldera storage engine.
//
// Original rkyv attribution of the code:
//
// SPDX-FileCopyrightText: Copyright © 2021 David Koloski
//
// SPDX-License-Identifier: MIT

use std::{
    alloc,
    borrow::{Borrow, BorrowMut},
    cmp::Ordering,
    fmt,
    fs::File,
    io::{self, Error as IoError, ErrorKind, Read},
    mem,
    ops::{Deref, DerefMut, Index, IndexMut},
    os::fd::AsRawFd,
    ptr::NonNull,
    slice,
};

use libc::c_void;
use rkyv::{
    Archive, ArchiveUnsized, Archived, Fallible, RelPtr, Serialize,
    ser::{ScratchSpace, Serializer},
    vec::{ArchivedVec, VecResolver},
};

/// A custom buffer type that works with our read/write APIs and the
/// buffer-cache.
///
/// # Invariants
/// - `as_ptr()` has 512-byte alignment whenever the buffer has nonzero
///   capacity: this is the minimum alignment required for direct I/O.
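///
/// # Example
///
/// A minimal sketch of the alignment invariant (the `feldera_storage::fbuf`
/// module path is assumed from this file's location):
///
/// ```
/// use feldera_storage::fbuf::FBuf;
///
/// let buf = FBuf::with_capacity(4096);
/// assert_eq!(buf.as_ptr() as usize % FBuf::ALIGNMENT, 0);
/// ```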
pub struct FBuf {
    ptr: NonNull<u8>,
    cap: usize,
    len: usize,
}

impl Drop for FBuf {
    #[inline]
    fn drop(&mut self) {
        if self.cap != 0 {
            unsafe {
                alloc::dealloc(self.ptr.as_ptr(), self.layout());
            }
        }
    }
}

impl FBuf {
    /// The alignment of the vector, in bytes.
    pub const ALIGNMENT: usize = 512;

    /// Maximum capacity of the vector.
    ///
    /// Dictated by the requirements of
    /// [`alloc::Layout`](https://doc.rust-lang.org/alloc/alloc/struct.Layout.html):
    /// "`size`, when rounded up to the nearest multiple of `align`, must not
    /// overflow `isize` (i.e. the rounded value must be less than or equal
    /// to `isize::MAX`)".
    pub const MAX_CAPACITY: usize = isize::MAX as usize - (Self::ALIGNMENT - 1);

    /// Constructs a new, empty `FBuf`.
    ///
    /// The vector will not allocate until elements are pushed into it.
    #[inline]
    pub fn new() -> Self {
        FBuf {
            ptr: NonNull::dangling(),
            cap: 0,
            len: 0,
        }
    }

    /// Constructs a new, empty `FBuf` with the specified capacity.
    ///
    /// The vector will be able to hold exactly `capacity` bytes without
    /// reallocating. If `capacity` is 0, the vector will not allocate.
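    ///
    /// # Example
    ///
    /// A minimal sketch (module path assumed):
    ///
    /// ```
    /// use feldera_storage::fbuf::FBuf;
    ///
    /// let buf = FBuf::with_capacity(1024);
    /// assert_eq!(buf.capacity(), 1024);
    /// assert!(buf.is_empty());
    /// ```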
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        if capacity == 0 {
            Self::new()
        } else {
            assert!(
                capacity <= Self::MAX_CAPACITY,
                "`capacity` cannot exceed `FBuf::MAX_CAPACITY`"
            );
            let ptr = unsafe {
                let layout = alloc::Layout::from_size_align_unchecked(capacity, Self::ALIGNMENT);
                let ptr = alloc::alloc(layout);
                if ptr.is_null() {
                    alloc::handle_alloc_error(layout);
                }
                NonNull::new_unchecked(ptr)
            };
            Self {
                ptr,
                cap: capacity,
                len: 0,
            }
        }
    }

    #[inline]
    fn layout(&self) -> alloc::Layout {
        unsafe { alloc::Layout::from_size_align_unchecked(self.cap, Self::ALIGNMENT) }
    }

    /// Clears the vector, removing all values.
    ///
    /// Note that this method has no effect on the allocated capacity of the
    /// vector.
    #[inline]
    pub fn clear(&mut self) {
        self.len = 0;
    }

    /// Changes the capacity of the vector.
    ///
    /// Sets the capacity to exactly `new_cap`. Can be used to either grow or
    /// shrink capacity. Backing memory will be reallocated.
    ///
    /// Usually the safe methods `reserve` or `reserve_exact` are a better
    /// choice. This method only exists as a micro-optimization for very
    /// performance-sensitive code where the calculation of the capacity
    /// required has already been performed and you want to avoid doing it
    /// again, or if you want to implement a different growth strategy.
    ///
    /// # Safety
    ///
    /// - `new_cap` must be less than or equal to
    ///   [`MAX_CAPACITY`](FBuf::MAX_CAPACITY)
    /// - `new_cap` must be greater than or equal to [`len()`](FBuf::len)
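    ///
    /// # Example
    ///
    /// A minimal sketch of shrinking to an exact capacity (module path
    /// assumed):
    ///
    /// ```
    /// use feldera_storage::fbuf::FBuf;
    ///
    /// let mut buf = FBuf::with_capacity(1024);
    /// buf.extend_from_slice(&[1, 2, 3]);
    /// // SAFETY: 8 <= MAX_CAPACITY and 8 >= buf.len().
    /// unsafe { buf.change_capacity(8) };
    /// assert_eq!(buf.capacity(), 8);
    /// ```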
    #[inline]
    pub unsafe fn change_capacity(&mut self, new_cap: usize) {
        unsafe {
            debug_assert!(new_cap <= Self::MAX_CAPACITY);
            debug_assert!(new_cap >= self.len);

            if new_cap > 0 {
                let new_ptr = if self.cap > 0 {
                    let new_ptr = alloc::realloc(self.ptr.as_ptr(), self.layout(), new_cap);
                    if new_ptr.is_null() {
                        alloc::handle_alloc_error(alloc::Layout::from_size_align_unchecked(
                            new_cap,
                            Self::ALIGNMENT,
                        ));
                    }
                    new_ptr
                } else {
                    let layout = alloc::Layout::from_size_align_unchecked(new_cap, Self::ALIGNMENT);
                    let new_ptr = alloc::alloc(layout);
                    if new_ptr.is_null() {
                        alloc::handle_alloc_error(layout);
                    }
                    new_ptr
                };
                self.ptr = NonNull::new_unchecked(new_ptr);
                self.cap = new_cap;
            } else if self.cap > 0 {
                alloc::dealloc(self.ptr.as_ptr(), self.layout());
                self.ptr = NonNull::dangling();
                self.cap = 0;
            }
        }
    }

    /// Shrinks the capacity of the vector as much as possible.
    ///
    /// It will drop down as close as possible to the length, but the
    /// allocator may still inform the vector that there is space for a few
    /// more elements.
    #[inline]
    pub fn shrink_to_fit(&mut self) {
        if self.cap != self.len {
            // The new capacity cannot exceed the max because we are shrinking.
            unsafe { self.change_capacity(self.len) };
        }
    }

    /// Returns an unsafe mutable pointer to the vector's buffer.
    ///
    /// The caller must ensure that the vector outlives the pointer this
    /// function returns, or else it will end up pointing to garbage.
    /// Modifying the vector may cause its buffer to be reallocated, which
    /// would also make any pointers to it invalid.
    #[inline]
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.ptr.as_ptr()
    }

    /// Extracts a mutable slice of the entire vector.
    ///
    /// Equivalent to `&mut s[..]`.
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
    }

    /// Returns a raw pointer to the vector's buffer.
    ///
    /// The caller must ensure that the vector outlives the pointer this
    /// function returns, or else it will end up pointing to garbage.
    /// Modifying the vector may cause its buffer to be reallocated, which
    /// would also make any pointers to it invalid.
    ///
    /// The caller must also ensure that the memory the pointer
    /// (non-transitively) points to is never written to (except inside an
    /// `UnsafeCell`) using this pointer or any pointer derived from it. If
    /// you need to mutate the contents of the slice, use
    /// [`as_mut_ptr`](FBuf::as_mut_ptr).
    #[inline]
    pub fn as_ptr(&self) -> *const u8 {
        self.ptr.as_ptr()
    }

    /// Extracts a slice containing the entire vector.
    ///
    /// Equivalent to `&s[..]`.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
    }

    /// Returns the number of bytes the vector can hold without reallocating.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.cap
    }

    /// Reserves capacity for at least `additional` more bytes to be inserted
    /// into the given `FBuf`. The collection may reserve more space
    /// to avoid frequent reallocations. After calling `reserve`, capacity
    /// will be greater than or equal to `self.len() + additional`. Does
    /// nothing if capacity is already sufficient.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds
    /// [`MAX_CAPACITY`](FBuf::MAX_CAPACITY) bytes.
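    ///
    /// # Example
    ///
    /// A minimal sketch (module path assumed):
    ///
    /// ```
    /// use feldera_storage::fbuf::FBuf;
    ///
    /// let mut buf = FBuf::new();
    /// buf.extend_from_slice(b"abc");
    /// buf.reserve(100);
    /// assert!(buf.capacity() >= buf.len() + 100);
    /// ```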
    #[inline]
    pub fn reserve(&mut self, additional: usize) {
        // Cannot wrap because capacity is always at least len, but using
        // `wrapping_sub` avoids having to handle potential overflow here.
        let remaining = self.cap.wrapping_sub(self.len);
        if additional > remaining {
            self.do_reserve(additional);
        }
    }

    /// Extends capacity after `reserve` has found it necessary.
    ///
    /// Actually performing the extension is in this separate function marked
    /// `#[cold]` to hint to the compiler that this branch is not often taken.
    /// This keeps the path for the common case, where capacity is already
    /// sufficient, as fast as possible, and makes `reserve` more likely to be
    /// inlined. This is the same trick that Rust's `Vec::reserve` uses.
    #[cold]
    fn do_reserve(&mut self, additional: usize) {
        let new_cap = self
            .len
            .checked_add(additional)
            .expect("cannot reserve a larger FBuf");
        unsafe { self.grow_capacity_to(new_cap) };
    }

    /// Grows the total capacity of the vector to `new_cap` or more.
    ///
    /// Capacity after this call will be `new_cap` rounded up to the next
    /// power of 2, unless that would exceed the maximum capacity, in which
    /// case capacity is capped at the maximum.
    ///
    /// This is the same growth strategy used by `reserve`, `push` and
    /// `extend_from_slice`.
    ///
    /// Usually the safe methods `reserve` or `reserve_exact` are a better
    /// choice. This method only exists as a micro-optimization for very
    /// performance-sensitive code where the calculation of the capacity
    /// required has already been performed, and you want to avoid doing it
    /// again.
    ///
    /// Maximum capacity is [`MAX_CAPACITY`](FBuf::MAX_CAPACITY) bytes.
    ///
    /// # Panics
    ///
    /// Panics if `new_cap` exceeds [`MAX_CAPACITY`](FBuf::MAX_CAPACITY)
    /// bytes.
    ///
    /// # Safety
    ///
    /// - `new_cap` must be greater than the current
    ///   [`capacity()`](FBuf::capacity)
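    ///
    /// # Example
    ///
    /// A minimal sketch of the power-of-two rounding (module path assumed):
    ///
    /// ```
    /// use feldera_storage::fbuf::FBuf;
    ///
    /// let mut buf = FBuf::new();
    /// // SAFETY: 10 is greater than the current capacity of 0.
    /// unsafe { buf.grow_capacity_to(10) };
    /// assert_eq!(buf.capacity(), 16);
    /// ```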
    #[inline]
    pub unsafe fn grow_capacity_to(&mut self, new_cap: usize) {
        unsafe {
            debug_assert!(new_cap > self.cap);

            let new_cap = if new_cap > (isize::MAX as usize + 1) >> 1 {
                // Rounding up to the next power of 2 would result in
                // `isize::MAX + 1` or higher, which exceeds the maximum
                // capacity. So cap at the maximum instead.
                assert!(
                    new_cap <= Self::MAX_CAPACITY,
                    "cannot reserve a larger FBuf"
                );
                Self::MAX_CAPACITY
            } else {
                // Cannot overflow due to the check above.
                new_cap.next_power_of_two()
            };
            self.change_capacity(new_cap);
        }
    }

    /// Resizes the `FBuf` in place so that `len` is equal to `new_len`.
    ///
    /// If `new_len` is greater than `len`, the `FBuf` is extended by the
    /// difference, with each additional slot filled with `value`. If
    /// `new_len` is less than `len`, the `FBuf` is simply truncated.
    ///
    /// # Panics
    ///
    /// Panics if the new length exceeds [`MAX_CAPACITY`](FBuf::MAX_CAPACITY)
    /// bytes.
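    ///
    /// # Example
    ///
    /// A minimal sketch (module path assumed):
    ///
    /// ```
    /// use feldera_storage::fbuf::FBuf;
    ///
    /// let mut buf = FBuf::new();
    /// buf.resize(4, 0xff);
    /// assert_eq!(buf.as_slice(), &[0xff; 4]);
    /// buf.resize(2, 0);
    /// assert_eq!(buf.as_slice(), &[0xff; 2]);
    /// ```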
    #[inline]
    pub fn resize(&mut self, new_len: usize, value: u8) {
        if new_len > self.len {
            let additional = new_len - self.len;
            self.reserve(additional);
            unsafe {
                core::ptr::write_bytes(self.ptr.as_ptr().add(self.len), value, additional);
            }
        }
        unsafe {
            self.set_len(new_len);
        }
    }

    /// Returns `true` if the vector contains no elements.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns the number of elements in the vector, also referred to as its
    /// 'length'.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Copies and appends all bytes in a slice to the `FBuf`.
    ///
    /// The elements of the slice are appended in order.
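    ///
    /// # Example
    ///
    /// A minimal sketch (module path assumed):
    ///
    /// ```
    /// use feldera_storage::fbuf::FBuf;
    ///
    /// let mut buf = FBuf::new();
    /// buf.extend_from_slice(b"hello ");
    /// buf.extend_from_slice(b"world");
    /// assert_eq!(buf.as_slice(), b"hello world");
    /// ```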
    #[inline]
    pub fn extend_from_slice(&mut self, other: &[u8]) {
        if !other.is_empty() {
            self.reserve(other.len());
            unsafe {
                core::ptr::copy_nonoverlapping(
                    other.as_ptr(),
                    self.as_mut_ptr().add(self.len()),
                    other.len(),
                );
            }
            self.len += other.len();
        }
    }

    /// Removes the last element from a vector and returns it, or `None` if it
    /// is empty.
    #[inline]
    pub fn pop(&mut self) -> Option<u8> {
        if self.len == 0 {
            None
        } else {
            let result = self[self.len - 1];
            self.len -= 1;
            Some(result)
        }
    }

    /// Appends an element to the back of a collection.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds
    /// [`MAX_CAPACITY`](FBuf::MAX_CAPACITY) bytes.
    #[inline]
    pub fn push(&mut self, value: u8) {
        if self.len == self.cap {
            self.reserve_for_push();
        }

        unsafe {
            self.as_mut_ptr().add(self.len).write(value);
            self.len += 1;
        }
    }

    /// Extends capacity by at least 1 byte after `push` has found it
    /// necessary.
    ///
    /// Actually performing the extension is in this separate function marked
    /// `#[cold]` to hint to the compiler that this branch is not often taken.
    /// This keeps the path for the common case, where capacity is already
    /// sufficient, as fast as possible, and makes `push` more likely to be
    /// inlined. This is the same trick that Rust's `Vec::push` uses.
    #[cold]
    fn reserve_for_push(&mut self) {
        // `len` is always less than `isize::MAX`, so there is no possibility
        // of overflow here.
        let new_cap = self.len + 1;
        unsafe { self.grow_capacity_to(new_cap) };
    }

    /// Reserves the minimum capacity for exactly `additional` more elements to
    /// be inserted in the given `FBuf`. After calling
    /// `reserve_exact`, capacity will be greater than or equal
    /// to `self.len() + additional`. Does nothing if the capacity is already
    /// sufficient.
    ///
    /// Note that the allocator may give the collection more space than it
    /// requests. Therefore, capacity cannot be relied upon to be precisely
    /// minimal. Prefer `reserve` if future insertions are expected.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds
    /// [`MAX_CAPACITY`](FBuf::MAX_CAPACITY) bytes.
    #[inline]
    pub fn reserve_exact(&mut self, additional: usize) {
        // This function does not use the hot/cold paths trick that `reserve`
        // and `push` do, on the assumption that the user probably knows this
        // will require an increase in capacity. Otherwise, they'd likely use
        // `reserve`.
        let new_cap = self
            .len
            .checked_add(additional)
            .expect("cannot reserve a larger FBuf");
        if new_cap > self.cap {
            assert!(
                new_cap <= Self::MAX_CAPACITY,
                "cannot reserve a larger FBuf"
            );
            unsafe { self.change_capacity(new_cap) };
        }
    }

    /// Forces the length of the vector to `new_len`.
    ///
    /// This is a low-level operation that maintains none of the normal
    /// invariants of the type.
    ///
    /// # Safety
    ///
    /// - `new_len` must be less than or equal to
    ///   [`capacity()`](FBuf::capacity)
    /// - The elements at `old_len..new_len` must be initialized
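    ///
    /// # Example
    ///
    /// A minimal sketch of the reserve/initialize/`set_len` pattern (module
    /// path assumed):
    ///
    /// ```
    /// use feldera_storage::fbuf::FBuf;
    ///
    /// let mut buf = FBuf::with_capacity(8);
    /// unsafe {
    ///     // Initialize the bytes before exposing them via `set_len`.
    ///     core::ptr::write_bytes(buf.as_mut_ptr(), 0, 8);
    ///     buf.set_len(8);
    /// }
    /// assert_eq!(buf.len(), 8);
    /// ```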
    #[inline]
    pub unsafe fn set_len(&mut self, new_len: usize) {
        debug_assert!(new_len <= self.capacity());

        self.len = new_len;
    }

    /// Converts the vector into `Box<[u8]>`.
    ///
    /// This method reallocates and copies the underlying bytes. Any excess
    /// capacity is dropped.
    #[inline]
    pub fn into_boxed_slice(self) -> Box<[u8]> {
        self.into_vec().into_boxed_slice()
    }

    /// Converts the vector into `Vec<u8>`.
    ///
    /// This method reallocates and copies the underlying bytes. Any excess
    /// capacity is dropped.
    #[inline]
    pub fn into_vec(self) -> Vec<u8> {
        Vec::from(self.as_ref())
    }

    /// Reads all bytes until EOF from `r` and appends them to this
    /// `FBuf`.
    ///
    /// If successful, this function will return the total number of bytes
    /// read.
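    ///
    /// # Example
    ///
    /// A minimal sketch using a byte slice as the reader (module path
    /// assumed):
    ///
    /// ```
    /// use feldera_storage::fbuf::FBuf;
    ///
    /// let mut buf = FBuf::new();
    /// let mut src: &[u8] = b"hello";
    /// let n = buf.extend_from_reader(&mut src).unwrap();
    /// assert_eq!(n, 5);
    /// assert_eq!(buf.as_slice(), b"hello");
    /// ```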
    pub fn extend_from_reader<R: Read + ?Sized>(&mut self, r: &mut R) -> io::Result<usize> {
        let start_len = self.len();
        let start_cap = self.capacity();

        // Extra initialized bytes from a previous loop iteration.
        let mut initialized = 0;
        loop {
            if self.len() == self.capacity() {
                // No available capacity, reserve some space.
                self.reserve(32);
            }

            let read_buf_start = unsafe { self.as_mut_ptr().add(self.len) };
            let read_buf_len = self.capacity() - self.len();

            // Initialize the uninitialized portion of the available space.
            unsafe {
                // The first `initialized` bytes don't need to be zeroed.
                // This leaves us `read_buf_len - initialized` bytes to zero
                // starting at `initialized`.
                core::ptr::write_bytes(
                    read_buf_start.add(initialized),
                    0,
                    read_buf_len - initialized,
                );
            }

            // The entire read buffer is now initialized, so we can create a
            // mutable slice of it.
            let read_buf = unsafe { core::slice::from_raw_parts_mut(read_buf_start, read_buf_len) };

            match r.read(read_buf) {
                Ok(read) => {
                    // We filled `read` additional bytes.
                    unsafe {
                        self.set_len(self.len() + read);
                    }
                    initialized = read_buf_len - read;

                    if read == 0 {
                        return Ok(self.len() - start_len);
                    }
                }
                Err(e) if e.kind() == ErrorKind::Interrupted => continue,
                Err(e) => return Err(e),
            }

            if self.len() == self.capacity() && self.capacity() == start_cap {
                // The buffer might be an exact fit. Let's read into a probe
                // buffer and see if it returns `Ok(0)`. If so, we've avoided
                // an unnecessary doubling of the capacity. But if not, append
                // the probe buffer to the primary buffer and let its capacity
                // grow.
                let mut probe = [0u8; 32];

                loop {
                    match r.read(&mut probe) {
                        Ok(0) => return Ok(self.len() - start_len),
                        Ok(n) => {
                            self.extend_from_slice(&probe[..n]);
                            break;
                        }
                        Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
                        Err(e) => return Err(e),
                    }
                }
            }
        }
    }

    /// Reads `len` bytes from `file` at the given `offset` and appends them
    /// to this `FBuf`.
    ///
    /// This avoids zero-initializing the buffer before reading into it.
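    ///
    /// # Example
    ///
    /// A minimal sketch; `data.bin` is a placeholder path (module path
    /// assumed):
    ///
    /// ```no_run
    /// use feldera_storage::fbuf::FBuf;
    /// use std::fs::File;
    ///
    /// let file = File::open("data.bin")?;
    /// let mut buf = FBuf::new();
    /// // Read the first 512-byte block of the file.
    /// buf.read_exact_at(&file, 0, 512)?;
    /// assert_eq!(buf.len(), 512);
    /// # Ok::<(), std::io::Error>(())
    /// ```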
    pub fn read_exact_at(
        &mut self,
        file: &File,
        mut offset: u64,
        mut len: usize,
    ) -> Result<(), IoError> {
        self.reserve(len);
        while len > 0 {
            let retval = unsafe {
                libc::pread(
                    file.as_raw_fd(),
                    self.as_mut_ptr().add(self.len) as *mut c_void,
                    len,
                    offset as i64,
                )
            };
            match retval.cmp(&0) {
                Ordering::Equal => return Err(ErrorKind::UnexpectedEof.into()),
                Ordering::Less => {
                    let error = IoError::last_os_error();
                    if error.kind() != ErrorKind::Interrupted {
                        return Err(error);
                    }
                }
                Ordering::Greater => {
                    self.len += retval as usize;
                    len -= retval as usize;
                    offset += retval as u64;
                }
            }
        }
        Ok(())
    }
}

impl From<FBuf> for Vec<u8> {
    #[inline]
    fn from(aligned: FBuf) -> Self {
        aligned.to_vec()
    }
}

impl AsMut<[u8]> for FBuf {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.as_mut_slice()
    }
}

impl AsRef<[u8]> for FBuf {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl Borrow<[u8]> for FBuf {
    #[inline]
    fn borrow(&self) -> &[u8] {
        self.as_slice()
    }
}

impl BorrowMut<[u8]> for FBuf {
    #[inline]
    fn borrow_mut(&mut self) -> &mut [u8] {
        self.as_mut_slice()
    }
}

impl Clone for FBuf {
    #[inline]
    fn clone(&self) -> Self {
        unsafe {
            let mut result = FBuf::with_capacity(self.len);
            result.len = self.len;
            core::ptr::copy_nonoverlapping(self.as_ptr(), result.as_mut_ptr(), self.len);
            result
        }
    }
}

impl fmt::Debug for FBuf {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.as_slice().fmt(f)
    }
}

impl Default for FBuf {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl Deref for FBuf {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &Self::Target {
        self.as_slice()
    }
}

impl DerefMut for FBuf {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.as_mut_slice()
    }
}

impl<I: slice::SliceIndex<[u8]>> Index<I> for FBuf {
    type Output = <I as slice::SliceIndex<[u8]>>::Output;

    #[inline]
    fn index(&self, index: I) -> &Self::Output {
        &self.as_slice()[index]
    }
}

impl<I: slice::SliceIndex<[u8]>> IndexMut<I> for FBuf {
    #[inline]
    fn index_mut(&mut self, index: I) -> &mut Self::Output {
        &mut self.as_mut_slice()[index]
    }
}

impl io::Write for FBuf {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.extend_from_slice(buf);
        Ok(buf.len())
    }

    #[inline]
    fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
        let len = bufs.iter().map(|b| b.len()).sum();
        self.reserve(len);
        for buf in bufs {
            self.extend_from_slice(buf);
        }
        Ok(len)
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }

    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        self.extend_from_slice(buf);
        Ok(())
    }
}

#[cfg(test)]
impl Eq for FBuf {}

#[cfg(test)]
impl PartialEq<Self> for FBuf {
    fn eq(&self, other: &Self) -> bool {
        self.as_slice() == other.as_slice()
    }
}

#[cfg(test)]
impl PartialOrd for FBuf {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Archive for FBuf {
    type Archived = ArchivedVec<u8>;
    type Resolver = VecResolver;

    #[inline]
    unsafe fn resolve(&self, pos: usize, resolver: Self::Resolver, out: *mut Self::Archived) {
        unsafe {
            ArchivedVec::resolve_from_slice(self.as_slice(), pos, resolver, out);
        }
    }
}

impl<S: ScratchSpace + Serializer + ?Sized> Serialize<S> for FBuf {
    #[inline]
    fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
        serializer.align(Self::ALIGNMENT)?;
        ArchivedVec::<Archived<u8>>::serialize_from_slice(self.as_slice(), serializer)
    }
}

// SAFETY: `FBuf` is safe to send to another thread.
unsafe impl Send for FBuf {}

// SAFETY: `FBuf` is safe to share between threads.
unsafe impl Sync for FBuf {}

impl Unpin for FBuf {}

/// An [`rkyv`] serializer made specifically to work with [`FBuf`].
///
/// This serializer makes it easier for the compiler to perform emplacement
/// optimizations and may give better performance than a basic
/// `WriteSerializer`.
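///
/// # Example
///
/// A minimal sketch of serializing a value into an `FBuf` (the module path
/// and the rkyv 0.7 `Serializer::serialize_value` API are assumed):
///
/// ```
/// use feldera_storage::fbuf::{FBuf, FBufSerializer};
/// use rkyv::ser::Serializer;
///
/// let mut serializer = FBufSerializer::new(FBuf::new(), usize::MAX);
/// serializer.serialize_value(&0x1234_5678u32).unwrap();
/// let buf: FBuf = serializer.into_inner();
/// assert_eq!(buf.len(), 4);
/// ```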
#[derive(Debug)]
pub struct FBufSerializer<A> {
    inner: A,
    limit: usize,
}

/// The error returned when an [`FBufSerializer`] write would exceed the
/// serializer's byte limit.
#[derive(Debug)]
pub struct LimitExceeded;

impl<A: Borrow<FBuf>> FBufSerializer<A> {
    /// Creates a new `FBufSerializer` by wrapping a `Borrow<FBuf>`.
    ///
    /// The serializer fails with [`LimitExceeded`] if the serialized data
    /// would grow beyond `limit` bytes.
    #[inline]
    pub fn new(inner: A, limit: usize) -> Self {
        Self { inner, limit }
    }

    /// Consumes the serializer and returns the underlying type.
    #[inline]
    pub fn into_inner(self) -> A {
        self.inner
    }
}

impl<A: Default> Default for FBufSerializer<A> {
    #[inline]
    fn default() -> Self {
        Self {
            inner: A::default(),
            limit: usize::MAX,
        }
    }
}

impl<A> Fallible for FBufSerializer<A> {
    type Error = LimitExceeded;
}

impl<A: Borrow<FBuf> + BorrowMut<FBuf>> Serializer for FBufSerializer<A> {
    #[inline]
    fn pos(&self) -> usize {
        self.inner.borrow().len()
    }

    #[inline]
    fn write(&mut self, bytes: &[u8]) -> Result<(), Self::Error> {
        let vec = self.inner.borrow_mut();
        if vec.len() + bytes.len() > self.limit {
            Err(LimitExceeded)
        } else {
            vec.extend_from_slice(bytes);
            Ok(())
        }
    }

    #[inline]
    unsafe fn resolve_aligned<T: Archive + ?Sized>(
        &mut self,
        value: &T,
        resolver: T::Resolver,
    ) -> Result<usize, Self::Error> {
        unsafe {
            let pos = self.pos();
            debug_assert_eq!(pos & (mem::align_of::<T::Archived>() - 1), 0);
            let vec = self.inner.borrow_mut();
            let additional = mem::size_of::<T::Archived>();
            if vec.len() + additional > self.limit {
                return Err(LimitExceeded);
            }
            vec.reserve(additional);
            vec.set_len(vec.len() + additional);

            let ptr = vec.as_mut_ptr().add(pos).cast::<T::Archived>();
            ptr.write_bytes(0, 1);
            value.resolve(pos, resolver, ptr);

            Ok(pos)
        }
    }

    #[inline]
    unsafe fn resolve_unsized_aligned<T: ArchiveUnsized + ?Sized>(
        &mut self,
        value: &T,
        to: usize,
        metadata_resolver: T::MetadataResolver,
    ) -> Result<usize, Self::Error> {
        unsafe {
            let from = self.pos();
            debug_assert_eq!(from & (mem::align_of::<RelPtr<T::Archived>>() - 1), 0);
            let vec = self.inner.borrow_mut();
            let additional = mem::size_of::<RelPtr<T::Archived>>();
            if vec.len() + additional > self.limit {
                return Err(LimitExceeded);
            }
            vec.reserve(additional);
            vec.set_len(vec.len() + additional);

            let ptr = vec.as_mut_ptr().add(from).cast::<RelPtr<T::Archived>>();
            ptr.write_bytes(0, 1);

            value.resolve_unsized(from, to, metadata_resolver, ptr);
            Ok(from)
        }
    }
}
871}