#[cfg(not(feature = "std"))]
use alloc::{vec, vec::Vec};
use core::cmp::max;
use core::convert::Infallible;
use core::fmt::{Debug, Display};
use core::iter::{DoubleEndedIterator, ExactSizeIterator};
use core::marker::PhantomData;
use core::ops::{Add, AddAssign, Deref, DerefMut, Index, IndexMut, Sub, SubAssign};
use core::ptr::write_bytes;
use crate::endian_scalar::emplace_scalar;
use crate::primitives::*;
use crate::push::{Push, PushAlignment};
use crate::read_scalar;
use crate::table::Table;
use crate::vector::Vector;
use crate::vtable::{field_index_to_field_offset, VTable};
use crate::vtable_writer::VTableWriter;
/// Trait for the memory that backs a [`FlatBufferBuilder`]. FlatBuffers are
/// built back to front, so when the buffer fills up the allocator must "grow
/// downwards": enlarge the buffer and move the existing contents to its tail,
/// leaving the newly exposed front bytes zeroed.
///
/// # Safety
///
/// Implementations must preserve already-written data at the tail of the
/// buffer when growing; the builder addresses that data by its distance from
/// the end of the buffer.
pub unsafe trait Allocator: DerefMut<Target = [u8]> {
type Error: Display + Debug;
/// Grows the buffer, moving the existing contents to its end.
fn grow_downwards(&mut self) -> Result<(), Self::Error>;
/// Returns the size of the buffer in bytes.
fn len(&self) -> usize;
}
/// The default [`Allocator`], backed by a growable `Vec<u8>` that doubles in
/// size whenever the builder needs more room.
#[derive(Default)]
pub struct DefaultAllocator(Vec<u8>);
impl DefaultAllocator {
pub fn from_vec(buffer: Vec<u8>) -> Self {
Self(buffer)
}
}
impl Deref for DefaultAllocator {
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for DefaultAllocator {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
unsafe impl Allocator for DefaultAllocator {
type Error = Infallible;
fn grow_downwards(&mut self) -> Result<(), Self::Error> {
let old_len = self.0.len();
let new_len = max(1, old_len * 2);
self.0.resize(new_len, 0);
if new_len == 1 {
return Ok(());
}
// Calculate the midpoint and copy the old contents to the new end position.
let middle = new_len / 2;
{
let (left, right) = self.0.split_at_mut(middle);
right.copy_from_slice(left);
}
// Finally, zero out the old copy at the front.
{
let ptr = self.0[..middle].as_mut_ptr();
// Safety: `ptr` is valid for `middle` writable bytes.
unsafe {
write_bytes(ptr, 0, middle);
}
}
Ok(())
}
fn len(&self) -> usize {
self.0.len()
}
}
/// Records a field written into the current table: its offset from the end of
/// the buffer (`off`) and its vtable slot (`id`).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct FieldLoc {
off: UOffsetT,
id: VOffsetT,
}
/// `FlatBufferBuilder` builds a FlatBuffer by writing its contents back to
/// front into an [`Allocator`]. `head` tracks the current write position as a
/// distance from the end of the buffer, and offsets handed out to callers are
/// measured the same way until the buffer is finished.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FlatBufferBuilder<'fbb, A: Allocator = DefaultAllocator> {
allocator: A,
head: ReverseIndex,
field_locs: Vec<FieldLoc>,
written_vtable_revpos: Vec<UOffsetT>,
nested: bool,
finished: bool,
min_align: usize,
force_defaults: bool,
strings_pool: Vec<WIPOffset<&'fbb str>>,
_phantom: PhantomData<&'fbb ()>,
}
impl<'fbb> FlatBufferBuilder<'fbb, DefaultAllocator> {
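/// Create a [`FlatBufferBuilder`] that is ready for writing.
///
/// # Examples
///
/// A minimal end-to-end sketch: write a string as the buffer root, finish,
/// and read back the raw bytes.
///
/// ```
/// let mut builder = flatbuffers::FlatBufferBuilder::new();
/// let root = builder.create_string("hello");
/// builder.finish_minimal(root);
/// assert!(!builder.finished_data().is_empty());
/// ```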
pub fn new() -> Self {
Self::with_capacity(0)
}
#[deprecated(note = "replaced with `with_capacity`", since = "0.8.5")]
pub fn new_with_capacity(size: usize) -> Self {
Self::with_capacity(size)
}
pub fn with_capacity(size: usize) -> Self {
Self::from_vec(vec![0; size])
}
pub fn from_vec(buffer: Vec<u8>) -> Self {
assert!(
buffer.len() <= FLATBUFFERS_MAX_BUFFER_SIZE,
"cannot initialize buffer bigger than 2 gigabytes"
);
let allocator = DefaultAllocator::from_vec(buffer);
Self::new_in(allocator)
}
/// Destroy the FlatBufferBuilder, returning its internal buffer and the index
/// into it that marks the start of valid data.
pub fn collapse(self) -> (Vec<u8>, usize) {
let index = self.head.to_forward_index(&self.allocator);
(self.allocator.0, index)
}
}
impl<'fbb, A: Allocator> FlatBufferBuilder<'fbb, A> {
/// Create a [`FlatBufferBuilder`] that writes into the given allocator.
pub fn new_in(allocator: A) -> Self {
let head = ReverseIndex::end();
FlatBufferBuilder {
allocator,
head,
field_locs: Vec::new(),
written_vtable_revpos: Vec::new(),
nested: false,
finished: false,
min_align: 0,
force_defaults: false,
strings_pool: Vec::new(),
_phantom: PhantomData,
}
}
/// Destroy the FlatBufferBuilder, returning its allocator and the index into
/// it that marks the start of valid data.
pub fn collapse_in(self) -> (A, usize) {
let index = self.head.to_forward_index(&self.allocator);
(self.allocator, index)
}
/// Reset the FlatBufferBuilder so it can be reused, zeroing the bytes that
/// were written but keeping the allocated buffer for the next message.
pub fn reset(&mut self) {
// Memory is not deallocated; only the used region is cleared.
self.allocator[self.head.range_to_end()].iter_mut().for_each(|x| *x = 0);
self.head = ReverseIndex::end();
self.written_vtable_revpos.clear();
self.nested = false;
self.finished = false;
self.min_align = 0;
self.strings_pool.clear();
}
/// Push a `Push`-able value onto the front of the in-progress data, and
/// return an offset that tracks its distance from the end of the buffer.
#[inline]
pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
let sz = P::size();
self.align(sz, P::alignment());
self.make_space(sz);
{
let (dst, rest) = self.allocator[self.head.range_to_end()].split_at_mut(sz);
unsafe { x.push(dst, rest.len()) };
}
WIPOffset::new(self.used_space() as UOffsetT)
}
/// Push a value into the in-progress table at vtable slot `slotoff`, unless
/// it equals `default` (it is always written when `force_defaults` is set).
#[inline]
pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
self.assert_nested("push_slot");
if x != default || self.force_defaults {
self.push_slot_always(slotoff, x);
}
}
/// Push a value into the in-progress table at vtable slot `slotoff`,
/// regardless of whether it equals its default.
#[inline]
pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
self.assert_nested("push_slot_always");
let off = self.push(x);
self.track_field(slotoff, off.value());
}
#[inline]
pub fn num_written_vtables(&self) -> usize {
self.written_vtable_revpos.len()
}
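/// Start a new table, to be completed by a matching call to `end_table`.
/// Fields are added with the `push_slot*` methods while the table is open.
///
/// # Examples
///
/// A hand-rolled sketch without generated code; the vtable slot for field `i`
/// is `(i + 2) * 2` bytes, so fields 0 and 1 live at slots 4 and 6.
///
/// ```
/// let mut builder = flatbuffers::FlatBufferBuilder::new();
/// let start = builder.start_table();
/// builder.push_slot::<u8>(4, 7, 0); // field 0: differs from its default, so it is written
/// builder.push_slot::<u8>(6, 0, 0); // field 1: equals its default, so it is skipped
/// let table = builder.end_table(start);
/// builder.finish_minimal(table);
/// assert!(!builder.finished_data().is_empty());
/// ```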
#[inline]
pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
self.assert_not_nested(
"start_table can not be called when a table or vector is under construction",
);
self.nested = true;
WIPOffset::new(self.used_space() as UOffsetT)
}
/// Finish the table started by `start_table`, writing or deduplicating its
/// vtable and returning the offset of the finished table.
#[inline]
pub fn end_table(
&mut self,
off: WIPOffset<TableUnfinishedWIPOffset>,
) -> WIPOffset<TableFinishedWIPOffset> {
self.assert_nested("end_table");
let o = self.write_vtable(off);
self.nested = false;
self.field_locs.clear();
WIPOffset::new(o.value())
}
/// Start a vector of `num_items` items of type `T`, aligning the buffer for
/// the element type and the length prefix. Must be paired with a later call
/// to `end_vector`.
#[inline]
pub fn start_vector<T: Push>(&mut self, num_items: usize) {
self.assert_not_nested(
"start_vector cannot be called when a table or vector is under construction",
);
self.nested = true;
self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
}
/// Finish the vector started by `start_vector`, writing its u32 length
/// prefix and returning an offset to it.
#[inline]
pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
self.assert_nested("end_vector");
self.nested = false;
let o = self.push::<UOffsetT>(num_elems as UOffsetT);
WIPOffset::new(o.value())
}
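/// Create a zero-terminated, length-prefixed UTF-8 string, deduplicated
/// against every string previously created through this method.
///
/// # Examples
///
/// A small sketch showing that repeated strings share one buffer location:
///
/// ```
/// let mut builder = flatbuffers::FlatBufferBuilder::new();
/// let a = builder.create_shared_string("repeated");
/// let b = builder.create_shared_string("repeated");
/// // The second call returns the offset of the already-written string.
/// assert_eq!(a.value(), b.value());
/// ```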
#[inline]
pub fn create_shared_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
self.assert_not_nested(
"create_shared_string cannot be called when a table or vector is under construction",
);
let buf = &self.allocator;
// Pooled offsets are distances from the end of the buffer. Recover each
// stored string (a little-endian u32 length prefix followed by its bytes)
// and compare it to `s`; the pool is kept sorted so binary search works.
let found = self.strings_pool.binary_search_by(|offset| {
let ptr = offset.value() as usize;
let str_memory = &buf[buf.len() - ptr..];
let size =
u32::from_le_bytes([str_memory[0], str_memory[1], str_memory[2], str_memory[3]])
as usize;
let string_size: usize = 4;
let iter = str_memory[string_size..size + string_size].iter();
iter.cloned().cmp(s.bytes())
});
match found {
Ok(index) => self.strings_pool[index],
Err(index) => {
let address = WIPOffset::new(self.create_byte_string(s.as_bytes()).value());
self.strings_pool.insert(index, address);
address
}
}
}
/// Create a zero-terminated, length-prefixed UTF-8 string. Unlike
/// `create_shared_string`, nothing is deduplicated.
#[inline]
pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
self.assert_not_nested(
"create_string cannot be called when a table or vector is under construction",
);
WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
}
/// Create a zero-terminated, length-prefixed byte vector.
#[inline]
pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
self.assert_not_nested(
"create_byte_string cannot be called when a table or vector is under construction",
);
// The buffer is written back to front: reserve room for the trailing zero
// byte when aligning, then write the terminator, the data, and finally the
// u32 length prefix.
self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
self.push(0u8);
self.push_bytes_unprefixed(data);
self.push(data.len() as UOffsetT);
WIPOffset::new(self.used_space() as UOffsetT)
}
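/// Create a vector of `Push`-able objects from a slice, prefixed with its
/// length as a u32.
///
/// # Examples
///
/// A small sketch; the expected size is 3 elements of 4 bytes each, a 4-byte
/// length prefix, and the 4-byte root offset written by `finish_minimal`.
///
/// ```
/// let mut builder = flatbuffers::FlatBufferBuilder::new();
/// let nums = builder.create_vector(&[7u32, 8, 9]);
/// builder.finish_minimal(nums);
/// assert_eq!(builder.finished_data().len(), 20);
/// ```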
#[inline]
pub fn create_vector<'a: 'b, 'b, T: Push + 'b>(
&'a mut self,
items: &'b [T],
) -> WIPOffset<Vector<'fbb, T::Output>> {
let elem_size = T::size();
let slice_size = items.len() * elem_size;
self.align(slice_size, T::alignment().max_of(SIZE_UOFFSET));
self.ensure_capacity(slice_size + UOffsetT::size());
self.head -= slice_size;
let mut written_len = self.head.distance_to_end();
let buf = &mut self.allocator[self.head.range_to(self.head + slice_size)];
for (item, out) in items.iter().zip(buf.chunks_exact_mut(elem_size)) {
written_len -= elem_size;
unsafe { item.push(out, written_len) };
}
WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
}
/// Create a vector of `Push`-able objects from an iterator. The items are
/// consumed in reverse order because the buffer is written back to front.
#[inline]
pub fn create_vector_from_iter<T: Push>(
&mut self,
items: impl ExactSizeIterator<Item = T> + DoubleEndedIterator,
) -> WIPOffset<Vector<'fbb, T::Output>> {
let elem_size = T::size();
self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
let mut actual = 0;
for item in items.rev() {
self.push(item);
actual += 1;
}
WIPOffset::new(self.push::<UOffsetT>(actual).value())
}
/// Set whether default values are written into tables. Fields equal to their
/// default are normally skipped to save space; forcing defaults disables that
/// optimization.
#[inline]
pub fn force_defaults(&mut self, force_defaults: bool) {
self.force_defaults = force_defaults;
}
/// Get the byte slice of the data written so far, whether or not the buffer
/// has been finished.
#[inline]
pub fn unfinished_data(&self) -> &[u8] {
&self.allocator[self.head.range_to_end()]
}
/// Get the byte slice for the finished buffer.
///
/// In debug builds this panics if the buffer has not been finished.
#[inline]
pub fn finished_data(&self) -> &[u8] {
self.assert_finished("finished_data cannot be called when the buffer is not yet finished");
&self.allocator[self.head.range_to_end()]
}
/// Returns a mutable view of the whole backing buffer together with the
/// index at which the finished data begins.
#[inline]
pub fn mut_finished_buffer(&mut self) -> (&mut [u8], usize) {
let index = self.head.to_forward_index(&self.allocator);
(&mut self.allocator[..], index)
}
/// Assert that a required field was set on the table finished at
/// `tab_revloc`, panicking with `assert_msg_name` if it is missing.
#[inline]
pub fn required(
&self,
tab_revloc: WIPOffset<TableFinishedWIPOffset>,
slot_byte_loc: VOffsetT,
assert_msg_name: &'static str,
) {
let idx = self.used_space() - tab_revloc.value() as usize;
let tab = unsafe { Table::new(&self.allocator[self.head.range_to_end()], idx) };
let o = tab.vtable().get(slot_byte_loc) as usize;
assert!(o != 0, "missing required field {}", assert_msg_name);
}
/// Finish the buffer with `root` as its root object, prefixing it with the
/// total size of the written data as a u32 and optionally writing a 4-byte
/// file identifier.
#[inline]
pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
self.finish_with_opts(root, file_identifier, true);
}
/// Finish the buffer with `root` as its root object, optionally writing a
/// 4-byte file identifier.
#[inline]
pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
self.finish_with_opts(root, file_identifier, false);
}
/// Finish the buffer with `root` as its root object, without a size prefix
/// or file identifier.
#[inline]
pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
self.finish_with_opts(root, None, false);
}
#[inline]
fn used_space(&self) -> usize {
self.head.distance_to_end()
}
#[inline]
fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
let fl = FieldLoc { id: slot_off, off };
self.field_locs.push(fl);
}
fn write_vtable(
&mut self,
table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
) -> WIPOffset<VTableWIPOffset> {
self.assert_nested("write_vtable");
// Write the vtable for the table that just ended, deduplicating it against
// all previously written vtables. A placeholder soffset is pushed first and
// patched below once the vtable's final location is known.
let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0).value());
let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
self.make_space(vtable_byte_len);
let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
debug_assert!(table_object_size < 0x10000);
let vt_start_pos = self.head;
let vt_end_pos = self.head + vtable_byte_len;
{
let vtfw =
&mut VTableWriter::init(&mut self.allocator[vt_start_pos.range_to(vt_end_pos)]);
vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
vtfw.write_object_inline_size(table_object_size as VOffsetT);
for &fl in self.field_locs.iter() {
let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
vtfw.write_field_offset(fl.id, pos);
}
}
// Look for a previously written vtable with identical bytes; the list of
// written vtables is kept sorted so a binary search suffices.
let new_vt_bytes = &self.allocator[vt_start_pos.range_to(vt_end_pos)];
let found = self.written_vtable_revpos.binary_search_by(|old_vtable_revpos: &UOffsetT| {
let old_vtable_pos = self.allocator.len() - *old_vtable_revpos as usize;
// Safety: `old_vtable_pos` points at a vtable written earlier by this builder.
let old_vtable = unsafe { VTable::init(&self.allocator, old_vtable_pos) };
new_vt_bytes.cmp(old_vtable.as_bytes())
});
let final_vtable_revpos = match found {
Ok(i) => {
// An identical vtable already exists: erase the one just written and
// reuse the earlier copy.
VTableWriter::init(&mut self.allocator[vt_start_pos.range_to(vt_end_pos)]).clear();
self.head += vtable_byte_len;
self.written_vtable_revpos[i]
}
Err(i) => {
// A new vtable: remember its position, keeping the list sorted.
let new_vt_revpos = self.used_space() as UOffsetT;
self.written_vtable_revpos.insert(i, new_vt_revpos);
new_vt_revpos
}
};
// Patch the placeholder at the start of the table with the signed offset
// from the table to its final (possibly shared) vtable.
let table_pos = self.allocator.len() - object_revloc_to_vtable.value() as usize;
if cfg!(debug_assertions) {
let tmp_soffset_to_vt = unsafe {
read_scalar::<UOffsetT>(&self.allocator[table_pos..table_pos + SIZE_UOFFSET])
};
assert_eq!(tmp_soffset_to_vt, 0xF0F0_F0F0);
}
let buf = &mut self.allocator[table_pos..table_pos + SIZE_SOFFSET];
unsafe {
emplace_scalar::<SOffsetT>(
buf,
final_vtable_revpos as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
);
}
self.field_locs.clear();
object_revloc_to_vtable
}
#[inline]
fn grow_allocator(&mut self) {
let starting_active_size = self.used_space();
self.allocator.grow_downwards().expect("Flatbuffer allocation failure");
let ending_active_size = self.used_space();
debug_assert_eq!(starting_active_size, ending_active_size);
}
fn finish_with_opts<T>(
&mut self,
root: WIPOffset<T>,
file_identifier: Option<&str>,
size_prefixed: bool,
) {
self.assert_not_finished("buffer cannot be finished when it is already finished");
self.assert_not_nested(
"buffer cannot be finished when a table or vector is under construction",
);
self.written_vtable_revpos.clear();
let to_align = {
let a = SIZE_UOFFSET;
let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
let c = if file_identifier.is_some() { FILE_IDENTIFIER_LENGTH } else { 0 };
a + b + c
};
{
let ma = PushAlignment::new(self.min_align);
self.align(to_align, ma);
}
if let Some(ident) = file_identifier {
debug_assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
self.push_bytes_unprefixed(ident.as_bytes());
}
self.push(root);
if size_prefixed {
let sz = self.used_space() as UOffsetT;
self.push::<UOffsetT>(sz);
}
self.finished = true;
}
#[inline]
fn align(&mut self, len: usize, alignment: PushAlignment) {
self.track_min_align(alignment.value());
let s = self.used_space();
self.make_space(padding_bytes(s + len, alignment.value()));
}
#[inline]
fn track_min_align(&mut self, alignment: usize) {
self.min_align = max(self.min_align, alignment);
}
#[inline]
fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
let n = self.make_space(x.len());
self.allocator[n.range_to(n + x.len())].copy_from_slice(x);
n.to_forward_index(&self.allocator) as UOffsetT
}
#[inline]
fn make_space(&mut self, want: usize) -> ReverseIndex {
self.ensure_capacity(want);
self.head -= want;
self.head
}
#[inline]
fn ensure_capacity(&mut self, want: usize) -> usize {
if self.unused_ready_space() >= want {
return want;
}
assert!(want <= FLATBUFFERS_MAX_BUFFER_SIZE, "cannot grow buffer beyond 2 gigabytes");
while self.unused_ready_space() < want {
self.grow_allocator();
}
want
}
#[inline]
fn unused_ready_space(&self) -> usize {
self.allocator.len() - self.head.distance_to_end()
}
#[inline]
fn assert_nested(&self, fn_name: &'static str) {
debug_assert!(
self.nested,
"incorrect FlatBufferBuilder usage: {} must be called while in a nested state",
fn_name
);
}
#[inline]
fn assert_not_nested(&self, msg: &'static str) {
debug_assert!(!self.nested, "{}", msg);
}
#[inline]
fn assert_finished(&self, msg: &'static str) {
debug_assert!(self.finished, "{}", msg);
}
#[inline]
fn assert_not_finished(&self, msg: &'static str) {
debug_assert!(!self.finished, "{}", msg);
}
}
/// Computes how many bytes the vtable for the given fields will occupy: the
/// two-voffset header plus one slot per field up to the highest used id.
#[inline]
fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
let max_voffset = field_locs.iter().map(|fl| fl.id).max();
match max_voffset {
// An empty vtable still carries its header (vtable length + object size).
None => field_index_to_field_offset(0) as usize,
Some(mv) => mv as usize + SIZE_VOFFSET,
}
}
/// Returns how many zero bytes must be added to a buffer of `buf_size` bytes
/// so that its size becomes a multiple of `scalar_size` (a power of two).
#[inline]
fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {
(!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))
}
impl<'fbb> Default for FlatBufferBuilder<'fbb> {
fn default() -> Self {
Self::with_capacity(0)
}
}
/// An index that counts backwards from the end of a buffer; natural for
/// FlatBuffers, which are written back to front. `ReverseIndex::end()` is
/// distance zero, i.e. the end of the buffer.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct ReverseIndex(usize);
impl ReverseIndex {
/// Returns an index to the end of the buffer (distance zero).
pub fn end() -> Self {
Self(0)
}
/// Returns the range from `self` to the end of the buffer.
pub fn range_to_end(self) -> ReverseIndexRange {
ReverseIndexRange(self, ReverseIndex::end())
}
/// Returns the range from `self` to `end`.
pub fn range_to(self, end: ReverseIndex) -> ReverseIndexRange {
ReverseIndexRange(self, end)
}
/// Converts this reverse index into a regular, front-of-buffer index.
pub fn to_forward_index<T>(self, buf: &[T]) -> usize {
buf.len() - self.0
}
/// Distance from this index to the end of the buffer, in elements.
pub fn distance_to_end(&self) -> usize {
self.0
}
}
// Note: the arithmetic is inverted because the index measures distance from
// the buffer's end: `Sub` moves towards the front of the buffer (a larger
// distance) and `Add` moves towards the end.
impl Sub<usize> for ReverseIndex {
type Output = Self;
fn sub(self, rhs: usize) -> Self::Output {
Self(self.0 + rhs)
}
}
impl SubAssign<usize> for ReverseIndex {
fn sub_assign(&mut self, rhs: usize) {
*self = *self - rhs;
}
}
impl Add<usize> for ReverseIndex {
type Output = Self;
fn add(self, rhs: usize) -> Self::Output {
Self(self.0 - rhs)
}
}
impl AddAssign<usize> for ReverseIndex {
fn add_assign(&mut self, rhs: usize) {
*self = *self + rhs;
}
}
impl<T> Index<ReverseIndex> for [T] {
type Output = T;
fn index(&self, index: ReverseIndex) -> &Self::Output {
let index = index.to_forward_index(self);
&self[index]
}
}
impl<T> IndexMut<ReverseIndex> for [T] {
fn index_mut(&mut self, index: ReverseIndex) -> &mut Self::Output {
let index = index.to_forward_index(self);
&mut self[index]
}
}
/// A range between two [`ReverseIndex`]es, still expressed relative to the
/// end of the buffer.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct ReverseIndexRange(ReverseIndex, ReverseIndex);
impl<T> Index<ReverseIndexRange> for [T] {
type Output = [T];
fn index(&self, index: ReverseIndexRange) -> &Self::Output {
let start = index.0.to_forward_index(self);
let end = index.1.to_forward_index(self);
&self[start..end]
}
}
impl<T> IndexMut<ReverseIndexRange> for [T] {
fn index_mut(&mut self, index: ReverseIndexRange) -> &mut Self::Output {
let start = index.0.to_forward_index(self);
let end = index.1.to_forward_index(self);
&mut self[start..end]
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn reverse_index_test() {
let buf = [0, 1, 2, 3, 4, 5];
let idx = ReverseIndex::end() - 2;
assert_eq!(&buf[idx.range_to_end()], &[4, 5]);
assert_eq!(&buf[idx.range_to(idx + 1)], &[4]);
assert_eq!(idx.to_forward_index(&buf), 4);
}
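// The tests below are small, self-contained sketches of the pieces defined
// above: the default allocator's growth contract, the padding helper, and
// driving the builder through `new_in`/`collapse_in` with an explicit
// allocator value.
#[test]
fn default_allocator_grow_downwards_preserves_tail() {
// Doubling must move the old contents to the tail of the new buffer and
// zero the newly exposed bytes at the front.
let mut a = DefaultAllocator::from_vec([1u8, 2, 3, 4].to_vec());
a.grow_downwards().unwrap();
let bytes: &[u8] = &a;
assert_eq!(bytes.len(), 8);
assert_eq!(&bytes[..4], &[0, 0, 0, 0]);
assert_eq!(&bytes[4..], &[1, 2, 3, 4]);
}
#[test]
fn padding_bytes_rounds_up_to_alignment() {
// `padding_bytes(n, a)` is how many bytes are needed to round `n` up to a
// multiple of the power-of-two alignment `a`.
assert_eq!(padding_bytes(0, 4), 0);
assert_eq!(padding_bytes(1, 4), 3);
assert_eq!(padding_bytes(4, 4), 0);
assert_eq!(padding_bytes(5, 8), 3);
}
#[test]
fn builder_round_trips_through_new_in_and_collapse_in() {
// `DefaultAllocator` stands in here for a user-provided `Allocator`.
let allocator = DefaultAllocator::from_vec([0u8; 64].to_vec());
let mut builder = FlatBufferBuilder::new_in(allocator);
let root = builder.create_string("hello");
builder.finish_minimal(root);
let used = builder.finished_data().len();
let (allocator, index) = builder.collapse_in();
// `index` is the forward position where the finished data begins.
assert_eq!(allocator.len() - index, used);
}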
}