extern crate smallvec;
use std::cmp::max;
use std::iter::{DoubleEndedIterator, ExactSizeIterator};
use std::marker::PhantomData;
use std::ptr::write_bytes;
use std::slice::from_raw_parts;
use crate::endian_scalar::{emplace_scalar, read_scalar_at};
use crate::primitives::*;
use crate::push::{Push, PushAlignment};
use crate::table::Table;
use crate::vector::{SafeSliceAccess, Vector};
use crate::vtable::{field_index_to_field_offset, VTable};
use crate::vtable_writer::VTableWriter;
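/// Inline capacity of the scratch SmallVec that `create_vector_of_strings`
/// uses for string offsets before spilling to the heap.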
pub const N_SMALLVEC_STRING_VECTOR_CAPACITY: usize = 16;
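/// Location of a serialized table field: `off` is where the value was written
/// (measured from the end of the buffer) and `id` is the field's byte offset
/// within the table's vtable.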
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct FieldLoc {
off: UOffsetT,
id: VOffsetT,
}
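/// Helper type for constructing a FlatBuffer.
///
/// The builder owns a single `Vec<u8>` and writes data back-to-front: `head`
/// is the index of the first used byte, so the message under construction
/// occupies `owned_buf[head..]`.
///
/// Generated code normally drives the builder, but it can also be used
/// directly. A minimal sketch (assuming the crate re-exports
/// `FlatBufferBuilder` at its root, as upstream `flatbuffers` does):
///
/// ```ignore
/// let mut builder = flatbuffers::FlatBufferBuilder::new();
/// let name = builder.create_string("hello");
/// let _nums = builder.create_vector(&[1u8, 2, 3]);
/// // Generated table builders would call start_table/push_slot/end_table here.
/// builder.finish_minimal(name);
/// let bytes: &[u8] = builder.finished_data();
/// assert!(!bytes.is_empty());
/// ```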
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FlatBufferBuilder<'fbb> {
owned_buf: Vec<u8>,
head: usize,
field_locs: Vec<FieldLoc>,
written_vtable_revpos: Vec<UOffsetT>,
nested: bool,
finished: bool,
min_align: usize,
force_defaults: bool,
strings_pool: Vec<WIPOffset<&'fbb str>>,
_phantom: PhantomData<&'fbb ()>,
}
impl<'fbb> FlatBufferBuilder<'fbb> {
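/// Create a `FlatBufferBuilder` that is ready for writing.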
pub fn new() -> Self {
Self::new_with_capacity(0)
}
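/// Create a `FlatBufferBuilder` whose internal buffer starts at `size` bytes;
/// the buffer still grows on demand. Panics if `size` exceeds
/// `FLATBUFFERS_MAX_BUFFER_SIZE`.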
pub fn new_with_capacity(size: usize) -> Self {
assert!(
size <= FLATBUFFERS_MAX_BUFFER_SIZE,
"cannot initialize buffer bigger than 2 gigabytes"
);
FlatBufferBuilder {
owned_buf: vec![0u8; size],
head: size,
field_locs: Vec::new(),
written_vtable_revpos: Vec::new(),
nested: false,
finished: false,
min_align: 0,
force_defaults: false,
strings_pool: Vec::new(),
_phantom: PhantomData,
}
}
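/// Reset the builder for re-use, zeroing the previously written bytes but
/// keeping the existing allocation.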
pub fn reset(&mut self) {
{
let to_clear = self.owned_buf.len() - self.head;
let ptr = (&mut self.owned_buf[self.head..]).as_mut_ptr();
unsafe {
write_bytes(ptr, 0, to_clear);
}
}
self.head = self.owned_buf.len();
self.written_vtable_revpos.clear();
self.nested = false;
self.finished = false;
self.min_align = 0;
self.strings_pool.clear();
}
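/// Destroy the builder, returning its internal byte buffer and the index
/// into it at which the written data begins.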
pub fn collapse(self) -> (Vec<u8>, usize) {
(self.owned_buf, self.head)
}
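/// Push a `Push`-able value onto the front of the in-progress data.
///
/// This single entry point handles scalars, structs, and offsets: it aligns
/// the buffer, reserves space, writes the value, and returns its location as
/// a `WIPOffset` measured from the end of the buffer.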
#[inline]
pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
let sz = P::size();
self.align(sz, P::alignment());
self.make_space(sz);
{
let (dst, rest) = (&mut self.owned_buf[self.head..]).split_at_mut(sz);
x.push(dst, rest);
}
WIPOffset::new(self.used_space() as UOffsetT)
}
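/// Push a value for a table field, skipping the write when it equals
/// `default` (unless `force_defaults` is set). Must be called while a table
/// is under construction.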
#[inline]
pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
self.assert_nested("push_slot");
if x != default || self.force_defaults {
self.push_slot_always(slotoff, x);
}
}
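/// Push a value for a table field and record its vtable slot, regardless of
/// whether it equals the default.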
#[inline]
pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
self.assert_nested("push_slot_always");
let off = self.push(x);
self.track_field(slotoff, off.value());
}
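/// The number of distinct vtables serialized into the buffer so far.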
#[inline]
pub fn num_written_vtables(&self) -> usize {
self.written_vtable_revpos.len()
}
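/// Begin a table under construction. Fields are added with `push_slot` /
/// `push_slot_always` and the table is completed with `end_table`.
///
/// A sketch of the low-level flow (the vtable slot offsets below are
/// illustrative, not taken from any real schema):
///
/// ```ignore
/// let start = builder.start_table();
/// builder.push_slot_always(4, 42u32); // hypothetical field in the first vtable slot
/// builder.push_slot(6, 0u8, 0u8);     // equals its default, so it is skipped
/// let table = builder.end_table(start);
/// builder.finish_minimal(table);
/// ```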
#[inline]
pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
self.assert_not_nested(
"start_table can not be called when a table or vector is under construction",
);
self.nested = true;
WIPOffset::new(self.used_space() as UOffsetT)
}
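/// Finish the table under construction, writing (or re-using) its vtable and
/// clearing the recorded field locations. Returns the finished table's offset.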
#[inline]
pub fn end_table(
&mut self,
off: WIPOffset<TableUnfinishedWIPOffset>,
) -> WIPOffset<TableFinishedWIPOffset> {
self.assert_nested("end_table");
let o = self.write_vtable(off);
self.nested = false;
self.field_locs.clear();
WIPOffset::new(o.value())
}
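/// Begin a vector of `num_items` elements of type `T`, aligning the buffer
/// for both the elements and the length prefix. The elements are then pushed
/// individually (in reverse order) and the vector is completed with
/// `end_vector`.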
#[inline]
pub fn start_vector<T: Push>(&mut self, num_items: usize) {
self.assert_not_nested(
"start_vector can not be called when a table or vector is under construction",
);
self.nested = true;
self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
}
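/// Finish the vector under construction by writing its length prefix,
/// returning the vector's offset. The caller must have pushed exactly
/// `num_elems` elements since `start_vector`.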
#[inline]
pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
self.assert_nested("end_vector");
self.nested = false;
let o = self.push::<UOffsetT>(num_elems as UOffsetT);
WIPOffset::new(o.value())
}
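/// Create a string in the buffer, de-duplicating it against every string
/// previously created with this method. The pool is kept sorted by string
/// content, so an identical string returns the existing offset instead of
/// writing new data.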
#[inline]
pub fn create_shared_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
self.assert_not_nested(
"create_shared_string can not be called when a table or vector is under construction",
);
let buf = &self.owned_buf;
let found = self.strings_pool.binary_search_by(|offset| {
let ptr = offset.value() as usize;
let str_memory = &buf[buf.len() - ptr..];
let size =
u32::from_le_bytes([str_memory[0], str_memory[1], str_memory[2], str_memory[3]])
as usize;
let string_size: usize = 4;
let iter = str_memory[string_size..size + string_size].iter();
iter.cloned().cmp(s.bytes())
});
match found {
Ok(index) => self.strings_pool[index],
Err(index) => {
let address = WIPOffset::new(self.create_byte_string(s.as_bytes()).value());
self.strings_pool.insert(index, address);
address
}
}
}
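/// Create a UTF-8 string in the buffer (length-prefixed and null-terminated)
/// and return its offset.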
#[inline]
pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
self.assert_not_nested(
"create_string can not be called when a table or vector is under construction",
);
WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
}
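/// Create a length-prefixed, null-terminated byte string in the buffer.
/// Because data is written back-to-front, the terminator is pushed first,
/// then the bytes, then the length.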
#[inline]
pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
self.assert_not_nested(
"create_byte_string can not be called when a table or vector is under construction",
);
self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
self.push(0u8);
self.push_bytes_unprefixed(data);
self.push(data.len() as UOffsetT);
WIPOffset::new(self.used_space() as UOffsetT)
}
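/// Create a vector by copying a slice in one memcpy. This is only available
/// for element types whose in-memory layout matches the FlatBuffers wire
/// format, as marked by `SafeSliceAccess`.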
#[inline]
pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(
&'a mut self,
items: &'b [T],
) -> WIPOffset<Vector<'fbb, T>> {
self.assert_not_nested(
"create_vector_direct can not be called when a table or vector is under construction",
);
let elem_size = T::size();
self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
let bytes = {
let ptr = items.as_ptr() as *const T as *const u8;
unsafe { from_raw_parts(ptr, items.len() * elem_size) }
};
self.push_bytes_unprefixed(bytes);
self.push(items.len() as UOffsetT);
WIPOffset::new(self.used_space() as UOffsetT)
}
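/// Create a vector of strings. The strings are created first, in reverse, so
/// their bytes land in the buffer in element order; their offsets are then
/// written as a vector. Offsets are staged in a SmallVec to avoid a heap
/// allocation for small inputs.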
#[inline]
pub fn create_vector_of_strings<'a, 'b>(
&'a mut self,
xs: &'b [&'b str],
) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
self.assert_not_nested("create_vector_of_strings can not be called when a table or vector is under construction");
let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> =
smallvec::SmallVec::with_capacity(xs.len());
unsafe {
offsets.set_len(xs.len());
}
for (i, &s) in xs.iter().enumerate().rev() {
let o = self.create_string(s);
offsets[i] = o;
}
self.create_vector(&offsets[..])
}
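/// Create a vector from a slice of `Push`-able values, writing the elements
/// in reverse followed by the length prefix.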
#[inline]
pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(
&'a mut self,
items: &'b [T],
) -> WIPOffset<Vector<'fbb, T::Output>> {
let elem_size = T::size();
self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
for i in (0..items.len()).rev() {
self.push(items[i]);
}
WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
}
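/// Create a vector from an iterator of known length, writing the elements in
/// reverse followed by the length prefix.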
#[inline]
pub fn create_vector_from_iter<T: Push + Copy>(
&mut self,
items: impl ExactSizeIterator<Item = T> + DoubleEndedIterator,
) -> WIPOffset<Vector<'fbb, T::Output>> {
let elem_size = T::size();
let len = items.len();
self.align(len * elem_size, T::alignment().max_of(SIZE_UOFFSET));
for item in items.rev() {
self.push(item);
}
WIPOffset::new(self.push::<UOffsetT>(len as UOffsetT).value())
}
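/// When enabled, `push_slot` writes fields even when they equal their
/// defaults. This makes the buffer larger, but readers no longer have to
/// fall back to default values for those fields.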
#[inline]
pub fn force_defaults(&mut self, force_defaults: bool) {
self.force_defaults = force_defaults;
}
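/// View the current buffer contents, finished or not. Primarily intended for
/// debugging.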
#[inline]
pub fn unfinished_data(&self) -> &[u8] {
&self.owned_buf[self.head..]
}
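/// View the finished buffer. In debug builds this panics if `finish` has not
/// been called.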
#[inline]
pub fn finished_data(&self) -> &[u8] {
self.assert_finished("finished_bytes cannot be called when the buffer is not yet finished");
&self.owned_buf[self.head..]
}
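/// Assert that a required field was set on the just-finished table,
/// panicking with `assert_msg_name` if its vtable slot is zero.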
#[inline]
pub fn required(
&self,
tab_revloc: WIPOffset<TableFinishedWIPOffset>,
slot_byte_loc: VOffsetT,
assert_msg_name: &'static str,
) {
let idx = self.used_space() - tab_revloc.value() as usize;
let tab = Table::new(&self.owned_buf[self.head..], idx);
let o = tab.vtable().get(slot_byte_loc) as usize;
assert!(o != 0, "missing required field {}", assert_msg_name);
}
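/// Finish the buffer with `root` as the root table, prefixing the data with
/// its total size and optionally embedding a 4-byte file identifier.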
#[inline]
pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
self.finish_with_opts(root, file_identifier, true);
}
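/// Finish the buffer with `root` as the root table and an optional 4-byte
/// file identifier, without a size prefix.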
#[inline]
pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
self.finish_with_opts(root, file_identifier, false);
}
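/// Finish the buffer with `root` as the root table, with no size prefix and
/// no file identifier.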
#[inline]
pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
self.finish_with_opts(root, None, false);
}
#[inline]
fn used_space(&self) -> usize {
self.owned_buf.len() - self.head
}
#[inline]
fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
let fl = FieldLoc { id: slot_off, off };
self.field_locs.push(fl);
}
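/// Write the vtable for the table that just ended: push a placeholder
/// SOffsetT, serialize the vtable from `field_locs`, de-duplicate it against
/// previously written vtables, then patch the placeholder to point at the
/// vtable actually used.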
fn write_vtable(
&mut self,
table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
) -> WIPOffset<VTableWIPOffset> {
self.assert_nested("write_vtable");
let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0).value());
let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
self.make_space(vtable_byte_len);
let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
debug_assert!(table_object_size < 0x10000);
let vt_start_pos = self.head;
let vt_end_pos = self.head + vtable_byte_len;
{
let vtfw = &mut VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]);
vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
vtfw.write_object_inline_size(table_object_size as VOffsetT);
for &fl in self.field_locs.iter() {
let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
debug_assert_eq!(
vtfw.get_field_offset(fl.id),
0,
"tried to write a vtable field multiple times"
);
vtfw.write_field_offset(fl.id, pos);
}
}
let dup_vt_use = {
let this_vt = VTable::init(&self.owned_buf[..], self.head);
self.find_duplicate_stored_vtable_revloc(this_vt)
};
let vt_use = match dup_vt_use {
Some(n) => {
VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]).clear();
self.head += vtable_byte_len;
n
}
None => {
let new_vt_use = self.used_space() as UOffsetT;
self.written_vtable_revpos.push(new_vt_use);
new_vt_use
}
};
{
let n = self.head + self.used_space() - object_revloc_to_vtable.value() as usize;
let saw = read_scalar_at::<UOffsetT>(&self.owned_buf, n);
debug_assert_eq!(saw, 0xF0F0_F0F0);
emplace_scalar::<SOffsetT>(
&mut self.owned_buf[n..n + SIZE_SOFFSET],
vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
);
}
self.field_locs.clear();
object_revloc_to_vtable
}
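/// Scan previously written vtables (newest first) for one identical to
/// `needle`, returning its reverse location if found.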
#[inline]
fn find_duplicate_stored_vtable_revloc(&self, needle: VTable) -> Option<UOffsetT> {
for &revloc in self.written_vtable_revpos.iter().rev() {
let o = VTable::init(
&self.owned_buf[..],
self.head + self.used_space() - revloc as usize,
);
if needle == o {
return Some(revloc);
}
}
None
}
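/// Double the buffer (growing an empty buffer to one byte), move the existing
/// data into the back half, and zero the front half, since data is written
/// back-to-front.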
#[inline]
fn grow_owned_buf(&mut self) {
let old_len = self.owned_buf.len();
let new_len = max(1, old_len * 2);
let starting_active_size = self.used_space();
let diff = new_len - old_len;
self.owned_buf.resize(new_len, 0);
self.head += diff;
let ending_active_size = self.used_space();
debug_assert_eq!(starting_active_size, ending_active_size);
if new_len == 1 {
return;
}
let middle = new_len / 2;
{
let (left, right) = &mut self.owned_buf[..].split_at_mut(middle);
right.copy_from_slice(left);
}
{
let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
unsafe {
write_bytes(ptr, 0, middle);
}
}
}
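/// Shared implementation of the `finish*` methods: align for the root offset
/// (plus the optional size prefix and file identifier), write the identifier
/// and the root offset, then the size prefix if requested.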
fn finish_with_opts<T>(
&mut self,
root: WIPOffset<T>,
file_identifier: Option<&str>,
size_prefixed: bool,
) {
self.assert_not_finished("buffer cannot be finished when it is already finished");
self.assert_not_nested(
"buffer cannot be finished when a table or vector is under construction",
);
self.written_vtable_revpos.clear();
let to_align = {
let a = SIZE_UOFFSET;
let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
let c = if file_identifier.is_some() {
FILE_IDENTIFIER_LENGTH
} else {
0
};
a + b + c
};
{
let ma = PushAlignment::new(self.min_align);
self.align(to_align, ma);
}
if let Some(ident) = file_identifier {
debug_assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
self.push_bytes_unprefixed(ident.as_bytes());
}
self.push(root);
if size_prefixed {
let sz = self.used_space() as UOffsetT;
self.push::<UOffsetT>(sz);
}
self.finished = true;
}
#[inline]
fn align(&mut self, len: usize, alignment: PushAlignment) {
self.track_min_align(alignment.value());
let s = self.used_space();
self.make_space(padding_bytes(s + len, alignment.value()));
}
#[inline]
fn track_min_align(&mut self, alignment: usize) {
self.min_align = max(self.min_align, alignment);
}
#[inline]
fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
let n = self.make_space(x.len());
self.owned_buf[n..n + x.len()].copy_from_slice(x);
n as UOffsetT
}
#[inline]
fn make_space(&mut self, want: usize) -> usize {
self.ensure_capacity(want);
self.head -= want;
self.head
}
#[inline]
fn ensure_capacity(&mut self, want: usize) -> usize {
if self.unused_ready_space() >= want {
return want;
}
assert!(
want <= FLATBUFFERS_MAX_BUFFER_SIZE,
"cannot grow buffer beyond 2 gigabytes"
);
while self.unused_ready_space() < want {
self.grow_owned_buf();
}
want
}
#[inline]
fn unused_ready_space(&self) -> usize {
self.head
}
#[inline]
fn assert_nested(&self, fn_name: &'static str) {
debug_assert!(
self.nested,
"incorrect FlatBufferBuilder usage: {} must be called while in a nested state",
fn_name
);
}
#[inline]
fn assert_not_nested(&self, msg: &'static str) {
debug_assert!(!self.nested, "{}", msg);
}
#[inline]
fn assert_finished(&self, msg: &'static str) {
debug_assert!(self.finished, "{}", msg);
}
#[inline]
fn assert_not_finished(&self, msg: &'static str) {
debug_assert!(!self.finished, "{}", msg);
}
}
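/// Compute the serialized size of a vtable covering `field_locs`: just the
/// two metadata fields when no fields were written, otherwise everything up
/// to and including the highest slot offset used.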
#[inline]
fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
let max_voffset = field_locs.iter().map(|fl| fl.id).max();
match max_voffset {
None => field_index_to_field_offset(0) as usize,
Some(mv) => mv as usize + SIZE_VOFFSET,
}
}
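/// Number of padding bytes needed to round `buf_size` up to a multiple of
/// `scalar_size` (which must be a power of two).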
#[inline]
fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {
(!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))
}
impl<'fbb> Default for FlatBufferBuilder<'fbb> {
fn default() -> Self {
Self::new_with_capacity(0)
}
}