#[cfg(not(feature = "std"))]
use alloc::{vec, vec::Vec};
use core::cmp::max;
use core::convert::Infallible;
use core::fmt::{Debug, Display};
use core::iter::{DoubleEndedIterator, ExactSizeIterator};
use core::marker::PhantomData;
use core::ops::{Add, AddAssign, Deref, DerefMut, Index, IndexMut, Sub, SubAssign};
use core::ptr::write_bytes;

use crate::endian_scalar::emplace_scalar;
use crate::primitives::*;
use crate::push::{Push, PushAlignment};
use crate::read_scalar;
use crate::table::Table;
use crate::vector::Vector;
use crate::vtable::{field_index_to_field_offset, VTable};
use crate::vtable_writer::VTableWriter;

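/// A trait implementing a custom allocation strategy for [`FlatBufferBuilder`].
/// An implementation can be supplied via [`FlatBufferBuilder::new_in`].
///
/// # Safety
///
/// The implementation must behave like a `DerefMut<Target = [u8]>` over one
/// contiguous byte buffer, and [`grow_downwards`](Allocator::grow_downwards)
/// must keep the old contents at the *end* of the grown buffer with the newly
/// added front bytes zeroed: the builder writes back-to-front and measures
/// every offset from the end of the buffer.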
pub unsafe trait Allocator: DerefMut<Target = [u8]> {
    /// The error type returned when growing the buffer fails.
    type Error: Display + Debug;

    /// Grows the buffer, moving the old contents to the end so that
    /// distances from the end of the buffer (where the builder writes)
    /// remain valid.
    fn grow_downwards(&mut self) -> Result<(), Self::Error>;

    /// Returns the size of the internal buffer in bytes.
    fn len(&self) -> usize;
}

/// A heap-backed [`Allocator`] built on `Vec<u8>`. This is the default.
#[derive(Default)]
pub struct DefaultAllocator(Vec<u8>);

impl DefaultAllocator {
    /// Builds the allocator from an existing vector.
    pub fn from_vec(buffer: Vec<u8>) -> Self {
        Self(buffer)
    }
}

impl Deref for DefaultAllocator {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for DefaultAllocator {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

unsafe impl Allocator for DefaultAllocator {
    type Error = Infallible;

    fn grow_downwards(&mut self) -> Result<(), Self::Error> {
        let old_len = self.0.len();
        let new_len = max(1, old_len * 2);

        self.0.resize(new_len, 0);

        if new_len == 1 {
            return Ok(());
        }

        // Move the old data to the end of the grown buffer: the builder
        // writes back-to-front, so used bytes must stay flush with the tail.
        let middle = new_len / 2;
        {
            let (left, right) = self.0.split_at_mut(middle);
            right.copy_from_slice(left);
        }
        // Zero out the front half, which is now unused space.
        {
            let ptr = self.0[..middle].as_mut_ptr();
            // SAFETY: `ptr` points to `middle` valid, initialized bytes.
            unsafe {
                write_bytes(ptr, 0, middle);
            }
        }
        Ok(())
    }

    fn len(&self) -> usize {
        self.0.len()
    }
}

/// The location of one field while its table is under construction: `off` is
/// the field's position as a distance from the buffer's end, and `id` is its
/// vtable slot.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct FieldLoc {
    off: UOffsetT,
    id: VOffsetT,
}

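/// FlatBufferBuilder builds a FlatBuffer through manipulating its internal
/// state. It writes data back-to-front into an internal buffer (a `Vec<u8>`
/// under the [`DefaultAllocator`]), growing as needed up to the 2 GiB limit
/// imposed by the FlatBuffers format.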
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FlatBufferBuilder<'fbb, A: Allocator = DefaultAllocator> {
    allocator: A,
    head: ReverseIndex,

    field_locs: Vec<FieldLoc>,
    written_vtable_revpos: Vec<UOffsetT>,

    nested: bool,
    finished: bool,

    min_align: usize,
    force_defaults: bool,
    strings_pool: Vec<WIPOffset<&'fbb str>>,

    _phantom: PhantomData<&'fbb ()>,
}

impl<'fbb> FlatBufferBuilder<'fbb, DefaultAllocator> {
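    /// Create a [`FlatBufferBuilder`] that is ready for writing.
    ///
    /// A minimal end-to-end sketch:
    ///
    /// ```
    /// let mut builder = flatbuffers::FlatBufferBuilder::new();
    /// let hello = builder.create_string("hello");
    /// builder.finish_minimal(hello);
    /// assert!(!builder.finished_data().is_empty());
    /// ```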
    pub fn new() -> Self {
        Self::with_capacity(0)
    }

    #[deprecated(note = "replaced with `with_capacity`", since = "0.8.5")]
    pub fn new_with_capacity(size: usize) -> Self {
        Self::with_capacity(size)
    }

    /// Create a [`FlatBufferBuilder`] that is ready for writing, with a
    /// ready-to-use capacity of the provided size.
    pub fn with_capacity(size: usize) -> Self {
        Self::from_vec(vec![0; size])
    }

    /// Create a [`FlatBufferBuilder`] that is ready for writing, reusing an
    /// existing vector.
    pub fn from_vec(buffer: Vec<u8>) -> Self {
        // Offsets are u32, so the buffer must stay within the format's limit.
        assert!(
            buffer.len() <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot initialize buffer bigger than 2 gigabytes"
        );
        let allocator = DefaultAllocator::from_vec(buffer);
        Self::new_in(allocator)
    }

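    /// Destroy the [`FlatBufferBuilder`], returning its internal byte vector
    /// and the index into it that represents the start of valid data.
    ///
    /// A sketch of recovering the finished bytes from the collapsed parts:
    ///
    /// ```
    /// let mut builder = flatbuffers::FlatBufferBuilder::new();
    /// let root = builder.create_string("abc");
    /// builder.finish_minimal(root);
    ///
    /// let (buf, index) = builder.collapse();
    /// // The serialized data occupies the tail of the vector.
    /// let data = &buf[index..];
    /// assert!(!data.is_empty());
    /// ```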
    pub fn collapse(self) -> (Vec<u8>, usize) {
        let index = self.head.to_forward_index(&self.allocator);
        (self.allocator.0, index)
    }
}

impl<'fbb, A: Allocator> FlatBufferBuilder<'fbb, A> {
    /// Create a [`FlatBufferBuilder`] that is ready for writing with a custom [`Allocator`].
    pub fn new_in(allocator: A) -> Self {
        let head = ReverseIndex::end();
        FlatBufferBuilder {
            allocator,
            head,

            field_locs: Vec::new(),
            written_vtable_revpos: Vec::new(),

            nested: false,
            finished: false,

            min_align: 0,
            force_defaults: false,
            strings_pool: Vec::new(),

            _phantom: PhantomData,
        }
    }

    /// Destroy the [`FlatBufferBuilder`], returning its [`Allocator`] and the
    /// index into it that represents the start of valid data.
    pub fn collapse_in(self) -> (A, usize) {
        let index = self.head.to_forward_index(&self.allocator);
        (self.allocator, index)
    }

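    /// Reset the [`FlatBufferBuilder`] internal state. Use this method to
    /// re-use an existing builder, keeping its allocation instead of making
    /// a new one.
    ///
    /// A sketch of re-using one builder for two buffers:
    ///
    /// ```
    /// let mut builder = flatbuffers::FlatBufferBuilder::new();
    /// let first = builder.create_string("first");
    /// builder.finish_minimal(first);
    ///
    /// builder.reset(); // the used bytes are zeroed and the buffer is kept
    /// let second = builder.create_string("second");
    /// builder.finish_minimal(second);
    /// ```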
    pub fn reset(&mut self) {
        // Zero out the bytes used so far, but keep the allocation.
        self.allocator[self.head.range_to_end()]
            .iter_mut()
            .for_each(|x| *x = 0);

        self.head = ReverseIndex::end();
        self.written_vtable_revpos.clear();

        self.nested = false;
        self.finished = false;

        self.min_align = 0;
        self.strings_pool.clear();
    }

    /// Push a `Push`'able value onto the front of the in-progress data.
    ///
    /// This function uses traits to provide a unified API for writing
    /// scalars, tables, vectors, and WIPOffsets.
    #[inline]
    pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
        let sz = P::size();
        self.align(sz, P::alignment());
        self.make_space(sz);
        {
            let (dst, rest) = self.allocator[self.head.range_to_end()].split_at_mut(sz);
            // SAFETY: `dst` is exactly `P::size()` bytes wide.
            unsafe { x.push(dst, rest.len()) };
        }
        WIPOffset::new(self.used_space() as UOffsetT)
    }

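    /// Push a `Push`'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable. If the value
    /// matches the default, this is a no-op (unless `force_defaults` is set).
    ///
    /// A hand-rolled sketch, assuming a table whose first field is a `u8`
    /// (field index 0 lives at voffset 4):
    ///
    /// ```
    /// let mut builder = flatbuffers::FlatBufferBuilder::new();
    /// let start = builder.start_table();
    /// builder.push_slot(4, 42u8, 0u8); // written: 42 differs from the default
    /// builder.push_slot(6, 0u8, 0u8);  // skipped: equal to the default
    /// let table = builder.end_table(start);
    /// builder.finish_minimal(table);
    /// ```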
    #[inline]
    pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
        self.assert_nested("push_slot");
        if x != default || self.force_defaults {
            self.push_slot_always(slotoff, x);
        }
    }

    /// Push a `Push`'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable, even if the value
    /// equals the default.
    #[inline]
    pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
        self.assert_nested("push_slot_always");
        let off = self.push(x);
        self.track_field(slotoff, off.value());
    }

    /// Retrieve the number of vtables that have been serialized into the
    /// FlatBuffer. Mostly useful for checking vtable deduplication.
    #[inline]
    pub fn num_written_vtables(&self) -> usize {
        self.written_vtable_revpos.len()
    }

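    /// Start a Table write. Must be paired with [`end_table`](Self::end_table).
    ///
    /// Generated code normally drives this sequence; a hand-rolled sketch:
    ///
    /// ```
    /// let mut builder = flatbuffers::FlatBufferBuilder::new();
    /// let start = builder.start_table();
    /// builder.push_slot_always(4, 123u32); // field index 0 lives at voffset 4
    /// let table = builder.end_table(start);
    /// builder.finish_minimal(table);
    /// ```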
    #[inline]
    pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
        self.assert_not_nested(
            "start_table can not be called when a table or vector is under construction",
        );
        self.nested = true;

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// End a Table write. Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_table(
        &mut self,
        off: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<TableFinishedWIPOffset> {
        self.assert_nested("end_table");

        let o = self.write_vtable(off);

        self.nested = false;
        self.field_locs.clear();

        WIPOffset::new(o.value())
    }

    /// Begin a Vector write. Must be paired with
    /// [`end_vector`](Self::end_vector). Most users will want the
    /// `create_vector` helpers instead.
    #[inline]
    pub fn start_vector<T: Push>(&mut self, num_items: usize) {
        self.assert_not_nested(
            "start_vector can not be called when a table or vector is under construction",
        );
        self.nested = true;
        self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
    }

    /// End a Vector write: pushes the element count as a length prefix and
    /// returns the offset of the finished vector.
    #[inline]
    pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_nested("end_vector");
        self.nested = false;
        let o = self.push::<UOffsetT>(num_elems as UOffsetT);
        WIPOffset::new(o.value())
    }

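    /// Create a utf8 string, deduplicated against any identical string
    /// written earlier via this method. The wire format represents this as a
    /// zero-terminated byte vector.
    ///
    /// A sketch showing that repeated strings share one offset:
    ///
    /// ```
    /// let mut builder = flatbuffers::FlatBufferBuilder::new();
    /// let a = builder.create_shared_string("twice");
    /// let b = builder.create_shared_string("twice");
    /// assert_eq!(a.value(), b.value()); // the second call re-used the first string
    /// ```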
    #[inline]
    pub fn create_shared_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_shared_string can not be called when a table or vector is under construction",
        );

        // Borrow the buffer separately so the closure below can read it
        // while `strings_pool` is being searched.
        let buf = &self.allocator;

        let found = self.strings_pool.binary_search_by(|offset| {
            let ptr = offset.value() as usize;
            // A stored string is a little-endian u32 length prefix followed
            // by the string bytes themselves.
            let str_memory = &buf[buf.len() - ptr..];
            let size =
                u32::from_le_bytes([str_memory[0], str_memory[1], str_memory[2], str_memory[3]])
                    as usize;
            let string_size: usize = 4;
            let iter = str_memory[string_size..size + string_size].iter();
            iter.cloned().cmp(s.bytes())
        });

        match found {
            Ok(index) => self.strings_pool[index],
            Err(index) => {
                let address = WIPOffset::new(self.create_byte_string(s.as_bytes()).value());
                self.strings_pool.insert(index, address);
                address
            }
        }
    }

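    /// Create a utf8 string. The wire format represents this as a
    /// zero-terminated byte vector.
    ///
    /// ```
    /// let mut builder = flatbuffers::FlatBufferBuilder::new();
    /// let name = builder.create_string("FlatBuffers");
    /// builder.finish_minimal(name);
    /// ```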
    #[inline]
    pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_string can not be called when a table or vector is under construction",
        );
        WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
    }

    /// Create a zero-terminated byte vector.
    #[inline]
    pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
        self.assert_not_nested(
            "create_byte_string can not be called when a table or vector is under construction",
        );
        self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
        // The buffer is written back-to-front, so the null terminator goes in
        // first, then the data, then the length prefix.
        self.push(0u8);
        self.push_bytes_unprefixed(data);
        self.push(data.len() as UOffsetT);
        WIPOffset::new(self.used_space() as UOffsetT)
    }

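    /// Create a vector of `Push`-able objects from a slice. Elements are
    /// written back-to-front, then prefixed with their count as a `u32`.
    ///
    /// A sketch with scalar elements:
    ///
    /// ```
    /// let mut builder = flatbuffers::FlatBufferBuilder::new();
    /// let nums = builder.create_vector(&[1u32, 2, 3]);
    /// builder.finish_minimal(nums);
    /// ```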
    #[inline]
    pub fn create_vector<'a: 'b, 'b, T: Push + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        let slice_size = items.len() * elem_size;
        self.align(slice_size, T::alignment().max_of(SIZE_UOFFSET));
        self.ensure_capacity(slice_size + UOffsetT::size());

        self.head -= slice_size;
        let mut written_len = self.head.distance_to_end();

        let buf = &mut self.allocator[self.head.range_to(self.head + slice_size)];
        for (item, out) in items.iter().zip(buf.chunks_exact_mut(elem_size)) {
            written_len -= elem_size;

            // SAFETY: `out` is exactly `T::size()` bytes wide.
            unsafe { item.push(out, written_len) };
        }

        // Prefix the elements with their count.
        WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
    }

    /// Create a vector of `Push`-able objects from an iterator.
    #[inline]
    pub fn create_vector_from_iter<T: Push>(
        &mut self,
        items: impl ExactSizeIterator<Item = T> + DoubleEndedIterator,
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        let mut actual = 0;
        // The buffer grows front-ward, so elements are pushed in reverse.
        for item in items.rev() {
            self.push(item);
            actual += 1;
        }
        WIPOffset::new(self.push::<UOffsetT>(actual).value())
    }

    /// Set whether default values are stored.
    ///
    /// To save space, fields set to their default value are normally not
    /// stored in the buffer; setting `force_defaults` to `true` disables
    /// that optimization.
    #[inline]
    pub fn force_defaults(&mut self, force_defaults: bool) {
        self.force_defaults = force_defaults;
    }

    /// Get the byte slice for the data that has been written, regardless of
    /// whether it has been finished.
    #[inline]
    pub fn unfinished_data(&self) -> &[u8] {
        &self.allocator[self.head.range_to_end()]
    }

    /// Get the byte slice for the data that has been written after a call to
    /// one of the `finish` functions. Panics (in debug builds) if the buffer
    /// is not finished.
    #[inline]
    pub fn finished_data(&self) -> &[u8] {
        self.assert_finished("finished_data cannot be called when the buffer is not yet finished");
        &self.allocator[self.head.range_to_end()]
    }

    /// Returns a mutable view of the buffer and the location where the
    /// flatbuffer starts.
    #[inline]
    pub fn mut_finished_buffer(&mut self) -> (&mut [u8], usize) {
        let index = self.head.to_forward_index(&self.allocator);
        (&mut self.allocator[..], index)
    }

    /// Assert that a field is present in the just-finished Table.
    ///
    /// This is somewhat low-level and is mostly used by the generated code.
    #[inline]
    pub fn required(
        &self,
        tab_revloc: WIPOffset<TableFinishedWIPOffset>,
        slot_byte_loc: VOffsetT,
        assert_msg_name: &'static str,
    ) {
        let idx = self.used_space() - tab_revloc.value() as usize;

        // SAFETY: the data at `idx` was just written as a table by `end_table`.
        let tab = unsafe { Table::new(&self.allocator[self.head.range_to_end()], idx) };
        let o = tab.vtable().get(slot_byte_loc) as usize;
        assert!(o != 0, "missing required field {}", assert_msg_name);
    }

    /// Finish the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, pushing a size prefix on to it, and marking the
    /// internal state of the FlatBufferBuilder as `finished`. Afterwards,
    /// users can call `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, true);
    }

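    /// Finish the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, then pushing a root offset, and marking the
    /// internal state as `finished`.
    ///
    /// A sketch with a file identifier (the length is fixed by the format):
    ///
    /// ```
    /// let mut builder = flatbuffers::FlatBufferBuilder::new();
    /// let root = builder.create_string("payload");
    /// builder.finish(root, Some("TEST")); // identifiers are exactly 4 bytes
    ///
    /// let bytes = builder.finished_data();
    /// // Without a size prefix, the identifier sits right after the root offset.
    /// assert_eq!(&bytes[4..8], b"TEST");
    /// ```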
    #[inline]
    pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, false);
    }

    /// Finish the FlatBuffer by aligning it and marking the internal state as
    /// `finished`. No file identifier or size prefix is written.
    #[inline]
    pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
        self.finish_with_opts(root, None, false);
    }

    #[inline]
    fn used_space(&self) -> usize {
        self.head.distance_to_end()
    }

    #[inline]
    fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
        let fl = FieldLoc { id: slot_off, off };
        self.field_locs.push(fl);
    }

    /// Write the vtable for the table that has just been constructed,
    /// deduplicating it against previously written vtables.
    fn write_vtable(
        &mut self,
        table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<VTableWIPOffset> {
        self.assert_nested("write_vtable");

        // Push a placeholder for the table's soffset-to-vtable; it is patched
        // with the real value below.
        let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
            WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0).value());

        let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
        self.make_space(vtable_byte_len);

        let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
        // Object sizes are stored as `VOffsetT` (u16), so they must fit.
        debug_assert!(table_object_size < 0x10000);

        let vt_start_pos = self.head;
        let vt_end_pos = self.head + vtable_byte_len;
        {
            let vtfw =
                &mut VTableWriter::init(&mut self.allocator[vt_start_pos.range_to(vt_end_pos)]);
            vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
            vtfw.write_object_inline_size(table_object_size as VOffsetT);

            for &fl in self.field_locs.iter() {
                let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
                vtfw.write_field_offset(fl.id, pos);
            }
        }
        // Deduplicate: if an identical vtable was already written, reuse it.
        let new_vt_bytes = &self.allocator[vt_start_pos.range_to(vt_end_pos)];
        let found = self
            .written_vtable_revpos
            .binary_search_by(|old_vtable_revpos: &UOffsetT| {
                let old_vtable_pos = self.allocator.len() - *old_vtable_revpos as usize;
                // SAFETY: a valid vtable was previously written at this position.
                let old_vtable = unsafe { VTable::init(&self.allocator, old_vtable_pos) };
                new_vt_bytes.cmp(old_vtable.as_bytes())
            });
        let final_vtable_revpos = match found {
            Ok(i) => {
                // The new vtable is a duplicate: erase it and reuse the old one.
                VTableWriter::init(&mut self.allocator[vt_start_pos.range_to(vt_end_pos)]).clear();
                self.head += vtable_byte_len;
                self.written_vtable_revpos[i]
            }
            Err(i) => {
                // This vtable is new; keep it and remember its position.
                let new_vt_revpos = self.used_space() as UOffsetT;
                self.written_vtable_revpos.insert(i, new_vt_revpos);
                new_vt_revpos
            }
        };
        // Patch the placeholder with the signed offset from the table to its vtable.
        let table_pos = self.allocator.len() - object_revloc_to_vtable.value() as usize;
        if cfg!(debug_assertions) {
            // SAFETY: the placeholder was written at `table_pos` as a UOffsetT.
            let tmp_soffset_to_vt = unsafe {
                read_scalar::<UOffsetT>(&self.allocator[table_pos..table_pos + SIZE_UOFFSET])
            };
            assert_eq!(tmp_soffset_to_vt, 0xF0F0_F0F0);
        }

        let buf = &mut self.allocator[table_pos..table_pos + SIZE_SOFFSET];
        // SAFETY: `buf` is exactly `SIZE_SOFFSET` bytes wide.
        unsafe {
            emplace_scalar::<SOffsetT>(
                buf,
                final_vtable_revpos as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
            );
        }

        self.field_locs.clear();

        object_revloc_to_vtable
    }

    #[inline]
    fn grow_allocator(&mut self) {
        let starting_active_size = self.used_space();
        self.allocator
            .grow_downwards()
            .expect("Flatbuffer allocation failure");

        let ending_active_size = self.used_space();
        debug_assert_eq!(starting_active_size, ending_active_size);
    }

    fn finish_with_opts<T>(
        &mut self,
        root: WIPOffset<T>,
        file_identifier: Option<&str>,
        size_prefixed: bool,
    ) {
        self.assert_not_finished("buffer cannot be finished when it is already finished");
        self.assert_not_nested(
            "buffer cannot be finished when a table or vector is under construction",
        );
        self.written_vtable_revpos.clear();

        let to_align = {
            // for the root offset:
            let a = SIZE_UOFFSET;
            // for the size prefix:
            let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
            // for the file identifier (a missing identifier is not stored):
            let c = if file_identifier.is_some() {
                FILE_IDENTIFIER_LENGTH
            } else {
                0
            };
            a + b + c
        };

        {
            let ma = PushAlignment::new(self.min_align);
            self.align(to_align, ma);
        }

        if let Some(ident) = file_identifier {
            debug_assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
            self.push_bytes_unprefixed(ident.as_bytes());
        }

        self.push(root);

        if size_prefixed {
            let sz = self.used_space() as UOffsetT;
            self.push::<UOffsetT>(sz);
        }
        self.finished = true;
    }

    #[inline]
    fn align(&mut self, len: usize, alignment: PushAlignment) {
        self.track_min_align(alignment.value());
        let s = self.used_space();
        self.make_space(padding_bytes(s + len, alignment.value()));
    }

    #[inline]
    fn track_min_align(&mut self, alignment: usize) {
        self.min_align = max(self.min_align, alignment);
    }

    #[inline]
    fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
        let n = self.make_space(x.len());
        self.allocator[n.range_to(n + x.len())].copy_from_slice(x);

        n.to_forward_index(&self.allocator) as UOffsetT
    }

    #[inline]
    fn make_space(&mut self, want: usize) -> ReverseIndex {
        self.ensure_capacity(want);
        self.head -= want;
        self.head
    }

    #[inline]
    fn ensure_capacity(&mut self, want: usize) -> usize {
        if self.unused_ready_space() >= want {
            return want;
        }
        assert!(
            want <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot grow buffer beyond 2 gigabytes"
        );

        while self.unused_ready_space() < want {
            self.grow_allocator();
        }
        want
    }

    #[inline]
    fn unused_ready_space(&self) -> usize {
        self.allocator.len() - self.head.distance_to_end()
    }

    #[inline]
    fn assert_nested(&self, fn_name: &'static str) {
        // Note: this does not assert that `field_locs` is non-empty, because
        // a vtable can legitimately be empty (e.g. an all-default table).
        debug_assert!(
            self.nested,
            "incorrect FlatBufferBuilder usage: {} must be called while in a nested state",
            fn_name
        );
    }

    #[inline]
    fn assert_not_nested(&self, msg: &'static str) {
        debug_assert!(!self.nested, "{}", msg);
    }

    #[inline]
    fn assert_finished(&self, msg: &'static str) {
        debug_assert!(self.finished, "{}", msg);
    }

    #[inline]
    fn assert_not_finished(&self, msg: &'static str) {
        debug_assert!(!self.finished, "{}", msg);
    }
}

#[inline]
fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
    // A vtable holds two metadata voffsets (its own byte length and the
    // object's inline size) plus one voffset per field slot up to the
    // highest slot that was actually used.
    let max_voffset = field_locs.iter().map(|fl| fl.id).max();
    match max_voffset {
        None => field_index_to_field_offset(0) as usize,
        Some(mv) => mv as usize + SIZE_VOFFSET,
    }
}

#[inline]
fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {
    // Computes `(-buf_size) mod scalar_size`; `scalar_size` must be a power of two.
    (!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))
}

impl<'fbb> Default for FlatBufferBuilder<'fbb> {
    fn default() -> Self {
        Self::with_capacity(0)
    }
}

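/// An index that counts from the *end* of the buffer rather than the front,
/// matching how the builder writes data back-to-front. `ReverseIndex::end() - n`
/// names the position `n` bytes before the end; see `reverse_index_test` below
/// for a worked example.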
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct ReverseIndex(usize);

impl ReverseIndex {
    /// Returns an index to the end of the buffer (distance zero).
    pub fn end() -> Self {
        Self(0)
    }

    pub fn range_to_end(self) -> ReverseIndexRange {
        ReverseIndexRange(self, ReverseIndex::end())
    }

    pub fn range_to(self, end: ReverseIndex) -> ReverseIndexRange {
        ReverseIndexRange(self, end)
    }

    /// Converts to a regular front-based index into `buf`.
    pub fn to_forward_index<T>(self, buf: &[T]) -> usize {
        buf.len() - self.0
    }

    pub fn distance_to_end(&self) -> usize {
        self.0
    }
}

// Note the reversed arithmetic: subtracting from a `ReverseIndex` moves it
// *away* from the end of the buffer (the direction the builder writes), so it
// increases the stored distance; adding moves it toward the end.
impl Sub<usize> for ReverseIndex {
    type Output = Self;

    fn sub(self, rhs: usize) -> Self::Output {
        Self(self.0 + rhs)
    }
}

impl SubAssign<usize> for ReverseIndex {
    fn sub_assign(&mut self, rhs: usize) {
        *self = *self - rhs;
    }
}

impl Add<usize> for ReverseIndex {
    type Output = Self;

    fn add(self, rhs: usize) -> Self::Output {
        Self(self.0 - rhs)
    }
}

impl AddAssign<usize> for ReverseIndex {
    fn add_assign(&mut self, rhs: usize) {
        *self = *self + rhs;
    }
}

impl<T> Index<ReverseIndex> for [T] {
    type Output = T;

    fn index(&self, index: ReverseIndex) -> &Self::Output {
        let index = index.to_forward_index(self);
        &self[index]
    }
}

impl<T> IndexMut<ReverseIndex> for [T] {
    fn index_mut(&mut self, index: ReverseIndex) -> &mut Self::Output {
        let index = index.to_forward_index(self);
        &mut self[index]
    }
}

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct ReverseIndexRange(ReverseIndex, ReverseIndex);

impl<T> Index<ReverseIndexRange> for [T] {
    type Output = [T];

    fn index(&self, index: ReverseIndexRange) -> &Self::Output {
        let start = index.0.to_forward_index(self);
        let end = index.1.to_forward_index(self);
        &self[start..end]
    }
}

impl<T> IndexMut<ReverseIndexRange> for [T] {
    fn index_mut(&mut self, index: ReverseIndexRange) -> &mut Self::Output {
        let start = index.0.to_forward_index(self);
        let end = index.1.to_forward_index(self);
        &mut self[start..end]
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn reverse_index_test() {
        let buf = [0, 1, 2, 3, 4, 5];
        let idx = ReverseIndex::end() - 2;
        assert_eq!(&buf[idx.range_to_end()], &[4, 5]);
        assert_eq!(&buf[idx.range_to(idx + 1)], &[4]);
        assert_eq!(idx.to_forward_index(&buf), 4);
    }
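
    // A small smoke test of the raw builder API above, sketched from the
    // methods in this file: build a one-field table by hand and check that
    // the finished buffer is non-empty.
    #[test]
    fn builder_smoke_test() {
        let mut b = FlatBufferBuilder::new();
        let start = b.start_table();
        b.push_slot_always(4, 7u8); // field index 0 lives at voffset 4
        let table = b.end_table(start);
        b.finish_minimal(table);
        assert!(!b.finished_data().is_empty());
    }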
}