package flatbuffers
import "sort"
// Builder constructs a FlatBuffer back to front inside a growing byte
// slice. A Builder must not be reused for another buffer until Reset
// is called.
type Builder struct {
// Bytes is the backing buffer; data is written from the end toward the front.
Bytes []byte
// minalign is the largest alignment requested so far via Prep.
minalign int
// vtable holds per-field offsets for the table currently under construction.
vtable []UOffsetT
// objectEnd is the buffer offset at which the current table's data began.
objectEnd UOffsetT
// vtables caches offsets of already-written vtables for deduplication.
vtables []UOffsetT
// head is the index in Bytes where the next byte will be written (moves down).
head UOffsetT
// nested is true while an object or vector is under construction.
nested bool
// finished is true once Finish (or a variant) has been called.
finished bool
// sharedStrings dedupes strings written via CreateSharedString.
sharedStrings map[string]UOffsetT
}
// fileIdentifierLength is the byte length of a FlatBuffers file identifier.
const fileIdentifierLength = 4
// sizePrefixLength is the byte length of the optional size prefix.
const sizePrefixLength = 4
func NewBuilder(initialSize int) *Builder {
if initialSize <= 0 {
initialSize = 0
}
b := &Builder{}
b.Bytes = make([]byte, initialSize)
b.head = UOffsetT(initialSize)
b.minalign = 1
b.vtables = make([]UOffsetT, 0, 16) return b
}
// Reset discards all written data so the Builder can be reused to build
// another buffer. The backing byte slice, vtable scratch space, and
// cached-vtable list all keep their capacity, avoiding reallocation.
func (b *Builder) Reset() {
	if b.Bytes != nil {
		b.Bytes = b.Bytes[:cap(b.Bytes)]
	}
	if b.vtables != nil {
		b.vtables = b.vtables[:0]
	}
	if b.vtable != nil {
		b.vtable = b.vtable[:0]
	}

	// Ranging over a nil map is a no-op, so no nil guard is needed.
	for k := range b.sharedStrings {
		delete(b.sharedStrings, k)
	}

	b.head = UOffsetT(len(b.Bytes))
	b.minalign = 1
	b.nested = false
	b.finished = false
}
// FinishedBytes returns the slice of the buffer holding the finished
// FlatBuffer data. It panics if Finish has not been called.
func (b *Builder) FinishedBytes() []byte {
	b.assertFinished()
	start := b.Head()
	return b.Bytes[start:]
}
// StartObject begins construction of a table with numfields vtable
// slots. It panics if another object or vector is already being built.
func (b *Builder) StartObject(numfields int) {
	b.assertNotNested()
	b.nested = true

	// Reuse the scratch vtable when it is large enough; otherwise allocate.
	if numfields <= cap(b.vtable) && b.vtable != nil {
		b.vtable = b.vtable[:numfields]
		for i := range b.vtable {
			b.vtable[i] = 0
		}
	} else {
		b.vtable = make([]UOffsetT, numfields)
	}

	b.objectEnd = b.Offset()
}
// WriteVtable serializes the vtable for the just-finished table,
// deduplicating it against previously written vtables, and returns the
// offset of the table. A placeholder SOffsetT is prepended first and
// later backpatched to point at the vtable (new or reused).
func (b *Builder) WriteVtable() (n UOffsetT) {
// Placeholder for the soffset to the vtable; patched below.
b.PrependSOffsetT(0)
objectOffset := b.Offset()
existingVtable := UOffsetT(0)
// Trim trailing zero slots: unset fields at the end need not be stored.
i := len(b.vtable) - 1
for ; i >= 0 && b.vtable[i] == 0; i-- {
}
b.vtable = b.vtable[:i+1]
// Search the cached vtables (newest first) for an identical layout.
for i := len(b.vtables) - 1; i >= 0; i-- {
vt2Offset := b.vtables[i]
vt2Start := len(b.Bytes) - int(vt2Offset)
// First VOffsetT of a serialized vtable is its byte length.
vt2Len := GetVOffsetT(b.Bytes[vt2Start:])
metadata := VtableMetadataFields * SizeVOffsetT
vt2End := vt2Start + int(vt2Len)
// Compare only the field-offset portion, skipping the metadata.
vt2 := b.Bytes[vt2Start+metadata : vt2End]
if vtableEqual(b.vtable, objectOffset, vt2) {
existingVtable = vt2Offset
break
}
}
if existingVtable == 0 {
// No match: write a new vtable. Field offsets are stored relative
// to the start of the table, written in reverse order.
for i := len(b.vtable) - 1; i >= 0; i-- {
var off UOffsetT
if b.vtable[i] != 0 {
// Offset relative to the start of the table.
off = objectOffset - b.vtable[i]
}
b.PrependVOffsetT(VOffsetT(off))
}
// Metadata: the table's size in bytes, then the vtable's own size.
objectSize := objectOffset - b.objectEnd
b.PrependVOffsetT(VOffsetT(objectSize))
vBytes := (len(b.vtable) + VtableMetadataFields) * SizeVOffsetT
b.PrependVOffsetT(VOffsetT(vBytes))
// Backpatch the placeholder to point at the freshly written vtable.
objectStart := SOffsetT(len(b.Bytes)) - SOffsetT(objectOffset)
WriteSOffsetT(b.Bytes[objectStart:],
SOffsetT(b.Offset())-SOffsetT(objectOffset))
// Cache this vtable so identical layouts can reuse it later.
b.vtables = append(b.vtables, b.Offset())
} else {
// Found an identical vtable: point the placeholder at it and move
// head back to the table start, reclaiming the reserved space.
objectStart := SOffsetT(len(b.Bytes)) - SOffsetT(objectOffset)
b.head = UOffsetT(objectStart)
WriteSOffsetT(b.Bytes[b.head:],
SOffsetT(existingVtable)-SOffsetT(objectOffset))
}
b.vtable = b.vtable[:0]
return objectOffset
}
// EndObject finishes the table begun by StartObject and returns the
// offset of the serialized table.
func (b *Builder) EndObject() UOffsetT {
	b.assertNested()
	off := b.WriteVtable()
	b.nested = false
	return off
}
// growByteBuffer doubles the backing buffer and shifts the existing
// data to the upper half, since FlatBuffers grow toward the front.
// Panics once the buffer would exceed 2 GiB.
func (b *Builder) growByteBuffer() {
	if (int64(len(b.Bytes)) & int64(0xC0000000)) != 0 {
		panic("cannot grow buffer beyond 2 gigabytes")
	}

	newLen := len(b.Bytes) * 2
	if newLen == 0 {
		newLen = 1
	}

	if cap(b.Bytes) >= newLen {
		// Spare capacity already exists; just extend the length.
		b.Bytes = b.Bytes[:newLen]
	} else {
		extra := make([]byte, newLen-len(b.Bytes))
		b.Bytes = append(b.Bytes, extra...)
	}

	// Move the old contents into the back half of the new buffer.
	half := newLen / 2
	copy(b.Bytes[half:], b.Bytes[:half])
}
// Head gives the start of useful data in the underlying byte slice.
// Unlike Offset, this index is measured from the front of the buffer.
func (b *Builder) Head() UOffsetT {
return b.head
}
// Offset returns the current write position measured from the end of
// the buffer (FlatBuffers are built back to front).
func (b *Builder) Offset() UOffsetT {
return UOffsetT(len(b.Bytes)) - b.head
}
// Pad writes n zero bytes at the current head.
func (b *Builder) Pad(n int) {
	for ; n > 0; n-- {
		b.PlaceByte(0)
	}
}
// Prep prepares to write an element of `size` bytes, preceded (in write
// order) by `additionalBytes` of trailing data such as vector contents,
// so that the element ends up aligned to `size`. Grows the buffer and
// writes zero padding as needed, and records the largest alignment seen.
func (b *Builder) Prep(size, additionalBytes int) {
// Track the biggest thing we've ever aligned to.
if size > b.minalign {
b.minalign = size
}
// Two's-complement trick: number of pad bytes so that, after
// additionalBytes are written, the new data is aligned to `size`
// (alignment is computed from the back of the buffer).
alignSize := (^(len(b.Bytes) - int(b.Head()) + additionalBytes)) + 1
alignSize &= (size - 1)
// Grow until there is room for the padding, the element, and the
// additional bytes; head shifts by the amount the buffer grew.
for int(b.head) <= alignSize+size+additionalBytes {
oldBufSize := len(b.Bytes)
b.growByteBuffer()
b.head += UOffsetT(len(b.Bytes) - oldBufSize)
}
b.Pad(alignSize)
}
func (b *Builder) PrependSOffsetT(off SOffsetT) {
b.Prep(SizeSOffsetT, 0) if !(UOffsetT(off) <= b.Offset()) {
panic("unreachable: off <= b.Offset()")
}
off2 := SOffsetT(b.Offset()) - off + SOffsetT(SizeSOffsetT)
b.PlaceSOffsetT(off2)
}
func (b *Builder) PrependUOffsetT(off UOffsetT) {
b.Prep(SizeUOffsetT, 0) if !(off <= b.Offset()) {
panic("unreachable: off <= b.Offset()")
}
off2 := b.Offset() - off + UOffsetT(SizeUOffsetT)
b.PlaceUOffsetT(off2)
}
func (b *Builder) StartVector(elemSize, numElems, alignment int) UOffsetT {
b.assertNotNested()
b.nested = true
b.Prep(SizeUint32, elemSize*numElems)
b.Prep(alignment, elemSize*numElems) return b.Offset()
}
// EndVector finishes a vector begun by StartVector by writing its
// length prefix, and returns the vector's offset.
func (b *Builder) EndVector(vectorNumElems int) UOffsetT {
	b.assertNested()

	// The length prefix sits immediately before the elements.
	b.PlaceUOffsetT(UOffsetT(vectorNumElems))

	b.nested = false
	off := b.Offset()
	return off
}
// CreateVectorOfTables serializes a vector of table offsets (written in
// reverse, as the buffer grows toward the front) and returns its offset.
func (b *Builder) CreateVectorOfTables(offsets []UOffsetT) UOffsetT {
	b.assertNotNested()

	count := len(offsets)
	b.StartVector(4, count, 4)
	for i := count - 1; i >= 0; i-- {
		b.PrependUOffsetT(offsets[i])
	}
	return b.EndVector(count)
}
// KeyCompare reports whether the table at offset o1 sorts before the
// table at offset o2 within buf.
type KeyCompare func(o1, o2 UOffsetT, buf []byte) bool
// CreateVectorOfSortedTables sorts `offsets` in place using keyCompare,
// then serializes them as a vector of tables and returns its offset.
func (b *Builder) CreateVectorOfSortedTables(offsets []UOffsetT, keyCompare KeyCompare) UOffsetT {
	less := func(i, j int) bool {
		return keyCompare(offsets[i], offsets[j], b.Bytes)
	}
	sort.Slice(offsets, less)
	return b.CreateVectorOfTables(offsets)
}
// CreateSharedString writes a null-terminated string, deduplicating so
// that each distinct value is stored in the buffer only once. Returns
// the (possibly previously cached) offset of the string.
func (b *Builder) CreateSharedString(s string) UOffsetT {
	if b.sharedStrings == nil {
		b.sharedStrings = make(map[string]UOffsetT)
	}

	if cached, ok := b.sharedStrings[s]; ok {
		return cached
	}

	off := b.CreateString(s)
	b.sharedStrings[s] = off
	return off
}
// CreateString writes a null-terminated string as a byte vector and
// returns its offset.
func (b *Builder) CreateString(s string) UOffsetT {
	b.assertNotNested()
	b.nested = true

	// Reserve space for the characters, the terminator, and the length.
	b.Prep(int(SizeUOffsetT), (len(s)+1)*SizeByte)
	b.PlaceByte(0) // null terminator

	length := UOffsetT(len(s))
	b.head -= length
	copy(b.Bytes[b.head:b.head+length], s)

	return b.EndVector(len(s))
}
// CreateByteString writes a null-terminated byte slice as a vector and
// returns its offset.
func (b *Builder) CreateByteString(s []byte) UOffsetT {
	b.assertNotNested()
	b.nested = true

	// Reserve space for the bytes, the terminator, and the length.
	b.Prep(int(SizeUOffsetT), (len(s)+1)*SizeByte)
	b.PlaceByte(0) // null terminator

	length := UOffsetT(len(s))
	b.head -= length
	copy(b.Bytes[b.head:b.head+length], s)

	return b.EndVector(len(s))
}
// CreateByteVector writes a raw byte vector (no null terminator) and
// returns its offset.
func (b *Builder) CreateByteVector(v []byte) UOffsetT {
	b.assertNotNested()
	b.nested = true

	b.Prep(int(SizeUOffsetT), len(v)*SizeByte)

	length := UOffsetT(len(v))
	b.head -= length
	copy(b.Bytes[b.head:b.head+length], v)

	return b.EndVector(len(v))
}
// assertNested panics unless an object or vector is under construction.
func (b *Builder) assertNested() {
	if b.nested {
		return
	}
	panic("Incorrect creation order: must be inside object.")
}

// assertNotNested panics if an object or vector is under construction.
func (b *Builder) assertNotNested() {
	if !b.nested {
		return
	}
	panic("Incorrect creation order: object must not be nested.")
}

// assertFinished panics unless Finish has been called.
func (b *Builder) assertFinished() {
	if b.finished {
		return
	}
	panic("Incorrect use of FinishedBytes(): must call 'Finish' first.")
}
// PrependBoolSlot writes bool x at vtable slot o, encoded as a byte.
// Like the other slot writers, the value is skipped when it equals the
// default d.
func (b *Builder) PrependBoolSlot(o int, x, d bool) {
	toByte := func(v bool) byte {
		if v {
			return 1
		}
		return 0
	}
	b.PrependByteSlot(o, toByte(x), toByte(d))
}
// PrependByteSlot writes byte x at vtable slot o, unless x equals the
// default d, in which case nothing is written (the reader falls back
// to the default).
func (b *Builder) PrependByteSlot(o int, x, d byte) {
	if x == d {
		return
	}
	b.PrependByte(x)
	b.Slot(o)
}

// PrependUint8Slot writes uint8 x at vtable slot o unless x == d.
func (b *Builder) PrependUint8Slot(o int, x, d uint8) {
	if x == d {
		return
	}
	b.PrependUint8(x)
	b.Slot(o)
}

// PrependUint16Slot writes uint16 x at vtable slot o unless x == d.
func (b *Builder) PrependUint16Slot(o int, x, d uint16) {
	if x == d {
		return
	}
	b.PrependUint16(x)
	b.Slot(o)
}

// PrependUint32Slot writes uint32 x at vtable slot o unless x == d.
func (b *Builder) PrependUint32Slot(o int, x, d uint32) {
	if x == d {
		return
	}
	b.PrependUint32(x)
	b.Slot(o)
}

// PrependUint64Slot writes uint64 x at vtable slot o unless x == d.
func (b *Builder) PrependUint64Slot(o int, x, d uint64) {
	if x == d {
		return
	}
	b.PrependUint64(x)
	b.Slot(o)
}

// PrependInt8Slot writes int8 x at vtable slot o unless x == d.
func (b *Builder) PrependInt8Slot(o int, x, d int8) {
	if x == d {
		return
	}
	b.PrependInt8(x)
	b.Slot(o)
}

// PrependInt16Slot writes int16 x at vtable slot o unless x == d.
func (b *Builder) PrependInt16Slot(o int, x, d int16) {
	if x == d {
		return
	}
	b.PrependInt16(x)
	b.Slot(o)
}

// PrependInt32Slot writes int32 x at vtable slot o unless x == d.
func (b *Builder) PrependInt32Slot(o int, x, d int32) {
	if x == d {
		return
	}
	b.PrependInt32(x)
	b.Slot(o)
}

// PrependInt64Slot writes int64 x at vtable slot o unless x == d.
func (b *Builder) PrependInt64Slot(o int, x, d int64) {
	if x == d {
		return
	}
	b.PrependInt64(x)
	b.Slot(o)
}

// PrependFloat32Slot writes float32 x at vtable slot o unless x == d.
func (b *Builder) PrependFloat32Slot(o int, x, d float32) {
	if x == d {
		return
	}
	b.PrependFloat32(x)
	b.Slot(o)
}

// PrependFloat64Slot writes float64 x at vtable slot o unless x == d.
func (b *Builder) PrependFloat64Slot(o int, x, d float64) {
	if x == d {
		return
	}
	b.PrependFloat64(x)
	b.Slot(o)
}

// PrependUOffsetTSlot writes offset x at vtable slot o unless x == d.
func (b *Builder) PrependUOffsetTSlot(o int, x, d UOffsetT) {
	if x == d {
		return
	}
	b.PrependUOffsetT(x)
	b.Slot(o)
}
// PrependStructSlot records an already-written inline struct at vtable
// slot voffset. Structs are stored inline, so x must equal the current
// offset; a mismatch means the struct was not written immediately
// before this call. Skipped when x equals the default d.
func (b *Builder) PrependStructSlot(voffset int, x, d UOffsetT) {
	if x == d {
		return
	}
	b.assertNested()
	if x != b.Offset() {
		panic("inline data write outside of object")
	}
	b.Slot(voffset)
}
// Slot records the current offset in vtable slot slotnum, marking the
// most recently prepended value as that field's data.
func (b *Builder) Slot(slotnum int) {
b.vtable[slotnum] = UOffsetT(b.Offset())
}
// FinishWithFileIdentifier finalizes the buffer, placing the 4-byte
// file identifier fid immediately before the root table offset.
// Panics if fid is not exactly fileIdentifierLength bytes (a nil fid
// has length 0 and therefore also panics).
func (b *Builder) FinishWithFileIdentifier(rootTable UOffsetT, fid []byte) {
	if len(fid) != fileIdentifierLength {
		panic("incorrect file identifier length")
	}

	// Reserve room for the root offset plus the identifier, then write
	// the identifier bytes in reverse (the buffer grows frontward).
	b.Prep(b.minalign, SizeInt32+fileIdentifierLength)
	for i := fileIdentifierLength; i > 0; i-- {
		b.PlaceByte(fid[i-1])
	}

	b.Finish(rootTable)
}
// FinishSizePrefixed finalizes the buffer with a 32-bit size prefix
// written in front of the root table offset.
func (b *Builder) FinishSizePrefixed(rootTable UOffsetT) {
b.finish(rootTable, true)
}
// FinishSizePrefixedWithFileIdentifier finalizes the buffer with both a
// 32-bit size prefix and the 4-byte file identifier fid before the root
// table offset. Panics if fid is not exactly fileIdentifierLength bytes
// (a nil fid has length 0 and therefore also panics).
func (b *Builder) FinishSizePrefixedWithFileIdentifier(rootTable UOffsetT, fid []byte) {
	if len(fid) != fileIdentifierLength {
		panic("incorrect file identifier length")
	}

	// Reserve room for the size prefix, root offset, and identifier,
	// then write the identifier bytes in reverse.
	b.Prep(b.minalign, SizeInt32+fileIdentifierLength+sizePrefixLength)
	for i := fileIdentifierLength; i > 0; i-- {
		b.PlaceByte(fid[i-1])
	}

	b.finish(rootTable, true)
}
// Finish finalizes the buffer, pointing to the given rootTable.
func (b *Builder) Finish(rootTable UOffsetT) {
b.finish(rootTable, false)
}
// finish finalizes the buffer by writing the root table offset and,
// when sizePrefix is true, a 32-bit size prefix in front of it.
// Marks the builder as finished.
func (b *Builder) finish(rootTable UOffsetT, sizePrefix bool) {
	b.assertNotNested()

	prepBytes := SizeUOffsetT
	if sizePrefix {
		prepBytes += sizePrefixLength
	}
	b.Prep(b.minalign, prepBytes)

	b.PrependUOffsetT(rootTable)
	if sizePrefix {
		// The prefix records the total size of everything after it.
		b.PlaceUint32(uint32(b.Offset()))
	}

	b.finished = true
}
// vtableEqual reports whether the pending vtable a (whose entries are
// absolute offsets relative to the buffer end, with objectStart as the
// table position) encodes the same field layout as the serialized
// vtable bytes in b.
func vtableEqual(a []UOffsetT, objectStart UOffsetT, b []byte) bool {
	if len(a)*SizeVOffsetT != len(b) {
		return false
	}

	for i, fieldOffset := range a {
		stored := GetVOffsetT(b[i*SizeVOffsetT : (i+1)*SizeVOffsetT])

		// Both unset: still equal so far.
		if stored == 0 && fieldOffset == 0 {
			continue
		}

		want := SOffsetT(objectStart) - SOffsetT(fieldOffset)
		if SOffsetT(stored) != want {
			return false
		}
	}
	return true
}
// PrependBool prepends a bool onto the buffer, properly aligned.
func (b *Builder) PrependBool(x bool) {
b.Prep(SizeBool, 0)
b.PlaceBool(x)
}
// PrependUint8 prepends a uint8 onto the buffer, properly aligned.
func (b *Builder) PrependUint8(x uint8) {
b.Prep(SizeUint8, 0)
b.PlaceUint8(x)
}
// PrependUint16 prepends a uint16 onto the buffer, properly aligned.
func (b *Builder) PrependUint16(x uint16) {
b.Prep(SizeUint16, 0)
b.PlaceUint16(x)
}
// PrependUint32 prepends a uint32 onto the buffer, properly aligned.
func (b *Builder) PrependUint32(x uint32) {
b.Prep(SizeUint32, 0)
b.PlaceUint32(x)
}
// PrependUint64 prepends a uint64 onto the buffer, properly aligned.
func (b *Builder) PrependUint64(x uint64) {
b.Prep(SizeUint64, 0)
b.PlaceUint64(x)
}
// PrependInt8 prepends an int8 onto the buffer, properly aligned.
func (b *Builder) PrependInt8(x int8) {
b.Prep(SizeInt8, 0)
b.PlaceInt8(x)
}
// PrependInt16 prepends an int16 onto the buffer, properly aligned.
func (b *Builder) PrependInt16(x int16) {
b.Prep(SizeInt16, 0)
b.PlaceInt16(x)
}
// PrependInt32 prepends an int32 onto the buffer, properly aligned.
func (b *Builder) PrependInt32(x int32) {
b.Prep(SizeInt32, 0)
b.PlaceInt32(x)
}
// PrependInt64 prepends an int64 onto the buffer, properly aligned.
func (b *Builder) PrependInt64(x int64) {
b.Prep(SizeInt64, 0)
b.PlaceInt64(x)
}
// PrependFloat32 prepends a float32 onto the buffer, properly aligned.
func (b *Builder) PrependFloat32(x float32) {
b.Prep(SizeFloat32, 0)
b.PlaceFloat32(x)
}
// PrependFloat64 prepends a float64 onto the buffer, properly aligned.
func (b *Builder) PrependFloat64(x float64) {
b.Prep(SizeFloat64, 0)
b.PlaceFloat64(x)
}
// PrependByte prepends a byte onto the buffer, properly aligned.
func (b *Builder) PrependByte(x byte) {
b.Prep(SizeByte, 0)
b.PlaceByte(x)
}
// PrependVOffsetT prepends a VOffsetT onto the buffer, properly aligned.
func (b *Builder) PrependVOffsetT(x VOffsetT) {
b.Prep(SizeVOffsetT, 0)
b.PlaceVOffsetT(x)
}
// The Place* methods write a value at the current head and move head
// down by the value's size. They do NOT check for space; callers must
// call Prep first.

// PlaceBool writes a bool at the current head.
func (b *Builder) PlaceBool(x bool) {
b.head -= UOffsetT(SizeBool)
WriteBool(b.Bytes[b.head:], x)
}
// PlaceUint8 writes a uint8 at the current head.
func (b *Builder) PlaceUint8(x uint8) {
b.head -= UOffsetT(SizeUint8)
WriteUint8(b.Bytes[b.head:], x)
}
// PlaceUint16 writes a uint16 at the current head.
func (b *Builder) PlaceUint16(x uint16) {
b.head -= UOffsetT(SizeUint16)
WriteUint16(b.Bytes[b.head:], x)
}
// PlaceUint32 writes a uint32 at the current head.
func (b *Builder) PlaceUint32(x uint32) {
b.head -= UOffsetT(SizeUint32)
WriteUint32(b.Bytes[b.head:], x)
}
// PlaceUint64 writes a uint64 at the current head.
func (b *Builder) PlaceUint64(x uint64) {
b.head -= UOffsetT(SizeUint64)
WriteUint64(b.Bytes[b.head:], x)
}
// PlaceInt8 writes an int8 at the current head.
func (b *Builder) PlaceInt8(x int8) {
b.head -= UOffsetT(SizeInt8)
WriteInt8(b.Bytes[b.head:], x)
}
// PlaceInt16 writes an int16 at the current head.
func (b *Builder) PlaceInt16(x int16) {
b.head -= UOffsetT(SizeInt16)
WriteInt16(b.Bytes[b.head:], x)
}
// PlaceInt32 writes an int32 at the current head.
func (b *Builder) PlaceInt32(x int32) {
b.head -= UOffsetT(SizeInt32)
WriteInt32(b.Bytes[b.head:], x)
}
// PlaceInt64 writes an int64 at the current head.
func (b *Builder) PlaceInt64(x int64) {
b.head -= UOffsetT(SizeInt64)
WriteInt64(b.Bytes[b.head:], x)
}
// PlaceFloat32 writes a float32 at the current head.
func (b *Builder) PlaceFloat32(x float32) {
b.head -= UOffsetT(SizeFloat32)
WriteFloat32(b.Bytes[b.head:], x)
}
// PlaceFloat64 writes a float64 at the current head.
func (b *Builder) PlaceFloat64(x float64) {
b.head -= UOffsetT(SizeFloat64)
WriteFloat64(b.Bytes[b.head:], x)
}
// PlaceByte writes a byte at the current head.
func (b *Builder) PlaceByte(x byte) {
b.head -= UOffsetT(SizeByte)
WriteByte(b.Bytes[b.head:], x)
}
// PlaceVOffsetT writes a VOffsetT at the current head.
func (b *Builder) PlaceVOffsetT(x VOffsetT) {
b.head -= UOffsetT(SizeVOffsetT)
WriteVOffsetT(b.Bytes[b.head:], x)
}
// PlaceSOffsetT writes an SOffsetT at the current head.
func (b *Builder) PlaceSOffsetT(x SOffsetT) {
b.head -= UOffsetT(SizeSOffsetT)
WriteSOffsetT(b.Bytes[b.head:], x)
}
// PlaceUOffsetT writes a UOffsetT at the current head.
func (b *Builder) PlaceUOffsetT(x UOffsetT) {
b.head -= UOffsetT(SizeUOffsetT)
WriteUOffsetT(b.Bytes[b.head:], x)
}