use core::mem;
use core::cmp::Ordering;
extern crate flatbuffers;
use self::flatbuffers::{EndianScalar, Follow};
// NOTE(review): this file appears to be flatc-generated FlatBuffers bindings.
// The deprecated module-level constants below duplicate the associated
// constants on `Compression` and are kept only for pre-2.0 API compatibility.
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MIN_COMPRESSION: u8 = 0;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MAX_COMPRESSION: u8 = 1;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
#[allow(non_camel_case_types)]
// All known `Compression` values, in discriminant order.
pub const ENUM_VALUES_COMPRESSION: [Compression; 2] = [
Compression::None,
Compression::LZ4,
];
/// Compression scheme tag for a serialized buffer.
///
/// Modeled as a transparent newtype over `u8` (rather than a Rust `enum`) so
/// that discriminants read from a foreign buffer that are outside the known
/// range remain representable instead of being undefined behavior.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
pub struct Compression(pub u8);
#[allow(non_upper_case_globals)]
impl Compression {
    pub const None: Self = Self(0);
    pub const LZ4: Self = Self(1);
    pub const ENUM_MIN: u8 = 0;
    pub const ENUM_MAX: u8 = 1;
    pub const ENUM_VALUES: &'static [Self] = &[Self::None, Self::LZ4];

    /// Human-readable name of the variant, or `None` for values outside the
    /// known discriminant range.
    pub fn variant_name(self) -> Option<&'static str> {
        match self.0 {
            0 => Some("None"),
            1 => Some("LZ4"),
            _ => None,
        }
    }
}
impl core::fmt::Debug for Compression {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        // Known discriminants print their variant name; anything else is
        // surfaced explicitly so corrupt/newer data is visible in logs.
        match self.variant_name() {
            Some(name) => f.write_str(name),
            None => f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0)),
        }
    }
}
// Deserialization: a `Compression` is read from the wire as a single `u8`
// at `loc`.
impl<'a> flatbuffers::Follow<'a> for Compression {
type Inner = Self;
#[inline]
unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
// SAFETY: the `Follow::follow` contract requires the caller to guarantee
// that `loc` is in-bounds for a `u8` read in `buf`.
let b = unsafe { flatbuffers::read_scalar_at::<u8>(buf, loc) };
Self(b)
}
}
// Serialization: a `Compression` is written as its raw `u8` discriminant.
impl flatbuffers::Push for Compression {
type Output = Compression;
#[inline]
unsafe fn push(&self, dst: &mut [u8], _written_len: usize) {
// SAFETY: the `Push::push` contract requires `dst` to have room for
// `size_of::<u8>()` bytes at its start.
unsafe { flatbuffers::emplace_scalar::<u8>(dst, self.0); }
}
}
/// Endianness support: `Compression` round-trips through its raw `u8`.
/// Byte-order conversion on a single byte is a no-op; the impl exists for
/// API symmetry with wider scalar types.
impl flatbuffers::EndianScalar for Compression {
    type Scalar = u8;

    #[inline]
    fn to_little_endian(self) -> u8 {
        u8::to_le(self.0)
    }

    #[inline]
    #[allow(clippy::wrong_self_convention)]
    fn from_little_endian(v: u8) -> Self {
        Self(u8::from_le(v))
    }
}
// Verification: any in-bounds `u8` is an acceptable `Compression` value
// (unknown discriminants are tolerated by design — see `variant_name`).
impl<'a> flatbuffers::Verifiable for Compression {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier, pos: usize
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
u8::run_verifier(v, pos)
}
}
// Marker: a vector of `Compression` can be verified by a bounds check alone.
impl flatbuffers::SimpleToVerifyInSlice for Compression {}
// Deprecated module-level constants duplicating the associated constants on
// `Precision`; kept only for pre-2.0 generated-API compatibility.
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MIN_PRECISION: u8 = 0;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MAX_PRECISION: u8 = 1;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
#[allow(non_camel_case_types)]
// All known `Precision` values, in discriminant order.
pub const ENUM_VALUES_PRECISION: [Precision; 2] = [
Precision::Inexact,
Precision::Exact,
];
/// Whether a statistic (e.g. min/max in `ArrayStats`) is exact or a bound.
///
/// A transparent newtype over `u8` rather than a Rust `enum`, so unknown
/// discriminants read from a foreign buffer stay representable.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
pub struct Precision(pub u8);
#[allow(non_upper_case_globals)]
impl Precision {
    pub const Inexact: Self = Self(0);
    pub const Exact: Self = Self(1);
    pub const ENUM_MIN: u8 = 0;
    pub const ENUM_MAX: u8 = 1;
    pub const ENUM_VALUES: &'static [Self] = &[Self::Inexact, Self::Exact];

    /// Human-readable name of the variant, or `None` for values outside the
    /// known discriminant range.
    pub fn variant_name(self) -> Option<&'static str> {
        match self.0 {
            0 => Some("Inexact"),
            1 => Some("Exact"),
            _ => None,
        }
    }
}
impl core::fmt::Debug for Precision {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        // Unknown discriminants are printed explicitly rather than hidden.
        match self.variant_name() {
            Some(name) => f.write_str(name),
            None => f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0)),
        }
    }
}
// Deserialization: a `Precision` is read from the wire as a single `u8`
// at `loc`.
impl<'a> flatbuffers::Follow<'a> for Precision {
type Inner = Self;
#[inline]
unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
// SAFETY: the `Follow::follow` contract requires the caller to guarantee
// that `loc` is in-bounds for a `u8` read in `buf`.
let b = unsafe { flatbuffers::read_scalar_at::<u8>(buf, loc) };
Self(b)
}
}
// Serialization: a `Precision` is written as its raw `u8` discriminant.
impl flatbuffers::Push for Precision {
type Output = Precision;
#[inline]
unsafe fn push(&self, dst: &mut [u8], _written_len: usize) {
// SAFETY: the `Push::push` contract requires `dst` to have room for
// `size_of::<u8>()` bytes at its start.
unsafe { flatbuffers::emplace_scalar::<u8>(dst, self.0); }
}
}
/// Endianness support: `Precision` round-trips through its raw `u8`.
/// Single-byte byte-order conversion is a no-op; kept for API symmetry.
impl flatbuffers::EndianScalar for Precision {
    type Scalar = u8;

    #[inline]
    fn to_little_endian(self) -> u8 {
        u8::to_le(self.0)
    }

    #[inline]
    #[allow(clippy::wrong_self_convention)]
    fn from_little_endian(v: u8) -> Self {
        Self(u8::from_le(v))
    }
}
// Verification: any in-bounds `u8` is an acceptable `Precision` value
// (unknown discriminants are tolerated by design — see `variant_name`).
impl<'a> flatbuffers::Verifiable for Precision {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier, pos: usize
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
u8::run_verifier(v, pos)
}
}
// Marker: a vector of `Precision` can be verified by a bounds check alone.
impl flatbuffers::SimpleToVerifyInSlice for Precision {}
/// Fixed-size (8-byte) FlatBuffers struct describing one physical buffer.
///
/// Byte layout (little-endian, per the accessor impl below):
///   [0..2] padding: u16, [2] alignment_exponent: u8,
///   [3] compression: u8, [4..8] length: u32.
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq)]
pub struct Buffer(pub [u8; 8]);

impl Default for Buffer {
    /// All-zero bytes: padding 0, alignment exponent 0, `Compression::None`,
    /// length 0.
    fn default() -> Self {
        Buffer([0u8; 8])
    }
}
// Debug prints the decoded logical fields (via the accessors defined in the
// `impl Buffer` block), not the raw 8 bytes.
impl core::fmt::Debug for Buffer {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_struct("Buffer")
.field("padding", &self.padding())
.field("alignment_exponent", &self.alignment_exponent())
.field("compression", &self.compression())
.field("length", &self.length())
.finish()
}
}
// Marker: a vector of `Buffer` structs can be verified by a bounds check alone.
impl flatbuffers::SimpleToVerifyInSlice for Buffer {}
// By-value `Follow` delegates to the by-reference impl below; struct types are
// stored inline in the buffer, so following yields a reference into it.
impl<'a> flatbuffers::Follow<'a> for Buffer {
type Inner = &'a Buffer;
#[inline]
unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
// SAFETY: forwarded; the caller upholds the `Follow::follow` contract.
unsafe { <&'a Buffer>::follow(buf, loc) }
}
}
impl<'a> flatbuffers::Follow<'a> for &'a Buffer {
type Inner = &'a Buffer;
#[inline]
unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
// SAFETY: caller guarantees `loc` is in-bounds for an 8-byte `Buffer`
// read, per the `Follow::follow` contract.
unsafe { flatbuffers::follow_cast_ref::<Buffer>(buf, loc) }
}
}
// Serialization: the struct is memcpy'd verbatim — valid because `Buffer` is
// `#[repr(transparent)]` over `[u8; 8]` with no padding or pointers.
// NOTE(review): the `'b` lifetime parameter is unused (flatc-generated quirk).
impl<'b> flatbuffers::Push for Buffer {
type Output = Buffer;
#[inline]
unsafe fn push(&self, dst: &mut [u8], _written_len: usize) {
// SAFETY: `self` is a live `Buffer`, so reading `size()` bytes from its
// address is in-bounds; `Push::push` requires `dst` to be at least that long.
let src = unsafe { ::core::slice::from_raw_parts(self as *const Buffer as *const u8, <Self as flatbuffers::Push>::size()) };
dst.copy_from_slice(src);
}
#[inline]
fn alignment() -> flatbuffers::PushAlignment {
// 4-byte alignment: the widest field is the u32 `length`.
flatbuffers::PushAlignment::new(4)
}
}
// Verification: a fixed-size struct only needs a bounds check — every byte
// pattern is a valid `Buffer`.
impl<'a> flatbuffers::Verifiable for Buffer {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier, pos: usize
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
v.in_buffer::<Self>(pos)
}
}
// Field accessors for the 8-byte `Buffer` struct. Each getter/setter copies
// the little-endian scalar at a fixed byte offset:
//   padding             u16 @ 0
//   alignment_exponent  u8  @ 2
//   compression         u8  @ 3
//   length              u32 @ 4
// The unaligned `copy_nonoverlapping` dance (instead of a plain cast) is
// required because the backing array gives no alignment guarantee.
// NOTE(review): the `'a` lifetime parameter is unused (flatc-generated quirk).
impl<'a> Buffer {
#[allow(clippy::too_many_arguments)]
// Constructs a `Buffer` by encoding each field into its byte slot.
pub fn new(
padding: u16,
alignment_exponent: u8,
compression: Compression,
length: u32,
) -> Self {
let mut s = Self([0; 8]);
s.set_padding(padding);
s.set_alignment_exponent(alignment_exponent);
s.set_compression(compression);
s.set_length(length);
s
}
// Trailing padding bytes after the buffer's data (u16 at offset 0).
pub fn padding(&self) -> u16 {
let mut mem = core::mem::MaybeUninit::<<u16 as EndianScalar>::Scalar>::uninit();
EndianScalar::from_little_endian(unsafe {
// SAFETY: offsets 0..2 are in-bounds of the 8-byte array, and the
// freshly-copied bytes fully initialize `mem` before `assume_init`.
core::ptr::copy_nonoverlapping(
self.0[0..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<<u16 as EndianScalar>::Scalar>(),
);
mem.assume_init()
})
}
pub fn set_padding(&mut self, x: u16) {
let x_le = x.to_little_endian();
unsafe {
// SAFETY: writes size_of::<u16>() == 2 bytes at offsets 0..2, in-bounds.
core::ptr::copy_nonoverlapping(
&x_le as *const _ as *const u8,
self.0[0..].as_mut_ptr(),
core::mem::size_of::<<u16 as EndianScalar>::Scalar>(),
);
}
}
// Log2 of the buffer's alignment (u8 at offset 2).
pub fn alignment_exponent(&self) -> u8 {
let mut mem = core::mem::MaybeUninit::<<u8 as EndianScalar>::Scalar>::uninit();
EndianScalar::from_little_endian(unsafe {
// SAFETY: offset 2 is in-bounds; `mem` is fully written before read.
core::ptr::copy_nonoverlapping(
self.0[2..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<<u8 as EndianScalar>::Scalar>(),
);
mem.assume_init()
})
}
pub fn set_alignment_exponent(&mut self, x: u8) {
let x_le = x.to_little_endian();
unsafe {
// SAFETY: writes 1 byte at offset 2, in-bounds.
core::ptr::copy_nonoverlapping(
&x_le as *const _ as *const u8,
self.0[2..].as_mut_ptr(),
core::mem::size_of::<<u8 as EndianScalar>::Scalar>(),
);
}
}
// Compression scheme tag (u8 at offset 3).
pub fn compression(&self) -> Compression {
let mut mem = core::mem::MaybeUninit::<<Compression as EndianScalar>::Scalar>::uninit();
EndianScalar::from_little_endian(unsafe {
// SAFETY: offset 3 is in-bounds; `mem` is fully written before read.
core::ptr::copy_nonoverlapping(
self.0[3..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<<Compression as EndianScalar>::Scalar>(),
);
mem.assume_init()
})
}
pub fn set_compression(&mut self, x: Compression) {
let x_le = x.to_little_endian();
unsafe {
// SAFETY: writes 1 byte at offset 3, in-bounds.
core::ptr::copy_nonoverlapping(
&x_le as *const _ as *const u8,
self.0[3..].as_mut_ptr(),
core::mem::size_of::<<Compression as EndianScalar>::Scalar>(),
);
}
}
// Buffer length in bytes (u32 at offset 4).
pub fn length(&self) -> u32 {
let mut mem = core::mem::MaybeUninit::<<u32 as EndianScalar>::Scalar>::uninit();
EndianScalar::from_little_endian(unsafe {
// SAFETY: offsets 4..8 are in-bounds; `mem` is fully written before read.
core::ptr::copy_nonoverlapping(
self.0[4..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<<u32 as EndianScalar>::Scalar>(),
);
mem.assume_init()
})
}
pub fn set_length(&mut self, x: u32) {
let x_le = x.to_little_endian();
unsafe {
// SAFETY: writes size_of::<u32>() == 4 bytes at offsets 4..8, in-bounds.
core::ptr::copy_nonoverlapping(
&x_le as *const _ as *const u8,
self.0[4..].as_mut_ptr(),
core::mem::size_of::<<u32 as EndianScalar>::Scalar>(),
);
}
}
}
// Uninhabited marker type used to tag WIPOffsets pointing at `Array` tables.
pub enum ArrayOffset {}
// `Array` table: a zero-copy view over a region of the underlying byte buffer.
#[derive(Copy, Clone, PartialEq)]
pub struct Array<'a> {
pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for Array<'a> {
type Inner = Array<'a>;
#[inline]
unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
// SAFETY: caller guarantees `loc` points at a valid table, per the
// `Follow::follow` contract.
Self { _tab: unsafe { flatbuffers::Table::new(buf, loc) } }
}
}
// Accessors and construction for the `Array` table.
// Fields (vtable offsets): root @ 4, buffers @ 6 — both optional.
impl<'a> Array<'a> {
pub const VT_ROOT: flatbuffers::VOffsetT = 4;
pub const VT_BUFFERS: flatbuffers::VOffsetT = 6;
#[inline]
// Wraps an already-validated table. Caller must ensure `table` really is an
// `Array` table (hence `unsafe` by convention in generated code).
pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
Array { _tab: table }
}
#[allow(unused_mut)]
// Serializes an `Array` table from `args` into `_fbb`. Fields are pushed in
// reverse declaration order, as flatc emits them.
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>,
args: &'args ArrayArgs<'args>
) -> flatbuffers::WIPOffset<Array<'bldr>> {
let mut builder = ArrayBuilder::new(_fbb);
if let Some(x) = args.buffers { builder.add_buffers(x); }
if let Some(x) = args.root { builder.add_root(x); }
builder.finish()
}
#[inline]
// Root node of the array tree; `None` when the field is absent.
pub fn root(&self) -> Option<ArrayNode<'a>> {
// SAFETY: `_tab` was constructed over a buffer in which this table (and its
// vtable) were verified or trusted at construction time.
unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<ArrayNode>>(Array::VT_ROOT, None)}
}
#[inline]
// Flat vector of `Buffer` descriptors; `None` when the field is absent.
pub fn buffers(&self) -> Option<flatbuffers::Vector<'a, Buffer>> {
// SAFETY: same invariant as `root` — the table location was validated.
unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, Buffer>>>(Array::VT_BUFFERS, None)}
}
}
// Structural verification of an `Array` table: checks the vtable, then each
// field (all optional — `false` = not required).
impl flatbuffers::Verifiable for Array<'_> {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier, pos: usize
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
v.visit_table(pos)?
.visit_field::<flatbuffers::ForwardsUOffset<ArrayNode>>("root", Self::VT_ROOT, false)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, Buffer>>>("buffers", Self::VT_BUFFERS, false)?
.finish();
Ok(())
}
}
/// Argument bundle for [`Array::create`]; every field is optional and
/// defaults to absent.
pub struct ArrayArgs<'a> {
    pub root: Option<flatbuffers::WIPOffset<ArrayNode<'a>>>,
    pub buffers: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, Buffer>>>,
}

impl<'a> Default for ArrayArgs<'a> {
    /// All fields `None` (omitted from the serialized table).
    #[inline]
    fn default() -> Self {
        Self { root: None, buffers: None }
    }
}
// In-progress builder for an `Array` table. `start_` marks the table start in
// the underlying `FlatBufferBuilder`; `finish` closes the table.
pub struct ArrayBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> {
fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>,
start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> ArrayBuilder<'a, 'b, A> {
#[inline]
// Records the `root` field. `push_slot_always` writes the slot even for a
// default value, since offsets have no scalar default.
pub fn add_root(&mut self, root: flatbuffers::WIPOffset<ArrayNode<'b >>) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<ArrayNode>>(Array::VT_ROOT, root);
}
#[inline]
// Records the `buffers` vector field.
pub fn add_buffers(&mut self, buffers: flatbuffers::WIPOffset<flatbuffers::Vector<'b , Buffer>>) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Array::VT_BUFFERS, buffers);
}
#[inline]
// Opens a new table in `_fbb`; must be paired with `finish`.
pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> ArrayBuilder<'a, 'b, A> {
let start = _fbb.start_table();
ArrayBuilder {
fbb_: _fbb,
start_: start,
}
}
#[inline]
// Closes the table and returns its offset for embedding or finishing.
pub fn finish(self) -> flatbuffers::WIPOffset<Array<'a>> {
let o = self.fbb_.end_table(self.start_);
flatbuffers::WIPOffset::new(o.value())
}
}
/// Debug output mirrors the table's field accessors (`root`, `buffers`).
impl core::fmt::Debug for Array<'_> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("Array")
            .field("root", &self.root())
            .field("buffers", &self.buffers())
            .finish()
    }
}
// Uninhabited marker type used to tag WIPOffsets pointing at `ArrayNode` tables.
pub enum ArrayNodeOffset {}
// `ArrayNode` table: one node in the (recursive) array tree; a zero-copy view.
#[derive(Copy, Clone, PartialEq)]
pub struct ArrayNode<'a> {
pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for ArrayNode<'a> {
type Inner = ArrayNode<'a>;
#[inline]
unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
// SAFETY: caller guarantees `loc` points at a valid table, per the
// `Follow::follow` contract.
Self { _tab: unsafe { flatbuffers::Table::new(buf, loc) } }
}
}
// Accessors and construction for the `ArrayNode` table.
// Fields (vtable offsets): encoding:u16 @ 4 (default 0), metadata:[u8] @ 6,
// children:[ArrayNode] @ 8, buffers:[u16] @ 10, stats:ArrayStats @ 12.
impl<'a> ArrayNode<'a> {
pub const VT_ENCODING: flatbuffers::VOffsetT = 4;
pub const VT_METADATA: flatbuffers::VOffsetT = 6;
pub const VT_CHILDREN: flatbuffers::VOffsetT = 8;
pub const VT_BUFFERS: flatbuffers::VOffsetT = 10;
pub const VT_STATS: flatbuffers::VOffsetT = 12;
#[inline]
// Wraps an already-validated table; caller must ensure it is an `ArrayNode`.
pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
ArrayNode { _tab: table }
}
#[allow(unused_mut)]
// Serializes an `ArrayNode` from `args`. Fields are pushed in reverse
// declaration order; scalar `encoding` last, with its default elided.
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>,
args: &'args ArrayNodeArgs<'args>
) -> flatbuffers::WIPOffset<ArrayNode<'bldr>> {
let mut builder = ArrayNodeBuilder::new(_fbb);
if let Some(x) = args.stats { builder.add_stats(x); }
if let Some(x) = args.buffers { builder.add_buffers(x); }
if let Some(x) = args.children { builder.add_children(x); }
if let Some(x) = args.metadata { builder.add_metadata(x); }
builder.add_encoding(args.encoding);
builder.finish()
}
#[inline]
// Encoding id of this node; defaults to 0 when the slot is absent.
pub fn encoding(&self) -> u16 {
// SAFETY: `_tab` points at a verified/trusted table; the `Some(0)` default
// makes the `unwrap` infallible.
unsafe { self._tab.get::<u16>(ArrayNode::VT_ENCODING, Some(0)).unwrap()}
}
#[inline]
// Opaque per-node metadata bytes; `None` when absent.
pub fn metadata(&self) -> Option<flatbuffers::Vector<'a, u8>> {
// SAFETY: `_tab` points at a verified/trusted table.
unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(ArrayNode::VT_METADATA, None)}
}
#[inline]
// Child nodes (the tree is recursive); `None` when absent.
pub fn children(&self) -> Option<flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<ArrayNode<'a>>>> {
// SAFETY: `_tab` points at a verified/trusted table.
unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<ArrayNode>>>>(ArrayNode::VT_CHILDREN, None)}
}
#[inline]
// Indices of this node's buffers — presumably into the parent `Array`'s
// `buffers` vector; TODO(review) confirm against the schema.
pub fn buffers(&self) -> Option<flatbuffers::Vector<'a, u16>> {
// SAFETY: `_tab` points at a verified/trusted table.
unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u16>>>(ArrayNode::VT_BUFFERS, None)}
}
#[inline]
// Optional statistics attached to this node.
pub fn stats(&self) -> Option<ArrayStats<'a>> {
// SAFETY: `_tab` points at a verified/trusted table.
unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<ArrayStats>>(ArrayNode::VT_STATS, None)}
}
}
// Structural verification of an `ArrayNode` table: vtable first, then each
// field (all optional — `false` = not required). Child tables are verified
// recursively through the `ForwardsUOffset<ArrayNode>` visit.
impl flatbuffers::Verifiable for ArrayNode<'_> {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier, pos: usize
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
v.visit_table(pos)?
.visit_field::<u16>("encoding", Self::VT_ENCODING, false)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u8>>>("metadata", Self::VT_METADATA, false)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, flatbuffers::ForwardsUOffset<ArrayNode>>>>("children", Self::VT_CHILDREN, false)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u16>>>("buffers", Self::VT_BUFFERS, false)?
.visit_field::<flatbuffers::ForwardsUOffset<ArrayStats>>("stats", Self::VT_STATS, false)?
.finish();
Ok(())
}
}
/// Argument bundle for [`ArrayNode::create`]. `encoding` is a plain scalar
/// (schema default 0); every other field is optional and defaults to absent.
pub struct ArrayNodeArgs<'a> {
    pub encoding: u16,
    pub metadata: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u8>>>,
    pub children: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<ArrayNode<'a>>>>>,
    pub buffers: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u16>>>,
    pub stats: Option<flatbuffers::WIPOffset<ArrayStats<'a>>>,
}

impl<'a> Default for ArrayNodeArgs<'a> {
    /// Schema defaults: `encoding == 0`, all offsets omitted.
    #[inline]
    fn default() -> Self {
        Self {
            encoding: 0,
            metadata: None,
            children: None,
            buffers: None,
            stats: None,
        }
    }
}
// In-progress builder for an `ArrayNode` table.
pub struct ArrayNodeBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> {
fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>,
start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> ArrayNodeBuilder<'a, 'b, A> {
#[inline]
// Scalar slot: `push_slot` elides the write when `encoding` equals the
// schema default (0), saving space.
pub fn add_encoding(&mut self, encoding: u16) {
self.fbb_.push_slot::<u16>(ArrayNode::VT_ENCODING, encoding, 0);
}
#[inline]
// Offset slots below use `push_slot_always`: offsets have no scalar default.
pub fn add_metadata(&mut self, metadata: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u8>>) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(ArrayNode::VT_METADATA, metadata);
}
#[inline]
pub fn add_children(&mut self, children: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<ArrayNode<'b >>>>) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(ArrayNode::VT_CHILDREN, children);
}
#[inline]
pub fn add_buffers(&mut self, buffers: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u16>>) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(ArrayNode::VT_BUFFERS, buffers);
}
#[inline]
pub fn add_stats(&mut self, stats: flatbuffers::WIPOffset<ArrayStats<'b >>) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<ArrayStats>>(ArrayNode::VT_STATS, stats);
}
#[inline]
// Opens a new table in `_fbb`; must be paired with `finish`.
pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> ArrayNodeBuilder<'a, 'b, A> {
let start = _fbb.start_table();
ArrayNodeBuilder {
fbb_: _fbb,
start_: start,
}
}
#[inline]
// Closes the table and returns its offset.
pub fn finish(self) -> flatbuffers::WIPOffset<ArrayNode<'a>> {
let o = self.fbb_.end_table(self.start_);
flatbuffers::WIPOffset::new(o.value())
}
}
/// Debug output mirrors the table's field accessors, in declaration order.
impl core::fmt::Debug for ArrayNode<'_> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("ArrayNode")
            .field("encoding", &self.encoding())
            .field("metadata", &self.metadata())
            .field("children", &self.children())
            .field("buffers", &self.buffers())
            .field("stats", &self.stats())
            .finish()
    }
}
// Uninhabited marker type used to tag WIPOffsets pointing at `ArrayStats` tables.
pub enum ArrayStatsOffset {}
// `ArrayStats` table: optional per-node statistics; a zero-copy view.
#[derive(Copy, Clone, PartialEq)]
pub struct ArrayStats<'a> {
pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for ArrayStats<'a> {
type Inner = ArrayStats<'a>;
#[inline]
unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
// SAFETY: caller guarantees `loc` points at a valid table, per the
// `Follow::follow` contract.
Self { _tab: unsafe { flatbuffers::Table::new(buf, loc) } }
}
}
// Accessors and construction for the `ArrayStats` table. Every field is
// optional; the `*_precision` scalars default to `Precision::Inexact`.
// min/max/sum are opaque byte vectors — presumably a serialized scalar value
// whose encoding depends on the array's type; TODO(review) confirm.
impl<'a> ArrayStats<'a> {
pub const VT_MIN: flatbuffers::VOffsetT = 4;
pub const VT_MIN_PRECISION: flatbuffers::VOffsetT = 6;
pub const VT_MAX: flatbuffers::VOffsetT = 8;
pub const VT_MAX_PRECISION: flatbuffers::VOffsetT = 10;
pub const VT_SUM: flatbuffers::VOffsetT = 12;
pub const VT_IS_SORTED: flatbuffers::VOffsetT = 14;
pub const VT_IS_STRICT_SORTED: flatbuffers::VOffsetT = 16;
pub const VT_IS_CONSTANT: flatbuffers::VOffsetT = 18;
pub const VT_NULL_COUNT: flatbuffers::VOffsetT = 20;
pub const VT_UNCOMPRESSED_SIZE_IN_BYTES: flatbuffers::VOffsetT = 22;
pub const VT_NAN_COUNT: flatbuffers::VOffsetT = 24;
#[inline]
// Wraps an already-validated table; caller must ensure it is an `ArrayStats`.
pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
ArrayStats { _tab: table }
}
#[allow(unused_mut)]
// Serializes an `ArrayStats` from `args`. Fields are pushed widest-first
// (u64s, then offsets, then bools, then the u8 precision scalars), as flatc
// emits them for alignment.
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>,
args: &'args ArrayStatsArgs<'args>
) -> flatbuffers::WIPOffset<ArrayStats<'bldr>> {
let mut builder = ArrayStatsBuilder::new(_fbb);
if let Some(x) = args.nan_count { builder.add_nan_count(x); }
if let Some(x) = args.uncompressed_size_in_bytes { builder.add_uncompressed_size_in_bytes(x); }
if let Some(x) = args.null_count { builder.add_null_count(x); }
if let Some(x) = args.sum { builder.add_sum(x); }
if let Some(x) = args.max { builder.add_max(x); }
if let Some(x) = args.min { builder.add_min(x); }
if let Some(x) = args.is_constant { builder.add_is_constant(x); }
if let Some(x) = args.is_strict_sorted { builder.add_is_strict_sorted(x); }
if let Some(x) = args.is_sorted { builder.add_is_sorted(x); }
builder.add_max_precision(args.max_precision);
builder.add_min_precision(args.min_precision);
builder.finish()
}
#[inline]
pub fn min(&self) -> Option<flatbuffers::Vector<'a, u8>> {
// SAFETY: `_tab` points at a verified/trusted table (same for all getters).
unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(ArrayStats::VT_MIN, None)}
}
#[inline]
// Defaults to `Inexact` when absent; the default makes `unwrap` infallible.
pub fn min_precision(&self) -> Precision {
unsafe { self._tab.get::<Precision>(ArrayStats::VT_MIN_PRECISION, Some(Precision::Inexact)).unwrap()}
}
#[inline]
pub fn max(&self) -> Option<flatbuffers::Vector<'a, u8>> {
unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(ArrayStats::VT_MAX, None)}
}
#[inline]
// Defaults to `Inexact` when absent; the default makes `unwrap` infallible.
pub fn max_precision(&self) -> Precision {
unsafe { self._tab.get::<Precision>(ArrayStats::VT_MAX_PRECISION, Some(Precision::Inexact)).unwrap()}
}
#[inline]
pub fn sum(&self) -> Option<flatbuffers::Vector<'a, u8>> {
unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(ArrayStats::VT_SUM, None)}
}
#[inline]
// Tri-state bools: `None` means "unknown / not recorded".
pub fn is_sorted(&self) -> Option<bool> {
unsafe { self._tab.get::<bool>(ArrayStats::VT_IS_SORTED, None)}
}
#[inline]
pub fn is_strict_sorted(&self) -> Option<bool> {
unsafe { self._tab.get::<bool>(ArrayStats::VT_IS_STRICT_SORTED, None)}
}
#[inline]
pub fn is_constant(&self) -> Option<bool> {
unsafe { self._tab.get::<bool>(ArrayStats::VT_IS_CONSTANT, None)}
}
#[inline]
pub fn null_count(&self) -> Option<u64> {
unsafe { self._tab.get::<u64>(ArrayStats::VT_NULL_COUNT, None)}
}
#[inline]
pub fn uncompressed_size_in_bytes(&self) -> Option<u64> {
unsafe { self._tab.get::<u64>(ArrayStats::VT_UNCOMPRESSED_SIZE_IN_BYTES, None)}
}
#[inline]
pub fn nan_count(&self) -> Option<u64> {
unsafe { self._tab.get::<u64>(ArrayStats::VT_NAN_COUNT, None)}
}
}
// Structural verification of an `ArrayStats` table: vtable first, then each
// field (all optional — `false` = not required).
impl flatbuffers::Verifiable for ArrayStats<'_> {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier, pos: usize
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
v.visit_table(pos)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u8>>>("min", Self::VT_MIN, false)?
.visit_field::<Precision>("min_precision", Self::VT_MIN_PRECISION, false)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u8>>>("max", Self::VT_MAX, false)?
.visit_field::<Precision>("max_precision", Self::VT_MAX_PRECISION, false)?
.visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u8>>>("sum", Self::VT_SUM, false)?
.visit_field::<bool>("is_sorted", Self::VT_IS_SORTED, false)?
.visit_field::<bool>("is_strict_sorted", Self::VT_IS_STRICT_SORTED, false)?
.visit_field::<bool>("is_constant", Self::VT_IS_CONSTANT, false)?
.visit_field::<u64>("null_count", Self::VT_NULL_COUNT, false)?
.visit_field::<u64>("uncompressed_size_in_bytes", Self::VT_UNCOMPRESSED_SIZE_IN_BYTES, false)?
.visit_field::<u64>("nan_count", Self::VT_NAN_COUNT, false)?
.finish();
Ok(())
}
}
/// Argument bundle for [`ArrayStats::create`]. The two `Precision` scalars
/// carry the schema default `Inexact`; every other field is optional and
/// defaults to absent.
pub struct ArrayStatsArgs<'a> {
    pub min: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u8>>>,
    pub min_precision: Precision,
    pub max: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u8>>>,
    pub max_precision: Precision,
    pub sum: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u8>>>,
    pub is_sorted: Option<bool>,
    pub is_strict_sorted: Option<bool>,
    pub is_constant: Option<bool>,
    pub null_count: Option<u64>,
    pub uncompressed_size_in_bytes: Option<u64>,
    pub nan_count: Option<u64>,
}

impl<'a> Default for ArrayStatsArgs<'a> {
    /// Schema defaults: precisions `Inexact`, everything else omitted.
    #[inline]
    fn default() -> Self {
        Self {
            min: None,
            min_precision: Precision::Inexact,
            max: None,
            max_precision: Precision::Inexact,
            sum: None,
            is_sorted: None,
            is_strict_sorted: None,
            is_constant: None,
            null_count: None,
            uncompressed_size_in_bytes: None,
            nan_count: None,
        }
    }
}
// In-progress builder for an `ArrayStats` table.
pub struct ArrayStatsBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> {
fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>,
start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> ArrayStatsBuilder<'a, 'b, A> {
#[inline]
// Offset slots use `push_slot_always` (offsets have no scalar default);
// the two precision scalars use `push_slot`, eliding the schema default
// `Inexact`; the bool/u64 fields are optional, so they are also written
// unconditionally once the caller supplies a value.
pub fn add_min(&mut self, min: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u8>>) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(ArrayStats::VT_MIN, min);
}
#[inline]
pub fn add_min_precision(&mut self, min_precision: Precision) {
self.fbb_.push_slot::<Precision>(ArrayStats::VT_MIN_PRECISION, min_precision, Precision::Inexact);
}
#[inline]
pub fn add_max(&mut self, max: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u8>>) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(ArrayStats::VT_MAX, max);
}
#[inline]
pub fn add_max_precision(&mut self, max_precision: Precision) {
self.fbb_.push_slot::<Precision>(ArrayStats::VT_MAX_PRECISION, max_precision, Precision::Inexact);
}
#[inline]
pub fn add_sum(&mut self, sum: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u8>>) {
self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(ArrayStats::VT_SUM, sum);
}
#[inline]
pub fn add_is_sorted(&mut self, is_sorted: bool) {
self.fbb_.push_slot_always::<bool>(ArrayStats::VT_IS_SORTED, is_sorted);
}
#[inline]
pub fn add_is_strict_sorted(&mut self, is_strict_sorted: bool) {
self.fbb_.push_slot_always::<bool>(ArrayStats::VT_IS_STRICT_SORTED, is_strict_sorted);
}
#[inline]
pub fn add_is_constant(&mut self, is_constant: bool) {
self.fbb_.push_slot_always::<bool>(ArrayStats::VT_IS_CONSTANT, is_constant);
}
#[inline]
pub fn add_null_count(&mut self, null_count: u64) {
self.fbb_.push_slot_always::<u64>(ArrayStats::VT_NULL_COUNT, null_count);
}
#[inline]
pub fn add_uncompressed_size_in_bytes(&mut self, uncompressed_size_in_bytes: u64) {
self.fbb_.push_slot_always::<u64>(ArrayStats::VT_UNCOMPRESSED_SIZE_IN_BYTES, uncompressed_size_in_bytes);
}
#[inline]
pub fn add_nan_count(&mut self, nan_count: u64) {
self.fbb_.push_slot_always::<u64>(ArrayStats::VT_NAN_COUNT, nan_count);
}
#[inline]
// Opens a new table in `_fbb`; must be paired with `finish`.
pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> ArrayStatsBuilder<'a, 'b, A> {
let start = _fbb.start_table();
ArrayStatsBuilder {
fbb_: _fbb,
start_: start,
}
}
#[inline]
// Closes the table and returns its offset.
pub fn finish(self) -> flatbuffers::WIPOffset<ArrayStats<'a>> {
let o = self.fbb_.end_table(self.start_);
flatbuffers::WIPOffset::new(o.value())
}
}
/// Debug output mirrors the table's field accessors, in declaration order.
impl core::fmt::Debug for ArrayStats<'_> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("ArrayStats")
            .field("min", &self.min())
            .field("min_precision", &self.min_precision())
            .field("max", &self.max())
            .field("max_precision", &self.max_precision())
            .field("sum", &self.sum())
            .field("is_sorted", &self.is_sorted())
            .field("is_strict_sorted", &self.is_strict_sorted())
            .field("is_constant", &self.is_constant())
            .field("null_count", &self.null_count())
            .field("uncompressed_size_in_bytes", &self.uncompressed_size_in_bytes())
            .field("nan_count", &self.nan_count())
            .finish()
    }
}
#[inline]
// Verifies `buf` (default verifier options) and returns the root `Array`.
pub fn root_as_array(buf: &[u8]) -> Result<Array, flatbuffers::InvalidFlatbuffer> {
flatbuffers::root::<Array>(buf)
}
#[inline]
// As `root_as_array`, but for buffers carrying a leading size prefix.
pub fn size_prefixed_root_as_array(buf: &[u8]) -> Result<Array, flatbuffers::InvalidFlatbuffer> {
flatbuffers::size_prefixed_root::<Array>(buf)
}
#[inline]
// As `root_as_array`, with caller-supplied verifier options (e.g. depth or
// table-count limits).
pub fn root_as_array_with_opts<'b, 'o>(
opts: &'o flatbuffers::VerifierOptions,
buf: &'b [u8],
) -> Result<Array<'b>, flatbuffers::InvalidFlatbuffer> {
flatbuffers::root_with_opts::<Array<'b>>(opts, buf)
}
#[inline]
// Size-prefixed variant of `root_as_array_with_opts`.
pub fn size_prefixed_root_as_array_with_opts<'b, 'o>(
opts: &'o flatbuffers::VerifierOptions,
buf: &'b [u8],
) -> Result<Array<'b>, flatbuffers::InvalidFlatbuffer> {
flatbuffers::size_prefixed_root_with_opts::<Array<'b>>(opts, buf)
}
#[inline]
// # Safety
// Skips verification entirely; caller must guarantee `buf` contains a valid
// flatbuffer whose root is an `Array`.
pub unsafe fn root_as_array_unchecked(buf: &[u8]) -> Array {
unsafe { flatbuffers::root_unchecked::<Array>(buf) }
}
#[inline]
// # Safety
// As `root_as_array_unchecked`, for size-prefixed buffers.
pub unsafe fn size_prefixed_root_as_array_unchecked(buf: &[u8]) -> Array {
unsafe { flatbuffers::size_prefixed_root_unchecked::<Array>(buf) }
}
#[inline]
// Finalizes the builder with `root` as the buffer root (no file identifier).
pub fn finish_array_buffer<'a, 'b, A: flatbuffers::Allocator + 'a>(
fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>,
root: flatbuffers::WIPOffset<Array<'a>>) {
fbb.finish(root, None);
}
#[inline]
// As `finish_array_buffer`, prepending a size prefix (no file identifier).
pub fn finish_size_prefixed_array_buffer<'a, 'b, A: flatbuffers::Allocator + 'a>(fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, root: flatbuffers::WIPOffset<Array<'a>>) {
fbb.finish_size_prefixed(root, None);
}