#![allow(dead_code)]
#![allow(unused_imports)]
use crate::ipc::gen::Schema::*;
use crate::ipc::gen::Tensor::*;
use flatbuffers::EndianScalar;
use std::{cmp::Ordering, mem};
/// Axis along which a CSX sparse matrix index is compressed: `Row`
/// (CSR-style layout) or `Column` (CSC-style). Stored in the flatbuffer
/// as an `i16` scalar.
#[allow(non_camel_case_types)]
#[repr(i16)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub enum SparseMatrixCompressedAxis {
    Row = 0,
    Column = 1,
}
// Smallest / largest declared discriminants; emitted by the flatbuffers
// generator for range bookkeeping (unused here, hence the file-level
// allow(dead_code)).
const ENUM_MIN_SPARSE_MATRIX_COMPRESSED_AXIS: i16 = 0;
const ENUM_MAX_SPARSE_MATRIX_COMPRESSED_AXIS: i16 = 1;
impl<'a> flatbuffers::Follow<'a> for SparseMatrixCompressedAxis {
    type Inner = Self;
    /// Reads the enum scalar located at `loc` inside `buf`.
    ///
    /// NOTE(review): `read_scalar_at` reinterprets raw buffer bytes as the
    /// enum, so it assumes the buffer holds a valid discriminant (0 or 1) —
    /// confirm buffers are validated upstream before following.
    #[inline]
    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
        flatbuffers::read_scalar_at::<Self>(buf, loc)
    }
}
impl flatbuffers::EndianScalar for SparseMatrixCompressedAxis {
    /// Converts the scalar to little-endian byte order for serialization.
    ///
    /// NOTE(review): on a big-endian target `i16::to_le` byte-swaps the
    /// discriminant and the swapped value (e.g. 256 for `Column`) is then
    /// reinterpreted as the enum via the pointer cast, even though it is not
    /// a declared variant. This mirrors the standard flatbuffers generated
    /// pattern, which relies on the value only round-tripping through
    /// `from_little_endian` before use — confirm against the flatbuffers
    /// crate's `EndianScalar` contract.
    #[inline]
    fn to_little_endian(self) -> Self {
        let n = i16::to_le(self as i16);
        // SAFETY: `Self` is `repr(i16)`, so the cast reinterprets the two
        // bytes of `n` as the enum's in-memory representation.
        let p = &n as *const i16 as *const SparseMatrixCompressedAxis;
        unsafe { *p }
    }
    /// Converts the scalar from little-endian back to native byte order.
    #[inline]
    fn from_little_endian(self) -> Self {
        let n = i16::from_le(self as i16);
        // SAFETY: see to_little_endian — same repr(i16) reinterpretation.
        let p = &n as *const i16 as *const SparseMatrixCompressedAxis;
        unsafe { *p }
    }
}
impl flatbuffers::Push for SparseMatrixCompressedAxis {
    type Output = SparseMatrixCompressedAxis;
    /// Writes the enum's scalar representation into `dst` (endianness is
    /// handled inside `emplace_scalar`).
    #[inline]
    fn push(&self, dst: &mut [u8], _rest: &[u8]) {
        flatbuffers::emplace_scalar::<SparseMatrixCompressedAxis>(dst, *self);
    }
}
// All declared variants, indexed by discriminant.
#[allow(non_camel_case_types)]
const ENUM_VALUES_SPARSE_MATRIX_COMPRESSED_AXIS: [SparseMatrixCompressedAxis; 2] = [
    SparseMatrixCompressedAxis::Row,
    SparseMatrixCompressedAxis::Column,
];
// Variant names, parallel to ENUM_VALUES_SPARSE_MATRIX_COMPRESSED_AXIS.
#[allow(non_camel_case_types)]
const ENUM_NAMES_SPARSE_MATRIX_COMPRESSED_AXIS: [&'static str; 2] = ["Row", "Column"];
pub fn enum_name_sparse_matrix_compressed_axis(
e: SparseMatrixCompressedAxis,
) -> &'static str {
let index = e as i16;
ENUM_NAMES_SPARSE_MATRIX_COMPRESSED_AXIS[index as usize]
}
/// Union discriminant for the `SparseTensor.sparseIndex` field; `NONE`
/// marks an unset union. Stored in the flatbuffer as a `u8` scalar.
#[allow(non_camel_case_types)]
#[repr(u8)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub enum SparseTensorIndex {
    NONE = 0,
    SparseTensorIndexCOO = 1,
    SparseMatrixIndexCSX = 2,
}
// Smallest / largest declared discriminants (generator bookkeeping).
const ENUM_MIN_SPARSE_TENSOR_INDEX: u8 = 0;
const ENUM_MAX_SPARSE_TENSOR_INDEX: u8 = 2;
impl<'a> flatbuffers::Follow<'a> for SparseTensorIndex {
    type Inner = Self;
    /// Reads the union-discriminant byte at `loc` inside `buf`.
    ///
    /// NOTE(review): `read_scalar_at` reinterprets the raw byte as the enum;
    /// it assumes the buffer holds a valid discriminant (0..=2) — confirm
    /// buffers are validated upstream before following.
    #[inline]
    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
        flatbuffers::read_scalar_at::<Self>(buf, loc)
    }
}
impl flatbuffers::EndianScalar for SparseTensorIndex {
    /// Converts to little-endian order. `u8::to_le` on a one-byte scalar is
    /// the identity, so this is effectively a no-op on every target.
    #[inline]
    fn to_little_endian(self) -> Self {
        let n = u8::to_le(self as u8);
        // SAFETY: `Self` is `repr(u8)` and `n` is the unchanged discriminant
        // of a valid variant, so the reinterpreting read is sound.
        let p = &n as *const u8 as *const SparseTensorIndex;
        unsafe { *p }
    }
    /// Converts from little-endian order; identity for a one-byte scalar.
    #[inline]
    fn from_little_endian(self) -> Self {
        let n = u8::from_le(self as u8);
        // SAFETY: same as to_little_endian — `n` is a valid discriminant.
        let p = &n as *const u8 as *const SparseTensorIndex;
        unsafe { *p }
    }
}
impl flatbuffers::Push for SparseTensorIndex {
    type Output = SparseTensorIndex;
    /// Writes the discriminant byte into `dst` (endianness handled inside
    /// `emplace_scalar`; trivial for a one-byte scalar).
    #[inline]
    fn push(&self, dst: &mut [u8], _rest: &[u8]) {
        flatbuffers::emplace_scalar::<SparseTensorIndex>(dst, *self);
    }
}
// All declared variants, indexed by discriminant.
#[allow(non_camel_case_types)]
const ENUM_VALUES_SPARSE_TENSOR_INDEX: [SparseTensorIndex; 3] = [
    SparseTensorIndex::NONE,
    SparseTensorIndex::SparseTensorIndexCOO,
    SparseTensorIndex::SparseMatrixIndexCSX,
];
// Variant names, parallel to ENUM_VALUES_SPARSE_TENSOR_INDEX.
#[allow(non_camel_case_types)]
const ENUM_NAMES_SPARSE_TENSOR_INDEX: [&'static str; 3] =
    ["NONE", "SparseTensorIndexCOO", "SparseMatrixIndexCSX"];
pub fn enum_name_sparse_tensor_index(e: SparseTensorIndex) -> &'static str {
let index = e as u8;
ENUM_NAMES_SPARSE_TENSOR_INDEX[index as usize]
}
// Generated marker type for the SparseTensorIndex union's table-offset
// form; not used directly by the accessors in this file.
pub struct SparseTensorIndexUnionTableOffset {}
// Uninhabited marker used only to type WIPOffsets of this table.
pub enum SparseTensorIndexCOOOffset {}
/// Read-only accessor over a `SparseTensorIndexCOO` (coordinate-format
/// sparse index) table embedded in a flatbuffer.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct SparseTensorIndexCOO<'a> {
    pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for SparseTensorIndexCOO<'a> {
type Inner = SparseTensorIndexCOO<'a>;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
Self {
_tab: flatbuffers::Table { buf: buf, loc: loc },
}
}
}
impl<'a> SparseTensorIndexCOO<'a> {
    /// Wraps an already-located flatbuffer table as a `SparseTensorIndexCOO`.
    #[inline]
    pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
        SparseTensorIndexCOO { _tab: table }
    }
    /// Serializes a COO index table into `_fbb` from `args` and returns its
    /// offset. Fields are pushed in descending vtable-offset order — the
    /// flatbuffers generated-code convention; do not reorder, the serialized
    /// layout depends on it.
    #[allow(unused_mut)]
    pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
        _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
        args: &'args SparseTensorIndexCOOArgs<'args>,
    ) -> flatbuffers::WIPOffset<SparseTensorIndexCOO<'bldr>> {
        let mut builder = SparseTensorIndexCOOBuilder::new(_fbb);
        if let Some(x) = args.indicesBuffer {
            builder.add_indicesBuffer(x);
        }
        if let Some(x) = args.indicesStrides {
            builder.add_indicesStrides(x);
        }
        if let Some(x) = args.indicesType {
            builder.add_indicesType(x);
        }
        builder.finish()
    }
    // Vtable slot offsets for this table's fields.
    pub const VT_INDICESTYPE: flatbuffers::VOffsetT = 4;
    pub const VT_INDICESSTRIDES: flatbuffers::VOffsetT = 6;
    pub const VT_INDICESBUFFER: flatbuffers::VOffsetT = 8;
    /// `Int` type metadata describing the indices; `None` if the field is
    /// absent from the table.
    #[inline]
    pub fn indicesType(&self) -> Option<Int<'a>> {
        self._tab.get::<flatbuffers::ForwardsUOffset<Int<'a>>>(
            SparseTensorIndexCOO::VT_INDICESTYPE,
            None,
        )
    }
    /// Strides vector for the indices tensor; `None` if absent.
    #[inline]
    pub fn indicesStrides(&self) -> Option<flatbuffers::Vector<'a, i64>> {
        self._tab
            .get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, i64>>>(
                SparseTensorIndexCOO::VT_INDICESSTRIDES,
                None,
            )
    }
    /// Buffer location of the COO indices data; `None` if absent.
    #[inline]
    pub fn indicesBuffer(&self) -> Option<&'a Buffer> {
        self._tab
            .get::<Buffer>(SparseTensorIndexCOO::VT_INDICESBUFFER, None)
    }
}
/// Argument bundle for `SparseTensorIndexCOO::create`; every field is
/// optional and absent fields are simply not written to the table.
pub struct SparseTensorIndexCOOArgs<'a> {
    pub indicesType: Option<flatbuffers::WIPOffset<Int<'a>>>,
    pub indicesStrides: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, i64>>>,
    pub indicesBuffer: Option<&'a Buffer>,
}
impl<'a> Default for SparseTensorIndexCOOArgs<'a> {
    /// All fields absent.
    #[inline]
    fn default() -> Self {
        SparseTensorIndexCOOArgs {
            indicesType: None,
            indicesStrides: None,
            indicesBuffer: None,
        }
    }
}
/// Incrementally assembles a `SparseTensorIndexCOO` table on a
/// `FlatBufferBuilder`: obtain via `new`, add fields, then call `finish`.
pub struct SparseTensorIndexCOOBuilder<'a: 'b, 'b> {
    fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> SparseTensorIndexCOOBuilder<'a, 'b> {
    // All add_* methods use push_slot_always, i.e. the slot is written even
    // if the value happens to equal a schema default.
    #[inline]
    pub fn add_indicesType(&mut self, indicesType: flatbuffers::WIPOffset<Int<'b>>) {
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Int>>(
            SparseTensorIndexCOO::VT_INDICESTYPE,
            indicesType,
        );
    }
    #[inline]
    pub fn add_indicesStrides(
        &mut self,
        indicesStrides: flatbuffers::WIPOffset<flatbuffers::Vector<'b, i64>>,
    ) {
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(
            SparseTensorIndexCOO::VT_INDICESSTRIDES,
            indicesStrides,
        );
    }
    #[inline]
    pub fn add_indicesBuffer(&mut self, indicesBuffer: &'b Buffer) {
        self.fbb_.push_slot_always::<&Buffer>(
            SparseTensorIndexCOO::VT_INDICESBUFFER,
            indicesBuffer,
        );
    }
    /// Starts a new table on `_fbb` and returns a builder for it.
    #[inline]
    pub fn new(
        _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    ) -> SparseTensorIndexCOOBuilder<'a, 'b> {
        let start = _fbb.start_table();
        SparseTensorIndexCOOBuilder {
            fbb_: _fbb,
            start_: start,
        }
    }
    /// Ends the table and returns its offset within the buffer.
    #[inline]
    pub fn finish(self) -> flatbuffers::WIPOffset<SparseTensorIndexCOO<'a>> {
        let o = self.fbb_.end_table(self.start_);
        flatbuffers::WIPOffset::new(o.value())
    }
}
// Uninhabited marker used only to type WIPOffsets of this table.
pub enum SparseMatrixIndexCSXOffset {}
/// Read-only accessor over a `SparseMatrixIndexCSX` table — a compressed
/// sparse row/column index; which axis is compressed is given by
/// `compressedAxis`.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct SparseMatrixIndexCSX<'a> {
    pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for SparseMatrixIndexCSX<'a> {
type Inner = SparseMatrixIndexCSX<'a>;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
Self {
_tab: flatbuffers::Table { buf: buf, loc: loc },
}
}
}
impl<'a> SparseMatrixIndexCSX<'a> {
    /// Wraps an already-located flatbuffer table as a `SparseMatrixIndexCSX`.
    #[inline]
    pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
        SparseMatrixIndexCSX { _tab: table }
    }
    /// Serializes a CSX index table into `_fbb` from `args` and returns its
    /// offset. Offset fields are pushed in descending vtable order and the
    /// 2-byte `compressedAxis` scalar last — the flatbuffers generated-code
    /// convention; do not reorder.
    #[allow(unused_mut)]
    pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
        _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
        args: &'args SparseMatrixIndexCSXArgs<'args>,
    ) -> flatbuffers::WIPOffset<SparseMatrixIndexCSX<'bldr>> {
        let mut builder = SparseMatrixIndexCSXBuilder::new(_fbb);
        if let Some(x) = args.indicesBuffer {
            builder.add_indicesBuffer(x);
        }
        if let Some(x) = args.indicesType {
            builder.add_indicesType(x);
        }
        if let Some(x) = args.indptrBuffer {
            builder.add_indptrBuffer(x);
        }
        if let Some(x) = args.indptrType {
            builder.add_indptrType(x);
        }
        builder.add_compressedAxis(args.compressedAxis);
        builder.finish()
    }
    // Vtable slot offsets for this table's fields.
    pub const VT_COMPRESSEDAXIS: flatbuffers::VOffsetT = 4;
    pub const VT_INDPTRTYPE: flatbuffers::VOffsetT = 6;
    pub const VT_INDPTRBUFFER: flatbuffers::VOffsetT = 8;
    pub const VT_INDICESTYPE: flatbuffers::VOffsetT = 10;
    pub const VT_INDICESBUFFER: flatbuffers::VOffsetT = 12;
    /// Which axis is compressed; falls back to the schema default `Row`
    /// when the slot is unset, so the `unwrap` cannot fail.
    #[inline]
    pub fn compressedAxis(&self) -> SparseMatrixCompressedAxis {
        self._tab
            .get::<SparseMatrixCompressedAxis>(
                SparseMatrixIndexCSX::VT_COMPRESSEDAXIS,
                Some(SparseMatrixCompressedAxis::Row),
            )
            .unwrap()
    }
    /// `Int` type metadata for the indptr values; `None` if absent.
    #[inline]
    pub fn indptrType(&self) -> Option<Int<'a>> {
        self._tab.get::<flatbuffers::ForwardsUOffset<Int<'a>>>(
            SparseMatrixIndexCSX::VT_INDPTRTYPE,
            None,
        )
    }
    /// Buffer location of the indptr data; `None` if absent.
    #[inline]
    pub fn indptrBuffer(&self) -> Option<&'a Buffer> {
        self._tab
            .get::<Buffer>(SparseMatrixIndexCSX::VT_INDPTRBUFFER, None)
    }
    /// `Int` type metadata for the indices values; `None` if absent.
    #[inline]
    pub fn indicesType(&self) -> Option<Int<'a>> {
        self._tab.get::<flatbuffers::ForwardsUOffset<Int<'a>>>(
            SparseMatrixIndexCSX::VT_INDICESTYPE,
            None,
        )
    }
    /// Buffer location of the indices data; `None` if absent.
    #[inline]
    pub fn indicesBuffer(&self) -> Option<&'a Buffer> {
        self._tab
            .get::<Buffer>(SparseMatrixIndexCSX::VT_INDICESBUFFER, None)
    }
}
/// Argument bundle for `SparseMatrixIndexCSX::create`. `compressedAxis`
/// is a plain scalar (schema default `Row`); the remaining fields are
/// optional and absent fields are not written.
pub struct SparseMatrixIndexCSXArgs<'a> {
    pub compressedAxis: SparseMatrixCompressedAxis,
    pub indptrType: Option<flatbuffers::WIPOffset<Int<'a>>>,
    pub indptrBuffer: Option<&'a Buffer>,
    pub indicesType: Option<flatbuffers::WIPOffset<Int<'a>>>,
    pub indicesBuffer: Option<&'a Buffer>,
}
impl<'a> Default for SparseMatrixIndexCSXArgs<'a> {
    /// `Row` axis (the schema default), all other fields absent.
    #[inline]
    fn default() -> Self {
        SparseMatrixIndexCSXArgs {
            compressedAxis: SparseMatrixCompressedAxis::Row,
            indptrType: None,
            indptrBuffer: None,
            indicesType: None,
            indicesBuffer: None,
        }
    }
}
/// Incrementally assembles a `SparseMatrixIndexCSX` table on a
/// `FlatBufferBuilder`: obtain via `new`, add fields, then call `finish`.
pub struct SparseMatrixIndexCSXBuilder<'a: 'b, 'b> {
    fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> SparseMatrixIndexCSXBuilder<'a, 'b> {
    /// Sets the compressed axis. Uses `push_slot` with the schema default
    /// (`Row`), so the slot may be elided when the value equals the default.
    #[inline]
    pub fn add_compressedAxis(&mut self, compressedAxis: SparseMatrixCompressedAxis) {
        self.fbb_.push_slot::<SparseMatrixCompressedAxis>(
            SparseMatrixIndexCSX::VT_COMPRESSEDAXIS,
            compressedAxis,
            SparseMatrixCompressedAxis::Row,
        );
    }
    // The remaining add_* methods use push_slot_always: the slot is written
    // unconditionally.
    #[inline]
    pub fn add_indptrType(&mut self, indptrType: flatbuffers::WIPOffset<Int<'b>>) {
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Int>>(
            SparseMatrixIndexCSX::VT_INDPTRTYPE,
            indptrType,
        );
    }
    #[inline]
    pub fn add_indptrBuffer(&mut self, indptrBuffer: &'b Buffer) {
        self.fbb_.push_slot_always::<&Buffer>(
            SparseMatrixIndexCSX::VT_INDPTRBUFFER,
            indptrBuffer,
        );
    }
    #[inline]
    pub fn add_indicesType(&mut self, indicesType: flatbuffers::WIPOffset<Int<'b>>) {
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Int>>(
            SparseMatrixIndexCSX::VT_INDICESTYPE,
            indicesType,
        );
    }
    #[inline]
    pub fn add_indicesBuffer(&mut self, indicesBuffer: &'b Buffer) {
        self.fbb_.push_slot_always::<&Buffer>(
            SparseMatrixIndexCSX::VT_INDICESBUFFER,
            indicesBuffer,
        );
    }
    /// Starts a new table on `_fbb` and returns a builder for it.
    #[inline]
    pub fn new(
        _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    ) -> SparseMatrixIndexCSXBuilder<'a, 'b> {
        let start = _fbb.start_table();
        SparseMatrixIndexCSXBuilder {
            fbb_: _fbb,
            start_: start,
        }
    }
    /// Ends the table and returns its offset within the buffer.
    #[inline]
    pub fn finish(self) -> flatbuffers::WIPOffset<SparseMatrixIndexCSX<'a>> {
        let o = self.fbb_.end_table(self.start_);
        flatbuffers::WIPOffset::new(o.value())
    }
}
// Uninhabited marker used only to type WIPOffsets of this table.
pub enum SparseTensorOffset {}
/// Read-only accessor over a `SparseTensor` table: value type, shape,
/// non-zero count, a sparse-index union, and the value data buffer.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct SparseTensor<'a> {
    pub _tab: flatbuffers::Table<'a>,
}
impl<'a> flatbuffers::Follow<'a> for SparseTensor<'a> {
type Inner = SparseTensor<'a>;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
Self {
_tab: flatbuffers::Table { buf: buf, loc: loc },
}
}
}
impl<'a> SparseTensor<'a> {
    /// Wraps an already-located flatbuffer table as a `SparseTensor`.
    #[inline]
    pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
        SparseTensor { _tab: table }
    }
    /// Serializes a sparse-tensor table into `_fbb` from `args` and returns
    /// its offset. Fields are pushed largest-first (8-byte scalar, then
    /// offsets, then the small union discriminants) — the flatbuffers
    /// generated-code convention; do not reorder.
    #[allow(unused_mut)]
    pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
        _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
        args: &'args SparseTensorArgs<'args>,
    ) -> flatbuffers::WIPOffset<SparseTensor<'bldr>> {
        let mut builder = SparseTensorBuilder::new(_fbb);
        builder.add_non_zero_length(args.non_zero_length);
        if let Some(x) = args.data {
            builder.add_data(x);
        }
        if let Some(x) = args.sparseIndex {
            builder.add_sparseIndex(x);
        }
        if let Some(x) = args.shape {
            builder.add_shape(x);
        }
        if let Some(x) = args.type_ {
            builder.add_type_(x);
        }
        builder.add_sparseIndex_type(args.sparseIndex_type);
        builder.add_type_type(args.type_type);
        builder.finish()
    }
    // Vtable slot offsets for this table's fields.
    pub const VT_TYPE_TYPE: flatbuffers::VOffsetT = 4;
    pub const VT_TYPE_: flatbuffers::VOffsetT = 6;
    pub const VT_SHAPE: flatbuffers::VOffsetT = 8;
    pub const VT_NON_ZERO_LENGTH: flatbuffers::VOffsetT = 10;
    pub const VT_SPARSEINDEX_TYPE: flatbuffers::VOffsetT = 12;
    pub const VT_SPARSEINDEX: flatbuffers::VOffsetT = 14;
    pub const VT_DATA: flatbuffers::VOffsetT = 16;
    /// Discriminant of the `type` union; `Type::NONE` when unset, so the
    /// `unwrap` cannot fail.
    #[inline]
    pub fn type_type(&self) -> Type {
        self._tab
            .get::<Type>(SparseTensor::VT_TYPE_TYPE, Some(Type::NONE))
            .unwrap()
    }
    /// Raw union table for the `type` field; interpret via `type_type()`
    /// or the typed `type_as_*` accessors below.
    #[inline]
    pub fn type_(&self) -> Option<flatbuffers::Table<'a>> {
        self._tab
            .get::<flatbuffers::ForwardsUOffset<flatbuffers::Table<'a>>>(
                SparseTensor::VT_TYPE_,
                None,
            )
    }
    /// Per-dimension metadata vector; `None` if absent.
    #[inline]
    pub fn shape(
        &self,
    ) -> Option<flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<TensorDim<'a>>>>
    {
        self._tab.get::<flatbuffers::ForwardsUOffset<
            flatbuffers::Vector<flatbuffers::ForwardsUOffset<TensorDim<'a>>>,
        >>(SparseTensor::VT_SHAPE, None)
    }
    /// Number of non-zero values; defaults to 0 when the slot is unset.
    #[inline]
    pub fn non_zero_length(&self) -> i64 {
        self._tab
            .get::<i64>(SparseTensor::VT_NON_ZERO_LENGTH, Some(0))
            .unwrap()
    }
    /// Discriminant of the `sparseIndex` union; `NONE` when unset.
    #[inline]
    pub fn sparseIndex_type(&self) -> SparseTensorIndex {
        self._tab
            .get::<SparseTensorIndex>(
                SparseTensor::VT_SPARSEINDEX_TYPE,
                Some(SparseTensorIndex::NONE),
            )
            .unwrap()
    }
    /// Raw union table for the `sparseIndex` field; interpret via
    /// `sparseIndex_type()` or the typed `sparseIndex_as_*` accessors.
    #[inline]
    pub fn sparseIndex(&self) -> Option<flatbuffers::Table<'a>> {
        self._tab
            .get::<flatbuffers::ForwardsUOffset<flatbuffers::Table<'a>>>(
                SparseTensor::VT_SPARSEINDEX,
                None,
            )
    }
    /// Buffer location of the non-zero value data; `None` if absent.
    #[inline]
    pub fn data(&self) -> Option<&'a Buffer> {
        self._tab.get::<Buffer>(SparseTensor::VT_DATA, None)
    }
    // Typed accessors for the `type` union. Each returns `Some` only when
    // the stored discriminant matches the requested variant; otherwise
    // `None`. One accessor per Type variant, all following the same shape.
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_null(&self) -> Option<Null<'a>> {
        if self.type_type() == Type::Null {
            self.type_().map(|u| Null::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_int(&self) -> Option<Int<'a>> {
        if self.type_type() == Type::Int {
            self.type_().map(|u| Int::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_floating_point(&self) -> Option<FloatingPoint<'a>> {
        if self.type_type() == Type::FloatingPoint {
            self.type_().map(|u| FloatingPoint::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_binary(&self) -> Option<Binary<'a>> {
        if self.type_type() == Type::Binary {
            self.type_().map(|u| Binary::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_utf_8(&self) -> Option<Utf8<'a>> {
        if self.type_type() == Type::Utf8 {
            self.type_().map(|u| Utf8::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_bool(&self) -> Option<Bool<'a>> {
        if self.type_type() == Type::Bool {
            self.type_().map(|u| Bool::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_decimal(&self) -> Option<Decimal<'a>> {
        if self.type_type() == Type::Decimal {
            self.type_().map(|u| Decimal::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_date(&self) -> Option<Date<'a>> {
        if self.type_type() == Type::Date {
            self.type_().map(|u| Date::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_time(&self) -> Option<Time<'a>> {
        if self.type_type() == Type::Time {
            self.type_().map(|u| Time::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_timestamp(&self) -> Option<Timestamp<'a>> {
        if self.type_type() == Type::Timestamp {
            self.type_().map(|u| Timestamp::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_interval(&self) -> Option<Interval<'a>> {
        if self.type_type() == Type::Interval {
            self.type_().map(|u| Interval::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_list(&self) -> Option<List<'a>> {
        if self.type_type() == Type::List {
            self.type_().map(|u| List::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_struct_(&self) -> Option<Struct_<'a>> {
        if self.type_type() == Type::Struct_ {
            self.type_().map(|u| Struct_::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_union(&self) -> Option<Union<'a>> {
        if self.type_type() == Type::Union {
            self.type_().map(|u| Union::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_fixed_size_binary(&self) -> Option<FixedSizeBinary<'a>> {
        if self.type_type() == Type::FixedSizeBinary {
            self.type_().map(|u| FixedSizeBinary::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_fixed_size_list(&self) -> Option<FixedSizeList<'a>> {
        if self.type_type() == Type::FixedSizeList {
            self.type_().map(|u| FixedSizeList::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_map(&self) -> Option<Map<'a>> {
        if self.type_type() == Type::Map {
            self.type_().map(|u| Map::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_duration(&self) -> Option<Duration<'a>> {
        if self.type_type() == Type::Duration {
            self.type_().map(|u| Duration::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_large_binary(&self) -> Option<LargeBinary<'a>> {
        if self.type_type() == Type::LargeBinary {
            self.type_().map(|u| LargeBinary::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_large_utf_8(&self) -> Option<LargeUtf8<'a>> {
        if self.type_type() == Type::LargeUtf8 {
            self.type_().map(|u| LargeUtf8::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn type_as_large_list(&self) -> Option<LargeList<'a>> {
        if self.type_type() == Type::LargeList {
            self.type_().map(|u| LargeList::init_from_table(u))
        } else {
            None
        }
    }
    // Typed accessors for the `sparseIndex` union, same pattern as above.
    #[inline]
    #[allow(non_snake_case)]
    pub fn sparseIndex_as_sparse_tensor_index_coo(
        &self,
    ) -> Option<SparseTensorIndexCOO<'a>> {
        if self.sparseIndex_type() == SparseTensorIndex::SparseTensorIndexCOO {
            self.sparseIndex()
                .map(|u| SparseTensorIndexCOO::init_from_table(u))
        } else {
            None
        }
    }
    #[inline]
    #[allow(non_snake_case)]
    pub fn sparseIndex_as_sparse_matrix_index_csx(
        &self,
    ) -> Option<SparseMatrixIndexCSX<'a>> {
        if self.sparseIndex_type() == SparseTensorIndex::SparseMatrixIndexCSX {
            self.sparseIndex()
                .map(|u| SparseMatrixIndexCSX::init_from_table(u))
        } else {
            None
        }
    }
}
/// Argument bundle for `SparseTensor::create`. The two `*_type` fields are
/// union discriminants that must agree with the corresponding union-offset
/// fields (`type_`, `sparseIndex`); scalars use their schema defaults.
pub struct SparseTensorArgs<'a> {
    pub type_type: Type,
    pub type_: Option<flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>>,
    pub shape: Option<
        flatbuffers::WIPOffset<
            flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<TensorDim<'a>>>,
        >,
    >,
    pub non_zero_length: i64,
    pub sparseIndex_type: SparseTensorIndex,
    pub sparseIndex: Option<flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>>,
    pub data: Option<&'a Buffer>,
}
impl<'a> Default for SparseTensorArgs<'a> {
    /// Schema defaults: both unions unset (`NONE`), zero non-zero values,
    /// all optional fields absent.
    #[inline]
    fn default() -> Self {
        SparseTensorArgs {
            type_type: Type::NONE,
            type_: None,
            shape: None,
            non_zero_length: 0,
            sparseIndex_type: SparseTensorIndex::NONE,
            sparseIndex: None,
            data: None,
        }
    }
}
/// Incrementally assembles a `SparseTensor` table on a
/// `FlatBufferBuilder`: obtain via `new`, add fields, then call `finish`.
pub struct SparseTensorBuilder<'a: 'b, 'b> {
    fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b> SparseTensorBuilder<'a, 'b> {
    /// Sets the `type` union discriminant. `push_slot` is given the schema
    /// default (`Type::NONE`), so a default value may be elided.
    #[inline]
    pub fn add_type_type(&mut self, type_type: Type) {
        self.fbb_
            .push_slot::<Type>(SparseTensor::VT_TYPE_TYPE, type_type, Type::NONE);
    }
    /// Sets the `type` union value; must match the discriminant set via
    /// `add_type_type`.
    #[inline]
    pub fn add_type_(
        &mut self,
        type_: flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>,
    ) {
        self.fbb_
            .push_slot_always::<flatbuffers::WIPOffset<_>>(SparseTensor::VT_TYPE_, type_);
    }
    #[inline]
    pub fn add_shape(
        &mut self,
        shape: flatbuffers::WIPOffset<
            flatbuffers::Vector<'b, flatbuffers::ForwardsUOffset<TensorDim<'b>>>,
        >,
    ) {
        self.fbb_
            .push_slot_always::<flatbuffers::WIPOffset<_>>(SparseTensor::VT_SHAPE, shape);
    }
    /// Sets the non-zero count (schema default 0, may be elided).
    #[inline]
    pub fn add_non_zero_length(&mut self, non_zero_length: i64) {
        self.fbb_
            .push_slot::<i64>(SparseTensor::VT_NON_ZERO_LENGTH, non_zero_length, 0);
    }
    /// Sets the `sparseIndex` union discriminant (schema default `NONE`).
    #[inline]
    pub fn add_sparseIndex_type(&mut self, sparseIndex_type: SparseTensorIndex) {
        self.fbb_.push_slot::<SparseTensorIndex>(
            SparseTensor::VT_SPARSEINDEX_TYPE,
            sparseIndex_type,
            SparseTensorIndex::NONE,
        );
    }
    /// Sets the `sparseIndex` union value; must match the discriminant set
    /// via `add_sparseIndex_type`.
    #[inline]
    pub fn add_sparseIndex(
        &mut self,
        sparseIndex: flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>,
    ) {
        self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(
            SparseTensor::VT_SPARSEINDEX,
            sparseIndex,
        );
    }
    #[inline]
    pub fn add_data(&mut self, data: &'b Buffer) {
        self.fbb_
            .push_slot_always::<&Buffer>(SparseTensor::VT_DATA, data);
    }
    /// Starts a new table on `_fbb` and returns a builder for it.
    #[inline]
    pub fn new(
        _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    ) -> SparseTensorBuilder<'a, 'b> {
        let start = _fbb.start_table();
        SparseTensorBuilder {
            fbb_: _fbb,
            start_: start,
        }
    }
    /// Ends the table and returns its offset within the buffer.
    #[inline]
    pub fn finish(self) -> flatbuffers::WIPOffset<SparseTensor<'a>> {
        let o = self.fbb_.end_table(self.start_);
        flatbuffers::WIPOffset::new(o.value())
    }
}
/// Interprets `buf` as a finished flatbuffer whose root table is a
/// `SparseTensor` and returns an accessor over it.
#[inline]
pub fn get_root_as_sparse_tensor<'a>(buf: &'a [u8]) -> SparseTensor<'a> {
    let root: SparseTensor<'a> = flatbuffers::get_root(buf);
    root
}
/// Like `get_root_as_sparse_tensor`, but for buffers that begin with a
/// size prefix.
#[inline]
pub fn get_size_prefixed_root_as_sparse_tensor<'a>(buf: &'a [u8]) -> SparseTensor<'a> {
    let root: SparseTensor<'a> = flatbuffers::get_size_prefixed_root(buf);
    root
}
/// Finalizes the buffer in `fbb` with `root` as its root table and no
/// file identifier.
#[inline]
pub fn finish_sparse_tensor_buffer<'a, 'b>(
    fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    root: flatbuffers::WIPOffset<SparseTensor<'a>>,
) {
    let file_identifier: Option<&str> = None;
    fbb.finish(root, file_identifier);
}
/// Finalizes the buffer in `fbb` with a leading size prefix, `root` as its
/// root table, and no file identifier.
#[inline]
pub fn finish_size_prefixed_sparse_tensor_buffer<'a, 'b>(
    fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
    root: flatbuffers::WIPOffset<SparseTensor<'a>>,
) {
    let file_identifier: Option<&str> = None;
    fbb.finish_size_prefixed(root, file_identifier);
}