1use core::mem;
7use core::cmp::Ordering;
8
9extern crate flatbuffers;
10use self::flatbuffers::{EndianScalar, Follow};
11
// Pre-2.0 module-level aliases for the `Compression` enum bounds/values.
// Kept only for backwards compatibility; prefer the associated constants
// on `Compression` itself.
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MIN_COMPRESSION: u8 = 0;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MAX_COMPRESSION: u8 = 1;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
#[allow(non_camel_case_types)]
// All known `Compression` variants, in discriminant order.
pub const ENUM_VALUES_COMPRESSION: [Compression; 2] = [
  Compression::None,
  Compression::LZ4,
];
22
/// Compression scheme applied to a serialized buffer.
///
/// Modeled as an "open" enum over `u8`: values outside the known set are
/// preserved when read from a foreign flatbuffer instead of being rejected.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
pub struct Compression(pub u8);
#[allow(non_upper_case_globals)]
impl Compression {
  /// No compression applied.
  pub const None: Self = Self(0);
  /// LZ4 compression.
  pub const LZ4: Self = Self(1);

  /// Smallest known discriminant.
  pub const ENUM_MIN: u8 = 0;
  /// Largest known discriminant.
  pub const ENUM_MAX: u8 = 1;
  /// Every known variant, in discriminant order.
  pub const ENUM_VALUES: &'static [Self] = &[Self::None, Self::LZ4];

  /// Returns the variant's name, or `None` for discriminants outside the
  /// known set.
  pub fn variant_name(self) -> Option<&'static str> {
    match self.0 {
      0 => Some("None"),
      1 => Some("LZ4"),
      _ => None,
    }
  }
}
impl core::fmt::Debug for Compression {
  /// Prints the variant name, or `<UNKNOWN n>` for unrecognized values.
  fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
    match self.variant_name() {
      Some(name) => f.write_str(name),
      None => f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0)),
    }
  }
}
56impl<'a> flatbuffers::Follow<'a> for Compression {
57 type Inner = Self;
58 #[inline]
59 unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
60 let b = unsafe { flatbuffers::read_scalar_at::<u8>(buf, loc) };
61 Self(b)
62 }
63}
64
65impl flatbuffers::Push for Compression {
66 type Output = Compression;
67 #[inline]
68 unsafe fn push(&self, dst: &mut [u8], _written_len: usize) {
69 unsafe { flatbuffers::emplace_scalar::<u8>(dst, self.0); }
70 }
71}
72
73impl flatbuffers::EndianScalar for Compression {
74 type Scalar = u8;
75 #[inline]
76 fn to_little_endian(self) -> u8 {
77 self.0.to_le()
78 }
79 #[inline]
80 #[allow(clippy::wrong_self_convention)]
81 fn from_little_endian(v: u8) -> Self {
82 let b = u8::from_le(v);
83 Self(b)
84 }
85}
86
impl<'a> flatbuffers::Verifiable for Compression {
  /// Every `u8` bit pattern is a valid open-enum value, so verifying the
  /// underlying scalar (a bounds check) is sufficient.
  #[inline]
  fn run_verifier(
    v: &mut flatbuffers::Verifier, pos: usize
  ) -> Result<(), flatbuffers::InvalidFlatbuffer> {
    use self::flatbuffers::Verifiable;
    u8::run_verifier(v, pos)
  }
}

// Fixed-size scalar wrapper: vectors of `Compression` can be verified
// without inspecting each element.
impl flatbuffers::SimpleToVerifyInSlice for Compression {}
// Pre-2.0 module-level aliases for the `Precision` enum bounds/values.
// Kept only for backwards compatibility; prefer the associated constants
// on `Precision` itself.
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MIN_PRECISION: u8 = 0;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MAX_PRECISION: u8 = 1;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
#[allow(non_camel_case_types)]
// All known `Precision` variants, in discriminant order.
pub const ENUM_VALUES_PRECISION: [Precision; 2] = [
  Precision::Inexact,
  Precision::Exact,
];
108
/// Whether a statistic is an exact value or only an approximation.
///
/// Modeled as an "open" enum over `u8`: values outside the known set are
/// preserved when read from a foreign flatbuffer instead of being rejected.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
pub struct Precision(pub u8);
#[allow(non_upper_case_globals)]
impl Precision {
  /// The statistic is approximate.
  pub const Inexact: Self = Self(0);
  /// The statistic is exact.
  pub const Exact: Self = Self(1);

  /// Smallest known discriminant.
  pub const ENUM_MIN: u8 = 0;
  /// Largest known discriminant.
  pub const ENUM_MAX: u8 = 1;
  /// Every known variant, in discriminant order.
  pub const ENUM_VALUES: &'static [Self] = &[Self::Inexact, Self::Exact];

  /// Returns the variant's name, or `None` for discriminants outside the
  /// known set.
  pub fn variant_name(self) -> Option<&'static str> {
    match self.0 {
      0 => Some("Inexact"),
      1 => Some("Exact"),
      _ => None,
    }
  }
}
impl core::fmt::Debug for Precision {
  /// Prints the variant name, or `<UNKNOWN n>` for unrecognized values.
  fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
    match self.variant_name() {
      Some(name) => f.write_str(name),
      None => f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0)),
    }
  }
}
141impl<'a> flatbuffers::Follow<'a> for Precision {
142 type Inner = Self;
143 #[inline]
144 unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
145 let b = unsafe { flatbuffers::read_scalar_at::<u8>(buf, loc) };
146 Self(b)
147 }
148}
149
150impl flatbuffers::Push for Precision {
151 type Output = Precision;
152 #[inline]
153 unsafe fn push(&self, dst: &mut [u8], _written_len: usize) {
154 unsafe { flatbuffers::emplace_scalar::<u8>(dst, self.0); }
155 }
156}
157
158impl flatbuffers::EndianScalar for Precision {
159 type Scalar = u8;
160 #[inline]
161 fn to_little_endian(self) -> u8 {
162 self.0.to_le()
163 }
164 #[inline]
165 #[allow(clippy::wrong_self_convention)]
166 fn from_little_endian(v: u8) -> Self {
167 let b = u8::from_le(v);
168 Self(b)
169 }
170}
171
impl<'a> flatbuffers::Verifiable for Precision {
  /// Every `u8` bit pattern is a valid open-enum value, so verifying the
  /// underlying scalar (a bounds check) is sufficient.
  #[inline]
  fn run_verifier(
    v: &mut flatbuffers::Verifier, pos: usize
  ) -> Result<(), flatbuffers::InvalidFlatbuffer> {
    use self::flatbuffers::Verifiable;
    u8::run_verifier(v, pos)
  }
}

// Fixed-size scalar wrapper: vectors of `Precision` can be verified
// without inspecting each element.
impl flatbuffers::SimpleToVerifyInSlice for Precision {}
/// Fixed 8-byte flatbuffers struct describing one physical buffer:
/// padding (bytes 0..2), alignment exponent (byte 2), compression
/// (byte 3) and length (bytes 4..8), all stored little-endian.
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq)]
pub struct Buffer(pub [u8; 8]);
impl Default for Buffer {
  /// An all-zero buffer descriptor.
  fn default() -> Self {
    Buffer([0u8; 8])
  }
}
193impl core::fmt::Debug for Buffer {
194 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
195 f.debug_struct("Buffer")
196 .field("padding", &self.padding())
197 .field("alignment_exponent", &self.alignment_exponent())
198 .field("compression", &self.compression())
199 .field("length", &self.length())
200 .finish()
201 }
202}
203
// `Buffer` is a fixed-size struct, so vectors of it can be verified with a
// single bounds check instead of walking each element.
impl flatbuffers::SimpleToVerifyInSlice for Buffer {}
impl<'a> flatbuffers::Follow<'a> for Buffer {
  type Inner = &'a Buffer;
  /// By-value follow delegates to the by-reference impl below.
  #[inline]
  unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
    unsafe { <&'a Buffer>::follow(buf, loc) }
  }
}
impl<'a> flatbuffers::Follow<'a> for &'a Buffer {
  type Inner = &'a Buffer;
  /// Reinterprets the bytes at `loc` as a `&Buffer` in place; sound because
  /// `Buffer` is `repr(transparent)` over `[u8; 8]`.
  #[inline]
  unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
    unsafe { flatbuffers::follow_cast_ref::<Buffer>(buf, loc) }
  }
}
impl<'b> flatbuffers::Push for Buffer {
  type Output = Buffer;
  /// Copies the struct's bytes verbatim into the builder buffer.
  #[inline]
  unsafe fn push(&self, dst: &mut [u8], _written_len: usize) {
    // SAFETY: `Buffer` is repr(transparent) over `[u8; 8]`, so viewing it
    // as `size()` raw bytes is valid.
    let src = unsafe { ::core::slice::from_raw_parts(self as *const Buffer as *const u8, <Self as flatbuffers::Push>::size()) };
    dst.copy_from_slice(src);
  }
  #[inline]
  fn alignment() -> flatbuffers::PushAlignment {
    // 4-byte alignment: the size of the largest scalar field (`length: u32`).
    flatbuffers::PushAlignment::new(4)
  }
}
231
impl<'a> flatbuffers::Verifiable for Buffer {
  /// A fixed-size struct needs only a bounds check: every byte pattern is
  /// a valid `Buffer`.
  #[inline]
  fn run_verifier(
    v: &mut flatbuffers::Verifier, pos: usize
  ) -> Result<(), flatbuffers::InvalidFlatbuffer> {
    use self::flatbuffers::Verifiable;
    v.in_buffer::<Self>(pos)
  }
}
241
242impl<'a> Buffer {
243 #[allow(clippy::too_many_arguments)]
244 pub fn new(
245 padding: u16,
246 alignment_exponent: u8,
247 compression: Compression,
248 length: u32,
249 ) -> Self {
250 let mut s = Self([0; 8]);
251 s.set_padding(padding);
252 s.set_alignment_exponent(alignment_exponent);
253 s.set_compression(compression);
254 s.set_length(length);
255 s
256 }
257
258 pub fn padding(&self) -> u16 {
260 let mut mem = core::mem::MaybeUninit::<<u16 as EndianScalar>::Scalar>::uninit();
261 EndianScalar::from_little_endian(unsafe {
265 core::ptr::copy_nonoverlapping(
266 self.0[0..].as_ptr(),
267 mem.as_mut_ptr() as *mut u8,
268 core::mem::size_of::<<u16 as EndianScalar>::Scalar>(),
269 );
270 mem.assume_init()
271 })
272 }
273
274 pub fn set_padding(&mut self, x: u16) {
275 let x_le = x.to_little_endian();
276 unsafe {
280 core::ptr::copy_nonoverlapping(
281 &x_le as *const _ as *const u8,
282 self.0[0..].as_mut_ptr(),
283 core::mem::size_of::<<u16 as EndianScalar>::Scalar>(),
284 );
285 }
286 }
287
288 pub fn alignment_exponent(&self) -> u8 {
290 let mut mem = core::mem::MaybeUninit::<<u8 as EndianScalar>::Scalar>::uninit();
291 EndianScalar::from_little_endian(unsafe {
295 core::ptr::copy_nonoverlapping(
296 self.0[2..].as_ptr(),
297 mem.as_mut_ptr() as *mut u8,
298 core::mem::size_of::<<u8 as EndianScalar>::Scalar>(),
299 );
300 mem.assume_init()
301 })
302 }
303
304 pub fn set_alignment_exponent(&mut self, x: u8) {
305 let x_le = x.to_little_endian();
306 unsafe {
310 core::ptr::copy_nonoverlapping(
311 &x_le as *const _ as *const u8,
312 self.0[2..].as_mut_ptr(),
313 core::mem::size_of::<<u8 as EndianScalar>::Scalar>(),
314 );
315 }
316 }
317
318 pub fn compression(&self) -> Compression {
320 let mut mem = core::mem::MaybeUninit::<<Compression as EndianScalar>::Scalar>::uninit();
321 EndianScalar::from_little_endian(unsafe {
325 core::ptr::copy_nonoverlapping(
326 self.0[3..].as_ptr(),
327 mem.as_mut_ptr() as *mut u8,
328 core::mem::size_of::<<Compression as EndianScalar>::Scalar>(),
329 );
330 mem.assume_init()
331 })
332 }
333
334 pub fn set_compression(&mut self, x: Compression) {
335 let x_le = x.to_little_endian();
336 unsafe {
340 core::ptr::copy_nonoverlapping(
341 &x_le as *const _ as *const u8,
342 self.0[3..].as_mut_ptr(),
343 core::mem::size_of::<<Compression as EndianScalar>::Scalar>(),
344 );
345 }
346 }
347
348 pub fn length(&self) -> u32 {
350 let mut mem = core::mem::MaybeUninit::<<u32 as EndianScalar>::Scalar>::uninit();
351 EndianScalar::from_little_endian(unsafe {
355 core::ptr::copy_nonoverlapping(
356 self.0[4..].as_ptr(),
357 mem.as_mut_ptr() as *mut u8,
358 core::mem::size_of::<<u32 as EndianScalar>::Scalar>(),
359 );
360 mem.assume_init()
361 })
362 }
363
364 pub fn set_length(&mut self, x: u32) {
365 let x_le = x.to_little_endian();
366 unsafe {
370 core::ptr::copy_nonoverlapping(
371 &x_le as *const _ as *const u8,
372 self.0[4..].as_mut_ptr(),
373 core::mem::size_of::<<u32 as EndianScalar>::Scalar>(),
374 );
375 }
376 }
377
378}
379
/// Marker type for offsets that point at an `Array` table.
pub enum ArrayOffset {}
#[derive(Copy, Clone, PartialEq)]

/// Flatbuffers table for a serialized array: a tree of `ArrayNode`s plus
/// the `Buffer` descriptors they reference.
pub struct Array<'a> {
  pub _tab: flatbuffers::Table<'a>,
}
388
impl<'a> flatbuffers::Follow<'a> for Array<'a> {
  type Inner = Array<'a>;
  /// Positions a table view at `loc` within `buf`.
  #[inline]
  unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
    Self { _tab: unsafe { flatbuffers::Table::new(buf, loc) } }
  }
}
396
impl<'a> Array<'a> {
  // Vtable slot offsets for each field of the `Array` table.
  pub const VT_ROOT: flatbuffers::VOffsetT = 4;
  pub const VT_BUFFERS: flatbuffers::VOffsetT = 6;

  /// Wraps an already-positioned flatbuffers table as an `Array`.
  #[inline]
  pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
    Array { _tab: table }
  }
  /// Serializes an `Array` table from `args` into `_fbb`, returning the
  /// in-progress offset of the new table.
  #[allow(unused_mut)]
  pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>(
    _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>,
    args: &'args ArrayArgs<'args>
  ) -> flatbuffers::WIPOffset<Array<'bldr>> {
    let mut builder = ArrayBuilder::new(_fbb);
    if let Some(x) = args.buffers { builder.add_buffers(x); }
    if let Some(x) = args.root { builder.add_root(x); }
    builder.finish()
  }


  /// Root node of the array tree, or `None` if the field is absent.
  #[inline]
  pub fn root(&self) -> Option<ArrayNode<'a>> {
    unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<ArrayNode>>(Array::VT_ROOT, None)}
  }
  /// Buffer descriptors referenced by the nodes, or `None` if absent.
  #[inline]
  pub fn buffers(&self) -> Option<flatbuffers::Vector<'a, Buffer>> {
    unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, Buffer>>>(Array::VT_BUFFERS, None)}
  }
}
434
impl flatbuffers::Verifiable for Array<'_> {
  /// Verifies the table's vtable and each present field; both fields are
  /// optional (`required = false`).
  #[inline]
  fn run_verifier(
    v: &mut flatbuffers::Verifier, pos: usize
  ) -> Result<(), flatbuffers::InvalidFlatbuffer> {
    use self::flatbuffers::Verifiable;
    v.visit_table(pos)?
      .visit_field::<flatbuffers::ForwardsUOffset<ArrayNode>>("root", Self::VT_ROOT, false)?
      .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, Buffer>>>("buffers", Self::VT_BUFFERS, false)?
      .finish();
    Ok(())
  }
}
/// Arguments for [`Array::create`]; every field is optional.
pub struct ArrayArgs<'a> {
  pub root: Option<flatbuffers::WIPOffset<ArrayNode<'a>>>,
  pub buffers: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, Buffer>>>,
}
impl<'a> Default for ArrayArgs<'a> {
  /// All fields absent.
  #[inline]
  fn default() -> Self {
    ArrayArgs {
      root: None,
      buffers: None,
    }
  }
}
461
/// Incremental builder for an `Array` table: call the `add_*` methods for
/// each present field, then `finish`.
pub struct ArrayBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> {
  fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>,
  start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> ArrayBuilder<'a, 'b, A> {
  /// Writes the `root` offset into its vtable slot.
  #[inline]
  pub fn add_root(&mut self, root: flatbuffers::WIPOffset<ArrayNode<'b >>) {
    self.fbb_.push_slot_always::<flatbuffers::WIPOffset<ArrayNode>>(Array::VT_ROOT, root);
  }
  /// Writes the `buffers` vector offset into its vtable slot.
  #[inline]
  pub fn add_buffers(&mut self, buffers: flatbuffers::WIPOffset<flatbuffers::Vector<'b , Buffer>>) {
    self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Array::VT_BUFFERS, buffers);
  }
  /// Starts a new table in `_fbb` and returns a builder for it.
  #[inline]
  pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> ArrayBuilder<'a, 'b, A> {
    let start = _fbb.start_table();
    ArrayBuilder {
      fbb_: _fbb,
      start_: start,
    }
  }
  /// Closes the table and returns its in-progress offset.
  #[inline]
  pub fn finish(self) -> flatbuffers::WIPOffset<Array<'a>> {
    let o = self.fbb_.end_table(self.start_);
    flatbuffers::WIPOffset::new(o.value())
  }
}
489
490impl core::fmt::Debug for Array<'_> {
491 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
492 let mut ds = f.debug_struct("Array");
493 ds.field("root", &self.root());
494 ds.field("buffers", &self.buffers());
495 ds.finish()
496 }
497}
/// Marker type for offsets that point at an `ArrayNode` table.
pub enum ArrayNodeOffset {}
#[derive(Copy, Clone, PartialEq)]

/// Flatbuffers table for one node of the array tree: an encoding id,
/// optional metadata/children/buffer-indices, and optional statistics.
pub struct ArrayNode<'a> {
  pub _tab: flatbuffers::Table<'a>,
}
504
impl<'a> flatbuffers::Follow<'a> for ArrayNode<'a> {
  type Inner = ArrayNode<'a>;
  /// Positions a table view at `loc` within `buf`.
  #[inline]
  unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
    Self { _tab: unsafe { flatbuffers::Table::new(buf, loc) } }
  }
}
512
impl<'a> ArrayNode<'a> {
  // Vtable slot offsets for each field of the `ArrayNode` table.
  pub const VT_ENCODING: flatbuffers::VOffsetT = 4;
  pub const VT_METADATA: flatbuffers::VOffsetT = 6;
  pub const VT_CHILDREN: flatbuffers::VOffsetT = 8;
  pub const VT_BUFFERS: flatbuffers::VOffsetT = 10;
  pub const VT_STATS: flatbuffers::VOffsetT = 12;

  /// Wraps an already-positioned flatbuffers table as an `ArrayNode`.
  #[inline]
  pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
    ArrayNode { _tab: table }
  }
  /// Serializes an `ArrayNode` table from `args` into `_fbb`.
  /// Fields are pushed largest-alignment first (offsets, then the u16),
  /// per the generated field ordering.
  #[allow(unused_mut)]
  pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>(
    _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>,
    args: &'args ArrayNodeArgs<'args>
  ) -> flatbuffers::WIPOffset<ArrayNode<'bldr>> {
    let mut builder = ArrayNodeBuilder::new(_fbb);
    if let Some(x) = args.stats { builder.add_stats(x); }
    if let Some(x) = args.buffers { builder.add_buffers(x); }
    if let Some(x) = args.children { builder.add_children(x); }
    if let Some(x) = args.metadata { builder.add_metadata(x); }
    builder.add_encoding(args.encoding);
    builder.finish()
  }


  /// Encoding id for this node; defaults to 0 when the field is absent.
  #[inline]
  pub fn encoding(&self) -> u16 {
    unsafe { self._tab.get::<u16>(ArrayNode::VT_ENCODING, Some(0)).unwrap()}
  }
  /// Opaque metadata bytes, or `None` if absent.
  #[inline]
  pub fn metadata(&self) -> Option<flatbuffers::Vector<'a, u8>> {
    unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(ArrayNode::VT_METADATA, None)}
  }
  /// Child nodes, or `None` if absent.
  #[inline]
  pub fn children(&self) -> Option<flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<ArrayNode<'a>>>> {
    unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<ArrayNode>>>>(ArrayNode::VT_CHILDREN, None)}
  }
  /// Buffer indices (u16) referenced by this node, or `None` if absent.
  #[inline]
  pub fn buffers(&self) -> Option<flatbuffers::Vector<'a, u16>> {
    unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u16>>>(ArrayNode::VT_BUFFERS, None)}
  }
  /// Statistics table for this node, or `None` if absent.
  #[inline]
  pub fn stats(&self) -> Option<ArrayStats<'a>> {
    unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<ArrayStats>>(ArrayNode::VT_STATS, None)}
  }
}
575
impl flatbuffers::Verifiable for ArrayNode<'_> {
  /// Verifies the table's vtable and each present field; all fields are
  /// optional (`required = false`).
  #[inline]
  fn run_verifier(
    v: &mut flatbuffers::Verifier, pos: usize
  ) -> Result<(), flatbuffers::InvalidFlatbuffer> {
    use self::flatbuffers::Verifiable;
    v.visit_table(pos)?
      .visit_field::<u16>("encoding", Self::VT_ENCODING, false)?
      .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u8>>>("metadata", Self::VT_METADATA, false)?
      .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, flatbuffers::ForwardsUOffset<ArrayNode>>>>("children", Self::VT_CHILDREN, false)?
      .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u16>>>("buffers", Self::VT_BUFFERS, false)?
      .visit_field::<flatbuffers::ForwardsUOffset<ArrayStats>>("stats", Self::VT_STATS, false)?
      .finish();
    Ok(())
  }
}
/// Arguments for [`ArrayNode::create`]; `encoding` is a plain scalar
/// (defaulting to 0), the rest are optional.
pub struct ArrayNodeArgs<'a> {
  pub encoding: u16,
  pub metadata: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u8>>>,
  pub children: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset<ArrayNode<'a>>>>>,
  pub buffers: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u16>>>,
  pub stats: Option<flatbuffers::WIPOffset<ArrayStats<'a>>>,
}
impl<'a> Default for ArrayNodeArgs<'a> {
  /// Zero encoding, all other fields absent.
  #[inline]
  fn default() -> Self {
    ArrayNodeArgs {
      encoding: 0,
      metadata: None,
      children: None,
      buffers: None,
      stats: None,
    }
  }
}
611
/// Incremental builder for an `ArrayNode` table: call the `add_*` methods
/// for each present field, then `finish`.
pub struct ArrayNodeBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> {
  fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>,
  start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> ArrayNodeBuilder<'a, 'b, A> {
  /// Writes `encoding`, omitting the slot when it equals the default (0).
  #[inline]
  pub fn add_encoding(&mut self, encoding: u16) {
    self.fbb_.push_slot::<u16>(ArrayNode::VT_ENCODING, encoding, 0);
  }
  /// Writes the `metadata` vector offset into its vtable slot.
  #[inline]
  pub fn add_metadata(&mut self, metadata: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u8>>) {
    self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(ArrayNode::VT_METADATA, metadata);
  }
  /// Writes the `children` vector offset into its vtable slot.
  #[inline]
  pub fn add_children(&mut self, children: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<ArrayNode<'b >>>>) {
    self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(ArrayNode::VT_CHILDREN, children);
  }
  /// Writes the `buffers` vector offset into its vtable slot.
  #[inline]
  pub fn add_buffers(&mut self, buffers: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u16>>) {
    self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(ArrayNode::VT_BUFFERS, buffers);
  }
  /// Writes the `stats` table offset into its vtable slot.
  #[inline]
  pub fn add_stats(&mut self, stats: flatbuffers::WIPOffset<ArrayStats<'b >>) {
    self.fbb_.push_slot_always::<flatbuffers::WIPOffset<ArrayStats>>(ArrayNode::VT_STATS, stats);
  }
  /// Starts a new table in `_fbb` and returns a builder for it.
  #[inline]
  pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> ArrayNodeBuilder<'a, 'b, A> {
    let start = _fbb.start_table();
    ArrayNodeBuilder {
      fbb_: _fbb,
      start_: start,
    }
  }
  /// Closes the table and returns its in-progress offset.
  #[inline]
  pub fn finish(self) -> flatbuffers::WIPOffset<ArrayNode<'a>> {
    let o = self.fbb_.end_table(self.start_);
    flatbuffers::WIPOffset::new(o.value())
  }
}
651
652impl core::fmt::Debug for ArrayNode<'_> {
653 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
654 let mut ds = f.debug_struct("ArrayNode");
655 ds.field("encoding", &self.encoding());
656 ds.field("metadata", &self.metadata());
657 ds.field("children", &self.children());
658 ds.field("buffers", &self.buffers());
659 ds.field("stats", &self.stats());
660 ds.finish()
661 }
662}
/// Marker type for offsets that point at an `ArrayStats` table.
pub enum ArrayStatsOffset {}
#[derive(Copy, Clone, PartialEq)]

/// Flatbuffers table of per-array statistics (min/max/sum, sortedness
/// flags, null/NaN counts, uncompressed size). Every field is optional.
pub struct ArrayStats<'a> {
  pub _tab: flatbuffers::Table<'a>,
}
669
impl<'a> flatbuffers::Follow<'a> for ArrayStats<'a> {
  type Inner = ArrayStats<'a>;
  /// Positions a table view at `loc` within `buf`.
  #[inline]
  unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
    Self { _tab: unsafe { flatbuffers::Table::new(buf, loc) } }
  }
}
677
impl<'a> ArrayStats<'a> {
  // Vtable slot offsets for each field of the `ArrayStats` table.
  pub const VT_MIN: flatbuffers::VOffsetT = 4;
  pub const VT_MIN_PRECISION: flatbuffers::VOffsetT = 6;
  pub const VT_MAX: flatbuffers::VOffsetT = 8;
  pub const VT_MAX_PRECISION: flatbuffers::VOffsetT = 10;
  pub const VT_SUM: flatbuffers::VOffsetT = 12;
  pub const VT_IS_SORTED: flatbuffers::VOffsetT = 14;
  pub const VT_IS_STRICT_SORTED: flatbuffers::VOffsetT = 16;
  pub const VT_IS_CONSTANT: flatbuffers::VOffsetT = 18;
  pub const VT_NULL_COUNT: flatbuffers::VOffsetT = 20;
  pub const VT_UNCOMPRESSED_SIZE_IN_BYTES: flatbuffers::VOffsetT = 22;
  pub const VT_NAN_COUNT: flatbuffers::VOffsetT = 24;

  /// Wraps an already-positioned flatbuffers table as an `ArrayStats`.
  #[inline]
  pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
    ArrayStats { _tab: table }
  }
  /// Serializes an `ArrayStats` table from `args` into `_fbb`.
  /// Fields are pushed largest-alignment first (u64s, then offsets, then
  /// bools, then the precision bytes), per the generated field ordering.
  #[allow(unused_mut)]
  pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>(
    _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>,
    args: &'args ArrayStatsArgs<'args>
  ) -> flatbuffers::WIPOffset<ArrayStats<'bldr>> {
    let mut builder = ArrayStatsBuilder::new(_fbb);
    if let Some(x) = args.nan_count { builder.add_nan_count(x); }
    if let Some(x) = args.uncompressed_size_in_bytes { builder.add_uncompressed_size_in_bytes(x); }
    if let Some(x) = args.null_count { builder.add_null_count(x); }
    if let Some(x) = args.sum { builder.add_sum(x); }
    if let Some(x) = args.max { builder.add_max(x); }
    if let Some(x) = args.min { builder.add_min(x); }
    if let Some(x) = args.is_constant { builder.add_is_constant(x); }
    if let Some(x) = args.is_strict_sorted { builder.add_is_strict_sorted(x); }
    if let Some(x) = args.is_sorted { builder.add_is_sorted(x); }
    builder.add_max_precision(args.max_precision);
    builder.add_min_precision(args.min_precision);
    builder.finish()
  }


  /// Serialized minimum value, or `None` if absent.
  #[inline]
  pub fn min(&self) -> Option<flatbuffers::Vector<'a, u8>> {
    unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(ArrayStats::VT_MIN, None)}
  }
  /// Precision of `min`; defaults to `Inexact` when the field is absent.
  #[inline]
  pub fn min_precision(&self) -> Precision {
    unsafe { self._tab.get::<Precision>(ArrayStats::VT_MIN_PRECISION, Some(Precision::Inexact)).unwrap()}
  }
  /// Serialized maximum value, or `None` if absent.
  #[inline]
  pub fn max(&self) -> Option<flatbuffers::Vector<'a, u8>> {
    unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(ArrayStats::VT_MAX, None)}
  }
  /// Precision of `max`; defaults to `Inexact` when the field is absent.
  #[inline]
  pub fn max_precision(&self) -> Precision {
    unsafe { self._tab.get::<Precision>(ArrayStats::VT_MAX_PRECISION, Some(Precision::Inexact)).unwrap()}
  }
  /// Serialized sum, or `None` if absent.
  #[inline]
  pub fn sum(&self) -> Option<flatbuffers::Vector<'a, u8>> {
    unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(ArrayStats::VT_SUM, None)}
  }
  /// Sortedness flag; `None` means unknown (field absent).
  #[inline]
  pub fn is_sorted(&self) -> Option<bool> {
    unsafe { self._tab.get::<bool>(ArrayStats::VT_IS_SORTED, None)}
  }
  /// Strict-sortedness flag; `None` means unknown (field absent).
  #[inline]
  pub fn is_strict_sorted(&self) -> Option<bool> {
    unsafe { self._tab.get::<bool>(ArrayStats::VT_IS_STRICT_SORTED, None)}
  }
  /// Constant-value flag; `None` means unknown (field absent).
  #[inline]
  pub fn is_constant(&self) -> Option<bool> {
    unsafe { self._tab.get::<bool>(ArrayStats::VT_IS_CONSTANT, None)}
  }
  /// Number of nulls, or `None` if absent.
  #[inline]
  pub fn null_count(&self) -> Option<u64> {
    unsafe { self._tab.get::<u64>(ArrayStats::VT_NULL_COUNT, None)}
  }
  /// Uncompressed size in bytes, or `None` if absent.
  #[inline]
  pub fn uncompressed_size_in_bytes(&self) -> Option<u64> {
    unsafe { self._tab.get::<u64>(ArrayStats::VT_UNCOMPRESSED_SIZE_IN_BYTES, None)}
  }
  /// Number of NaNs, or `None` if absent.
  #[inline]
  pub fn nan_count(&self) -> Option<u64> {
    unsafe { self._tab.get::<u64>(ArrayStats::VT_NAN_COUNT, None)}
  }
}
795
impl flatbuffers::Verifiable for ArrayStats<'_> {
  /// Verifies the table's vtable and each present field; all fields are
  /// optional (`required = false`).
  #[inline]
  fn run_verifier(
    v: &mut flatbuffers::Verifier, pos: usize
  ) -> Result<(), flatbuffers::InvalidFlatbuffer> {
    use self::flatbuffers::Verifiable;
    v.visit_table(pos)?
      .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u8>>>("min", Self::VT_MIN, false)?
      .visit_field::<Precision>("min_precision", Self::VT_MIN_PRECISION, false)?
      .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u8>>>("max", Self::VT_MAX, false)?
      .visit_field::<Precision>("max_precision", Self::VT_MAX_PRECISION, false)?
      .visit_field::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'_, u8>>>("sum", Self::VT_SUM, false)?
      .visit_field::<bool>("is_sorted", Self::VT_IS_SORTED, false)?
      .visit_field::<bool>("is_strict_sorted", Self::VT_IS_STRICT_SORTED, false)?
      .visit_field::<bool>("is_constant", Self::VT_IS_CONSTANT, false)?
      .visit_field::<u64>("null_count", Self::VT_NULL_COUNT, false)?
      .visit_field::<u64>("uncompressed_size_in_bytes", Self::VT_UNCOMPRESSED_SIZE_IN_BYTES, false)?
      .visit_field::<u64>("nan_count", Self::VT_NAN_COUNT, false)?
      .finish();
    Ok(())
  }
}
/// Arguments for [`ArrayStats::create`]; the two precisions are plain
/// scalars (defaulting to `Inexact`), everything else is optional.
pub struct ArrayStatsArgs<'a> {
  pub min: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u8>>>,
  pub min_precision: Precision,
  pub max: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u8>>>,
  pub max_precision: Precision,
  pub sum: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, u8>>>,
  pub is_sorted: Option<bool>,
  pub is_strict_sorted: Option<bool>,
  pub is_constant: Option<bool>,
  pub null_count: Option<u64>,
  pub uncompressed_size_in_bytes: Option<u64>,
  pub nan_count: Option<u64>,
}
impl<'a> Default for ArrayStatsArgs<'a> {
  /// Inexact precisions, all other fields absent.
  #[inline]
  fn default() -> Self {
    ArrayStatsArgs {
      min: None,
      min_precision: Precision::Inexact,
      max: None,
      max_precision: Precision::Inexact,
      sum: None,
      is_sorted: None,
      is_strict_sorted: None,
      is_constant: None,
      null_count: None,
      uncompressed_size_in_bytes: None,
      nan_count: None,
    }
  }
}
849
/// Incremental builder for an `ArrayStats` table: call the `add_*` methods
/// for each present field, then `finish`.
pub struct ArrayStatsBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> {
  fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>,
  start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>,
}
impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> ArrayStatsBuilder<'a, 'b, A> {
  /// Writes the `min` vector offset into its vtable slot.
  #[inline]
  pub fn add_min(&mut self, min: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u8>>) {
    self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(ArrayStats::VT_MIN, min);
  }
  /// Writes `min_precision`, omitting the slot when it equals `Inexact`.
  #[inline]
  pub fn add_min_precision(&mut self, min_precision: Precision) {
    self.fbb_.push_slot::<Precision>(ArrayStats::VT_MIN_PRECISION, min_precision, Precision::Inexact);
  }
  /// Writes the `max` vector offset into its vtable slot.
  #[inline]
  pub fn add_max(&mut self, max: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u8>>) {
    self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(ArrayStats::VT_MAX, max);
  }
  /// Writes `max_precision`, omitting the slot when it equals `Inexact`.
  #[inline]
  pub fn add_max_precision(&mut self, max_precision: Precision) {
    self.fbb_.push_slot::<Precision>(ArrayStats::VT_MAX_PRECISION, max_precision, Precision::Inexact);
  }
  /// Writes the `sum` vector offset into its vtable slot.
  #[inline]
  pub fn add_sum(&mut self, sum: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u8>>) {
    self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(ArrayStats::VT_SUM, sum);
  }
  /// Writes `is_sorted` unconditionally (the schema field is optional).
  #[inline]
  pub fn add_is_sorted(&mut self, is_sorted: bool) {
    self.fbb_.push_slot_always::<bool>(ArrayStats::VT_IS_SORTED, is_sorted);
  }
  /// Writes `is_strict_sorted` unconditionally.
  #[inline]
  pub fn add_is_strict_sorted(&mut self, is_strict_sorted: bool) {
    self.fbb_.push_slot_always::<bool>(ArrayStats::VT_IS_STRICT_SORTED, is_strict_sorted);
  }
  /// Writes `is_constant` unconditionally.
  #[inline]
  pub fn add_is_constant(&mut self, is_constant: bool) {
    self.fbb_.push_slot_always::<bool>(ArrayStats::VT_IS_CONSTANT, is_constant);
  }
  /// Writes `null_count` unconditionally.
  #[inline]
  pub fn add_null_count(&mut self, null_count: u64) {
    self.fbb_.push_slot_always::<u64>(ArrayStats::VT_NULL_COUNT, null_count);
  }
  /// Writes `uncompressed_size_in_bytes` unconditionally.
  #[inline]
  pub fn add_uncompressed_size_in_bytes(&mut self, uncompressed_size_in_bytes: u64) {
    self.fbb_.push_slot_always::<u64>(ArrayStats::VT_UNCOMPRESSED_SIZE_IN_BYTES, uncompressed_size_in_bytes);
  }
  /// Writes `nan_count` unconditionally.
  #[inline]
  pub fn add_nan_count(&mut self, nan_count: u64) {
    self.fbb_.push_slot_always::<u64>(ArrayStats::VT_NAN_COUNT, nan_count);
  }
  /// Starts a new table in `_fbb` and returns a builder for it.
  #[inline]
  pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> ArrayStatsBuilder<'a, 'b, A> {
    let start = _fbb.start_table();
    ArrayStatsBuilder {
      fbb_: _fbb,
      start_: start,
    }
  }
  /// Closes the table and returns its in-progress offset.
  #[inline]
  pub fn finish(self) -> flatbuffers::WIPOffset<ArrayStats<'a>> {
    let o = self.fbb_.end_table(self.start_);
    flatbuffers::WIPOffset::new(o.value())
  }
}
913
914impl core::fmt::Debug for ArrayStats<'_> {
915 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
916 let mut ds = f.debug_struct("ArrayStats");
917 ds.field("min", &self.min());
918 ds.field("min_precision", &self.min_precision());
919 ds.field("max", &self.max());
920 ds.field("max_precision", &self.max_precision());
921 ds.field("sum", &self.sum());
922 ds.field("is_sorted", &self.is_sorted());
923 ds.field("is_strict_sorted", &self.is_strict_sorted());
924 ds.field("is_constant", &self.is_constant());
925 ds.field("null_count", &self.null_count());
926 ds.field("uncompressed_size_in_bytes", &self.uncompressed_size_in_bytes());
927 ds.field("nan_count", &self.nan_count());
928 ds.finish()
929 }
930}
/// Verifies `buf` with default verifier options and returns the root
/// `Array`, or an error if the buffer is malformed.
#[inline]
pub fn root_as_array(buf: &[u8]) -> Result<Array, flatbuffers::InvalidFlatbuffer> {
  flatbuffers::root::<Array>(buf)
}
/// Like [`root_as_array`], but for a buffer with a 4-byte size prefix.
#[inline]
pub fn size_prefixed_root_as_array(buf: &[u8]) -> Result<Array, flatbuffers::InvalidFlatbuffer> {
  flatbuffers::size_prefixed_root::<Array>(buf)
}
/// Like [`root_as_array`], but with caller-supplied verifier options
/// (e.g. depth/table-count limits).
#[inline]
pub fn root_as_array_with_opts<'b, 'o>(
  opts: &'o flatbuffers::VerifierOptions,
  buf: &'b [u8],
) -> Result<Array<'b>, flatbuffers::InvalidFlatbuffer> {
  flatbuffers::root_with_opts::<Array<'b>>(opts, buf)
}
/// Like [`size_prefixed_root_as_array`], but with caller-supplied
/// verifier options.
#[inline]
pub fn size_prefixed_root_as_array_with_opts<'b, 'o>(
  opts: &'o flatbuffers::VerifierOptions,
  buf: &'b [u8],
) -> Result<Array<'b>, flatbuffers::InvalidFlatbuffer> {
  flatbuffers::size_prefixed_root_with_opts::<Array<'b>>(opts, buf)
}
/// Returns the root `Array` without any verification.
///
/// # Safety
/// `buf` must contain a valid `Array` flatbuffer; no checks are performed.
#[inline]
pub unsafe fn root_as_array_unchecked(buf: &[u8]) -> Array {
  unsafe { flatbuffers::root_unchecked::<Array>(buf) }
}
/// Returns the root `Array` of a size-prefixed buffer without verification.
///
/// # Safety
/// `buf` must contain a valid size-prefixed `Array` flatbuffer.
#[inline]
pub unsafe fn size_prefixed_root_as_array_unchecked(buf: &[u8]) -> Array {
  unsafe { flatbuffers::size_prefixed_root_unchecked::<Array>(buf) }
}
/// Finalizes `fbb` with `root` as the buffer's root table (no file identifier).
#[inline]
pub fn finish_array_buffer<'a, 'b, A: flatbuffers::Allocator + 'a>(
    fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>,
    root: flatbuffers::WIPOffset<Array<'a>>) {
  fbb.finish(root, None);
}

/// Finalizes `fbb` with `root` as the root table, prepending a 4-byte size prefix.
#[inline]
pub fn finish_size_prefixed_array_buffer<'a, 'b, A: flatbuffers::Allocator + 'a>(fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, root: flatbuffers::WIPOffset<Array<'a>>) {
  fbb.finish_size_prefixed(root, None);
}
1001}