vortex-array 0.54.0

Vortex in memory columnar data format
Documentation
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright the Vortex contributors

use std::fmt::{Debug, Formatter};
use std::iter;
use std::sync::Arc;

use flatbuffers::{FlatBufferBuilder, Follow, WIPOffset, root};
use itertools::Itertools;
use vortex_buffer::{Alignment, ByteBuffer};
use vortex_dtype::{DType, TryFromBytes};
use vortex_error::{
    VortexError, VortexExpect, VortexResult, vortex_bail, vortex_err, vortex_panic,
};
use vortex_flatbuffers::array::Compression;
use vortex_flatbuffers::{
    FlatBuffer, FlatBufferRoot, ReadFlatBuffer, WriteFlatBuffer, array as fba,
};

use crate::stats::StatsSet;
use crate::{Array, ArrayContext, ArrayRef, ArrayVisitor, ArrayVisitorExt};

/// Options for serializing an array.
#[derive(Default, Debug)]
pub struct SerializeOptions {
    /// The starting position within an external stream or file. This offset is used to compute
    /// appropriate padding to enable zero-copy reads. It only affects the output when
    /// `include_padding` is true.
    pub offset: usize,
    /// Whether to include sufficient zero-copy padding, so that each buffer is written at a
    /// position (relative to `offset`) satisfying its alignment.
    pub include_padding: bool,
}

impl dyn Array + '_ {
    /// Serialize the array into a sequence of byte buffers that should be written contiguously.
    /// This function returns a vec to avoid copying data buffers.
    ///
    /// Optionally, padding can be included to guarantee buffer alignment and ensure zero-copy
    /// reads within the context of an external file or stream. In this case, the alignment of
    /// the first byte buffer should be respected when writing the buffers to the stream or file.
    ///
    /// The format of this blob is a sequence of data buffers, possible with prefixed padding,
    /// followed by a flatbuffer containing an [`fba::Array`] message, and ending with a
    /// little-endian u32 describing the length of the flatbuffer message.
    ///
    /// # Errors
    ///
    /// Fails if any array in the tree does not support serialization, or if a buffer length or
    /// the flatbuffer length does not fit into a `u32`.
    pub fn serialize(
        &self,
        ctx: &ArrayContext,
        options: &SerializeOptions,
    ) -> VortexResult<Vec<ByteBuffer>> {
        // Collect all array buffers in depth-first order. This order must match the buffer
        // indices assigned by `ArrayNodeFlatBuffer::write_flatbuffer`.
        let array_buffers = self
            .depth_first_traversal()
            .flat_map(|f| f.buffers())
            .collect::<Vec<_>>();

        // Allocate result buffers. Worst case is one padding buffer per data buffer, plus the
        // leading alignment buffer, the flatbuffer padding, the flatbuffer itself, and the
        // trailing length. (The previous `Vec::with_capacity(buffers.capacity())` read the
        // capacity of a freshly-created empty vec, which is always zero.)
        let mut buffers = Vec::with_capacity(2 * array_buffers.len() + 4);
        let mut fb_buffers = Vec::with_capacity(array_buffers.len());

        // If we're including padding, we need to find the maximum required buffer alignment.
        let max_alignment = array_buffers
            .iter()
            .map(|buf| buf.alignment())
            .chain(iter::once(FlatBuffer::alignment()))
            .max()
            .unwrap_or_else(FlatBuffer::alignment);

        // Create a shared buffer of zeros we can use for padding
        let zeros = ByteBuffer::zeroed(*max_alignment);

        // We push an empty buffer with the maximum alignment, so then subsequent buffers
        // will be aligned. For subsequent buffers, we always push a 1-byte alignment.
        buffers.push(ByteBuffer::zeroed_aligned(0, max_alignment));

        // Keep track of where we are in the "file" to calculate padding.
        let mut pos = options.offset;

        // Push all the array buffers with padding as necessary.
        for buffer in array_buffers {
            let padding = if options.include_padding {
                let padding = pos.next_multiple_of(*buffer.alignment()) - pos;
                if padding > 0 {
                    pos += padding;
                    buffers.push(zeros.slice(0..padding));
                }
                padding
            } else {
                0
            };

            fb_buffers.push(fba::Buffer::new(
                u16::try_from(padding).vortex_expect("padding fits into u16"),
                buffer.alignment().exponent(),
                Compression::None,
                u32::try_from(buffer.len())
                    .map_err(|_| vortex_err!("All buffers must fit into u32 for serialization"))?,
            ));

            pos += buffer.len();
            buffers.push(buffer.aligned(Alignment::none()));
        }

        // Set up the flatbuffer builder
        let mut fbb = FlatBufferBuilder::new();
        let root = ArrayNodeFlatBuffer::try_new(ctx, self)?;
        let fb_root = root.write_flatbuffer(&mut fbb);
        let fb_buffers = fbb.create_vector(&fb_buffers);
        let fb_array = fba::Array::create(
            &mut fbb,
            &fba::ArrayArgs {
                root: Some(fb_root),
                buffers: Some(fb_buffers),
            },
        );
        fbb.finish_minimal(fb_array);
        // `collapse` hands back the builder's backing vec plus the offset where the finished
        // message begins; slice off the unused prefix.
        let (fb_vec, fb_start) = fbb.collapse();
        let fb_end = fb_vec.len();
        let fb_buffer = ByteBuffer::from(fb_vec).slice(fb_start..fb_end);
        let fb_length = fb_buffer.len();

        // Pad so the flatbuffer itself can be read zero-copy at its required alignment.
        if options.include_padding {
            let padding = pos.next_multiple_of(*FlatBuffer::alignment()) - pos;
            if padding > 0 {
                buffers.push(zeros.slice(0..padding));
            }
        }
        buffers.push(fb_buffer);

        // Finally, we write down the u32 length for the flatbuffer.
        buffers.push(ByteBuffer::from(
            u32::try_from(fb_length)
                .map_err(|_| vortex_err!("Array metadata flatbuffer must fit into u32 for serialization. Array encoding tree is too large."))?
                .to_le_bytes()
                .to_vec(),
        ));

        Ok(buffers)
    }
}

/// A utility struct for creating an [`fba::ArrayNode`] flatbuffer.
pub struct ArrayNodeFlatBuffer<'a> {
    // Encoding registry used to resolve each array's encoding to an index.
    ctx: &'a ArrayContext,
    // Root of the array (sub)tree this node serializes.
    array: &'a dyn Array,
    // Index of this node's first buffer within the flattened depth-first buffer list.
    buffer_idx: u16,
}

impl<'a> ArrayNodeFlatBuffer<'a> {
    pub fn try_new(ctx: &'a ArrayContext, array: &'a dyn Array) -> VortexResult<Self> {
        // Depth-first traversal of the array to ensure it supports serialization.
        for child in array.depth_first_traversal() {
            if child.metadata()?.is_none() {
                vortex_bail!(
                    "Array {} does not support serialization",
                    child.encoding_id()
                );
            }
        }
        Ok(Self {
            ctx,
            array,
            buffer_idx: 0,
        })
    }
}

// Marker: an `ArrayNodeFlatBuffer` may be written as the root message of a flatbuffer.
impl FlatBufferRoot for ArrayNodeFlatBuffer<'_> {}

impl WriteFlatBuffer for ArrayNodeFlatBuffer<'_> {
    type Target<'t> = fba::ArrayNode<'t>;

    /// Writes this node (and, recursively, its children) as an [`fba::ArrayNode`] message.
    ///
    /// Buffer indices are assigned depth-first: this node claims `nbuffers` indices starting
    /// at `self.buffer_idx`, then each child subtree claims the next contiguous run. This
    /// must agree with the depth-first buffer order produced by `serialize`.
    fn write_flatbuffer<'fb>(
        &self,
        fbb: &mut FlatBufferBuilder<'fb>,
    ) -> WIPOffset<Self::Target<'fb>> {
        let encoding = self.ctx.encoding_idx(&self.array.encoding());
        let metadata = self
            .array
            .metadata()
            // TODO(ngates): add try_write_flatbuffer
            .vortex_expect("Failed to serialize metadata")
            // `ArrayNodeFlatBuffer::try_new` already rejected trees containing any array
            // whose metadata is `None`.
            .vortex_expect("Validated that all arrays support serialization");
        let metadata = Some(fbb.create_vector(metadata.as_slice()));

        // Assign buffer indices for all child arrays.
        let nbuffers = u16::try_from(self.array.nbuffers())
            .vortex_expect("Array can have at most u16::MAX buffers");
        let mut child_buffer_idx = self.buffer_idx + nbuffers;

        let children = &self
            .array
            .children()
            .iter()
            .map(|child| {
                // Update the number of buffers required.
                let msg = ArrayNodeFlatBuffer {
                    ctx: self.ctx,
                    array: child,
                    buffer_idx: child_buffer_idx,
                }
                .write_flatbuffer(fbb);
                // Advance past every buffer in the child's subtree; panics if the running
                // total overflows u16.
                child_buffer_idx = u16::try_from(child.nbuffers_recursive())
                    .ok()
                    .and_then(|nbuffers| nbuffers.checked_add(child_buffer_idx))
                    .vortex_expect("Too many buffers (u16) for Array");
                msg
            })
            .collect::<Vec<_>>();
        let children = Some(fbb.create_vector(children));

        // This node's own buffers occupy the contiguous index range starting at buffer_idx.
        let buffers = Some(fbb.create_vector_from_iter((0..nbuffers).map(|i| i + self.buffer_idx)));
        let stats = Some(self.array.statistics().write_flatbuffer(fbb));

        fba::ArrayNode::create(
            fbb,
            &fba::ArrayNodeArgs {
                encoding,
                metadata,
                children,
                buffers,
                stats,
            },
        )
    }
}

/// To minimize the serialized form, arrays do not persist their own dtype and length. Instead,
/// parent arrays pass this information down during deserialization.
pub trait ArrayChildren {
    /// Returns the nth child of the array with the given dtype and length.
    ///
    /// # Errors
    ///
    /// Fails if the child at `index` cannot be decoded.
    fn get(&self, index: usize, dtype: &DType, len: usize) -> VortexResult<ArrayRef>;

    /// The number of children.
    fn len(&self) -> usize;

    /// Returns true if there are no children.
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

/// [`ArrayParts`] represents a parsed but not-yet-decoded deserialized [`Array`].
/// It contains all the information from the serialized form, without anything extra. i.e.
/// it is missing a [`DType`] and `len`, and the `encoding_id` is not yet resolved to a concrete
/// vtable.
///
/// An [`ArrayParts`] can be fully decoded into an [`ArrayRef`] using the `decode` function.
#[derive(Clone)]
pub struct ArrayParts {
    // Typed as fb::ArrayNode
    flatbuffer: FlatBuffer,
    // The location of the current fb::ArrayNode within `flatbuffer`. Child `ArrayParts`
    // share the same backing flatbuffer and differ only in this location.
    flatbuffer_loc: usize,
    // All buffers of the serialized tree; nodes reference them by index.
    buffers: Arc<[ByteBuffer]>,
}

impl Debug for ArrayParts {
    /// Renders the node's encoding, children (recursively, via this same impl), buffers,
    /// and metadata bytes.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Materialize the lazy iterators: `std::iter::Map`'s `Debug` impl only prints the
        // inner iterator (e.g. `Map { iter: 0..2 }`), not its elements, so the original
        // code never actually displayed the children or buffers.
        let children = (0..self.nchildren())
            .map(|i| self.child(i))
            .collect::<Vec<_>>();
        let buffers = (0..self.nbuffers())
            .map(|i| self.buffer(i).ok())
            .collect::<Vec<_>>();
        f.debug_struct("ArrayParts")
            .field("encoding_id", &self.encoding_id())
            .field("children", &children)
            .field("buffers", &buffers)
            .field("metadata", &self.metadata())
            .finish()
    }
}

impl ArrayParts {
    /// Decode an [`ArrayParts`] into an [`ArrayRef`].
    ///
    /// The caller supplies `dtype` and `len` because they are not stored in the serialized
    /// form. After building, the result is asserted to have the expected length, dtype, and
    /// encoding, and any serialized statistics are restored onto the decoded array.
    ///
    /// # Errors
    ///
    /// Fails if the encoding is unknown to `ctx`, a buffer index is invalid, or the
    /// encoding's `build` fails.
    pub fn decode(&self, ctx: &ArrayContext, dtype: &DType, len: usize) -> VortexResult<ArrayRef> {
        let encoding_id = self.flatbuffer().encoding();
        let vtable = ctx
            .lookup_encoding(encoding_id)
            .ok_or_else(|| vortex_err!("Unknown encoding: {}", encoding_id))?;

        // Resolve this node's buffers eagerly; fails on any out-of-range buffer index.
        let buffers: Vec<_> = (0..self.nbuffers())
            .map(|idx| self.buffer(idx))
            .try_collect()?;

        // Children are decoded lazily: the vtable requests each child together with the
        // dtype/len it expects (see `ArrayChildren`).
        let children = ArrayPartsChildren { parts: self, ctx };

        let decoded = vtable.build(dtype, len, self.metadata(), &buffers, &children)?;

        // A mismatch here is a bug in the encoding's `build`, not a recoverable error.
        assert_eq!(
            decoded.len(),
            len,
            "Array decoded from {} has incorrect length {}, expected {}",
            vtable.id(),
            decoded.len(),
            len
        );
        assert_eq!(
            decoded.dtype(),
            dtype,
            "Array decoded from {} has incorrect dtype {}, expected {}",
            vtable.id(),
            decoded.dtype(),
            dtype,
        );
        assert_eq!(
            decoded.encoding_id(),
            vtable.id(),
            "Array decoded from {} has incorrect encoding {}",
            vtable.id(),
            decoded.encoding_id(),
        );

        // Populate statistics from the serialized array.
        if let Some(stats) = self.flatbuffer().stats() {
            let decoded_statistics = decoded.statistics();
            StatsSet::read_flatbuffer(&stats)?
                .into_iter()
                .for_each(|(stat, val)| decoded_statistics.set(stat, val));
        }

        Ok(decoded)
    }

    /// Returns the array encoding.
    pub fn encoding_id(&self) -> u16 {
        self.flatbuffer().encoding()
    }

    /// Returns the array metadata bytes, or an empty slice if none were serialized.
    pub fn metadata(&self) -> &[u8] {
        self.flatbuffer()
            .metadata()
            .map(|metadata| metadata.bytes())
            .unwrap_or(&[])
    }

    /// Returns the number of children.
    pub fn nchildren(&self) -> usize {
        self.flatbuffer()
            .children()
            .map_or(0, |children| children.len())
    }

    /// Returns the nth child of the array.
    ///
    /// # Panics
    ///
    /// Panics if `idx` is out of range, or if the node has no children vector at all.
    pub fn child(&self, idx: usize) -> ArrayParts {
        let children = self
            .flatbuffer()
            .children()
            .vortex_expect("Expected array to have children");
        if idx >= children.len() {
            vortex_panic!(
                "Invalid child index {} for array with {} children",
                idx,
                children.len()
            );
        }
        self.with_root(children.get(idx))
    }

    /// Returns the number of buffers.
    pub fn nbuffers(&self) -> usize {
        self.flatbuffer()
            .buffers()
            .map_or(0, |buffers| buffers.len())
    }

    /// Returns the nth buffer of the current array.
    ///
    /// # Errors
    ///
    /// Fails if the node has no buffers vector, or if the recorded buffer index does not
    /// exist in the shared buffer list.
    pub fn buffer(&self, idx: usize) -> VortexResult<ByteBuffer> {
        let buffer_idx = self
            .flatbuffer()
            .buffers()
            .ok_or_else(|| vortex_err!("Array has no buffers"))?
            .get(idx);
        self.buffers
            .get(buffer_idx as usize)
            .cloned()
            .ok_or_else(|| {
                vortex_err!(
                    "Invalid buffer index {} for array with {} buffers",
                    buffer_idx,
                    self.nbuffers()
                )
            })
    }

    /// Returns the root ArrayNode flatbuffer.
    fn flatbuffer(&self) -> fba::ArrayNode<'_> {
        // SAFETY: `flatbuffer_loc` originates either from a root parsed with the verifying
        // `root::<fba::Array>()` (see `TryFrom<ByteBuffer>`) or from a child table of that
        // same verified buffer (see `with_root`), so following it is sound.
        unsafe { fba::ArrayNode::follow(self.flatbuffer.as_ref(), self.flatbuffer_loc) }
    }

    /// Returns a new [`ArrayParts`] with the given node as the root
    // TODO(ngates): we may want a wrapper that avoids this clone.
    fn with_root(&self, root: fba::ArrayNode) -> Self {
        let mut this = self.clone();
        this.flatbuffer_loc = root._tab.loc();
        this
    }
}

/// Adapter that lazily decodes the children of an [`ArrayParts`] on behalf of an encoding's
/// `build` (see [`ArrayChildren`]).
struct ArrayPartsChildren<'a> {
    parts: &'a ArrayParts,
    ctx: &'a ArrayContext,
}

impl ArrayChildren for ArrayPartsChildren<'_> {
    /// Decodes child `index` using the dtype and length supplied by the parent encoding.
    fn get(&self, index: usize, dtype: &DType, len: usize) -> VortexResult<ArrayRef> {
        let child_parts = self.parts.child(index);
        child_parts.decode(self.ctx, dtype, len)
    }

    /// Number of children recorded in the underlying flatbuffer node.
    fn len(&self) -> usize {
        self.parts.nchildren()
    }
}

impl TryFrom<ByteBuffer> for ArrayParts {
    type Error = VortexError;

    /// Parses the layout produced by `serialize`: data buffers (each optionally preceded by
    /// padding), then an [`fba::Array`] flatbuffer, then a little-endian u32 flatbuffer length.
    fn try_from(value: ByteBuffer) -> Result<Self, Self::Error> {
        // The final 4 bytes contain the length of the flatbuffer.
        if value.len() < 4 {
            vortex_bail!("ArrayParts buffer is too short");
        }

        // We align each buffer individually, so we remove alignment requirements on the buffer.
        let value = value.aligned(Alignment::none());

        let fb_length = u32::try_from_le_bytes(&value.as_slice()[value.len() - 4..])? as usize;
        if value.len() < 4 + fb_length {
            vortex_bail!("ArrayParts buffer is too short for flatbuffer");
        }

        let fb_offset = value.len() - 4 - fb_length;
        let fb_buffer = value.slice(fb_offset..fb_offset + fb_length);
        // Re-aligns (copying if necessary) so the flatbuffer can be accessed safely.
        let fb_buffer = FlatBuffer::align_from(fb_buffer);

        // `root` runs the flatbuffer verifier; `ArrayParts::flatbuffer` relies on this for
        // its later unchecked `follow`.
        let fb_array = root::<fba::Array>(fb_buffer.as_ref())?;
        let fb_root = fb_array.root().vortex_expect("Array must have a root node");

        // Walk the data region, reconstructing each buffer from its recorded padding,
        // alignment exponent, and length.
        // NOTE(review): nothing here checks that `offset + buffer_len` stays within the data
        // region preceding the flatbuffer; presumably `value.slice` is bounds-checked and
        // panics on a corrupt table — confirm.
        let mut offset = 0;
        let buffers: Arc<[ByteBuffer]> = fb_array
            .buffers()
            .unwrap_or_default()
            .iter()
            .map(|fb_buffer| {
                // Skip padding
                offset += fb_buffer.padding() as usize;

                let buffer_len = fb_buffer.length() as usize;

                // Extract a buffer and ensure it's aligned, copying if necessary
                let buffer = value
                    .slice(offset..(offset + buffer_len))
                    .aligned(Alignment::from_exponent(fb_buffer.alignment_exponent()));

                offset += buffer_len;
                buffer
            })
            .collect();

        Ok(ArrayParts {
            flatbuffer: fb_buffer.clone(),
            flatbuffer_loc: fb_root._tab.loc(),
            buffers,
        })
    }
}