// polars_row/row.rs

#![allow(unsafe_op_in_unsafe_fn)]
use arrow::array::{BinaryArray, BinaryViewArray};
use arrow::datatypes::ArrowDataType;
use arrow::ffi::mmap;
use arrow::offset::{Offsets, OffsetsBuffer};
use polars_compute::cast::binary_to_binview;

const BOOLEAN_TRUE_SENTINEL: u8 = 0x03;
const BOOLEAN_FALSE_SENTINEL: u8 = 0x02;

/// Additional context provided to row encoding regarding a column.
///
/// This allows communication based on the Polars datatype instead of the Arrow datatype. Since
/// polars-row is used under polars-core, we don't have access to the actual datatypes.
#[derive(Debug, Clone)]
pub enum RowEncodingContext {
    Struct(Vec<Option<RowEncodingContext>>),
    /// Categorical / Enum
    Categorical(RowEncodingCategoricalContext),
    /// Decimal with given precision
    Decimal(usize),
}

#[derive(Debug, Clone)]
pub struct RowEncodingCategoricalContext {
    /// The number of categories currently known for this categorical / enum.
    pub num_known_categories: u32,
    pub is_enum: bool,

    /// The mapping from category key to lexical sort index.
    pub lexical_sort_idxs: Option<Vec<u32>>,
}

impl RowEncodingCategoricalContext {
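    /// Returns the number of bits needed to encode every known category index.
    ///
    /// The examples below are computed from the formula in the body, not taken
    /// from the source: 0 known categories need 0 bits, 2 known categories
    /// (max index 1) need 1 bit, and 5 known categories (max index 4) need 3 bits.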
    pub fn needed_num_bits(&self) -> usize {
        if self.num_known_categories == 0 {
            0
        } else {
            let max_category_index = self.num_known_categories - 1;
            (max_category_index.next_power_of_two().trailing_zeros() + 1) as usize
        }
    }
}

bitflags::bitflags! {
    /// Options for the Polars Row Encoding.
    ///
    /// The row encoding provides a method to combine several columns into one binary column which
    /// has the same sort-order as the original columns.
    ///
    /// By default, the row encoding provides the ascending, nulls-first sort-order of the columns.
    #[derive(Debug, Clone, Copy, Default)]
    pub struct RowEncodingOptions: u8 {
        /// Sort in descending order instead of ascending order.
        const DESCENDING               = 0x01;
        /// Sort such that nulls / missing values are last.
        const NULLS_LAST               = 0x02;

        /// Ignore all order-related flags and don't use an order-preserving encoding. This still
        /// keeps uniqueness.
        ///
        /// This is faster for several encodings.
        const NO_ORDER                 = 0x04;
    }
}

const LIST_CONTINUATION_TOKEN: u8 = 0xFE;
const EMPTY_STR_TOKEN: u8 = 0x01;

impl RowEncodingOptions {
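    /// Builds options for an order-preserving encoding with the given sort
    /// direction and null placement.
    ///
    /// A minimal usage sketch (assumed, not from the source):
    ///
    /// ```ignore
    /// // Descending order, nulls first.
    /// let opts = RowEncodingOptions::new_sorted(true, false);
    /// assert!(opts.contains(RowEncodingOptions::DESCENDING));
    /// assert!(opts.is_ordered());
    /// ```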
    pub fn new_sorted(descending: bool, nulls_last: bool) -> Self {
        let mut slf = Self::default();
        slf.set(Self::DESCENDING, descending);
        slf.set(Self::NULLS_LAST, nulls_last);
        slf
    }

    pub fn new_unsorted() -> Self {
        Self::NO_ORDER
    }

    pub fn is_ordered(self) -> bool {
        !self.contains(Self::NO_ORDER)
    }

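    // Added note: 0x00 compares before any other byte and 0xFF after any other
    // byte, which is what places null rows first or last relative to the
    // encodings of non-null values.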
    pub fn null_sentinel(self) -> u8 {
        if self.contains(Self::NULLS_LAST) {
            0xFF
        } else {
            0x00
        }
    }

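    // Added note: for descending order the sentinels are bitwise-inverted;
    // since !x == 255 - x for a u8, flipping every bit reverses the byte-wise
    // comparison order.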
    pub(crate) fn bool_true_sentinel(self) -> u8 {
        if self.contains(Self::DESCENDING) {
            !BOOLEAN_TRUE_SENTINEL
        } else {
            BOOLEAN_TRUE_SENTINEL
        }
    }

    pub(crate) fn bool_false_sentinel(self) -> u8 {
        if self.contains(Self::DESCENDING) {
            !BOOLEAN_FALSE_SENTINEL
        } else {
            BOOLEAN_FALSE_SENTINEL
        }
    }

    pub fn list_null_sentinel(self) -> u8 {
        self.null_sentinel()
    }

    pub fn list_continuation_token(self) -> u8 {
        if self.contains(Self::DESCENDING) {
            !LIST_CONTINUATION_TOKEN
        } else {
            LIST_CONTINUATION_TOKEN
        }
    }

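    // Added note: the termination token is defined as the bitwise complement of
    // the continuation token, so the two remain distinct whether or not
    // DESCENDING is set.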
    pub fn list_termination_token(self) -> u8 {
        !self.list_continuation_token()
    }

    pub fn empty_str_token(self) -> u8 {
        if self.contains(Self::DESCENDING) {
            !EMPTY_STR_TOKEN
        } else {
            EMPTY_STR_TOKEN
        }
    }
}

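/// The result of row encoding: one flat `values` byte buffer plus `offsets`
/// delimiting the rows, with row `i` occupying `values[offsets[i]..offsets[i + 1]]`
/// (see the `get` accessor below).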
#[derive(Default, Clone)]
pub struct RowsEncoded {
    pub(crate) values: Vec<u8>,
    pub(crate) offsets: Vec<usize>,
}

unsafe fn rows_to_array(buf: Vec<u8>, offsets: Vec<usize>) -> BinaryArray<i64> {
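    // Added note: on 64-bit targets `usize` and `i64` have the same size, so
    // the offsets vector is reinterpreted in place; otherwise it is copied
    // element by element.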
    let offsets = if size_of::<usize>() == size_of::<i64>() {
        assert!(
            (*offsets.last().unwrap() as u64) < i64::MAX as u64,
            "row encoding output overflowed"
        );

        // SAFETY: we checked overflow and size
        bytemuck::cast_vec::<usize, i64>(offsets)
    } else {
        offsets.into_iter().map(|v| v as i64).collect()
    };

    // SAFETY: monotonically increasing
    let offsets = Offsets::new_unchecked(offsets);

    BinaryArray::new(ArrowDataType::LargeBinary, offsets.into(), buf.into(), None)
}

impl RowsEncoded {
    pub(crate) fn new(values: Vec<u8>, offsets: Vec<usize>) -> Self {
        RowsEncoded { values, offsets }
    }

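    /// Returns an iterator that yields each encoded row as a `&[u8]` slice.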
    pub fn iter(&self) -> RowsEncodedIter {
        let iter = self.offsets[1..].iter();
        let offset = self.offsets[0];
        RowsEncodedIter {
            offset,
            end: iter,
            values: &self.values,
        }
    }

    /// Borrows the buffers and returns a [`BinaryArray`].
    ///
    /// # Safety
    /// The lifetime of the returned `BinaryArray` is tied to the lifetime of
    /// `self`. The caller must ensure that `self` outlives the returned array.
    pub unsafe fn borrow_array(&self) -> BinaryArray<i64> {
        let (_, values, _) = unsafe { mmap::slice(&self.values) }.into_inner();
        let offsets = if size_of::<usize>() == size_of::<i64>() {
            assert!(
                (*self.offsets.last().unwrap() as u64) < i64::MAX as u64,
                "row encoding output overflowed"
            );

            let offsets = bytemuck::cast_slice::<usize, i64>(self.offsets.as_slice());
            let (_, offsets, _) = unsafe { mmap::slice(offsets) }.into_inner();
            offsets
        } else {
            self.offsets.iter().map(|&v| v as i64).collect()
        };
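        // SAFETY (added comment): the offsets are monotonically increasing,
        // the same invariant relied upon in `rows_to_array` above.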
        let offsets = OffsetsBuffer::new_unchecked(offsets);

        BinaryArray::new(ArrowDataType::LargeBinary, offsets, values, None)
    }

    /// This conversion is free.
    pub fn into_array(self) -> BinaryArray<i64> {
        unsafe { rows_to_array(self.values, self.offsets) }
    }

    /// This does allocate views.
    pub fn into_binview(self) -> BinaryViewArray {
        binary_to_binview(&self.into_array())
    }

    #[cfg(test)]
    pub fn get(&self, i: usize) -> &[u8] {
        let start = self.offsets[i];
        let end = self.offsets[i + 1];
        &self.values[start..end]
    }
}

pub struct RowsEncodedIter<'a> {
    offset: usize,
    end: std::slice::Iter<'a, usize>,
    values: &'a [u8],
}

impl<'a> Iterator for RowsEncodedIter<'a> {
    type Item = &'a [u8];

    fn next(&mut self) -> Option<Self::Item> {
        let new_offset = *self.end.next()?;
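        // SAFETY (added comment): `new_offset` comes from the offsets buffer,
        // whose entries are assumed monotonically increasing and bounded by
        // `values.len()`, so the slice range is valid.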
        let payload = unsafe { self.values.get_unchecked(self.offset..new_offset) };
        self.offset = new_offset;
        Some(payload)
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.end.size_hint()
    }
}