polars_core/series/implementations/list.rs

use super::*;
use crate::chunked_array::comparison::*;
#[cfg(feature = "algorithm_group_by")]
use crate::frame::group_by::*;
use crate::prelude::row_encode::_get_rows_encoded_ca_unordered;
use crate::prelude::*;

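// `SeriesWrap<ListChunked>` adapts a `ListChunked` to the dynamic `Series`
// interface; most of the methods below delegate directly to the wrapped
// chunked array.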
impl private::PrivateSeries for SeriesWrap<ListChunked> {
    fn compute_len(&mut self) {
        self.0.compute_len()
    }
    fn _field(&self) -> Cow<'_, Field> {
        Cow::Borrowed(self.0.ref_field())
    }
    fn _dtype(&self) -> &DataType {
        self.0.ref_field().dtype()
    }
    fn _get_flags(&self) -> StatisticsFlags {
        self.0.get_flags()
    }
    fn _set_flags(&mut self, flags: StatisticsFlags) {
        self.0.set_flags(flags)
    }

    unsafe fn equal_element(&self, idx_self: usize, idx_other: usize, other: &Series) -> bool {
        self.0.equal_element(idx_self, idx_other, other)
    }

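    // Lists have no direct hashing kernel. Both hash implementations first
    // row-encode the list column into a binary representation and hash that,
    // so structurally equal lists hash to the same value.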
    fn vec_hash(
        &self,
        build_hasher: PlSeedableRandomStateQuality,
        buf: &mut Vec<u64>,
    ) -> PolarsResult<()> {
        _get_rows_encoded_ca_unordered(PlSmallStr::EMPTY, &[self.0.clone().into_column()])?
            .vec_hash(build_hasher, buf)
    }

    fn vec_hash_combine(
        &self,
        build_hasher: PlSeedableRandomStateQuality,
        hashes: &mut [u64],
    ) -> PolarsResult<()> {
        _get_rows_encoded_ca_unordered(PlSmallStr::EMPTY, &[self.0.clone().into_column()])?
            .vec_hash_combine(build_hasher, hashes)
    }

    #[cfg(feature = "zip_with")]
    fn zip_with_same_type(&self, mask: &BooleanChunked, other: &Series) -> PolarsResult<Series> {
        ChunkZip::zip_with(&self.0, mask, other.as_ref().as_ref()).map(|ca| ca.into_series())
    }

    #[cfg(feature = "algorithm_group_by")]
    unsafe fn agg_list(&self, groups: &GroupsType) -> Series {
        self.0.agg_list(groups)
    }

    #[cfg(feature = "algorithm_group_by")]
    fn group_tuples(&self, multithreaded: bool, sorted: bool) -> PolarsResult<GroupsType> {
        IntoGroupsType::group_tuples(&self.0, multithreaded, sorted)
    }

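    // Lists support total equality but not total ordering; requesting an
    // ordering handle panics.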
    fn into_total_eq_inner<'a>(&'a self) -> Box<dyn TotalEqInner + 'a> {
        (&self.0).into_total_eq_inner()
    }
    fn into_total_ord_inner<'a>(&'a self) -> Box<dyn TotalOrdInner + 'a> {
        invalid_operation_panic!(into_total_ord_inner, self)
    }

    fn add_to(&self, rhs: &Series) -> PolarsResult<Series> {
        self.0.add_to(rhs)
    }

    fn subtract(&self, rhs: &Series) -> PolarsResult<Series> {
        self.0.subtract(rhs)
    }

    fn multiply(&self, rhs: &Series) -> PolarsResult<Series> {
        self.0.multiply(rhs)
    }
    fn divide(&self, rhs: &Series) -> PolarsResult<Series> {
        self.0.divide(rhs)
    }
    fn remainder(&self, rhs: &Series) -> PolarsResult<Series> {
        self.0.remainder(rhs)
    }
}

impl SeriesTrait for SeriesWrap<ListChunked> {
    fn rename(&mut self, name: PlSmallStr) {
        self.0.rename(name);
    }

    fn chunk_lengths(&self) -> ChunkLenIter<'_> {
        self.0.chunk_lengths()
    }
    fn name(&self) -> &PlSmallStr {
        self.0.name()
    }

    fn chunks(&self) -> &Vec<ArrayRef> {
        self.0.chunks()
    }
    unsafe fn chunks_mut(&mut self) -> &mut Vec<ArrayRef> {
        self.0.chunks_mut()
    }
    fn shrink_to_fit(&mut self) {
        self.0.shrink_to_fit()
    }

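    // Summing a list series is not defined; bail with a hint towards
    // `concat_list` rather than silently doing the wrong thing.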
    fn sum_reduce(&self) -> PolarsResult<Scalar> {
        polars_bail!(
            op = "`sum`",
            self.dtype(),
            hint = "you may mean to call `concat_list`"
        );
    }

    fn arg_sort(&self, options: SortOptions) -> IdxCa {
        self.0.arg_sort(options)
    }

    fn sort_with(&self, options: SortOptions) -> PolarsResult<Series> {
        Ok(self.0.sort_with(options).into_series())
    }

    fn slice(&self, offset: i64, length: usize) -> Series {
        self.0.slice(offset, length).into_series()
    }

    fn split_at(&self, offset: i64) -> (Series, Series) {
        let (a, b) = self.0.split_at(offset);
        (a.into_series(), b.into_series())
    }

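    // Both append variants require the exact same dtype (including the inner
    // list dtype); mismatches surface as an `append` error.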
    fn append(&mut self, other: &Series) -> PolarsResult<()> {
        polars_ensure!(self.0.dtype() == other.dtype(), append);
        self.0.append(other.as_ref().as_ref())
    }
    fn append_owned(&mut self, other: Series) -> PolarsResult<()> {
        polars_ensure!(self.0.dtype() == other.dtype(), append);
        self.0.append_owned(other.take_inner())
    }

    fn extend(&mut self, other: &Series) -> PolarsResult<()> {
        polars_ensure!(self.0.dtype() == other.dtype(), extend);
        self.0.extend(other.as_ref().as_ref())
    }

    fn filter(&self, filter: &BooleanChunked) -> PolarsResult<Series> {
        ChunkFilter::filter(&self.0, filter).map(|ca| ca.into_series())
    }

    fn take(&self, indices: &IdxCa) -> PolarsResult<Series> {
        Ok(self.0.take(indices)?.into_series())
    }

    unsafe fn take_unchecked(&self, indices: &IdxCa) -> Series {
        self.0.take_unchecked(indices).into_series()
    }

    fn take_slice(&self, indices: &[IdxSize]) -> PolarsResult<Series> {
        Ok(self.0.take(indices)?.into_series())
    }

    unsafe fn take_slice_unchecked(&self, indices: &[IdxSize]) -> Series {
        self.0.take_unchecked(indices).into_series()
    }

    fn deposit(&self, validity: &Bitmap) -> Series {
        self.0.deposit(validity).into_series()
    }

    fn len(&self) -> usize {
        self.0.len()
    }

    fn rechunk(&self) -> Series {
        self.0.rechunk().into_owned().into_series()
    }

    fn new_from_index(&self, index: usize, length: usize) -> Series {
        ChunkExpandAtIndex::new_from_index(&self.0, index, length).into_series()
    }

    fn trim_lists_to_normalized_offsets(&self) -> Option<Series> {
        self.0
            .trim_lists_to_normalized_offsets()
            .map(IntoSeries::into_series)
    }

    fn propagate_nulls(&self) -> Option<Series> {
        self.0.propagate_nulls().map(IntoSeries::into_series)
    }

    fn cast(&self, dtype: &DataType, cast_options: CastOptions) -> PolarsResult<Series> {
        self.0.cast_with_options(dtype, cast_options)
    }

    #[inline]
    unsafe fn get_unchecked(&self, index: usize) -> AnyValue<'_> {
        self.0.get_any_value_unchecked(index)
    }

    fn null_count(&self) -> usize {
        self.0.null_count()
    }

    fn has_nulls(&self) -> bool {
        self.0.has_nulls()
    }

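    // `unique` is implemented as a group-by: identical lists land in the same
    // group, and taking each group's first element yields the distinct lists.
    // Only primitive numeric inner dtypes are supported.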
    #[cfg(feature = "algorithm_group_by")]
    fn unique(&self) -> PolarsResult<Series> {
        if !self.inner_dtype().is_primitive_numeric() {
            polars_bail!(opq = unique, self.dtype());
        }
        // This can be called during aggregation, so this fast path can be worth a lot.
        if self.len() < 2 {
            return Ok(self.0.clone().into_series());
        }
        let main_thread = POOL.current_thread_index().is_none();
        let groups = self.group_tuples(main_thread, false);
        // SAFETY: the group indices are in bounds.
        Ok(unsafe { self.0.clone().into_series().agg_first(&groups?) })
    }

    #[cfg(feature = "algorithm_group_by")]
    fn n_unique(&self) -> PolarsResult<usize> {
        // This can be called during aggregation, so this fast path can be worth a lot.
        match self.len() {
            0 => Ok(0),
            1 => Ok(1),
            _ => {
                let main_thread = POOL.current_thread_index().is_none();
                let groups = self.group_tuples(main_thread, false)?;
                Ok(groups.len())
            },
        }
    }

    #[cfg(feature = "algorithm_group_by")]
    fn arg_unique(&self) -> PolarsResult<IdxCa> {
        if !self.inner_dtype().is_primitive_numeric() {
            polars_bail!(opq = arg_unique, self.dtype());
        }
        // This can be called during aggregation, so this fast path can be worth a lot.
        if self.len() == 1 {
            return Ok(IdxCa::new_vec(self.name().clone(), vec![0 as IdxSize]));
        }
        let main_thread = POOL.current_thread_index().is_none();
        // `arg_unique` requires a stable order, so request sorted group tuples.
        let groups = self.group_tuples(main_thread, true)?;
        let first = groups.take_group_firsts();
        Ok(IdxCa::from_vec(self.name().clone(), first))
    }

    fn is_null(&self) -> BooleanChunked {
        self.0.is_null()
    }

    fn is_not_null(&self) -> BooleanChunked {
        self.0.is_not_null()
    }

    fn reverse(&self) -> Series {
        ChunkReverse::reverse(&self.0).into_series()
    }

    fn as_single_ptr(&mut self) -> PolarsResult<usize> {
        self.0.as_single_ptr()
    }

    fn shift(&self, periods: i64) -> Series {
        ChunkShift::shift(&self.0, periods).into_series()
    }

    fn clone_inner(&self) -> Arc<dyn SeriesTrait> {
        Arc::new(SeriesWrap(Clone::clone(&self.0)))
    }

    fn find_validity_mismatch(&self, other: &Series, idxs: &mut Vec<IdxSize>) {
        self.0.find_validity_mismatch(other, idxs)
    }

    fn as_any(&self) -> &dyn Any {
        &self.0
    }

    fn as_any_mut(&mut self) -> &mut dyn Any {
        &mut self.0
    }

    fn as_phys_any(&self) -> &dyn Any {
        &self.0
    }

    fn as_arc_any(self: Arc<Self>) -> Arc<dyn Any + Send + Sync> {
        self as _
    }
}
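
// A minimal smoke-test sketch (not part of the original file) exercising the
// group-by based `unique`/`n_unique` paths above. The list constructor used
// (`Series::new` from a slice of series) is assumed from the public
// polars-core API.
#[cfg(all(test, feature = "algorithm_group_by"))]
mod test {
    use super::*;

    #[test]
    fn list_unique_via_group_by() -> PolarsResult<()> {
        // Three rows, two distinct lists; the inner dtype i32 is primitive
        // numeric, so the `unique` path applies.
        let a = Series::new("i".into(), &[1i32, 2, 3]);
        let b = Series::new("i".into(), &[4i32, 5, 6]);
        let s = Series::new("l".into(), &[a.clone(), b, a]);

        assert_eq!(s.n_unique()?, 2);
        assert_eq!(s.unique()?.len(), 2);
        Ok(())
    }
}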