// opensrv_clickhouse/types/column/concat.rs

1// Copyright 2021 Datafuse Labs.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::iter;
16
17use super::column_data::ArcColumnData;
18use super::column_data::BoxColumnData;
19use super::column_data::ColumnData;
20use crate::binary::Encoder;
21use crate::errors::Error;
22use crate::errors::FromSqlError;
23use crate::errors::Result;
24use crate::types::SqlType;
25use crate::types::Value;
26use crate::types::ValueRef;
27
/// A read-only column that presents several column chunks of the same
/// SQL type as one logically contiguous column.
pub struct ConcatColumnData {
    // The underlying chunks, in concatenation order. All chunks share the
    // same `SqlType` (enforced by `check_columns` at construction time).
    data: Vec<ArcColumnData>,
    // Prefix sums of chunk lengths: `index[0] == 0` and `index[k]` is the
    // total row count of the first `k` chunks, so `index.last()` is the
    // overall length. Built by `build_index`.
    index: Vec<usize>,
}
32
33impl ConcatColumnData {
34    pub fn concat(data: Vec<ArcColumnData>) -> Self {
35        Self::check_columns(&data);
36
37        let index = build_index(data.iter().map(|x| x.len()));
38        Self { data, index }
39    }
40
41    fn check_columns(data: &[ArcColumnData]) {
42        match data.first() {
43            None => panic!("data should not be empty."),
44            Some(first) => {
45                for column in data.iter().skip(1) {
46                    if first.sql_type() != column.sql_type() {
47                        panic!(
48                            "all columns should have the same type ({:?} != {:?}).",
49                            first.sql_type(),
50                            column.sql_type()
51                        );
52                    }
53                }
54            }
55        }
56    }
57}
58
impl ColumnData for ConcatColumnData {
    /// The SQL type shared by all chunks (guaranteed identical by
    /// `check_columns` at construction).
    fn sql_type(&self) -> SqlType {
        self.data[0].sql_type()
    }

    /// Serialization is not supported for this read-only view.
    fn save(&self, _: &mut Encoder, _: usize, _: usize) {
        unimplemented!()
    }

    /// Total number of rows across all chunks — the last prefix sum.
    /// `index` is never empty (`build_index` always pushes an initial 0),
    /// so the `unwrap` cannot fail.
    fn len(&self) -> usize {
        *self.index.last().unwrap()
    }

    /// Appending is not supported for this read-only view.
    fn push(&mut self, _value: Value) {
        unimplemented!()
    }

    /// Returns the value at logical row `index`: locate the owning chunk
    /// via binary search over the prefix sums, then rebase the index to a
    /// chunk-local offset.
    fn at(&self, index: usize) -> ValueRef {
        let chunk_index = find_chunk(&self.index, index);
        let chunk = &self.data[chunk_index];
        chunk.at(index - self.index[chunk_index])
    }

    /// Cloning is not supported for this read-only view.
    fn clone_instance(&self) -> BoxColumnData {
        unimplemented!()
    }

    unsafe fn get_internal(&self, pointers: &[*mut *const u8], level: u8) -> Result<()> {
        // NOTE(review): level 0xff appears to be an internal convention for
        // handing out a raw pointer to the chunk vector (`Vec<ArcColumnData>`)
        // rather than a typed payload — confirm against the other ColumnData
        // implementations that call get_internal.
        if level == 0xff {
            *pointers[0] = &self.data as *const Vec<ArcColumnData> as *mut u8;
            Ok(())
        } else {
            // Any other level is not meaningful for a concatenated column.
            Err(Error::FromSql(FromSqlError::UnsupportedOperation))
        }
    }
}
95
/// Builds the prefix-sum index for a sequence of chunk sizes.
///
/// The result always starts with 0, and entry `k` is the total length of
/// the first `k` chunks; the final entry is the overall length. An empty
/// `sizes` iterator therefore yields `vec![0]`.
pub fn build_index<'a, I>(sizes: I) -> Vec<usize>
where
    I: iter::Iterator<Item = usize> + 'a,
{
    // Running total folded over the sizes, prefixed with the initial 0.
    iter::once(0)
        .chain(sizes.scan(0, |total, size| {
            *total += size;
            Some(*total)
        }))
        .collect()
}
110
/// Binary-searches the prefix-sum `index` for the chunk containing logical
/// row `ix`, i.e. the `k` with `index[k] <= ix < index[k + 1]`.
///
/// Zero-length chunks (adjacent equal prefix sums) at the lower bound are
/// stepped over so they are never reported. If `ix` lies outside every
/// chunk, the fallback result is chunk 0.
pub fn find_chunk(index: &[usize], ix: usize) -> usize {
    let mut low = 0_usize;
    let mut high = index.len() - 1;

    while low < high {
        // An empty chunk at `low` can never contain `ix`; step past it.
        if index[low] == index[low + 1] {
            low += 1;
            continue;
        }

        let mid = low + (high - low) / 2;
        if ix < index[mid] {
            high = mid;
        } else if ix >= index[mid + 1] {
            low = mid + 1;
        } else {
            return mid;
        }
    }

    0
}
133}