arrow_odbc/
odbc_writer.rs

use std::{borrow::Cow, cmp::min, sync::Arc};

use thiserror::Error;

use arrow::{
    array::Array,
    datatypes::{
        DataType, Date32Type, Date64Type, Field, Float16Type, Float32Type, Float64Type, Int8Type,
        Int16Type, Int32Type, Int64Type, Schema, Time32MillisecondType, Time32SecondType,
        Time64MicrosecondType, Time64NanosecondType, TimeUnit, UInt8Type,
    },
    error::ArrowError,
    record_batch::{RecordBatch, RecordBatchReader},
};
use odbc_api::{
    ColumnarBulkInserter, Connection, ConnectionTransitions, Prepared,
    buffers::{AnyBuffer, AnySliceMut, BufferDesc},
    handles::{AsStatementRef, StatementConnection, StatementImpl, StatementParent},
};

use crate::{
    date_time::{NullableTimeAsText, epoch_to_date, sec_since_midnight_to_time},
    decimal::{NullableDecimal128AsText, NullableDecimal256AsText},
    odbc_writer::timestamp::insert_timestamp_strategy,
};

use self::{
    binary::VariadicBinary,
    boolean::boolean_to_bit,
    map_arrow_to_odbc::MapArrowToOdbc,
    text::{LargeUtf8ToNativeText, Utf8ToNativeText},
};

mod binary;
mod boolean;
mod map_arrow_to_odbc;
mod text;
mod timestamp;

/// Fastest and most convenient way to stream the contents of arrow record batches into a database
/// table. For use cases where you want to insert repeatedly into the same table from different
/// streams, it is more efficient to create an instance of [`self::OdbcWriter`] and reuse it.
///
/// **Note:**
///
/// If table or column names are derived from user input, be sure to sanitize the input in order to
/// prevent SQL injection attacks.
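///
/// # Example
///
/// A minimal sketch of streaming a reader into a table. The connection string and table name are
/// illustrative placeholders; adjust them for your driver and data source.
///
/// ```no_run
/// use arrow_odbc::{
///     insert_into_table,
///     arrow::record_batch::RecordBatchReader,
///     odbc_api::{ConnectionOptions, Environment},
/// };
///
/// # fn example(mut reader: impl RecordBatchReader) -> Result<(), Box<dyn std::error::Error>> {
/// let environment = Environment::new()?;
/// let connection = environment.connect_with_connection_string(
///     "YourConnectionString",
///     ConnectionOptions::default(),
/// )?;
/// // Send the contents of `reader` to `MyTable`, at most 1000 rows per roundtrip.
/// insert_into_table(&connection, &mut reader, "MyTable", 1000)?;
/// # Ok(())
/// # }
/// ```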
pub fn insert_into_table(
    connection: &Connection,
    batches: &mut impl RecordBatchReader,
    table_name: &str,
    batch_size: usize,
) -> Result<(), WriterError> {
    let schema = batches.schema();
    let mut inserter =
        OdbcWriter::with_connection(connection, schema.as_ref(), table_name, batch_size)?;
    inserter.write_all(batches)
}

/// Generates an insert statement using the table and column names.
///
/// `INSERT INTO <table> (<column_names 0>, <column_names 1>, ...) VALUES (?, ?, ...)`
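///
/// For example, table `MyTable` with column names `a` and `b` renders as:
///
/// ```text
/// INSERT INTO MyTable (a, b) VALUES (?, ?)
/// ```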
fn insert_statement_text(table: &str, column_names: &[&'_ str]) -> String {
    // Generate statement text from table and column names
    let column_names = column_names
        .iter()
        .map(|cn| quote_column_name(cn))
        .collect::<Vec<_>>();
    let columns = column_names.join(", ");
    let values = column_names
        .iter()
        .map(|_| "?")
        .collect::<Vec<_>>()
        .join(", ");
    // Do not finish the statement with a semicolon. There is anecdotal evidence of IBM DB2
    // rejecting the statement, because it then expects multiple statements.
    // See: <https://github.com/pacman82/arrow-odbc/issues/63>
    format!("INSERT INTO {table} ({columns}) VALUES ({values})")
}

/// Wraps column name in quotes, if need be.
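///
/// A name containing a character outside the allowed set is wrapped in double quotes; names which
/// are already quoted, or need no quoting, pass through unchanged:
///
/// ```text
/// my column   -> "my column"
/// [my column] -> [my column]
/// id          -> id
/// ```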
fn quote_column_name(column_name: &str) -> Cow<'_, str> {
    // We do not want to apply quoting in case the string is already quoted. See:
    // <https://github.com/pacman82/arrow-odbc-py/issues/162>
    //
    // Another approach would have been to apply quoting after detecting keywords. Yet the list of
    // reserved keywords is large. There is also the issue of different databases having different
    // quoting rules. So the strategy chosen here is to apply quoting in fewer situations rather
    // than more, so the user has more control over the final statement. This crate is about arrow
    // and odbc, less so about SQL dialects and statement construction.
    let is_already_quoted = || {
        (column_name.starts_with('"') && column_name.ends_with('"'))
            || column_name.starts_with('[') && column_name.ends_with(']')
            || column_name.starts_with('`') && column_name.ends_with('`')
    };
    let contains_invalid_characters = || column_name.contains(|c| !valid_in_column_name(c));
    let needs_quotes = contains_invalid_characters() && !is_already_quoted();
    if needs_quotes {
        Cow::Owned(format!("\"{column_name}\""))
    } else {
        Cow::Borrowed(column_name)
    }
}

/// Check if this character is allowed in an unquoted column name
fn valid_in_column_name(c: char) -> bool {
    // See:
    // <https://stackoverflow.com/questions/4200351/what-characters-are-valid-in-an-sql-server-database-name>
    c.is_alphanumeric() || c == '@' || c == '$' || c == '#' || c == '_'
}

/// Creates an SQL insert statement from an arrow schema. The resulting statement will have one
/// placeholder (`?`) for each column in the statement.
///
/// **Note:**
///
/// If a column name contains any character which would make it an invalid qualifier for
/// Transact-SQL, it will be wrapped in double quotes (`"`) within the insert statement. Valid
/// names consist of alphanumeric characters, `@`, `$`, `#` and `_`.
///
/// # Example
///
/// ```
/// use arrow_odbc::{
///     insert_statement_from_schema,
///     arrow::datatypes::{Field, DataType, Schema},
/// };
///
/// let field_a = Field::new("a", DataType::Int64, false);
/// let field_b = Field::new("b", DataType::Boolean, false);
///
/// let schema = Schema::new(vec![field_a, field_b]);
/// let sql = insert_statement_from_schema(&schema, "MyTable");
///
/// assert_eq!("INSERT INTO MyTable (a, b) VALUES (?, ?)", sql)
/// ```
///
/// This function is automatically invoked by [`crate::OdbcWriter::with_connection`].
pub fn insert_statement_from_schema(schema: &Schema, table_name: &str) -> String {
    let column_names: Vec<_> = schema
        .fields()
        .iter()
        .map(|field| field.name().as_str())
        .collect();
    insert_statement_text(table_name, &column_names)
}

/// Emitted when writing values from arrow arrays into a table on the database fails.
#[derive(Debug, Error)]
pub enum WriterError {
    #[error("Failure to bind the array parameter buffers to the statement.\n{0}")]
    BindParameterBuffers(#[source] odbc_api::Error),
    #[error("Failure to execute the sql statement, sending the data to the database.\n{0}")]
    ExecuteStatment(#[source] odbc_api::Error),
    #[error("An error occurred rebinding a parameter buffer to the sql statement.\n{0}")]
    RebindBuffer(#[source] odbc_api::Error),
    #[error("The arrow data type {0} is not supported for insertion.")]
    UnsupportedArrowDataType(DataType),
    #[error("An error occurred extracting a record batch from a record batch reader.\n{0}")]
    ReadingRecordBatch(#[source] ArrowError),
    #[error("Unable to parse '{time_zone}' into a valid IANA time zone.")]
    InvalidTimeZone { time_zone: Arc<str> },
    #[error("An error occurred preparing SQL statement. SQL:\n{sql}\n{source}")]
    PreparingInsertStatement {
        #[source]
        source: odbc_api::Error,
        sql: String,
    },
}

/// Inserts batches from an [`arrow::record_batch::RecordBatchReader`] into a database.
pub struct OdbcWriter<S> {
    /// Prepared statement with bound array parameter buffers. Data is copied into these buffers
    /// until they are full. Then we execute the statement. This is repeated until we run out of
    /// data.
    inserter: ColumnarBulkInserter<S, AnyBuffer>,
    /// For each field in the arrow schema we decide on which buffer to use to send the parameters
    /// to the database, and need to remember how to copy the data from an arrow array to an odbc
    /// mutable buffer slice for any column.
    strategies: Vec<Box<dyn WriteStrategy>>,
}

impl<S> OdbcWriter<S>
where
    S: AsStatementRef,
{
    /// Construct a new ODBC writer using an already existing prepared statement. Usually you want
    /// to call a higher level constructor like [`Self::with_connection`]. Yet, this constructor is
    /// useful in two scenarios.
    ///
    /// 1. The prepared statement is already constructed and you do not want to spend the time to
    ///    prepare it again.
    /// 2. You want to use the arrow arrays as array parameters for a statement, but that statement
    ///    is not necessarily an INSERT statement with a simple one-to-one mapping of columns
    ///    between table and arrow schema (see the example below).
    ///
    /// # Parameters
    ///
    /// * `row_capacity`: The number of rows sent to the database in each chunk, with the exception
    ///   of the last chunk, which may be smaller.
    /// * `schema`: Schema needs to have one column for each positional parameter of the statement
    ///   and match the data which will be supplied to the instance later. Otherwise your code will
    ///   panic.
    /// * `statement`: A prepared statement whose SQL text representation contains one placeholder
    ///   for each column. The order of the placeholders must correspond to the order of the
    ///   columns in the `schema`.
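    ///
    /// # Example
    ///
    /// A minimal sketch of scenario 2, binding arrow columns to a hypothetical UPDATE statement
    /// instead of an INSERT. Connection string, table and column names are illustrative
    /// placeholders.
    ///
    /// ```no_run
    /// use arrow_odbc::{
    ///     OdbcWriter,
    ///     arrow::datatypes::{DataType, Field, Schema},
    ///     odbc_api::{ConnectionOptions, Environment},
    /// };
    ///
    /// # fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let environment = Environment::new()?;
    /// let connection = environment.connect_with_connection_string(
    ///     "YourConnectionString",
    ///     ConnectionOptions::default(),
    /// )?;
    /// let statement = connection.prepare("UPDATE MyTable SET value = ? WHERE id = ?")?;
    /// // One field per placeholder, in the order the placeholders appear in the statement.
    /// let schema = Schema::new(vec![
    ///     Field::new("value", DataType::Int64, true),
    ///     Field::new("id", DataType::Int64, false),
    /// ]);
    /// let writer = OdbcWriter::new(1000, &schema, statement)?;
    /// # Ok(())
    /// # }
    /// ```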
    pub fn new(
        row_capacity: usize,
        schema: &Schema,
        statement: Prepared<S>,
    ) -> Result<Self, WriterError> {
        let strategies: Vec<_> = schema
            .fields()
            .iter()
            .map(|field| field_to_write_strategy(field.as_ref()))
            .collect::<Result<_, _>>()?;
        let descriptions = strategies.iter().map(|cws| cws.buffer_desc());
        let inserter = statement
            .into_column_inserter(row_capacity, descriptions)
            .map_err(WriterError::BindParameterBuffers)?;

        Ok(Self {
            inserter,
            strategies,
        })
    }

    /// Consumes all the batches in the record batch reader and sends them chunk by chunk to the
    /// database.
    pub fn write_all(
        &mut self,
        reader: impl Iterator<Item = Result<RecordBatch, ArrowError>>,
    ) -> Result<(), WriterError> {
        for result in reader {
            let record_batch = result.map_err(WriterError::ReadingRecordBatch)?;
            self.write_batch(&record_batch)?;
        }
        self.flush()?;
        Ok(())
    }

    /// Consumes a single batch and sends it chunk by chunk to the database. The last chunk may
    /// not be sent until [`Self::flush`] is called.
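    ///
    /// A minimal sketch of driving `write_batch` manually over a hypothetical collection of
    /// batches, with a final [`Self::flush`] to send any rows still sitting in the buffers:
    ///
    /// ```no_run
    /// use arrow_odbc::{
    ///     OdbcWriter, WriterError,
    ///     arrow::record_batch::RecordBatch,
    ///     odbc_api::handles::AsStatementRef,
    /// };
    ///
    /// fn write_batches<S: AsStatementRef>(
    ///     writer: &mut OdbcWriter<S>,
    ///     batches: Vec<RecordBatch>,
    /// ) -> Result<(), WriterError> {
    ///     for batch in &batches {
    ///         writer.write_batch(batch)?;
    ///     }
    ///     // Rows may still be buffered. Flush sends the remainder to the database.
    ///     writer.flush()?;
    ///     Ok(())
    /// }
    /// ```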
    pub fn write_batch(&mut self, record_batch: &RecordBatch) -> Result<(), WriterError> {
        let capacity = self.inserter.capacity();
        let mut remaining_rows = record_batch.num_rows();
        // The record batch may contain more rows than the capacity of our writer can hold. So we
        // need to be able to fill the buffers multiple times and send them to the database in
        // between.
        while remaining_rows != 0 {
            let chunk_size = min(capacity - self.inserter.num_rows(), remaining_rows);
            let param_offset = self.inserter.num_rows();
            self.inserter.set_num_rows(param_offset + chunk_size);
            let chunk = record_batch.slice(record_batch.num_rows() - remaining_rows, chunk_size);
            for (index, (array, strategy)) in chunk
                .columns()
                .iter()
                .zip(self.strategies.iter())
                .enumerate()
            {
                strategy.write_rows(param_offset, self.inserter.column_mut(index), array)?
            }

            // If we used up all capacity we send the parameters to the database and reset the
            // parameter buffers.
            if self.inserter.num_rows() == capacity {
                self.flush()?;
            }
            remaining_rows -= chunk_size;
        }

        Ok(())
    }

    /// The number of rows in an individual record batch does not necessarily match the capacity
    /// of the buffers owned by this writer. Therefore records are sometimes not sent to the
    /// database immediately; instead we wait for the buffers to fill up while reading the next
    /// batch. Once we reach the last batch however, there is no "next batch" anymore. In that case
    /// we call this method in order to send the remainder of the records to the database as well.
    pub fn flush(&mut self) -> Result<(), WriterError> {
        self.inserter
            .execute()
            .map_err(WriterError::ExecuteStatment)?;
        self.inserter.clear();
        Ok(())
    }
}

impl<C> OdbcWriter<StatementConnection<C>>
where
    C: StatementParent,
{
    /// A writer which takes ownership of the connection and inserts the given schema into a table
    /// with matching column names.
    ///
    /// **Note:**
    ///
    /// If a column name contains any character which would make it an invalid qualifier for
    /// Transact-SQL, it will be wrapped in double quotes (`"`) within the insert statement. Valid
    /// names consist of alphanumeric characters, `@`, `$`, `#` and `_`.
    pub fn from_connection<C2>(
        connection: C2,
        schema: &Schema,
        table_name: &str,
        row_capacity: usize,
    ) -> Result<Self, WriterError>
    where
        C2: ConnectionTransitions<StatementParent = C>,
    {
        let sql = insert_statement_from_schema(schema, table_name);
        let statement = connection
            .into_prepared(&sql)
            .map_err(|source| WriterError::PreparingInsertStatement { source, sql })?;
        Self::new(row_capacity, schema, statement)
    }
}

impl<'o> OdbcWriter<StatementImpl<'o>> {
    /// A writer which borrows the connection and inserts the given schema into a table with
    /// matching column names.
    ///
    /// **Note:**
    ///
    /// If a column name contains any character which would make it an invalid qualifier for
    /// Transact-SQL, it will be wrapped in double quotes (`"`) within the insert statement. Valid
    /// names consist of alphanumeric characters, `@`, `$`, `#` and `_`.
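    ///
    /// # Example
    ///
    /// A minimal sketch of the reuse pattern mentioned in [`crate::insert_into_table`]: one writer
    /// serving several readers which share a schema. Connection string and table name are
    /// illustrative placeholders.
    ///
    /// ```no_run
    /// use arrow_odbc::{
    ///     OdbcWriter,
    ///     arrow::record_batch::RecordBatchReader,
    ///     odbc_api::{ConnectionOptions, Environment},
    /// };
    ///
    /// # fn example(
    /// #     mut reader_a: impl RecordBatchReader,
    /// #     mut reader_b: impl RecordBatchReader,
    /// # ) -> Result<(), Box<dyn std::error::Error>> {
    /// let environment = Environment::new()?;
    /// let connection = environment.connect_with_connection_string(
    ///     "YourConnectionString",
    ///     ConnectionOptions::default(),
    /// )?;
    /// let schema = reader_a.schema();
    /// // Prepare the statement and allocate the parameter buffers once, ...
    /// let mut writer = OdbcWriter::with_connection(&connection, schema.as_ref(), "MyTable", 1000)?;
    /// // ... then reuse them for as many readers as needed.
    /// writer.write_all(&mut reader_a)?;
    /// writer.write_all(&mut reader_b)?;
    /// # Ok(())
    /// # }
    /// ```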
    pub fn with_connection(
        connection: &'o Connection<'o>,
        schema: &Schema,
        table_name: &str,
        row_capacity: usize,
    ) -> Result<Self, WriterError> {
        let sql = insert_statement_from_schema(schema, table_name);
        let statement = connection
            .prepare(&sql)
            .map_err(|source| WriterError::PreparingInsertStatement { source, sql })?;
        Self::new(row_capacity, schema, statement)
    }
}

/// Defines how values are transferred from an arrow array into the ODBC parameter buffer bound to
/// the corresponding column of the prepared statement.
pub trait WriteStrategy {
    /// Describe the buffer used to hold the array parameters for the column
    fn buffer_desc(&self) -> BufferDesc;

    /// # Parameters
    ///
    /// * `param_offset`: Start writing parameters at this position, i.e. the number of rows in the
    ///   parameter buffer before inserting the current chunk.
    /// * `column_buf`: Buffer to write the data into
    /// * `array`: Buffer to read the data from
    fn write_rows(
        &self,
        param_offset: usize,
        column_buf: AnySliceMut<'_>,
        array: &dyn Array,
    ) -> Result<(), WriterError>;
}

fn field_to_write_strategy(field: &Field) -> Result<Box<dyn WriteStrategy>, WriterError> {
    let is_nullable = field.is_nullable();
    let strategy = match field.data_type() {
        DataType::Utf8 => Box::new(Utf8ToNativeText {}),
        DataType::Boolean => boolean_to_bit(is_nullable),
        DataType::LargeUtf8 => Box::new(LargeUtf8ToNativeText {}),
        DataType::Int8 => Int8Type::identical(is_nullable),
        DataType::Int16 => Int16Type::identical(is_nullable),
        DataType::Int32 => Int32Type::identical(is_nullable),
        DataType::Int64 => Int64Type::identical(is_nullable),
        DataType::UInt8 => UInt8Type::identical(is_nullable),
        DataType::Float16 => Float16Type::map_with(is_nullable, |half| half.to_f32()),
        DataType::Float32 => Float32Type::identical(is_nullable),
        DataType::Float64 => Float64Type::identical(is_nullable),
        DataType::Timestamp(time_unit, time_zone) => {
            insert_timestamp_strategy(is_nullable, time_unit, time_zone.clone())?
        }
        DataType::Date32 => Date32Type::map_with(is_nullable, epoch_to_date),
        DataType::Date64 => Date64Type::map_with(is_nullable, |millis_since_epoch| {
            // Date64 stores milliseconds since the UNIX epoch. Convert them to days before
            // mapping to a date, matching what `epoch_to_date` expects for `Date32`.
            epoch_to_date((millis_since_epoch / 86_400_000).try_into().unwrap())
        }),
        DataType::Time32(TimeUnit::Second) => {
            Time32SecondType::map_with(is_nullable, sec_since_midnight_to_time)
        }
        DataType::Time32(TimeUnit::Millisecond) => {
            Box::new(NullableTimeAsText::<Time32MillisecondType>::new())
        }
        DataType::Time64(TimeUnit::Microsecond) => {
            Box::new(NullableTimeAsText::<Time64MicrosecondType>::new())
        }
        DataType::Time64(TimeUnit::Nanosecond) => {
            Box::new(NullableTimeAsText::<Time64NanosecondType>::new())
        }
        DataType::Binary => Box::new(VariadicBinary::new(1)),
        DataType::FixedSizeBinary(length) => {
            Box::new(VariadicBinary::new((*length).try_into().unwrap()))
        }
        DataType::Decimal128(precision, scale) => {
            Box::new(NullableDecimal128AsText::new(*precision, *scale))
        }
        DataType::Decimal256(precision, scale) => {
            Box::new(NullableDecimal256AsText::new(*precision, *scale))
        }
        unsupported => return Err(WriterError::UnsupportedArrowDataType(unsupported.clone())),
    };
    Ok(strategy)
}