//! `Row` and `Rows`: handling of the result rows of a DuckDB query (duckdb/row.rs).
1use std::{convert, sync::Arc};
2
3use super::{Error, Result, Statement};
4use crate::types::{self, EnumType, FromSql, FromSqlError, ListType, ValueRef};
5
6use arrow::{
7    array::{
8        self, Array, ArrayRef, DictionaryArray, FixedSizeBinaryArray, FixedSizeListArray, ListArray, MapArray,
9        StructArray,
10    },
11    datatypes::*,
12};
13use fallible_iterator::FallibleIterator;
14use fallible_streaming_iterator::FallibleStreamingIterator;
15use rust_decimal::prelude::*;
16
/// A handle for the resulting rows of a query.
#[must_use = "Rows is lazy and will do nothing unless consumed"]
pub struct Rows<'stmt> {
    // The statement these rows come from; `None` means no statement is attached.
    pub(crate) stmt: Option<&'stmt Statement<'stmt>>,
    // The Arrow record batch currently buffered; shared with any outstanding `Row`.
    arr: Arc<Option<StructArray>>,
    // The most recently produced row, if any.
    row: Option<Row<'stmt>>,
    // Index of the next row across all batches (global position).
    current_row: usize,
    // Index of the next row within the current batch.
    current_batch_row: usize,
}
26
27impl<'stmt> Rows<'stmt> {
28    #[inline]
29    fn reset(&mut self) {
30        self.current_row = 0;
31        self.current_batch_row = 0;
32        self.arr = Arc::new(None);
33    }
34
35    /// Attempt to get the next row from the query. Returns `Ok(Some(Row))` if
36    /// there is another row, `Err(...)` if there was an error
37    /// getting the next row, and `Ok(None)` if all rows have been retrieved.
38    ///
39    /// ## Note
40    ///
41    /// This interface is not compatible with Rust's `Iterator` trait, because
42    /// the lifetime of the returned row is tied to the lifetime of `self`.
43    /// This is a fallible "streaming iterator". For a more natural interface,
44    /// consider using [`query_map`](crate::Statement::query_map) or
45    /// [`query_and_then`](crate::Statement::query_and_then) instead, which
46    /// return types that implement `Iterator`.
47    #[allow(clippy::should_implement_trait)] // cannot implement Iterator
48    #[inline]
49    pub fn next(&mut self) -> Result<Option<&Row<'stmt>>> {
50        self.advance()?;
51        Ok((*self).get())
52    }
53
54    #[inline]
55    fn batch_row_count(&self) -> usize {
56        if self.arr.is_none() {
57            return 0;
58        }
59        self.arr.as_ref().as_ref().unwrap().len()
60    }
61
62    /// Map over this `Rows`, converting it to a [`Map`], which
63    /// implements `FallibleIterator`.
64    ///
65    /// **Note:** This method requires the closure to return `duckdb::Result<B>`.
66    /// If you need to use custom error types, consider using [`and_then`](Self::and_then)
67    /// instead, which allows any error type that implements `From<duckdb::Error>`.
68    ///
69    /// ```rust,no_run
70    /// use fallible_iterator::FallibleIterator;
71    /// # use duckdb::{Result, Statement};
72    /// fn query(stmt: &mut Statement) -> Result<Vec<i64>> {
73    ///     let rows = stmt.query([])?;
74    ///     rows.map(|r| r.get(0)).collect()
75    /// }
76    /// ```
77    // FIXME Hide FallibleStreamingIterator::map
78    #[inline]
79    pub fn map<F, B>(self, f: F) -> Map<'stmt, F>
80    where
81        F: FnMut(&Row<'_>) -> Result<B>,
82    {
83        Map { rows: self, f }
84    }
85
86    /// Map over this `Rows`, converting it to a [`MappedRows`], which
87    /// implements `Iterator`.
88    #[inline]
89    pub fn mapped<F, B>(self, f: F) -> MappedRows<'stmt, F>
90    where
91        F: FnMut(&Row<'_>) -> Result<B>,
92    {
93        MappedRows { rows: self, map: f }
94    }
95
96    /// Map over this `Rows` with a fallible function, converting it to a
97    /// [`AndThenRows`], which implements `Iterator` (instead of
98    /// `FallibleStreamingIterator`).
99    #[inline]
100    pub fn and_then<F, T, E>(self, f: F) -> AndThenRows<'stmt, F>
101    where
102        F: FnMut(&Row<'_>) -> Result<T, E>,
103    {
104        AndThenRows { rows: self, map: f }
105    }
106
107    /// Access the underlying statement
108    ///
109    /// This method provides a way to access the `Statement` that created these `Rows`
110    /// without additional borrowing conflicts. This is particularly useful when you need
111    /// to access statement metadata (like column count or names) while iterating over results.
112    ///
113    /// # Example
114    ///
115    /// ```rust,no_run
116    /// # use duckdb::{Connection, Result};
117    /// fn process_results(conn: &Connection) -> Result<()> {
118    ///     let mut stmt = conn.prepare("SELECT id, name FROM people")?;
119    ///     let mut rows = stmt.query([])?;
120    ///
121    ///     let column_count = rows.as_ref().unwrap().column_count();
122    ///     println!("Processing {} columns", column_count);
123    ///
124    ///     while let Some(row) = rows.next()? {
125    ///         // Process row...
126    ///     }
127    ///     Ok(())
128    /// }
129    /// ```
130    pub fn as_ref(&self) -> Option<&Statement<'stmt>> {
131        self.stmt
132    }
133}
134
135impl<'stmt> Rows<'stmt> {
136    #[inline]
137    pub(crate) fn new(stmt: &'stmt Statement<'stmt>) -> Self {
138        Rows {
139            stmt: Some(stmt),
140            arr: Arc::new(None),
141            row: None,
142            current_row: 0,
143            current_batch_row: 0,
144        }
145    }
146
147    #[inline]
148    pub(crate) fn get_expected_row(&mut self) -> Result<&Row<'stmt>> {
149        match self.next()? {
150            Some(row) => Ok(row),
151            None => Err(Error::QueryReturnedNoRows),
152        }
153    }
154}
155
/// `F` is used to transform the _streaming_ iterator into a _fallible_
/// iterator.
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct Map<'stmt, F> {
    // The underlying streaming cursor.
    rows: Rows<'stmt>,
    // Closure applied to each row, producing `Result<B>`.
    f: F,
}
163
164impl<F, B> FallibleIterator for Map<'_, F>
165where
166    F: FnMut(&Row<'_>) -> Result<B>,
167{
168    type Error = Error;
169    type Item = B;
170
171    #[inline]
172    fn next(&mut self) -> Result<Option<B>> {
173        match self.rows.next()? {
174            Some(v) => Ok(Some((self.f)(v)?)),
175            None => Ok(None),
176        }
177    }
178}
179
/// An iterator over the mapped resulting rows of a query.
///
/// `F` is used to transform the _streaming_ iterator into a _standard_
/// iterator.
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct MappedRows<'stmt, F> {
    // The underlying streaming cursor.
    rows: Rows<'stmt>,
    // Closure applied to each row, producing `Result<T>`.
    map: F,
}
189
190impl<T, F> Iterator for MappedRows<'_, F>
191where
192    F: FnMut(&Row<'_>) -> Result<T>,
193{
194    type Item = Result<T>;
195
196    #[inline]
197    fn next(&mut self) -> Option<Result<T>> {
198        let map = &mut self.map;
199        self.rows.next().transpose().map(|row_result| row_result.and_then(map))
200    }
201}
202
/// An iterator over the mapped resulting rows of a query, with an Error type
/// unifying with Error.
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct AndThenRows<'stmt, F> {
    // The underlying streaming cursor.
    rows: Rows<'stmt>,
    // Fallible closure applied to each row; its error type must be `From<Error>`.
    map: F,
}
210
211impl<T, E, F> Iterator for AndThenRows<'_, F>
212where
213    E: convert::From<Error>,
214    F: FnMut(&Row<'_>) -> Result<T, E>,
215{
216    type Item = Result<T, E>;
217
218    #[inline]
219    fn next(&mut self) -> Option<Self::Item> {
220        let map = &mut self.map;
221        self.rows
222            .next()
223            .transpose()
224            .map(|row_result| row_result.map_err(E::from).and_then(map))
225    }
226}
227
/// `FallibleStreamingIterator` differs from the standard library's `Iterator`
/// in two ways:
/// * each call to `next` (which may fetch a new record batch from DuckDB) can fail.
/// * returned `Row` is valid until `next` is called again or `Statement` is
///   reset or finalized.
///
/// While these iterators cannot be used with Rust `for` loops, `while let`
/// loops offer a similar level of ergonomics:
/// ```rust,no_run
/// # use duckdb::{Result, Statement};
/// fn query(stmt: &mut Statement) -> Result<()> {
///     let mut rows = stmt.query([])?;
///     while let Some(row) = rows.next()? {
///         // scan columns value
///     }
///     Ok(())
/// }
/// ```
impl<'stmt> FallibleStreamingIterator for Rows<'stmt> {
    type Error = Error;
    type Item = Row<'stmt>;

    // Moves the cursor one row forward, fetching the next record batch from
    // the statement when the current one is exhausted.
    #[inline]
    fn advance(&mut self) -> Result<()> {
        match self.stmt {
            Some(stmt) => {
                if self.current_row < stmt.row_count() {
                    // Current batch exhausted (or none buffered yet): pull the next one.
                    if self.current_batch_row >= self.batch_row_count() {
                        self.arr = Arc::new(stmt.step());
                        if self.arr.is_none() {
                            // No further batches: iteration ends here.
                            self.row = None;
                            return Ok(());
                        }
                        self.current_batch_row = 0;
                    }
                    // The Row shares the batch through the Arc, so it remains
                    // valid until the next `advance` replaces `self.arr`.
                    self.row = Some(Row {
                        stmt,
                        arr: self.arr.clone(),
                        current_row: self.current_batch_row,
                    });
                    self.current_row += 1;
                    self.current_batch_row += 1;
                    Ok(())
                } else {
                    // All rows consumed: clear bookkeeping and the buffered row.
                    self.reset();
                    self.row = None;
                    Ok(())
                }
            }
            None => {
                // No statement attached: nothing to yield.
                self.row = None;
                Ok(())
            }
        }
    }

    // Returns the row produced by the last successful `advance`, if any.
    #[inline]
    fn get(&self) -> Option<&Row<'stmt>> {
        self.row.as_ref()
    }
}
289
/// A single result row of a query.
pub struct Row<'stmt> {
    // The statement this row was produced from (used for column metadata).
    pub(crate) stmt: &'stmt Statement<'stmt>,
    // The record batch this row lives in, shared with the owning `Rows`.
    arr: Arc<Option<StructArray>>,
    // This row's index within `arr` (batch-local, not global).
    current_row: usize,
}
296
297#[allow(clippy::needless_lifetimes)]
298impl<'stmt> Row<'stmt> {
299    /// Get the value of a particular column of the result row.
300    ///
301    /// ## Failure
302    ///
303    /// Panics if calling [`row.get(idx)`](Row::get) would return an error,
304    /// including:
305    ///
306    /// * If the underlying DuckDB column type is not a valid type as a source
307    ///   for `T`
308    /// * If the underlying DuckDB integral value is outside the range
309    ///   representable by `T`
310    /// * If `idx` is outside the range of columns in the returned query
311    pub fn get_unwrap<I: RowIndex, T: FromSql>(&self, idx: I) -> T {
312        self.get(idx).unwrap()
313    }
314
315    /// Get the value of a particular column of the result row.
316    ///
317    /// ## Failure
318    ///
319    /// Returns an `Error::InvalidColumnType` if the underlying DuckDB column
320    /// type is not a valid type as a source for `T`.
321    ///
322    /// Returns an `Error::InvalidColumnIndex` if `idx` is outside the valid
323    /// column range for this row.
324    ///
325    /// Returns an `Error::InvalidColumnName` if `idx` is not a valid column
326    /// name for this row.
327    ///
328    /// If the result type is i128 (which requires the `i128_blob` feature to be
329    /// enabled), and the underlying DuckDB column is a blob whose size is not
330    /// 16 bytes, `Error::InvalidColumnType` will also be returned.
331    pub fn get<I: RowIndex, T: FromSql>(&self, idx: I) -> Result<T> {
332        let idx = idx.idx(self.stmt)?;
333        let value = self.value_ref(self.current_row, idx);
334        FromSql::column_result(value).map_err(|err| match err {
335            FromSqlError::InvalidType => {
336                Error::InvalidColumnType(idx, self.stmt.column_name_unwrap(idx).into(), value.data_type())
337            }
338            FromSqlError::OutOfRange(i) => Error::IntegralValueOutOfRange(idx, i),
339            FromSqlError::Other(err) => Error::FromSqlConversionFailure(idx, value.data_type(), err),
340            #[cfg(feature = "uuid")]
341            FromSqlError::InvalidUuidSize(_) => {
342                Error::InvalidColumnType(idx, self.stmt.column_name_unwrap(idx).into(), value.data_type())
343            }
344        })
345    }
346
347    /// Get the value of a particular column of the result row as a `ValueRef`,
348    /// allowing data to be read out of a row without copying.
349    ///
350    /// This `ValueRef` is valid only as long as this Row, which is enforced by
351    /// it's lifetime. This means that while this method is completely safe,
352    /// it can be somewhat difficult to use, and most callers will be better
353    /// served by [`get`](Row::get) or [`get_unwrap`](Row::get_unwrap).
354    ///
355    /// ## Failure
356    ///
357    /// Returns an `Error::InvalidColumnIndex` if `idx` is outside the valid
358    /// column range for this row.
359    ///
360    /// Returns an `Error::InvalidColumnName` if `idx` is not a valid column
361    /// name for this row.
362    pub fn get_ref<I: RowIndex>(&self, idx: I) -> Result<ValueRef<'_>> {
363        let idx = idx.idx(self.stmt)?;
364        // Narrowing from `ValueRef<'stmt>` (which `self.stmt.value_ref(idx)`
365        // returns) to `ValueRef<'a>` is needed because it's only valid until
366        // the next call to sqlite3_step.
367        let val_ref = self.value_ref(self.current_row, idx);
368        Ok(val_ref)
369    }
370
371    fn value_ref(&self, row: usize, col: usize) -> ValueRef<'_> {
372        let column = self.arr.as_ref().as_ref().unwrap().column(col);
373        Self::value_ref_internal(row, col, column)
374    }
375
376    pub(crate) fn value_ref_internal(row: usize, col: usize, column: &ArrayRef) -> ValueRef<'_> {
377        if column.is_null(row) {
378            return ValueRef::Null;
379        }
380        // duckdb.cpp SetArrowFormat
381        // https://github.com/duckdb/duckdb/blob/71f1c7a7e4b8737cff5e78d1f090c54f5e78e17b/src/main/query_result.cpp#L148
382        match column.data_type() {
383            DataType::Utf8 => {
384                let array = column.as_any().downcast_ref::<array::StringArray>().unwrap();
385                ValueRef::from(array.value(row))
386            }
387            DataType::LargeUtf8 => {
388                let array = column.as_any().downcast_ref::<array::LargeStringArray>().unwrap();
389                ValueRef::from(array.value(row))
390            }
391            DataType::Binary => {
392                let array = column.as_any().downcast_ref::<array::BinaryArray>().unwrap();
393                ValueRef::Blob(array.value(row))
394            }
395            DataType::LargeBinary => {
396                let array = column.as_any().downcast_ref::<array::LargeBinaryArray>().unwrap();
397                ValueRef::Blob(array.value(row))
398            }
399            DataType::FixedSizeBinary(_) => {
400                let array = column.as_any().downcast_ref::<FixedSizeBinaryArray>().unwrap();
401                ValueRef::Blob(array.value(row))
402            }
403            DataType::Boolean => {
404                let array = column.as_any().downcast_ref::<array::BooleanArray>().unwrap();
405                ValueRef::Boolean(array.value(row))
406            }
407            DataType::Int8 => {
408                let array = column.as_any().downcast_ref::<array::Int8Array>().unwrap();
409                ValueRef::TinyInt(array.value(row))
410            }
411            DataType::Int16 => {
412                let array = column.as_any().downcast_ref::<array::Int16Array>().unwrap();
413                ValueRef::SmallInt(array.value(row))
414            }
415            DataType::Int32 => {
416                let array = column.as_any().downcast_ref::<array::Int32Array>().unwrap();
417                ValueRef::Int(array.value(row))
418            }
419            DataType::Int64 => {
420                let array = column.as_any().downcast_ref::<array::Int64Array>().unwrap();
421                ValueRef::BigInt(array.value(row))
422            }
423            DataType::UInt8 => {
424                let array = column.as_any().downcast_ref::<array::UInt8Array>().unwrap();
425                ValueRef::UTinyInt(array.value(row))
426            }
427            DataType::UInt16 => {
428                let array = column.as_any().downcast_ref::<array::UInt16Array>().unwrap();
429                ValueRef::USmallInt(array.value(row))
430            }
431            DataType::UInt32 => {
432                let array = column.as_any().downcast_ref::<array::UInt32Array>().unwrap();
433                ValueRef::UInt(array.value(row))
434            }
435            DataType::UInt64 => {
436                let array = column.as_any().downcast_ref::<array::UInt64Array>().unwrap();
437                ValueRef::UBigInt(array.value(row))
438            }
439            DataType::Float16 => {
440                let array = column.as_any().downcast_ref::<array::Float32Array>().unwrap();
441                ValueRef::Float(array.value(row))
442            }
443            DataType::Float32 => {
444                let array = column.as_any().downcast_ref::<array::Float32Array>().unwrap();
445                ValueRef::Float(array.value(row))
446            }
447            DataType::Float64 => {
448                let array = column.as_any().downcast_ref::<array::Float64Array>().unwrap();
449                ValueRef::Double(array.value(row))
450            }
451            DataType::Decimal128(..) => {
452                let array = column.as_any().downcast_ref::<array::Decimal128Array>().unwrap();
453                // hugeint: d:38,0
454                if array.scale() == 0 {
455                    return ValueRef::HugeInt(array.value(row));
456                }
457                ValueRef::Decimal(Decimal::from_i128_with_scale(array.value(row), array.scale() as u32))
458            }
459            DataType::Timestamp(unit, _) if *unit == TimeUnit::Second => {
460                let array = column.as_any().downcast_ref::<array::TimestampSecondArray>().unwrap();
461                ValueRef::Timestamp(types::TimeUnit::Second, array.value(row))
462            }
463            DataType::Timestamp(unit, _) if *unit == TimeUnit::Millisecond => {
464                let array = column
465                    .as_any()
466                    .downcast_ref::<array::TimestampMillisecondArray>()
467                    .unwrap();
468                ValueRef::Timestamp(types::TimeUnit::Millisecond, array.value(row))
469            }
470            DataType::Timestamp(unit, _) if *unit == TimeUnit::Microsecond => {
471                let array = column
472                    .as_any()
473                    .downcast_ref::<array::TimestampMicrosecondArray>()
474                    .unwrap();
475                ValueRef::Timestamp(types::TimeUnit::Microsecond, array.value(row))
476            }
477            DataType::Timestamp(unit, _) if *unit == TimeUnit::Nanosecond => {
478                let array = column
479                    .as_any()
480                    .downcast_ref::<array::TimestampNanosecondArray>()
481                    .unwrap();
482                ValueRef::Timestamp(types::TimeUnit::Nanosecond, array.value(row))
483            }
484            DataType::Date32 => {
485                let array = column.as_any().downcast_ref::<array::Date32Array>().unwrap();
486                ValueRef::Date32(array.value(row))
487            }
488            DataType::Time64(TimeUnit::Microsecond) => {
489                let array = column.as_any().downcast_ref::<array::Time64MicrosecondArray>().unwrap();
490                ValueRef::Time64(types::TimeUnit::Microsecond, array.value(row))
491            }
492            DataType::Interval(unit) => match unit {
493                IntervalUnit::MonthDayNano => {
494                    let array = column
495                        .as_any()
496                        .downcast_ref::<array::IntervalMonthDayNanoArray>()
497                        .unwrap();
498                    let value = array.value(row);
499
500                    ValueRef::Interval {
501                        months: value.months,
502                        days: value.days,
503                        nanos: value.nanoseconds,
504                    }
505                }
506                _ => unimplemented!("{:?}", unit),
507            },
508            // TODO: support more data types
509            // NOTE: DataTypes not supported by duckdb
510            // DataType::Date64 => make_string_date!(array::Date64Array, column, row),
511            // DataType::Time32(unit) if *unit == TimeUnit::Second => {
512            //     make_string_time!(array::Time32SecondArray, column, row)
513            // }
514            // DataType::Time32(unit) if *unit == TimeUnit::Millisecond => {
515            //     make_string_time!(array::Time32MillisecondArray, column, row)
516            // }
517            // DataType::Time64(unit) if *unit == TimeUnit::Nanosecond => {
518            //     make_string_time!(array::Time64NanosecondArray, column, row)
519            // }
520            DataType::LargeList(..) => {
521                let arr = column.as_any().downcast_ref::<array::LargeListArray>().unwrap();
522
523                ValueRef::List(ListType::Large(arr), row)
524            }
525            DataType::List(..) => {
526                let arr = column.as_any().downcast_ref::<ListArray>().unwrap();
527
528                ValueRef::List(ListType::Regular(arr), row)
529            }
530            DataType::Dictionary(key_type, ..) => {
531                let column = column.as_any();
532                ValueRef::Enum(
533                    match key_type.as_ref() {
534                        DataType::UInt8 => {
535                            EnumType::UInt8(column.downcast_ref::<DictionaryArray<UInt8Type>>().unwrap())
536                        }
537                        DataType::UInt16 => {
538                            EnumType::UInt16(column.downcast_ref::<DictionaryArray<UInt16Type>>().unwrap())
539                        }
540                        DataType::UInt32 => {
541                            EnumType::UInt32(column.downcast_ref::<DictionaryArray<UInt32Type>>().unwrap())
542                        }
543                        typ => panic!("Unsupported key type: {typ:?}"),
544                    },
545                    row,
546                )
547            }
548            DataType::Struct(_) => {
549                let res = column.as_any().downcast_ref::<StructArray>().unwrap();
550                ValueRef::Struct(res, row)
551            }
552            DataType::Map(..) => {
553                let arr = column.as_any().downcast_ref::<MapArray>().unwrap();
554                ValueRef::Map(arr, row)
555            }
556            DataType::FixedSizeList(..) => {
557                let arr = column.as_any().downcast_ref::<FixedSizeListArray>().unwrap();
558                ValueRef::Array(arr, row)
559            }
560            DataType::Union(..) => ValueRef::Union(column, row),
561            _ => unreachable!("invalid value: {}, {}", col, column.data_type()),
562        }
563    }
564
565    /// Get the value of a particular column of the result row as a `ValueRef`,
566    /// allowing data to be read out of a row without copying.
567    ///
568    /// This `ValueRef` is valid only as long as this Row, which is enforced by
569    /// it's lifetime. This means that while this method is completely safe,
570    /// it can be difficult to use, and most callers will be better served by
571    /// [`get`](Row::get) or [`get_unwrap`](Row::get_unwrap).
572    ///
573    /// ## Failure
574    ///
575    /// Panics if calling [`row.get_ref(idx)`](Row::get_ref) would return an
576    /// error, including:
577    ///
578    /// * If `idx` is outside the range of columns in the returned query.
579    /// * If `idx` is not a valid column name for this row.
580    pub fn get_ref_unwrap<I: RowIndex>(&self, idx: I) -> ValueRef<'_> {
581        self.get_ref(idx).unwrap()
582    }
583}
584
impl<'stmt> AsRef<Statement<'stmt>> for Row<'stmt> {
    /// Returns the [`Statement`] this row was produced from.
    fn as_ref(&self) -> &Statement<'stmt> {
        self.stmt
    }
}
590
mod sealed {
    /// This trait exists just to ensure that the only impls of `trait RowIndex`
    /// that are allowed are ones in this crate.
    pub trait Sealed {}
    impl Sealed for usize {}
    impl Sealed for &str {}
}
598
/// A trait implemented by types that can index into columns of a row.
///
/// It is only implemented for `usize` and `&str`.
pub trait RowIndex: sealed::Sealed {
    /// Returns the index of the appropriate column, or an error if no such
    /// column exists.
    fn idx(&self, stmt: &Statement<'_>) -> Result<usize>;
}
607
608impl RowIndex for usize {
609    #[inline]
610    fn idx(&self, stmt: &Statement<'_>) -> Result<usize> {
611        if *self >= stmt.column_count() {
612            Err(Error::InvalidColumnIndex(*self))
613        } else {
614            Ok(*self)
615        }
616    }
617}
618
impl RowIndex for &'_ str {
    /// Resolves a column name to its index via the statement's metadata.
    #[inline]
    fn idx(&self, stmt: &Statement<'_>) -> Result<usize> {
        stmt.column_index(self)
    }
}
625
// Implements `TryFrom<&Row>` for one tuple arity: each tuple element is read
// with `FromSql` from consecutive columns starting at index 0.
macro_rules! tuple_try_from_row {
    ($($field:ident),*) => {
        impl<'a, $($field,)*> convert::TryFrom<&'a Row<'a>> for ($($field,)*) where $($field: FromSql,)* {
            type Error = crate::Error;

            // we end with index += 1, which rustc warns about
            // unused_variables and unused_mut are allowed for ()
            #[allow(unused_assignments, unused_variables, unused_mut)]
            fn try_from(row: &'a Row<'a>) -> Result<Self> {
                let mut index = 0;
                $(
                    // One binding per tuple element, named after its type parameter.
                    #[allow(non_snake_case)]
                    let $field = row.get::<_, $field>(index)?;
                    index += 1;
                )*
                Ok(($($field,)*))
            }
        }
    }
}
646
// Recursively invokes `tuple_try_from_row!` for the given arity and every
// smaller arity, so tuples of length 0..=N all get a `TryFrom<&Row>` impl.
macro_rules! tuples_try_from_row {
    () => {
        // not very useful, but maybe some other macro users will find this helpful
        tuple_try_from_row!();
    };
    ($first:ident $(, $remaining:ident)*) => {
        tuple_try_from_row!($first $(, $remaining)*);
        tuples_try_from_row!($($remaining),*);
    };
}

// Generate `TryFrom<&Row>` impls for tuples of up to 16 elements.
tuples_try_from_row!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P);
659
660#[cfg(test)]
661mod tests {
662    #![allow(clippy::redundant_closure)] // false positives due to lifetime issues; clippy issue #5594
663    use crate::{Connection, Result};
664
665    #[test]
666    fn test_try_from_row_for_tuple_1() -> Result<()> {
667        use crate::ToSql;
668        use std::convert::TryFrom;
669
670        let conn = Connection::open_in_memory()?;
671        conn.execute(
672            "CREATE TABLE test (a INTEGER)",
673            crate::params_from_iter(std::iter::empty::<&dyn ToSql>()),
674        )?;
675        conn.execute("INSERT INTO test VALUES (42)", [])?;
676        let val = conn.query_row("SELECT a FROM test", [], |row| <(u32,)>::try_from(row))?;
677        assert_eq!(val, (42,));
678        let fail = conn.query_row("SELECT a FROM test", [], |row| <(u32, u32)>::try_from(row));
679        assert!(fail.is_err());
680        Ok(())
681    }
682
683    #[test]
684    fn test_try_from_row_for_tuple_2() -> Result<()> {
685        use std::convert::TryFrom;
686
687        let conn = Connection::open_in_memory()?;
688        conn.execute("CREATE TABLE test (a INTEGER, b INTEGER)", [])?;
689        conn.execute("INSERT INTO test VALUES (42, 47)", [])?;
690        let val = conn.query_row("SELECT a, b FROM test", [], |row| <(u32, u32)>::try_from(row))?;
691        assert_eq!(val, (42, 47));
692        let fail = conn.query_row("SELECT a, b FROM test", [], |row| <(u32, u32, u32)>::try_from(row));
693        assert!(fail.is_err());
694        Ok(())
695    }
696
    // Exercises `TryFrom<&Row>` at the maximum supported tuple arity (16),
    // checking every element round-trips its column value.
    #[test]
    fn test_try_from_row_for_tuple_16() -> Result<()> {
        use std::convert::TryFrom;

        let create_table = "CREATE TABLE test (
            a INTEGER,
            b INTEGER,
            c INTEGER,
            d INTEGER,
            e INTEGER,
            f INTEGER,
            g INTEGER,
            h INTEGER,
            i INTEGER,
            j INTEGER,
            k INTEGER,
            l INTEGER,
            m INTEGER,
            n INTEGER,
            o INTEGER,
            p INTEGER
        )";

        let insert_values = "INSERT INTO test VALUES (
            0,
            1,
            2,
            3,
            4,
            5,
            6,
            7,
            8,
            9,
            10,
            11,
            12,
            13,
            14,
            15
        )";

        // Column `a` maps to `.0`, `b` to `.1`, … `p` to `.15`.
        type BigTuple = (
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
            u32,
        );

        let conn = Connection::open_in_memory()?;
        conn.execute(create_table, [])?;
        conn.execute(insert_values, [])?;
        let val = conn.query_row("SELECT * FROM test", [], |row| BigTuple::try_from(row))?;
        // Debug is not implemented for tuples of 16
        assert_eq!(val.0, 0);
        assert_eq!(val.1, 1);
        assert_eq!(val.2, 2);
        assert_eq!(val.3, 3);
        assert_eq!(val.4, 4);
        assert_eq!(val.5, 5);
        assert_eq!(val.6, 6);
        assert_eq!(val.7, 7);
        assert_eq!(val.8, 8);
        assert_eq!(val.9, 9);
        assert_eq!(val.10, 10);
        assert_eq!(val.11, 11);
        assert_eq!(val.12, 12);
        assert_eq!(val.13, 13);
        assert_eq!(val.14, 14);
        assert_eq!(val.15, 15);

        // We don't test one bigger because it's unimplemented
        Ok(())
    }
783
    // Round-trips a FixedSizeBinary(16) Arrow array through the `arrow` table
    // function and checks DuckDB returns it as a variable-width Binary column.
    #[test]
    #[cfg(feature = "vtab-arrow")]
    fn test_fixed_size_binary_via_arrow() -> Result<()> {
        use crate::vtab::arrow::{arrow_recordbatch_to_query_params, ArrowVTab};
        use arrow::array::{Array, ArrayRef, BinaryArray, FixedSizeBinaryArray};
        use arrow::datatypes::{DataType, Field, Schema};
        use arrow::record_batch::RecordBatch;
        use std::sync::Arc;

        let conn = Connection::open_in_memory()?;
        conn.register_table_function::<ArrowVTab>("arrow")?;

        // Create FixedSizeBinary(16) array - like UUID
        let values = vec![
            vec![1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
            vec![16u8, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
            vec![0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255],
        ];

        let byte_array = FixedSizeBinaryArray::try_from_iter(values.into_iter()).unwrap();
        let arc: ArrayRef = Arc::new(byte_array);
        let schema = Schema::new(vec![Field::new("data", DataType::FixedSizeBinary(16), false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![arc]).unwrap();

        let mut stmt = conn.prepare("SELECT data FROM arrow(?, ?)")?;
        let mut arr = stmt.query_arrow(arrow_recordbatch_to_query_params(batch))?;
        let rb = arr.next().expect("no record batch");

        // DuckDB converts FixedSizeBinary to regular Binary
        let column = rb.column(0).as_any().downcast_ref::<BinaryArray>().unwrap();
        assert_eq!(column.len(), 3);
        assert_eq!(
            column.value(0),
            &[1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
        );
        assert_eq!(
            column.value(1),
            &[16u8, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
        );
        assert_eq!(column.value(2), &[0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255]);

        Ok(())
    }
827
828    #[test]
829    #[cfg(feature = "vtab-arrow")]
830    fn test_fixed_size_binary_with_nulls_via_arrow() -> Result<()> {
831        use crate::vtab::arrow::{arrow_recordbatch_to_query_params, ArrowVTab};
832        use arrow::array::{Array, ArrayRef, BinaryArray, FixedSizeBinaryArray};
833        use arrow::datatypes::{DataType, Field, Schema};
834        use arrow::record_batch::RecordBatch;
835        use std::sync::Arc;
836
837        let conn = Connection::open_in_memory()?;
838        conn.register_table_function::<ArrowVTab>("arrow")?;
839
840        // Create FixedSizeBinary(8) array with nulls
841        let values = vec![
842            Some(vec![1u8, 2, 3, 4, 5, 6, 7, 8]),
843            None,
844            Some(vec![9u8, 10, 11, 12, 13, 14, 15, 16]),
845        ];
846
847        let byte_array = FixedSizeBinaryArray::try_from_sparse_iter_with_size(values.into_iter(), 8).unwrap();
848        let arc: ArrayRef = Arc::new(byte_array);
849        let schema = Schema::new(vec![Field::new("data", DataType::FixedSizeBinary(8), true)]);
850        let batch = RecordBatch::try_new(Arc::new(schema), vec![arc]).unwrap();
851
852        let mut stmt = conn.prepare("SELECT data FROM arrow(?, ?)")?;
853        let mut arr = stmt.query_arrow(arrow_recordbatch_to_query_params(batch))?;
854        let rb = arr.next().expect("no record batch");
855
856        // NOTE: Currently, null handling for FixedSizeBinary is not fully implemented
857        // (see vtab/arrow.rs fixed_size_binary_array_to_vector, line 925-926)
858        // Nulls are converted to zero bytes instead of actual nulls
859        let column = rb.column(0).as_any().downcast_ref::<BinaryArray>().unwrap();
860        assert_eq!(column.len(), 3);
861        assert!(column.is_valid(0));
862        // This should be false when null handling is implemented
863        // assert!(!column.is_valid(1));
864        assert!(column.is_valid(2));
865        assert_eq!(column.value(0), &[1u8, 2, 3, 4, 5, 6, 7, 8]);
866        // The null value is currently represented as zero bytes
867        assert_eq!(column.value(1), &[0u8, 0, 0, 0, 0, 0, 0, 0]);
868        assert_eq!(column.value(2), &[9u8, 10, 11, 12, 13, 14, 15, 16]);
869
870        Ok(())
871    }
872
873    #[test]
874    #[cfg(feature = "vtab-arrow")]
875    fn test_fixed_size_binary_different_sizes_via_arrow() -> Result<()> {
876        use crate::vtab::arrow::{arrow_recordbatch_to_query_params, ArrowVTab};
877        use arrow::array::{ArrayRef, FixedSizeBinaryArray};
878        use arrow::datatypes::{DataType, Field, Schema};
879        use arrow::record_batch::RecordBatch;
880        use std::sync::Arc;
881
882        let conn = Connection::open_in_memory()?;
883        conn.register_table_function::<ArrowVTab>("arrow")?;
884
885        // Test with FixedSizeBinary(4)
886        let values = vec![vec![1u8, 2, 3, 4], vec![5u8, 6, 7, 8]];
887
888        let byte_array = FixedSizeBinaryArray::try_from_iter(values.into_iter()).unwrap();
889        let arc: ArrayRef = Arc::new(byte_array);
890        let schema = Schema::new(vec![Field::new("data", DataType::FixedSizeBinary(4), false)]);
891        let batch = RecordBatch::try_new(Arc::new(schema), vec![arc]).unwrap();
892
893        let mut stmt = conn.prepare("SELECT data FROM arrow(?, ?)")?;
894        let mut rows = stmt.query(arrow_recordbatch_to_query_params(batch))?;
895
896        // Read via Row interface
897        let row = rows.next()?.unwrap();
898        let bytes: Vec<u8> = row.get(0)?;
899        assert_eq!(bytes, vec![1u8, 2, 3, 4]);
900
901        let row = rows.next()?.unwrap();
902        let bytes: Vec<u8> = row.get(0)?;
903        assert_eq!(bytes, vec![5u8, 6, 7, 8]);
904
905        Ok(())
906    }
907
908    #[test]
909    #[cfg(feature = "vtab-arrow")]
910    fn test_fixed_size_binary_value_ref_via_arrow() -> Result<()> {
911        use crate::types::ValueRef;
912        use crate::vtab::arrow::{arrow_recordbatch_to_query_params, ArrowVTab};
913        use arrow::array::{ArrayRef, FixedSizeBinaryArray};
914        use arrow::datatypes::{DataType, Field, Schema};
915        use arrow::record_batch::RecordBatch;
916        use std::sync::Arc;
917
918        let conn = Connection::open_in_memory()?;
919        conn.register_table_function::<ArrowVTab>("arrow")?;
920
921        let values = vec![Some(vec![1u8, 2, 3, 4]), None];
922
923        let byte_array = FixedSizeBinaryArray::try_from_sparse_iter_with_size(values.into_iter(), 4).unwrap();
924        let arc: ArrayRef = Arc::new(byte_array);
925        let schema = Schema::new(vec![Field::new("data", DataType::FixedSizeBinary(4), true)]);
926        let batch = RecordBatch::try_new(Arc::new(schema), vec![arc]).unwrap();
927
928        let mut stmt = conn.prepare("SELECT data FROM arrow(?, ?)")?;
929        let mut rows = stmt.query(arrow_recordbatch_to_query_params(batch))?;
930
931        // First row - non-null
932        let row = rows.next()?.unwrap();
933        let value_ref = row.get_ref(0)?;
934        match value_ref {
935            ValueRef::Blob(bytes) => {
936                assert_eq!(bytes, &[1u8, 2, 3, 4]);
937            }
938            _ => panic!("Expected Blob ValueRef, got {:?}", value_ref),
939        }
940
941        // Second row - should be null, but currently null handling is not implemented
942        // (see vtab/arrow.rs fixed_size_binary_array_to_vector, line 925-926)
943        // so it's represented as zero bytes
944        let row = rows.next()?.unwrap();
945        let value_ref = row.get_ref(0)?;
946        match value_ref {
947            ValueRef::Blob(bytes) => {
948                // This should be ValueRef::Null when null handling is implemented
949                assert_eq!(bytes, &[0u8, 0, 0, 0]);
950            }
951            _ => panic!("Expected Blob ValueRef with zero bytes, got {:?}", value_ref),
952        }
953
954        Ok(())
955    }
956
957    #[cfg(feature = "uuid")]
958    #[test]
959    fn test_fixed_size_binary_uuid() -> Result<()> {
960        use uuid::Uuid;
961
962        let conn = Connection::open_in_memory()?;
963        conn.execute_batch("CREATE TABLE test (id UUID)")?;
964
965        let uuid_str = "550e8400-e29b-41d4-a716-446655440000";
966        conn.execute("INSERT INTO test VALUES (?)", [uuid_str])?;
967
968        // Read back as UUID
969        let uuid: Uuid = conn.query_row("SELECT id FROM test", [], |r| r.get(0))?;
970        assert_eq!(uuid.to_string(), uuid_str);
971        Ok(())
972    }
973
974    #[cfg(feature = "uuid")]
975    #[test]
976    fn test_fixed_size_binary_uuid_roundtrip() -> Result<()> {
977        use uuid::Uuid;
978
979        let conn = Connection::open_in_memory()?;
980        conn.execute_batch("CREATE TABLE test (id UUID)")?;
981
982        let original_uuid = Uuid::new_v4();
983        conn.execute("INSERT INTO test VALUES (?)", [original_uuid])?;
984
985        let retrieved_uuid: Uuid = conn.query_row("SELECT id FROM test", [], |r| r.get(0))?;
986        assert_eq!(original_uuid, retrieved_uuid);
987        Ok(())
988    }
989}