mssql_client/row.rs

//! Row representation for query results.
//!
//! This module implements the `Arc<Bytes>` pattern from ADR-004 for reduced-copy
//! row data access. The `Row` struct holds a shared reference to the raw packet
//! buffer, deferring allocation until explicitly requested.
//!
//! ## Access Patterns (per ADR-004)
//!
//! - `get_bytes()` - Returns borrowed slice into buffer (zero additional allocation)
//! - `get_str()` - Returns `Cow`: borrowed if valid UTF-8, owned if conversion is needed
//! - `get_string()` - Allocates a new String (explicit allocation)
//! - `get<T>()` - Type-converting accessor with allocation only if needed
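//!
//! ## Example
//!
//! A minimal usage sketch of the accessors above (column indices and the
//! `email` column name are illustrative; assumes a `Row` obtained from a
//! result set):
//!
//! ```rust,ignore
//! // Zero-copy: borrow raw bytes straight from the shared buffer.
//! let raw: Option<&[u8]> = row.get_bytes(0);
//!
//! // Borrowed if the data is valid UTF-8, owned otherwise.
//! let name = row.get_str(1);
//!
//! // Explicit allocation when an owned String is required.
//! let owned: Option<String> = row.get_string(1);
//!
//! // Type-converting access via `FromSql`.
//! let id: i32 = row.get(0)?;
//! let email: Option<String> = row.try_get_by_name("email");
//! ```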

use std::borrow::Cow;
use std::sync::Arc;

use bytes::Bytes;

use mssql_types::decode::{TypeInfo, decode_value};
use mssql_types::{FromSql, SqlValue, TypeError};

use crate::blob::BlobReader;

/// Column slice information pointing into the row buffer.
///
/// This is the internal representation that enables zero-copy access
/// to column data within the shared buffer.
#[derive(Debug, Clone, Copy)]
pub struct ColumnSlice {
    /// Offset into the buffer where this column's data begins.
    pub offset: u32,
    /// Length of the column data in bytes.
    pub length: u32,
    /// Whether this column value is NULL.
    pub is_null: bool,
}

impl ColumnSlice {
    /// Create a new column slice.
    pub fn new(offset: u32, length: u32, is_null: bool) -> Self {
        Self {
            offset,
            length,
            is_null,
        }
    }

    /// Create a NULL column slice.
    pub fn null() -> Self {
        Self {
            offset: 0,
            length: 0,
            is_null: true,
        }
    }
}

/// Column metadata describing a result set column.
///
/// This struct is marked `#[non_exhaustive]` to allow adding new fields
/// in future versions without breaking semver compatibility. Use
/// [`Column::new()`] or builder methods to construct instances.
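///
/// # Example
///
/// A small sketch of the builder-style construction (the column name, index,
/// and type here are illustrative):
///
/// ```rust,ignore
/// let col = Column::new("amount", 2, "DECIMAL")
///     .with_nullable(false)
///     .with_precision_scale(18, 2);
/// ```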
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct Column {
    /// Column name.
    pub name: String,
    /// Column index (0-based).
    pub index: usize,
    /// SQL type name (e.g., "INT", "NVARCHAR").
    pub type_name: String,
    /// Whether the column allows NULL values.
    pub nullable: bool,
    /// Maximum length for variable-length types.
    pub max_length: Option<u32>,
    /// Precision for numeric types.
    pub precision: Option<u8>,
    /// Scale for numeric types.
    pub scale: Option<u8>,
    /// Collation for string types (VARCHAR, CHAR, TEXT).
    ///
    /// Used for proper encoding/decoding of non-Unicode string data.
    /// When present, enables collation-aware decoding that correctly
    /// handles locale-specific ANSI encodings (e.g., Shift_JIS, GB18030).
    pub collation: Option<tds_protocol::Collation>,
}

impl Column {
    /// Create a new column with basic metadata.
    pub fn new(name: impl Into<String>, index: usize, type_name: impl Into<String>) -> Self {
        Self {
            name: name.into(),
            index,
            type_name: type_name.into(),
            nullable: true,
            max_length: None,
            precision: None,
            scale: None,
            collation: None,
        }
    }

    /// Set whether the column is nullable.
    #[must_use]
    pub fn with_nullable(mut self, nullable: bool) -> Self {
        self.nullable = nullable;
        self
    }

    /// Set the maximum length.
    #[must_use]
    pub fn with_max_length(mut self, max_length: u32) -> Self {
        self.max_length = Some(max_length);
        self
    }

    /// Set precision and scale for numeric types.
    #[must_use]
    pub fn with_precision_scale(mut self, precision: u8, scale: u8) -> Self {
        self.precision = Some(precision);
        self.scale = Some(scale);
        self
    }

    /// Set the collation for string types.
    ///
    /// Used for proper encoding/decoding of non-Unicode string data (VARCHAR, CHAR, TEXT).
    #[must_use]
    pub fn with_collation(mut self, collation: tds_protocol::Collation) -> Self {
        self.collation = Some(collation);
        self
    }

    /// Get the encoding name for this column's collation.
    ///
    /// Returns the name of the character encoding used for this column's data,
    /// or "unknown" if the collation is not set or the encoding feature is disabled.
    ///
    /// # Examples
    ///
    /// - `"Shift_JIS"` - Japanese encoding (LCID 0x0411)
    /// - `"GB18030"` - Simplified Chinese (LCID 0x0804)
    /// - `"UTF-8"` - SQL Server 2019+ UTF-8 collation
    /// - `"windows-1252"` - Latin/Western European (LCID 0x0409)
    /// - `"unknown"` - No collation or unsupported encoding
    #[must_use]
    pub fn encoding_name(&self) -> &'static str {
        #[cfg(feature = "encoding")]
        if let Some(ref collation) = self.collation {
            return collation.encoding_name();
        }
        "unknown"
    }

    /// Check if this column uses UTF-8 encoding.
    ///
    /// Returns `true` if the column has a SQL Server 2019+ UTF-8 collation,
    /// which is indicated by bit 27 (0x0800_0000) being set in the LCID.
    #[must_use]
    pub fn is_utf8_collation(&self) -> bool {
        #[cfg(feature = "encoding")]
        if let Some(ref collation) = self.collation {
            return collation.is_utf8();
        }
        false
    }

    /// Convert column metadata to TDS TypeInfo for decoding.
    ///
    /// Maps type names to TDS type IDs and constructs appropriate TypeInfo.
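    ///
    /// # Example
    ///
    /// A minimal sketch for an `INT` column; the `0x38` type ID follows the
    /// `type_name_to_id` mapping in this module:
    ///
    /// ```rust,ignore
    /// let info = Column::new("id", 0, "INT").to_type_info();
    /// assert_eq!(info.type_id, 0x38);
    /// ```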
    pub fn to_type_info(&self) -> TypeInfo {
        let type_id = type_name_to_id(&self.type_name);
        TypeInfo {
            type_id,
            length: self.max_length,
            scale: self.scale,
            precision: self.precision,
            collation: self.collation.map(|c| mssql_types::decode::Collation {
                lcid: c.lcid,
                flags: c.sort_id,
            }),
        }
    }
}

/// Map SQL type name to TDS type ID.
fn type_name_to_id(name: &str) -> u8 {
    let upper = name.to_uppercase();
    match upper.as_str() {
        // Integer types
        "INT" | "INTEGER" => 0x38,
        "BIGINT" => 0x7F,
        "SMALLINT" => 0x34,
        "TINYINT" => 0x30,
        "BIT" => 0x32,

        // Floating point
        "FLOAT" => 0x3E,
        "REAL" => 0x3B,

        // Decimal/Numeric
        "DECIMAL" | "NUMERIC" => 0x6C,
        "MONEY" | "SMALLMONEY" => 0x6E,

        // String types
        "NVARCHAR" | "NCHAR" | "NTEXT" => 0xE7,
        "VARCHAR" | "CHAR" | "TEXT" => 0xA7,

        // Binary types
        "VARBINARY" | "BINARY" | "IMAGE" => 0xA5,

        // Date/Time types
        "DATE" => 0x28,
        "TIME" => 0x29,
        "DATETIME2" => 0x2A,
        "DATETIMEOFFSET" => 0x2B,
        "DATETIME" => 0x3D,
        "SMALLDATETIME" => 0x3F,

        // GUID
        "UNIQUEIDENTIFIER" => 0x24,

        // XML
        "XML" => 0xF1,

        // Nullable variants (INTNTYPE, etc.); checked against the uppercased
        // name so the match stays case-insensitive like the arms above.
        _ if upper.ends_with('N') => 0x26,

        // Default to binary for unknown types
        _ => 0xA5,
    }
}

/// Shared column metadata for a result set.
///
/// This is shared across all rows in the result set to avoid
/// duplicating metadata per row.
#[derive(Debug, Clone)]
pub struct ColMetaData {
    /// Column definitions.
    pub columns: Arc<[Column]>,
}

impl ColMetaData {
    /// Create new column metadata from a list of columns.
    pub fn new(columns: Vec<Column>) -> Self {
        Self {
            columns: columns.into(),
        }
    }

    /// Get the number of columns.
    #[must_use]
    pub fn len(&self) -> usize {
        self.columns.len()
    }

    /// Check if there are no columns.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.columns.is_empty()
    }

    /// Get a column by index.
    #[must_use]
    pub fn get(&self, index: usize) -> Option<&Column> {
        self.columns.get(index)
    }

    /// Find a column index by name (case-insensitive).
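    ///
    /// # Example
    ///
    /// A small sketch of the case-insensitive lookup (column names are
    /// illustrative):
    ///
    /// ```rust,ignore
    /// let meta = ColMetaData::new(vec![Column::new("Id", 0, "INT")]);
    /// assert_eq!(meta.find_by_name("id"), Some(0));
    /// assert_eq!(meta.find_by_name("missing"), None);
    /// ```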
    #[must_use]
    pub fn find_by_name(&self, name: &str) -> Option<usize> {
        self.columns
            .iter()
            .position(|c| c.name.eq_ignore_ascii_case(name))
    }
}

/// A row from a query result.
///
/// Implements the `Arc<Bytes>` pattern from ADR-004 for reduced memory allocation.
/// The row holds a shared reference to the raw packet buffer and column slice
/// information, deferring parsing and allocation until values are accessed.
///
/// # Memory Model
///
/// ```text
/// Row {
///     buffer: Arc<Bytes> ──────────► [raw packet data...]
///     slices: Arc<[ColumnSlice]> ──► [{offset, length, is_null}, ...]
///     metadata: Arc<ColMetaData> ──► [Column definitions...]
/// }
/// ```
///
/// Multiple `Row` instances from the same result set share the `metadata`.
/// The `buffer` and `slices` are unique per row but use `Arc` for cheap cloning.
///
/// # Access Patterns
///
/// - **Zero-copy:** `get_bytes()`, `get_str()` (when UTF-8 valid)
/// - **Allocating:** `get_string()`, `get::<String>()`
/// - **Type-converting:** `get::<T>()` uses `FromSql` trait
#[derive(Clone)]
pub struct Row {
    /// Shared reference to raw packet body containing row data.
    buffer: Arc<Bytes>,
    /// Column offsets into buffer.
    slices: Arc<[ColumnSlice]>,
    /// Column metadata (shared across result set).
    metadata: Arc<ColMetaData>,
    /// Cached parsed values (lazily populated).
    /// This maintains backward compatibility with code expecting SqlValue access.
    values: Option<Arc<[SqlValue]>>,
}

impl Row {
    /// Create a new row with the `Arc<Bytes>` pattern.
    ///
    /// This is the primary constructor for the reduced-copy pattern.
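    ///
    /// # Example
    ///
    /// A sketch of wiring a row up by hand; in practice the client builds
    /// rows from the shared packet buffer, and the literal buffer here is
    /// illustrative:
    ///
    /// ```rust,ignore
    /// let buffer = Arc::new(Bytes::from_static(b"Hello"));
    /// let slices: Arc<[ColumnSlice]> = vec![ColumnSlice::new(0, 5, false)].into();
    /// let meta = Arc::new(ColMetaData::new(vec![Column::new("greeting", 0, "VARCHAR")]));
    /// let row = Row::new(buffer, slices, meta);
    /// ```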
    pub fn new(buffer: Arc<Bytes>, slices: Arc<[ColumnSlice]>, metadata: Arc<ColMetaData>) -> Self {
        Self {
            buffer,
            slices,
            metadata,
            values: None,
        }
    }

    /// Create a row from pre-parsed values (backward compatibility).
    ///
    /// This constructor supports existing code that works with `SqlValue` directly.
    /// It's less efficient than the buffer-based approach but maintains compatibility.
    #[allow(dead_code)]
    pub(crate) fn from_values(columns: Vec<Column>, values: Vec<SqlValue>) -> Self {
        let metadata = Arc::new(ColMetaData::new(columns));
        let slices: Arc<[ColumnSlice]> = values
            .iter()
            .enumerate()
            .map(|(i, v)| ColumnSlice::new(i as u32, 0, v.is_null()))
            .collect::<Vec<_>>()
            .into();

        Self {
            buffer: Arc::new(Bytes::new()),
            slices,
            metadata,
            values: Some(values.into()),
        }
    }

    // ========================================================================
    // Zero-Copy Access Methods (ADR-004)
    // ========================================================================

    /// Returns borrowed slice into buffer (zero additional allocation).
    ///
    /// This is the most efficient access method when you need raw bytes.
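    ///
    /// # Example
    ///
    /// A minimal sketch (the column index is illustrative):
    ///
    /// ```rust,ignore
    /// if let Some(bytes) = row.get_bytes(0) {
    ///     println!("column 0 holds {} bytes", bytes.len());
    /// }
    /// ```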
    #[must_use]
    pub fn get_bytes(&self, index: usize) -> Option<&[u8]> {
        let slice = self.slices.get(index)?;
        if slice.is_null {
            return None;
        }

        let start = slice.offset as usize;
        let end = start + slice.length as usize;

        if end <= self.buffer.len() {
            Some(&self.buffer[start..end])
        } else {
            None
        }
    }

    /// Returns Cow - borrowed if valid UTF-8, owned if conversion needed.
    ///
    /// For UTF-8 data, this returns a borrowed reference (zero allocation).
    /// For VARCHAR data with collation, uses collation-aware decoding.
    /// For UTF-16 data (NVARCHAR), decodes as UTF-16LE.
    ///
    /// # Collation-Aware Decoding
    ///
    /// When the `encoding` feature is enabled and the column has collation metadata,
    /// VARCHAR data is decoded using the appropriate character encoding based on the
    /// collation's LCID. This correctly handles:
    ///
    /// - Japanese (Shift_JIS/CP932)
    /// - Simplified Chinese (GB18030/CP936)
    /// - Traditional Chinese (Big5/CP950)
    /// - Korean (EUC-KR/CP949)
    /// - Windows code pages 874, 1250-1258
    /// - SQL Server 2019+ UTF-8 collations
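    ///
    /// # Example
    ///
    /// A minimal sketch showing the borrowed/owned distinction (the column
    /// index is illustrative):
    ///
    /// ```rust,ignore
    /// match row.get_str(0) {
    ///     Some(Cow::Borrowed(s)) => println!("borrowed from the row buffer: {s}"),
    ///     Some(Cow::Owned(s)) => println!("decoded into an owned String: {s}"),
    ///     None => println!("NULL or undecodable"),
    /// }
    /// ```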
    #[must_use]
    pub fn get_str(&self, index: usize) -> Option<Cow<'_, str>> {
        let bytes = self.get_bytes(index)?;

        // Try to interpret as UTF-8 first (zero allocation for ASCII/UTF-8 data)
        match std::str::from_utf8(bytes) {
            Ok(s) => Some(Cow::Borrowed(s)),
            Err(_) => {
                // Check if we have collation metadata for this column
                #[cfg(feature = "encoding")]
                if let Some(column) = self.metadata.get(index) {
                    if let Some(ref collation) = column.collation {
                        // Use collation-aware decoding for VARCHAR/CHAR types
                        if let Some(encoding) = collation.encoding() {
                            let (decoded, _, had_errors) = encoding.decode(bytes);
                            if had_errors {
                                tracing::warn!(
                                    column_name = %column.name,
                                    column_index = index,
                                    encoding = %encoding.name(),
                                    lcid = collation.lcid,
                                    byte_len = bytes.len(),
                                    "collation-aware decoding had errors, falling back to UTF-16LE"
                                );
                            } else {
                                return Some(Cow::Owned(decoded.into_owned()));
                            }
                        } else {
                            tracing::debug!(
                                column_name = %column.name,
                                column_index = index,
                                lcid = collation.lcid,
                                "no encoding found for LCID, falling back to UTF-16LE"
                            );
                        }
                    }
                }

                // Assume UTF-16LE (SQL Server NVARCHAR encoding)
                // This requires allocation for the conversion
                let utf16: Vec<u16> = bytes
                    .chunks_exact(2)
                    .map(|chunk| u16::from_le_bytes([chunk[0], chunk[1]]))
                    .collect();

                String::from_utf16(&utf16).ok().map(Cow::Owned)
            }
        }
    }

    /// Allocates new String (explicit allocation).
    ///
    /// Use this when you need an owned String.
    #[must_use]
    pub fn get_string(&self, index: usize) -> Option<String> {
        self.get_str(index).map(|cow| cow.into_owned())
    }

    // ========================================================================
    // Streaming Access (LOB support)
    // ========================================================================

    /// Get a streaming reader for a binary/text column.
    ///
    /// Returns a [`BlobReader`] that implements [`tokio::io::AsyncRead`] for
    /// streaming access to large binary or text columns. This is useful for:
    ///
    /// - Streaming large data to files without fully loading into memory
    /// - Processing data in chunks with progress tracking
    /// - Copying data between I/O destinations efficiently
    ///
    /// # Supported Column Types
    ///
    /// - `VARBINARY`, `VARBINARY(MAX)`
    /// - `VARCHAR`, `VARCHAR(MAX)`
    /// - `NVARCHAR`, `NVARCHAR(MAX)`
    /// - `TEXT`, `NTEXT`, `IMAGE` (legacy types)
    /// - `XML`
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use tokio::io::AsyncWriteExt;
    ///
    /// // Stream a large VARBINARY(MAX) column to a file
    /// let mut reader = row.get_stream(0)?;
    /// let mut file = tokio::fs::File::create("output.bin").await?;
    /// tokio::io::copy(&mut reader, &mut file).await?;
    /// ```
    ///
    /// # Returns
    ///
    /// - `Some(BlobReader)` if the column contains binary/text data
    /// - `None` if the column is NULL or the index is out of bounds
    #[must_use]
    pub fn get_stream(&self, index: usize) -> Option<BlobReader> {
        let slice = self.slices.get(index)?;
        if slice.is_null {
            return None;
        }

        let start = slice.offset as usize;
        let end = start + slice.length as usize;

        if end <= self.buffer.len() {
            // Use zero-copy slicing from Arc<Bytes>
            let data = self.buffer.slice(start..end);
            Some(BlobReader::from_bytes(data))
        } else {
            None
        }
    }

    /// Get a streaming reader for a binary/text column by name.
    ///
    /// See [`get_stream`](Self::get_stream) for details.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// let mut reader = row.get_stream_by_name("document_content")?;
    /// // Process the blob stream...
    /// ```
    #[must_use]
    pub fn get_stream_by_name(&self, name: &str) -> Option<BlobReader> {
        let index = self.metadata.find_by_name(name)?;
        self.get_stream(index)
    }

    // ========================================================================
    // Type-Converting Access (FromSql trait)
    // ========================================================================

    /// Get a value by column index with type conversion.
    ///
    /// Uses the `FromSql` trait to convert the raw value to the requested type.
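    ///
    /// # Example
    ///
    /// A minimal sketch (the column layout is illustrative):
    ///
    /// ```rust,ignore
    /// let id: i32 = row.get(0)?;
    /// let name: String = row.get_by_name("name")?;
    /// ```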
    pub fn get<T: FromSql>(&self, index: usize) -> Result<T, TypeError> {
        // If we have cached values, use them
        if let Some(ref values) = self.values {
            return values
                .get(index)
                .ok_or_else(|| TypeError::TypeMismatch {
                    expected: "valid column index",
                    actual: format!("index {index} out of bounds"),
                })
                .and_then(T::from_sql);
        }

        // Otherwise, parse on demand from the buffer
        let slice = self
            .slices
            .get(index)
            .ok_or_else(|| TypeError::TypeMismatch {
                expected: "valid column index",
                actual: format!("index {index} out of bounds"),
            })?;

        if slice.is_null {
            return Err(TypeError::UnexpectedNull);
        }

        // Parse via SqlValue then convert to target type
        // Note: parse_value uses zero-copy buffer slicing (Arc<Bytes>::slice)
        let value = self.parse_value(index, slice)?;
        T::from_sql(&value)
    }

    /// Get a value by column name with type conversion.
    pub fn get_by_name<T: FromSql>(&self, name: &str) -> Result<T, TypeError> {
        let index = self
            .metadata
            .find_by_name(name)
            .ok_or_else(|| TypeError::TypeMismatch {
                expected: "valid column name",
                actual: format!("column '{name}' not found"),
            })?;

        self.get(index)
    }

    /// Try to get a value by column index, returning None if NULL or not found.
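    ///
    /// # Example
    ///
    /// A minimal sketch of NULL-tolerant access (column index and type are
    /// illustrative):
    ///
    /// ```rust,ignore
    /// let middle_name: Option<String> = row.try_get(2);
    /// ```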
    pub fn try_get<T: FromSql>(&self, index: usize) -> Option<T> {
        // If we have cached values, use them
        if let Some(ref values) = self.values {
            return values
                .get(index)
                .and_then(|v| T::from_sql_nullable(v).ok().flatten());
        }

        // Otherwise check the slice
        let slice = self.slices.get(index)?;
        if slice.is_null {
            return None;
        }

        self.get(index).ok()
    }

    /// Try to get a value by column name, returning None if NULL or not found.
    pub fn try_get_by_name<T: FromSql>(&self, name: &str) -> Option<T> {
        let index = self.metadata.find_by_name(name)?;
        self.try_get(index)
    }

    // ========================================================================
    // Raw Value Access (backward compatibility)
    // ========================================================================

    /// Get the raw SQL value by index.
    ///
    /// Note: This may allocate if values haven't been cached.
    #[must_use]
    pub fn get_raw(&self, index: usize) -> Option<SqlValue> {
        if let Some(ref values) = self.values {
            return values.get(index).cloned();
        }

        let slice = self.slices.get(index)?;
        self.parse_value(index, slice).ok()
    }

    /// Get the raw SQL value by column name.
    #[must_use]
    pub fn get_raw_by_name(&self, name: &str) -> Option<SqlValue> {
        let index = self.metadata.find_by_name(name)?;
        self.get_raw(index)
    }

    // ========================================================================
    // Metadata Access
    // ========================================================================

    /// Get the number of columns in the row.
    #[must_use]
    pub fn len(&self) -> usize {
        self.slices.len()
    }

    /// Check if the row is empty.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.slices.is_empty()
    }

    /// Get the column metadata.
    #[must_use]
    pub fn columns(&self) -> &[Column] {
        &self.metadata.columns
    }

    /// Get the shared column metadata.
    #[must_use]
    pub fn metadata(&self) -> &Arc<ColMetaData> {
        &self.metadata
    }

    /// Check if a column value is NULL.
    #[must_use]
    pub fn is_null(&self, index: usize) -> bool {
        self.slices.get(index).map(|s| s.is_null).unwrap_or(true)
    }

    /// Check if a column value is NULL by name.
    #[must_use]
    pub fn is_null_by_name(&self, name: &str) -> bool {
        self.metadata
            .find_by_name(name)
            .map(|i| self.is_null(i))
            .unwrap_or(true)
    }

    // ========================================================================
    // Internal Helpers
    // ========================================================================

    /// Parse a value from the buffer at the given slice.
    ///
    /// Uses the mssql-types decode module for efficient binary parsing.
    /// Optimized to use zero-copy buffer slicing via Arc<Bytes>.
    fn parse_value(&self, index: usize, slice: &ColumnSlice) -> Result<SqlValue, TypeError> {
        if slice.is_null {
            return Ok(SqlValue::Null);
        }

        let column = self
            .metadata
            .get(index)
            .ok_or_else(|| TypeError::TypeMismatch {
                expected: "valid column metadata",
                actual: format!("no metadata for column {index}"),
            })?;

        // Calculate byte range for this column
        let start = slice.offset as usize;
        let end = start + slice.length as usize;

        // Validate range
        if end > self.buffer.len() {
            return Err(TypeError::TypeMismatch {
                expected: "valid byte range",
                actual: format!(
                    "range {}..{} exceeds buffer length {}",
                    start,
                    end,
                    self.buffer.len()
                ),
            });
        }

        // Convert column metadata to TypeInfo for the decode module
        let type_info = column.to_type_info();

        // Use zero-copy slice of the buffer instead of allocating
        // This avoids the overhead of Bytes::copy_from_slice
        let mut buf = self.buffer.slice(start..end);

        // Use the unified decode module for efficient parsing
        decode_value(&mut buf, &type_info)
    }
}

impl std::fmt::Debug for Row {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Row")
            .field("columns", &self.metadata.columns.len())
            .field("buffer_size", &self.buffer.len())
            .field("has_cached_values", &self.values.is_some())
            .finish()
    }
}

/// Iterator over row values as SqlValue.
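///
/// # Example
///
/// A minimal sketch of iterating a row (obtained elsewhere) through
/// `IntoIterator for &Row`:
///
/// ```rust,ignore
/// for value in &row {
///     if matches!(value, SqlValue::Null) {
///         // handle NULL values here
///     }
/// }
/// ```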
pub struct RowIter<'a> {
    row: &'a Row,
    index: usize,
}

impl Iterator for RowIter<'_> {
    type Item = SqlValue;

    fn next(&mut self) -> Option<Self::Item> {
        if self.index >= self.row.len() {
            return None;
        }
        let value = self.row.get_raw(self.index);
        self.index += 1;
        value
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.row.len() - self.index;
        (remaining, Some(remaining))
    }
}

impl<'a> IntoIterator for &'a Row {
    type Item = SqlValue;
    type IntoIter = RowIter<'a>;

    fn into_iter(self) -> Self::IntoIter {
        RowIter {
            row: self,
            index: 0,
        }
    }
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    #[test]
    fn test_column_slice_null() {
        let slice = ColumnSlice::null();
        assert!(slice.is_null);
        assert_eq!(slice.offset, 0);
        assert_eq!(slice.length, 0);
    }

    #[test]
    fn test_column_metadata() {
        let col = Column::new("id", 0, "INT")
            .with_nullable(false)
            .with_precision_scale(10, 0);

        assert_eq!(col.name, "id");
        assert_eq!(col.index, 0);
        assert!(!col.nullable);
        assert_eq!(col.precision, Some(10));
    }

    #[test]
    fn test_col_metadata_find_by_name() {
        let meta = ColMetaData::new(vec![
            Column::new("id", 0, "INT"),
            Column::new("Name", 1, "NVARCHAR"),
        ]);

        assert_eq!(meta.find_by_name("id"), Some(0));
        assert_eq!(meta.find_by_name("ID"), Some(0)); // case-insensitive
        assert_eq!(meta.find_by_name("name"), Some(1));
        assert_eq!(meta.find_by_name("unknown"), None);
    }

    #[test]
    fn test_row_from_values_backward_compat() {
        let columns = vec![
            Column::new("id", 0, "INT"),
            Column::new("name", 1, "NVARCHAR"),
        ];
        let values = vec![SqlValue::Int(42), SqlValue::String("Alice".to_string())];

        let row = Row::from_values(columns, values);

        assert_eq!(row.len(), 2);
        assert_eq!(row.get::<i32>(0).unwrap(), 42);
        assert_eq!(row.get_by_name::<String>("name").unwrap(), "Alice");
    }

    #[test]
    fn test_row_is_null() {
        let columns = vec![
            Column::new("id", 0, "INT"),
            Column::new("nullable_col", 1, "NVARCHAR"),
        ];
        let values = vec![SqlValue::Int(1), SqlValue::Null];

        let row = Row::from_values(columns, values);

        assert!(!row.is_null(0));
        assert!(row.is_null(1));
        assert!(row.is_null(99)); // Out of bounds returns true
    }

    #[test]
    fn test_row_get_bytes_with_buffer() {
        let buffer = Arc::new(Bytes::from_static(b"Hello World"));
        let slices: Arc<[ColumnSlice]> = vec![
            ColumnSlice::new(0, 5, false), // "Hello"
            ColumnSlice::new(6, 5, false), // "World"
        ]
        .into();
        let meta = Arc::new(ColMetaData::new(vec![
            Column::new("greeting", 0, "VARCHAR"),
            Column::new("subject", 1, "VARCHAR"),
        ]));

        let row = Row::new(buffer, slices, meta);

        assert_eq!(row.get_bytes(0), Some(b"Hello".as_slice()));
        assert_eq!(row.get_bytes(1), Some(b"World".as_slice()));
    }

    #[test]
    fn test_row_get_str() {
        let buffer = Arc::new(Bytes::from_static(b"Test"));
        let slices: Arc<[ColumnSlice]> = vec![ColumnSlice::new(0, 4, false)].into();
        let meta = Arc::new(ColMetaData::new(vec![Column::new("val", 0, "VARCHAR")]));

        let row = Row::new(buffer, slices, meta);

        let s = row.get_str(0).unwrap();
        assert_eq!(s, "Test");
        // Should be borrowed for valid UTF-8
        assert!(matches!(s, Cow::Borrowed(_)));
    }

    #[test]
    fn test_row_metadata_access() {
        let columns = vec![Column::new("col1", 0, "INT")];
        let row = Row::from_values(columns, vec![SqlValue::Int(1)]);

        assert_eq!(row.columns().len(), 1);
        assert_eq!(row.columns()[0].name, "col1");
        assert_eq!(row.metadata().len(), 1);
    }

    #[test]
    fn test_row_get_stream() {
        let buffer = Arc::new(Bytes::from_static(b"Hello, World!"));
        let slices: Arc<[ColumnSlice]> = vec![
            ColumnSlice::new(0, 5, false), // "Hello"
            ColumnSlice::new(7, 5, false), // "World"
            ColumnSlice::null(),           // NULL column
        ]
        .into();
        let meta = Arc::new(ColMetaData::new(vec![
            Column::new("greeting", 0, "VARBINARY"),
            Column::new("subject", 1, "VARBINARY"),
            Column::new("nullable", 2, "VARBINARY"),
        ]));

        let row = Row::new(buffer, slices, meta);

        // Get stream for first column
        let reader = row.get_stream(0).unwrap();
        assert_eq!(reader.len(), Some(5));
        assert_eq!(reader.as_bytes().as_ref(), b"Hello");

        // Get stream for second column
        let reader = row.get_stream(1).unwrap();
        assert_eq!(reader.len(), Some(5));
        assert_eq!(reader.as_bytes().as_ref(), b"World");

        // NULL column returns None
        assert!(row.get_stream(2).is_none());

        // Out of bounds returns None
        assert!(row.get_stream(99).is_none());
    }

    #[test]
    fn test_row_get_stream_by_name() {
        let buffer = Arc::new(Bytes::from_static(b"Binary data here"));
        let slices: Arc<[ColumnSlice]> = vec![ColumnSlice::new(0, 11, false)].into();
        let meta = Arc::new(ColMetaData::new(vec![Column::new(
            "document",
            0,
            "VARBINARY",
        )]));

        let row = Row::new(buffer, slices, meta);

        // Get by name (case-insensitive)
        let reader = row.get_stream_by_name("document").unwrap();
        assert_eq!(reader.len(), Some(11));

        let reader = row.get_stream_by_name("DOCUMENT").unwrap();
        assert_eq!(reader.len(), Some(11));

        // Unknown column returns None
        assert!(row.get_stream_by_name("unknown").is_none());
    }
}