Expand description

Fill Apache Arrow arrays from ODBC data sources.


use arrow_odbc::{odbc_api::Environment, OdbcReader};

const CONNECTION_STRING: &str = "\
    Driver={ODBC Driver 17 for SQL Server};\

fn main() -> Result<(), anyhow::Error> {
    // Your application is fine if you spin up only one Environment.
    let odbc_environment = Environment::new()?;
    // Connect with database.
    let connection = odbc_environment.connect_with_connection_string(CONNECTION_STRING)?;

    // This SQL statement does not require any arguments.
    let parameters = ();

    // Execute query and create result set
    let cursor = connection
        .execute("SELECT * FROM MyTable", parameters)?
        .expect("SELECT statement must produce a cursor");

    // Each batch shall only consist of maximum 10.000 rows.
    let max_batch_size = 10_000;

    // Read result set as arrow batches. Infer Arrow types automatically using the meta
    // information of `cursor`.
    let arrow_record_batches = OdbcReader::new(cursor, max_batch_size)?;

    for batch in arrow_record_batches {
        // ... process batch ...



// Re-export `arrow` so downstream code can name the exact Arrow version this
// crate was built against (presumably to avoid mixing two `arrow` versions —
// confirm against the crate's dependency policy).
pub use arrow;
// Re-export `odbc_api`; the usage example reaches `Environment` through
// `arrow_odbc::odbc_api`, so callers need these types in scope.
pub use odbc_api;


Arrow ODBC reader. Implements the arrow::record_batch::RecordBatchReader trait so it can be used to fill Arrow arrays from an ODBC data source.


A variety of things which can go wrong when creating an [OdbcReader].


Query the metadata to create an Arrow schema. This method is invoked automatically for you by crate::OdbcReader::new. You may want to call this method in situations where you want to create an Arrow schema without creating the reader yet.