parquet_opendal/lib.rs
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! parquet_opendal provides parquet IO utilities.
//!
//! ```no_run
//! use std::sync::Arc;
//!
//! use arrow::array::{ArrayRef, Int64Array, RecordBatch};
//! use futures::StreamExt;
//! use opendal::{services::S3Config, Operator};
//! use parquet::arrow::{AsyncArrowWriter, ParquetRecordBatchStreamBuilder};
//! use parquet_opendal::{AsyncReader, AsyncWriter};
//!
//! #[tokio::main]
//! async fn main() {
//!     let mut cfg = S3Config::default();
//!     cfg.access_key_id = Some("my_access_key".to_string());
//!     cfg.secret_access_key = Some("my_secret_key".to_string());
//!     cfg.endpoint = Some("my_endpoint".to_string());
//!     cfg.region = Some("my_region".to_string());
//!     cfg.bucket = "my_bucket".to_string();
//!
//!     // Create a new operator
//!     let operator = Operator::from_config(cfg).unwrap().finish();
//!     let path = "/path/to/file.parquet";
//!
//!     // Create an async writer
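//!     // chunk sets the buffer size of each uploaded part, and concurrent
//!     // sets how many parts may be in flight at once.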
//!     let writer = AsyncWriter::new(
//!         operator
//!             .writer_with(path)
//!             .chunk(32 * 1024 * 1024)
//!             .concurrent(8)
//!             .await
//!             .unwrap(),
//!     );
//!
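//!     // Write a single-column RecordBatch; close() flushes the parquet footer.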
//!     let col = Arc::new(Int64Array::from_iter_values([1, 2, 3])) as ArrayRef;
//!     let to_write = RecordBatch::try_from_iter([("col", col)]).unwrap();
//!     let mut writer = AsyncArrowWriter::try_new(writer, to_write.schema(), None).unwrap();
//!     writer.write(&to_write).await.unwrap();
//!     writer.close().await.unwrap();
//!
//!     // gap: Allow the underlying reader to merge small IOs
//!     // when the gap between multiple IO ranges is less than the threshold.
//!     let reader = operator
//!         .reader_with(path)
//!         .gap(512 * 1024)
//!         .chunk(16 * 1024 * 1024)
//!         .concurrent(16)
//!         .await
//!         .unwrap();
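//!
//!     // AsyncReader needs the total file length up front; prefetching the
//!     // footer cuts down on small reads while parsing the parquet metadata.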
//!     let content_len = operator.stat(path).await.unwrap().content_length();
//!     let reader = AsyncReader::new(reader, content_len).with_prefetch_footer_size(512 * 1024);
//!     let mut stream = ParquetRecordBatchStreamBuilder::new(reader)
//!         .await
//!         .unwrap()
//!         .build()
//!         .unwrap();
//!     let read = stream.next().await.unwrap().unwrap();
//!     assert_eq!(to_write, read);
//! }
//! ```
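//!
//! The same readers and writers work against any OpenDAL service, so a quick
//! local test needs no S3 credentials. A minimal sketch using the in-memory
//! backend (assuming opendal's default `services-memory` feature is enabled):
//!
//! ```no_run
//! use opendal::{services::Memory, Operator};
//! use parquet_opendal::AsyncWriter;
//!
//! #[tokio::main]
//! async fn main() {
//!     // Illustrative only: data lives in process memory, so no credentials
//!     // or network are involved.
//!     let operator = Operator::new(Memory::default()).unwrap().finish();
//!     let writer = AsyncWriter::new(operator.writer_with("test.parquet").await.unwrap());
//!     // `writer` plugs into parquet's AsyncArrowWriter exactly as above.
//! }
//! ```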

mod async_reader;
mod async_writer;

pub use async_reader::AsyncReader;
pub use async_writer::AsyncWriter;