parquet/arrow/array_reader/mod.rs
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Logic for reading into arrow arrays: [`ArrayReader`] and [`RowGroups`]

use crate::errors::Result;
use arrow_array::ArrayRef;
use arrow_schema::DataType as ArrowType;
use std::any::Any;
use std::sync::Arc;

use crate::arrow::record_reader::GenericRecordReader;
use crate::arrow::record_reader::buffer::ValuesBuffer;
use crate::column::page::PageIterator;
use crate::column::reader::decoder::ColumnValueDecoder;
use crate::file::metadata::ParquetMetaData;
use crate::file::reader::{FilePageIterator, FileReader};

mod builder;
mod byte_array;
mod byte_array_dictionary;
mod byte_view_array;
mod cached_array_reader;
mod empty_array;
mod fixed_len_byte_array;
mod fixed_size_list_array;
mod list_array;
mod map_array;
mod null_array;
mod primitive_array;
mod row_group_cache;
mod row_group_index;
mod row_number;
mod struct_array;

#[cfg(test)]
mod test_util;

// Note that this crate is public under the `experimental` feature flag.
use crate::file::metadata::RowGroupMetaData;
pub use builder::{ArrayReaderBuilder, CacheOptions, CacheOptionsBuilder};
pub use byte_array::make_byte_array_reader;
pub use byte_array_dictionary::make_byte_array_dictionary_reader;
#[allow(unused_imports)] // Only used for benchmarks
pub use byte_view_array::make_byte_view_array_reader;
#[allow(unused_imports)] // Only used for benchmarks
pub use fixed_len_byte_array::make_fixed_len_byte_array_reader;
pub use fixed_size_list_array::FixedSizeListArrayReader;
pub use list_array::ListArrayReader;
pub use map_array::MapArrayReader;
pub use null_array::NullArrayReader;
pub use primitive_array::PrimitiveArrayReader;
pub use row_group_cache::RowGroupCache;
pub use struct_array::StructArrayReader;

/// Reads Parquet data into Arrow Arrays.
///
/// This is an internal implementation detail of the Parquet reader, and is not
/// intended for public use.
///
/// This is the core trait for reading encoded Parquet data directly into Arrow
/// Arrays efficiently. There are various specializations of this trait for
/// different combinations of encodings and arrays, such as
/// [`PrimitiveArrayReader`], [`ListArrayReader`], etc.
///
/// Each `ArrayReader` logically contains the following state:
/// 1. A handle to the encoded Parquet data
/// 2. An in-progress buffered Array
///
/// Data can either be read in batches using [`ArrayReader::next_batch`] or
/// incrementally using [`ArrayReader::read_records`] and [`ArrayReader::skip_records`].
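///
/// A hedged sketch of incremental reading (not a tested example; assumes
/// `reader` is some concrete `ArrayReader`, e.g. a [`PrimitiveArrayReader`]):
///
/// ```ignore
/// // Skip the first 100 records without materializing them
/// reader.skip_records(100)?;
/// // Buffer up to 1024 records; fewer are read if the data is exhausted
/// let read = reader.read_records(1024)?;
/// // Convert the buffered records into an Arrow array
/// let array: ArrayRef = reader.consume_batch()?;
/// ```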
pub trait ArrayReader: Send {
    // TODO: this function is never used, and the trait is not public. Perhaps this should be
    // removed.
    #[allow(dead_code)]
    fn as_any(&self) -> &dyn Any;

    /// Returns the arrow type of this array reader.
    fn get_data_type(&self) -> &ArrowType;

    /// Reads at most `batch_size` records into an arrow array and returns it.
    #[cfg(any(feature = "experimental", test))]
    fn next_batch(&mut self, batch_size: usize) -> Result<ArrayRef> {
        self.read_records(batch_size)?;
        self.consume_batch()
    }

    /// Reads the values of at most `batch_size` records into the in-progress
    /// buffer
    ///
    /// Returns the number of records read, which can be less than `batch_size` if
    /// the pages are exhausted.
    fn read_records(&mut self, batch_size: usize) -> Result<usize>;

    /// Consumes all currently buffered data into an arrow array and returns it.
    fn consume_batch(&mut self) -> Result<ArrayRef>;

    /// Skips over `num_records` records, returning the number of records skipped
    ///
    /// Note that calling `skip_records` with large values of `num_records` is
    /// efficient as it avoids decoding data into the in-progress array.
    /// However, there is overhead to calling this function, so for small values of
    /// `num_records` it can be more efficient to call [`Self::read_records`] and
    /// apply a filter to the resulting array.
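    ///
    /// A hedged sketch of that small-skip alternative (illustrative names; the
    /// decoded prefix is sliced away rather than skipped):
    ///
    /// ```ignore
    /// let _ = reader.read_records(to_skip + to_read)?;
    /// let array = reader.consume_batch()?;
    /// // Drop the first `to_skip` rows of the decoded batch
    /// let array = array.slice(to_skip, array.len().saturating_sub(to_skip));
    /// ```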
    fn skip_records(&mut self, num_records: usize) -> Result<usize>;

    /// If this array has a non-zero definition level, i.e. has a nullable parent
    /// array, returns the definition levels of data from the last call to `next_batch`
    ///
    /// Otherwise returns `None`
    ///
    /// This is used by the parent [`ArrayReader`] to compute its null bitmap
    fn get_def_levels(&self) -> Option<&[i16]>;

    /// If this array has a non-zero repetition level, i.e. has a repeated parent
    /// array, returns the repetition levels of data from the last call to `next_batch`
    ///
    /// Otherwise returns `None`
    ///
    /// This is used by the parent [`ArrayReader`] to compute its array offsets
    fn get_rep_levels(&self) -> Option<&[i16]>;
}

/// Interface for reading data pages from the columns of one or more RowGroups.
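///
/// A hedged usage sketch (not a tested example; assumes `row_groups`
/// implements this trait and that leaf column index `0` exists):
///
/// ```ignore
/// let total_rows = row_groups.num_rows();
/// // One `PageReader` per row group for the leaf column at index 0
/// let mut pages = row_groups.column_chunks(0)?;
/// while let Some(page_reader) = pages.next() {
///     let page_reader = page_reader?;
///     // ... feed `page_reader` to a record reader / `ArrayReader` ...
/// }
/// ```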
pub trait RowGroups {
    /// Get the number of rows in this collection
    fn num_rows(&self) -> usize;

    /// Returns a [`PageIterator`] for all pages in the specified column chunk
    /// across all row groups in this collection.
    fn column_chunks(&self, i: usize) -> Result<Box<dyn PageIterator>>;

    /// Returns an iterator over the row groups in this collection
    ///
    /// Note this may not include all row groups in [`Self::metadata`].
    fn row_groups(&self) -> Box<dyn Iterator<Item = &RowGroupMetaData> + '_>;

    /// Returns the parquet metadata
    fn metadata(&self) -> &ParquetMetaData;
}

impl RowGroups for Arc<dyn FileReader> {
    fn num_rows(&self) -> usize {
        FileReader::metadata(self.as_ref())
            .file_metadata()
            .num_rows() as usize
    }

    fn column_chunks(&self, column_index: usize) -> Result<Box<dyn PageIterator>> {
        let iterator = FilePageIterator::new(column_index, Arc::clone(self))?;
        Ok(Box::new(iterator))
    }

    fn row_groups(&self) -> Box<dyn Iterator<Item = &RowGroupMetaData> + '_> {
        Box::new(FileReader::metadata(self.as_ref()).row_groups().iter())
    }

    fn metadata(&self) -> &ParquetMetaData {
        FileReader::metadata(self.as_ref())
    }
}

/// Uses `record_reader` to read up to `batch_size` records from `pages`
///
/// Returns the number of records read, which can be less than `batch_size` if
/// `pages` is exhausted.
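///
/// A hedged sketch of how the leaf readers in this module drive this helper
/// (assumes `record_reader` and `pages` of the parameter types below are in
/// scope; `skip_records` below follows the same pattern):
///
/// ```ignore
/// // Decodes records across column chunk boundaries, pulling a new
/// // `PageReader` from `pages` whenever the current one is exhausted
/// let read = read_records(&mut record_reader, pages.as_mut(), 1024)?;
/// assert!(read <= 1024);
/// ```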
fn read_records<V, CV>(
    record_reader: &mut GenericRecordReader<V, CV>,
    pages: &mut dyn PageIterator,
    batch_size: usize,
) -> Result<usize>
where
    V: ValuesBuffer,
    CV: ColumnValueDecoder<Buffer = V>,
{
    let mut records_read = 0usize;
    while records_read < batch_size {
        let records_to_read = batch_size - records_read;

        let records_read_once = record_reader.read_records(records_to_read)?;
        records_read += records_read_once;

        // Record reader exhausted
        if records_read_once < records_to_read {
            if let Some(page_reader) = pages.next() {
                // Read from new page reader (i.e. column chunk)
                record_reader.set_page_reader(page_reader?)?;
            } else {
                // Page reader also exhausted
                break;
            }
        }
    }
    Ok(records_read)
}

/// Uses `record_reader` to skip up to `batch_size` records from `pages`
///
/// Returns the number of records skipped, which can be less than `batch_size` if
/// `pages` is exhausted.
fn skip_records<V, CV>(
    record_reader: &mut GenericRecordReader<V, CV>,
    pages: &mut dyn PageIterator,
    batch_size: usize,
) -> Result<usize>
where
    V: ValuesBuffer,
    CV: ColumnValueDecoder<Buffer = V>,
{
    let mut records_skipped = 0usize;
    while records_skipped < batch_size {
        let records_to_read = batch_size - records_skipped;

        let records_skipped_once = record_reader.skip_records(records_to_read)?;
        records_skipped += records_skipped_once;

        // Record reader exhausted
        if records_skipped_once < records_to_read {
            if let Some(page_reader) = pages.next() {
                // Read from new page reader (i.e. column chunk)
                record_reader.set_page_reader(page_reader?)?;
            } else {
                // Page reader also exhausted
                break;
            }
        }
    }
    Ok(records_skipped)
}