parquet/file/reader.rs

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

//! File reader API and methods to access file metadata, row group
//! readers to read individual column chunks, and record iterators.

use bytes::{Buf, Bytes};
use std::fs::File;
use std::io::{BufReader, Seek, SeekFrom};
use std::{io::Read, sync::Arc};

use crate::bloom_filter::Sbbf;
use crate::column::page::PageIterator;
use crate::column::{page::PageReader, reader::ColumnReader};
use crate::errors::{ParquetError, Result};
use crate::file::metadata::*;
pub use crate::file::serialized_reader::{SerializedFileReader, SerializedPageReader};
use crate::record::reader::RowIter;
use crate::schema::types::Type as SchemaType;

use crate::basic::Type;

use crate::column::reader::ColumnReaderImpl;

/// `Length` returns the total number of bytes in the input source.
/// It is mainly used to read the metadata, which is at the end of the source.
#[allow(clippy::len_without_is_empty)]
pub trait Length {
    /// Returns the total number of bytes of the inner source.
    fn len(&self) -> u64;
}

/// Generates [`Read`]ers to read chunks of a Parquet data source.
///
/// The Parquet reader uses [`ChunkReader`] to access Parquet data, allowing
/// multiple decoders to read concurrently from different locations in the same
/// file.
///
/// The trait functions both as a reader and as a factory for readers:
/// * random access via [`Self::get_bytes`]
/// * sequential access via the reader returned by the factory method [`Self::get_read`]
///
/// # Provided Implementations
/// * [`File`] for reading from the local file system
/// * [`Bytes`] for reading from an in-memory buffer
///
/// User-provided implementations can implement more sophisticated behaviors
/// such as on-demand buffering or scan sharing.
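///
/// # Example
///
/// A minimal usage sketch of both access styles, using the in-memory [`Bytes`]
/// implementation provided below:
///
/// ```
/// use bytes::Bytes;
/// use std::io::Read;
/// use parquet::file::reader::{ChunkReader, Length};
///
/// let data = Bytes::from(vec![0u8, 1, 2, 3, 4, 5, 6, 7]);
/// assert_eq!(Length::len(&data), 8);
///
/// // Random access: copy a 4-byte range starting at offset 2.
/// let slice = data.get_bytes(2, 4).unwrap();
/// assert_eq!(slice.as_ref(), &[2u8, 3, 4, 5]);
///
/// // Sequential access: read everything from offset 4 onwards.
/// let mut buf = Vec::new();
/// data.get_read(4).unwrap().read_to_end(&mut buf).unwrap();
/// assert_eq!(buf, vec![4u8, 5, 6, 7]);
/// ```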
pub trait ChunkReader: Length + Send + Sync {
    /// The concrete type of reader returned by this trait
    type T: Read;

    /// Get a [`Read`] instance starting at the provided file offset
    ///
    /// Returned readers follow the model of [`File::try_clone`] where mutations
    /// of one reader affect all readers. Thus subsequent or concurrent calls to
    /// [`Self::get_read`] or [`Self::get_bytes`] may cause side-effects on
    /// previously returned readers. Callers of `get_read` should take care
    /// to avoid race conditions.
    fn get_read(&self, start: u64) -> Result<Self::T>;

    /// Get a range of data in memory as [`Bytes`]
    ///
    /// Similarly to [`Self::get_read`], this method may have side-effects on
    /// previously returned readers.
    fn get_bytes(&self, start: u64, length: usize) -> Result<Bytes>;
}

impl Length for File {
    fn len(&self) -> u64 {
        self.metadata().map(|m| m.len()).unwrap_or(0u64)
    }
}

impl ChunkReader for File {
    type T = BufReader<File>;

    fn get_read(&self, start: u64) -> Result<Self::T> {
        // Clone the underlying handle, seek it to the requested offset, and
        // return the seeked handle wrapped in a buffered reader.
        let mut reader = self.try_clone()?;
        reader.seek(SeekFrom::Start(start))?;
        Ok(BufReader::new(reader))
    }

    fn get_bytes(&self, start: u64, length: usize) -> Result<Bytes> {
        let mut buffer = Vec::with_capacity(length);
        let mut reader = self.try_clone()?;
        reader.seek(SeekFrom::Start(start))?;
        let read = reader.take(length as _).read_to_end(&mut buffer)?;

        if read != length {
            return Err(eof_err!(
                "Expected to read {} bytes, read only {}",
                length,
                read
            ));
        }
        Ok(buffer.into())
    }
}

impl Length for Bytes {
    fn len(&self) -> u64 {
        self.len() as u64
    }
}

impl ChunkReader for Bytes {
    type T = bytes::buf::Reader<Bytes>;

    fn get_read(&self, start: u64) -> Result<Self::T> {
        let start = start as usize;
        if start > self.len() {
            return Err(eof_err!(
                "Expected to read at offset {start}, while file has length {}",
                self.len()
            ));
        }
        Ok(self.slice(start..).reader())
    }

    fn get_bytes(&self, start: u64, length: usize) -> Result<Bytes> {
        let start = start as usize;
        if start > self.len() || start + length > self.len() {
            return Err(eof_err!(
                "Expected to read {} bytes at offset {}, while file has length {}",
                length,
                start,
                self.len()
            ));
        }
        Ok(self.slice(start..start + length))
    }
}

// ----------------------------------------------------------------------
// APIs for file & row group readers

/// Parquet file reader API. With this, a user can get metadata information about the
/// Parquet file, obtain a reader for each row group, and access a record iterator.
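///
/// # Example
///
/// A minimal sketch of reading a file through [`SerializedFileReader`]
/// (the path `data.parquet` is a placeholder):
///
/// ```no_run
/// use std::fs::File;
/// use parquet::file::reader::{FileReader, SerializedFileReader};
///
/// let file = File::open("data.parquet").unwrap();
/// let reader = SerializedFileReader::new(file).unwrap();
///
/// // Inspect file-level metadata.
/// println!("row groups: {}", reader.num_row_groups());
///
/// // Iterate over all rows using the full file schema (projection = None).
/// for row in reader.get_row_iter(None).unwrap() {
///     println!("{:?}", row);
/// }
/// ```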
pub trait FileReader: Send + Sync {
    /// Get metadata information about this file.
    fn metadata(&self) -> &ParquetMetaData;

    /// Get the total number of row groups for this file.
    fn num_row_groups(&self) -> usize;

    /// Get the `i`th row group reader. Note this doesn't do bounds checking.
    fn get_row_group(&self, i: usize) -> Result<Box<dyn RowGroupReader + '_>>;

    /// Get an iterator over the rows in this file, see [`RowIter`] for caveats.
    ///
    /// The iterator will automatically load the next row group as it advances.
    ///
    /// The projected schema can be a subset of, or equal to, the file schema; when it is
    /// `None`, the full file schema is assumed.
    fn get_row_iter(&self, projection: Option<SchemaType>) -> Result<RowIter<'_>>;
}

/// Parquet row group reader API. With this, a user can get metadata information about the
/// row group, as well as readers for each individual column chunk.
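///
/// # Example
///
/// A minimal sketch of reading a single row group obtained from a
/// [`FileReader`] (the path `data.parquet` is a placeholder):
///
/// ```no_run
/// use std::fs::File;
/// use parquet::file::reader::{FileReader, RowGroupReader, SerializedFileReader};
///
/// let file = File::open("data.parquet").unwrap();
/// let reader = SerializedFileReader::new(file).unwrap();
///
/// // Read the first row group only.
/// let row_group = reader.get_row_group(0).unwrap();
/// println!("rows in group: {}", row_group.metadata().num_rows());
///
/// // Iterate over the rows of this row group using the full file schema.
/// for row in row_group.get_row_iter(None).unwrap() {
///     println!("{:?}", row);
/// }
/// ```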
pub trait RowGroupReader: Send + Sync {
    /// Get metadata information about this row group.
    fn metadata(&self) -> &RowGroupMetaData;

    /// Get the total number of column chunks in this row group.
    fn num_columns(&self) -> usize;

    /// Get page reader for the `i`th column chunk.
    fn get_column_page_reader(&self, i: usize) -> Result<Box<dyn PageReader>>;

    /// Get value reader for the `i`th column chunk.
    fn get_column_reader(&self, i: usize) -> Result<ColumnReader> {
        let schema_descr = self.metadata().schema_descr();
        let col_descr = schema_descr.column(i);
        let col_page_reader = self.get_column_page_reader(i)?;
        let col_reader = match col_descr.physical_type() {
            Type::BOOLEAN => {
                ColumnReader::BoolColumnReader(ColumnReaderImpl::new(col_descr, col_page_reader))
            }
            Type::INT32 => {
                ColumnReader::Int32ColumnReader(ColumnReaderImpl::new(col_descr, col_page_reader))
            }
            Type::INT64 => {
                ColumnReader::Int64ColumnReader(ColumnReaderImpl::new(col_descr, col_page_reader))
            }
            Type::INT96 => {
                ColumnReader::Int96ColumnReader(ColumnReaderImpl::new(col_descr, col_page_reader))
            }
            Type::FLOAT => {
                ColumnReader::FloatColumnReader(ColumnReaderImpl::new(col_descr, col_page_reader))
            }
            Type::DOUBLE => {
                ColumnReader::DoubleColumnReader(ColumnReaderImpl::new(col_descr, col_page_reader))
            }
            Type::BYTE_ARRAY => ColumnReader::ByteArrayColumnReader(ColumnReaderImpl::new(
                col_descr,
                col_page_reader,
            )),
            Type::FIXED_LEN_BYTE_ARRAY => ColumnReader::FixedLenByteArrayColumnReader(
                ColumnReaderImpl::new(col_descr, col_page_reader),
            ),
        };
        Ok(col_reader)
    }

    /// Get the bloom filter for the `i`th column chunk, if present and if the reader was
    /// configured to read bloom filters.
    fn get_column_bloom_filter(&self, i: usize) -> Option<&Sbbf>;

    /// Get an iterator over the rows in this row group, see [`RowIter`] for caveats.
    ///
    /// The projected schema can be a subset of, or equal to, the file schema; when it is
    /// `None`, the full file schema is assumed.
    fn get_row_iter(&self, projection: Option<SchemaType>) -> Result<RowIter<'_>>;
}

// ----------------------------------------------------------------------
// Iterator

/// Implementation of a page iterator for a Parquet file.
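///
/// # Example
///
/// A minimal sketch yielding one [`PageReader`] per row group for a single
/// column (the path `data.parquet` is a placeholder):
///
/// ```no_run
/// use std::fs::File;
/// use std::sync::Arc;
/// use parquet::column::page::PageReader;
/// use parquet::file::reader::{FilePageIterator, FileReader, SerializedFileReader};
///
/// let file = File::open("data.parquet").unwrap();
/// let reader: Arc<dyn FileReader> = Arc::new(SerializedFileReader::new(file).unwrap());
///
/// // Iterate over the pages of column 0 across all row groups.
/// for page_reader in FilePageIterator::new(0, reader).unwrap() {
///     let mut page_reader = page_reader.unwrap();
///     while let Some(page) = page_reader.get_next_page().unwrap() {
///         println!("page with {} values", page.num_values());
///     }
/// }
/// ```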
pub struct FilePageIterator {
    column_index: usize,
    row_group_indices: Box<dyn Iterator<Item = usize> + Send>,
    file_reader: Arc<dyn FileReader>,
}

impl FilePageIterator {
    /// Creates a page iterator for all row groups in the file.
    pub fn new(column_index: usize, file_reader: Arc<dyn FileReader>) -> Result<Self> {
        let num_row_groups = file_reader.metadata().num_row_groups();

        let row_group_indices = Box::new(0..num_row_groups);

        Self::with_row_groups(column_index, row_group_indices, file_reader)
    }

    /// Creates a page iterator from a Parquet file reader over only the given row groups.
    pub fn with_row_groups(
        column_index: usize,
        row_group_indices: Box<dyn Iterator<Item = usize> + Send>,
        file_reader: Arc<dyn FileReader>,
    ) -> Result<Self> {
        // Check that column_index is valid
        let num_columns = file_reader
            .metadata()
            .file_metadata()
            .schema_descr()
            .num_columns();

        if column_index >= num_columns {
            return Err(ParquetError::IndexOutOfBound(column_index, num_columns));
        }

        // We don't check the row group indices here because the iterator may be infinite
        Ok(Self {
            column_index,
            row_group_indices,
            file_reader,
        })
    }
}

impl Iterator for FilePageIterator {
    type Item = Result<Box<dyn PageReader>>;

    fn next(&mut self) -> Option<Result<Box<dyn PageReader>>> {
        self.row_group_indices.next().map(|row_group_index| {
            self.file_reader
                .get_row_group(row_group_index)
                .and_then(|r| r.get_column_page_reader(self.column_index))
        })
    }
}

impl PageIterator for FilePageIterator {}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_bytes_chunk_reader_get_read_out_of_bounds() {
        let data = Bytes::from(vec![0, 1, 2, 3]);
        let err = data.get_read(5).unwrap_err();
        assert_eq!(
            err.to_string(),
            "EOF: Expected to read at offset 5, while file has length 4"
        );
    }

    #[test]
    fn test_bytes_chunk_reader_get_bytes_out_of_bounds() {
        let data = Bytes::from(vec![0, 1, 2, 3]);
        let err = data.get_bytes(5, 1).unwrap_err();
        assert_eq!(
            err.to_string(),
            "EOF: Expected to read 1 bytes at offset 5, while file has length 4"
        );

        let err = data.get_bytes(2, 3).unwrap_err();
        assert_eq!(
            err.to_string(),
            "EOF: Expected to read 3 bytes at offset 2, while file has length 4"
        );
    }
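
    // In-bounds companion check for the `Bytes` implementation above: reads
    // inside the buffer return exactly the requested range.
    #[test]
    fn test_bytes_chunk_reader_get_bytes_in_bounds() {
        let data = Bytes::from(vec![0, 1, 2, 3]);
        let bytes = data.get_bytes(1, 2).unwrap();
        assert_eq!(bytes.as_ref(), &[1u8, 2]);
    }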
}