use crate::bloom_filter::Sbbf;
use crate::file::metadata::thrift::PageHeader;
use crate::file::page_index::column_index::ColumnIndexMetaData;
use crate::file::page_index::offset_index::OffsetIndexMetaData;
use crate::parquet_thrift::{ThriftCompactOutputProtocol, WriteThrift};
use std::fmt::Debug;
use std::io::{BufWriter, IoSlice, Read};
use std::{io::Write, sync::Arc};

use crate::column::page_encryption::PageEncryptor;
use crate::column::writer::{ColumnCloseResult, ColumnWriterImpl, get_typed_column_writer_mut};
use crate::column::{
    page::{CompressedPage, PageWriteSpec, PageWriter},
    writer::{ColumnWriter, get_column_writer},
};
use crate::data_type::DataType;
#[cfg(feature = "encryption")]
use crate::encryption::encrypt::{
    FileEncryptionProperties, FileEncryptor, get_column_crypto_metadata,
};
use crate::errors::{ParquetError, Result};
#[cfg(feature = "encryption")]
use crate::file::PARQUET_MAGIC_ENCR_FOOTER;
use crate::file::properties::{BloomFilterPosition, WriterPropertiesPtr};
use crate::file::reader::ChunkReader;
use crate::file::{PARQUET_MAGIC, metadata::*};
use crate::schema::types::{ColumnDescPtr, SchemaDescPtr, SchemaDescriptor, TypePtr};

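/// A wrapper around a [`Write`] that keeps track of the number of bytes written.
/// The inner writer is wrapped in a [`BufWriter`] to reduce the cost of small writes.
///
/// A minimal usage sketch (assuming this module is consumed as the public
/// `parquet` crate):
///
/// ```no_run
/// use std::io::Write;
/// use parquet::file::writer::TrackedWrite;
///
/// let mut writer = TrackedWrite::new(Vec::<u8>::new());
/// writer.write_all(b"parquet").unwrap();
/// assert_eq!(writer.bytes_written(), 7);
/// let buf = writer.into_inner().unwrap();
/// assert_eq!(buf, b"parquet");
/// ```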
pub struct TrackedWrite<W: Write> {
    inner: BufWriter<W>,
    bytes_written: usize,
}

impl<W: Write> TrackedWrite<W> {
    /// Create a new [`TrackedWrite`] from a [`Write`]
    pub fn new(inner: W) -> Self {
        let buf_write = BufWriter::new(inner);
        Self {
            inner: buf_write,
            bytes_written: 0,
        }
    }

    /// Returns the number of bytes written to this instance
    pub fn bytes_written(&self) -> usize {
        self.bytes_written
    }

    /// Returns a reference to the underlying writer
    pub fn inner(&self) -> &W {
        self.inner.get_ref()
    }

    /// Returns a mutable reference to the underlying writer.
    ///
    /// Note: writing directly to the underlying writer bypasses the byte
    /// accounting, after which [`Self::bytes_written`] no longer reflects
    /// the true write position.
    pub fn inner_mut(&mut self) -> &mut W {
        self.inner.get_mut()
    }

    /// Returns the underlying writer, flushing any buffered data
    pub fn into_inner(self) -> Result<W> {
        self.inner.into_inner().map_err(|err| {
            ParquetError::General(format!("failed to get inner writer: {err}"))
        })
    }
}

impl<W: Write> Write for TrackedWrite<W> {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let bytes = self.inner.write(buf)?;
        self.bytes_written += bytes;
        Ok(bytes)
    }

    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> std::io::Result<usize> {
        let bytes = self.inner.write_vectored(bufs)?;
        self.bytes_written += bytes;
        Ok(bytes)
    }

    fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> {
        self.inner.write_all(buf)?;
        self.bytes_written += buf.len();

        Ok(())
    }

    fn flush(&mut self) -> std::io::Result<()> {
        self.inner.flush()
    }
}

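/// Callback invoked on closing a column chunk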
pub type OnCloseColumnChunk<'a> = Box<dyn FnOnce(ColumnCloseResult) -> Result<()> + 'a>;

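/// Callback invoked on closing a row group; the arguments are:
///
/// - the writer
/// - the row group metadata
/// - the bloom filter for each column chunk
/// - the column index for each column chunk
/// - the offset index for each column chunk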
pub type OnCloseRowGroup<'a, W> = Box<
    dyn FnOnce(
            &'a mut TrackedWrite<W>,
            RowGroupMetaData,
            Vec<Option<Sbbf>>,
            Vec<Option<ColumnIndexMetaData>>,
            Vec<Option<OffsetIndexMetaData>>,
        ) -> Result<()>
        + 'a
        + Send,
>;

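/// Parquet file writer API.
///
/// Provides methods to write row groups sequentially.
///
/// The main workflow is:
/// - create a file writer, which opens a new file and writes the magic bytes,
/// - request a new row group writer by calling `next_row_group`,
/// - once finished writing a row group, close the row group writer by calling `close`,
/// - write subsequent row groups, if necessary,
/// - after all row groups have been written, close the file writer using the `close` method.
///
/// A minimal end-to-end sketch, mirroring the round-trip tests below (the
/// `parquet` crate paths are an assumption if this module is re-exported
/// elsewhere):
///
/// ```no_run
/// use std::sync::Arc;
/// use parquet::data_type::Int32Type;
/// use parquet::file::properties::WriterProperties;
/// use parquet::file::writer::SerializedFileWriter;
/// use parquet::schema::parser::parse_message_type;
///
/// let schema = Arc::new(
///     parse_message_type("message schema { REQUIRED INT32 col1; }").unwrap(),
/// );
/// let props = Arc::new(WriterProperties::builder().build());
/// let mut writer = SerializedFileWriter::new(Vec::<u8>::new(), schema, props).unwrap();
///
/// let mut row_group = writer.next_row_group().unwrap();
/// if let Some(mut col_writer) = row_group.next_column().unwrap() {
///     col_writer
///         .typed::<Int32Type>()
///         .write_batch(&[1, 2, 3], None, None)
///         .unwrap();
///     col_writer.close().unwrap();
/// }
/// row_group.close().unwrap();
/// writer.close().unwrap();
/// ```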
pub struct SerializedFileWriter<W: Write> {
    buf: TrackedWrite<W>,
    descr: SchemaDescPtr,
    props: WriterPropertiesPtr,
    row_groups: Vec<RowGroupMetaData>,
    bloom_filters: Vec<Vec<Option<Sbbf>>>,
    column_indexes: Vec<Vec<Option<ColumnIndexMetaData>>>,
    offset_indexes: Vec<Vec<Option<OffsetIndexMetaData>>>,
    row_group_index: usize,
    kv_metadatas: Vec<KeyValue>,
    finished: bool,
    #[cfg(feature = "encryption")]
    file_encryptor: Option<Arc<FileEncryptor>>,
}

impl<W: Write> Debug for SerializedFileWriter<W> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Skip the writer and the accumulated per-row-group state,
        // which are not useful to display
        f.debug_struct("SerializedFileWriter")
            .field("descr", &self.descr)
            .field("row_group_index", &self.row_group_index)
            .field("kv_metadatas", &self.kv_metadatas)
            .finish_non_exhaustive()
    }
}

impl<W: Write + Send> SerializedFileWriter<W> {
    /// Creates a new file writer that writes to `buf`.
    pub fn new(buf: W, schema: TypePtr, properties: WriterPropertiesPtr) -> Result<Self> {
        let mut buf = TrackedWrite::new(buf);

        let schema_descriptor = SchemaDescriptor::new(schema.clone());

        #[cfg(feature = "encryption")]
        let file_encryptor = Self::get_file_encryptor(&properties, &schema_descriptor)?;

        Self::start_file(&properties, &mut buf)?;
        Ok(Self {
            buf,
            descr: Arc::new(schema_descriptor),
            props: properties,
            row_groups: vec![],
            bloom_filters: vec![],
            column_indexes: Vec::new(),
            offset_indexes: Vec::new(),
            row_group_index: 0,
            kv_metadatas: Vec::new(),
            finished: false,
            #[cfg(feature = "encryption")]
            file_encryptor,
        })
    }

    #[cfg(feature = "encryption")]
    fn get_file_encryptor(
        properties: &WriterPropertiesPtr,
        schema_descriptor: &SchemaDescriptor,
    ) -> Result<Option<Arc<FileEncryptor>>> {
        if let Some(file_encryption_properties) = properties.file_encryption_properties() {
            file_encryption_properties.validate_encrypted_column_names(schema_descriptor)?;

            Ok(Some(Arc::new(FileEncryptor::new(Arc::clone(
                file_encryption_properties,
            ))?)))
        } else {
            Ok(None)
        }
    }

    /// Creates a new row group from this file writer.
    ///
    /// Row groups are written sequentially; the previous row group writer must
    /// be closed before the next one is requested. Returns an error if the file
    /// already contains `i16::MAX` row groups.
    pub fn next_row_group(&mut self) -> Result<SerializedRowGroupWriter<'_, W>> {
        self.assert_previous_writer_closed()?;
        let ordinal = self.row_group_index;

        let ordinal: i16 = ordinal.try_into().map_err(|_| {
            ParquetError::General(format!(
                "Parquet does not support more than {} row groups per file (currently: {})",
                i16::MAX,
                ordinal
            ))
        })?;

        self.row_group_index = self
            .row_group_index
            .checked_add(1)
            .expect("SerializedFileWriter::row_group_index overflowed");

        let bloom_filter_position = self.properties().bloom_filter_position();
        let row_groups = &mut self.row_groups;
        let row_bloom_filters = &mut self.bloom_filters;
        let row_column_indexes = &mut self.column_indexes;
        let row_offset_indexes = &mut self.offset_indexes;
        let on_close = move |buf,
                             mut metadata,
                             row_group_bloom_filter,
                             row_group_column_index,
                             row_group_offset_index| {
            row_bloom_filters.push(row_group_bloom_filter);
            row_column_indexes.push(row_group_column_index);
            row_offset_indexes.push(row_group_offset_index);
            match bloom_filter_position {
                BloomFilterPosition::AfterRowGroup => {
                    write_bloom_filters(buf, row_bloom_filters, &mut metadata)?
                }
                BloomFilterPosition::End => (),
            };
            row_groups.push(metadata);
            Ok(())
        };

        let row_group_writer = SerializedRowGroupWriter::new(
            self.descr.clone(),
            self.props.clone(),
            &mut self.buf,
            ordinal,
            Some(Box::new(on_close)),
        );
        #[cfg(feature = "encryption")]
        let row_group_writer = row_group_writer.with_file_encryptor(self.file_encryptor.clone());

        Ok(row_group_writer)
    }

    /// Returns metadata for any flushed row groups
    pub fn flushed_row_groups(&self) -> &[RowGroupMetaData] {
        &self.row_groups
    }

    /// Closes and finalises the file writer, returning the file metadata.
    ///
    /// Unlike [`Self::close`], this does not consume `self`, allowing the
    /// underlying writer to be accessed after the file has been finished.
    pub fn finish(&mut self) -> Result<ParquetMetaData> {
        self.assert_previous_writer_closed()?;
        let metadata = self.write_metadata()?;
        self.buf.flush()?;
        Ok(metadata)
    }

    /// Closes and finalises the file writer, returning the file metadata.
    pub fn close(mut self) -> Result<ParquetMetaData> {
        self.finish()
    }

    /// Writes the magic bytes at the beginning of the file.
    #[cfg(not(feature = "encryption"))]
    fn start_file(_properties: &WriterPropertiesPtr, buf: &mut TrackedWrite<W>) -> Result<()> {
        buf.write_all(get_file_magic())?;
        Ok(())
    }

    /// Writes the magic bytes at the beginning of the file, the variant
    /// depending on whether the footer will be encrypted.
    #[cfg(feature = "encryption")]
    fn start_file(properties: &WriterPropertiesPtr, buf: &mut TrackedWrite<W>) -> Result<()> {
        let magic = get_file_magic(properties.file_encryption_properties.as_ref());

        buf.write_all(magic)?;
        Ok(())
    }

    /// Assembles and writes the metadata at the end of the file.
    fn write_metadata(&mut self) -> Result<ParquetMetaData> {
        self.finished = true;

        // write out any remaining bloom filters after all row groups
        for row_group in &mut self.row_groups {
            write_bloom_filters(&mut self.buf, &mut self.bloom_filters, row_group)?;
        }

        let key_value_metadata = match self.props.key_value_metadata() {
            Some(kv) => Some(kv.iter().chain(&self.kv_metadatas).cloned().collect()),
            None if self.kv_metadatas.is_empty() => None,
            None => Some(self.kv_metadatas.clone()),
        };

        let row_groups = std::mem::take(&mut self.row_groups);
        let column_indexes = std::mem::take(&mut self.column_indexes);
        let offset_indexes = std::mem::take(&mut self.offset_indexes);

        let mut encoder = ThriftMetadataWriter::new(
            &mut self.buf,
            &self.descr,
            row_groups,
            Some(self.props.created_by().to_string()),
            self.props.writer_version().as_num(),
        );

        #[cfg(feature = "encryption")]
        {
            encoder = encoder.with_file_encryptor(self.file_encryptor.clone());
        }

        if let Some(key_value_metadata) = key_value_metadata {
            encoder = encoder.with_key_value_metadata(key_value_metadata)
        }

        encoder = encoder.with_column_indexes(column_indexes);
        if !self.props.offset_index_disabled() {
            encoder = encoder.with_offset_indexes(offset_indexes);
        }
        encoder.finish()
    }

    #[inline]
    fn assert_previous_writer_closed(&self) -> Result<()> {
        if self.finished {
            return Err(general_err!("SerializedFileWriter already finished"));
        }

        if self.row_group_index != self.row_groups.len() {
            Err(general_err!("Previous row group writer was not closed"))
        } else {
            Ok(())
        }
    }

    /// Appends a [`KeyValue`] to the file writer's metadata.
    pub fn append_key_value_metadata(&mut self, kv_metadata: KeyValue) {
        self.kv_metadatas.push(kv_metadata);
    }

    /// Returns a reference to the schema descriptor.
    pub fn schema_descr(&self) -> &SchemaDescriptor {
        &self.descr
    }

    #[cfg(feature = "arrow")]
    pub(crate) fn schema_descr_ptr(&self) -> &SchemaDescPtr {
        &self.descr
    }

    /// Returns a reference to the writer properties.
    pub fn properties(&self) -> &WriterPropertiesPtr {
        &self.props
    }

    /// Returns a reference to the underlying writer.
    pub fn inner(&self) -> &W {
        self.buf.inner()
    }

    /// Writes the given buffer of bytes to the underlying writer, tracking
    /// the bytes written.
    ///
    /// Note: this bypasses the normal row group and page APIs; callers are
    /// responsible for keeping the resulting file valid.
    pub fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> {
        self.buf.write_all(buf)
    }

    /// Flushes any buffered data to the underlying writer.
    pub fn flush(&mut self) -> std::io::Result<()> {
        self.buf.flush()
    }

    /// Returns a mutable reference to the underlying writer.
    ///
    /// Writing directly to the returned writer bypasses the byte accounting
    /// of [`TrackedWrite`] and can corrupt the file being written.
    pub fn inner_mut(&mut self) -> &mut W {
        self.buf.inner_mut()
    }

    /// Writes the file footer and returns the underlying writer.
    pub fn into_inner(mut self) -> Result<W> {
        self.assert_previous_writer_closed()?;
        let _ = self.write_metadata()?;

        self.buf.into_inner()
    }

    /// Returns the number of bytes written to this instance.
    pub fn bytes_written(&self) -> usize {
        self.buf.bytes_written()
    }

    /// Returns the file encryptor, if any.
    #[cfg(feature = "encryption")]
    pub(crate) fn file_encryptor(&self) -> Option<Arc<FileEncryptor>> {
        self.file_encryptor.clone()
    }
}

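/// Serializes the bloom filters for the given row group to `buf`, updating
/// each column chunk's metadata with the bloom filter's offset and length.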
fn write_bloom_filters<W: Write + Send>(
    buf: &mut TrackedWrite<W>,
    bloom_filters: &mut [Vec<Option<Sbbf>>],
    row_group: &mut RowGroupMetaData,
) -> Result<()> {
    let row_group_idx: u16 = row_group
        .ordinal()
        .expect("Missing row group ordinal")
        .try_into()
        .map_err(|_| {
            ParquetError::General(format!(
                "Negative row group ordinal: {}",
                row_group.ordinal().unwrap()
            ))
        })?;
    let row_group_idx = row_group_idx as usize;
    for (column_idx, column_chunk) in row_group.columns_mut().iter_mut().enumerate() {
        if let Some(bloom_filter) = bloom_filters[row_group_idx][column_idx].take() {
            let start_offset = buf.bytes_written();
            bloom_filter.write(&mut *buf)?;
            let end_offset = buf.bytes_written();
            // set offset and length for the bloom filter
            *column_chunk = column_chunk
                .clone()
                .into_builder()
                .set_bloom_filter_offset(Some(start_offset as i64))
                .set_bloom_filter_length(Some((end_offset - start_offset) as i32))
                .build()?;
        }
    }
    Ok(())
}

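/// Parquet row group writer API.
///
/// Provides methods to access column writers in an iterator-like fashion and
/// to close the row group once all columns have been written.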
pub struct SerializedRowGroupWriter<'a, W: Write> {
    descr: SchemaDescPtr,
    props: WriterPropertiesPtr,
    buf: &'a mut TrackedWrite<W>,
    total_rows_written: Option<u64>,
    total_bytes_written: u64,
    total_uncompressed_bytes: i64,
    column_index: usize,
    row_group_metadata: Option<RowGroupMetaDataPtr>,
    column_chunks: Vec<ColumnChunkMetaData>,
    bloom_filters: Vec<Option<Sbbf>>,
    column_indexes: Vec<Option<ColumnIndexMetaData>>,
    offset_indexes: Vec<Option<OffsetIndexMetaData>>,
    row_group_index: i16,
    file_offset: i64,
    on_close: Option<OnCloseRowGroup<'a, W>>,
    #[cfg(feature = "encryption")]
    file_encryptor: Option<Arc<FileEncryptor>>,
}

impl<'a, W: Write + Send> SerializedRowGroupWriter<'a, W> {
    /// Creates a new `SerializedRowGroupWriter` with:
    ///
    /// - `schema_descr` - the schema to write
    /// - `properties` - writer properties
    /// - `buf` - the buffer to write data to
    /// - `row_group_index` - row group index in this parquet file
    /// - `on_close` - an optional callback invoked on closing the row group
    pub fn new(
        schema_descr: SchemaDescPtr,
        properties: WriterPropertiesPtr,
        buf: &'a mut TrackedWrite<W>,
        row_group_index: i16,
        on_close: Option<OnCloseRowGroup<'a, W>>,
    ) -> Self {
        let num_columns = schema_descr.num_columns();
        let file_offset = buf.bytes_written() as i64;
        Self {
            buf,
            row_group_index,
            file_offset,
            on_close,
            total_rows_written: None,
            descr: schema_descr,
            props: properties,
            column_index: 0,
            row_group_metadata: None,
            column_chunks: Vec::with_capacity(num_columns),
            bloom_filters: Vec::with_capacity(num_columns),
            column_indexes: Vec::with_capacity(num_columns),
            offset_indexes: Vec::with_capacity(num_columns),
            total_bytes_written: 0,
            total_uncompressed_bytes: 0,
            #[cfg(feature = "encryption")]
            file_encryptor: None,
        }
    }

    /// Sets the file encryptor to use for columns in this row group.
    #[cfg(feature = "encryption")]
    pub(crate) fn with_file_encryptor(
        mut self,
        file_encryptor: Option<Arc<FileEncryptor>>,
    ) -> Self {
        self.file_encryptor = file_encryptor;
        self
    }

    /// Advances `self.column_index`, returning the next [`ColumnDescPtr`] if any.
    fn next_column_desc(&mut self) -> Option<ColumnDescPtr> {
        let ret = self.descr.columns().get(self.column_index)?.clone();
        self.column_index += 1;
        Some(ret)
    }

    /// Returns the underlying buffer together with an [`OnCloseColumnChunk`]
    /// callback that records the result of closing the next column chunk.
    fn get_on_close(&mut self) -> (&mut TrackedWrite<W>, OnCloseColumnChunk<'_>) {
        let total_bytes_written = &mut self.total_bytes_written;
        let total_uncompressed_bytes = &mut self.total_uncompressed_bytes;
        let total_rows_written = &mut self.total_rows_written;
        let column_chunks = &mut self.column_chunks;
        let column_indexes = &mut self.column_indexes;
        let offset_indexes = &mut self.offset_indexes;
        let bloom_filters = &mut self.bloom_filters;

        let on_close = |r: ColumnCloseResult| {
            // Update row group writer metrics
            *total_bytes_written += r.bytes_written;
            *total_uncompressed_bytes += r.metadata.uncompressed_size();
            column_chunks.push(r.metadata);
            bloom_filters.push(r.bloom_filter);
            column_indexes.push(r.column_index);
            offset_indexes.push(r.offset_index);

            if let Some(rows) = *total_rows_written {
                if rows != r.rows_written {
                    return Err(general_err!(
                        "Incorrect number of rows, expected {} != {} rows",
                        rows,
                        r.rows_written
                    ));
                }
            } else {
                *total_rows_written = Some(r.rows_written);
            }

            Ok(())
        };
        (self.buf, Box::new(on_close))
    }

    pub(crate) fn next_column_with_factory<'b, F, C>(&'b mut self, factory: F) -> Result<Option<C>>
    where
        F: FnOnce(
            ColumnDescPtr,
            WriterPropertiesPtr,
            Box<dyn PageWriter + 'b>,
            OnCloseColumnChunk<'b>,
        ) -> Result<C>,
    {
        self.assert_previous_writer_closed()?;

        let encryptor_context = self.get_page_encryptor_context();

        Ok(match self.next_column_desc() {
            Some(column) => {
                let props = self.props.clone();
                let (buf, on_close) = self.get_on_close();

                let page_writer = SerializedPageWriter::new(buf);
                let page_writer =
                    Self::set_page_writer_encryptor(&column, encryptor_context, page_writer)?;

                Some(factory(
                    column,
                    props,
                    Box::new(page_writer),
                    Box::new(on_close),
                )?)
            }
            None => None,
        })
    }

    /// Returns the next column writer, if available; otherwise returns `Ok(None)`.
    ///
    /// To request the next column writer, the previous one must be closed.
    pub fn next_column(&mut self) -> Result<Option<SerializedColumnWriter<'_>>> {
        self.next_column_with_factory(|descr, props, page_writer, on_close| {
            let column_writer = get_column_writer(descr, props, page_writer);
            Ok(SerializedColumnWriter::new(column_writer, Some(on_close)))
        })
    }

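    /// Append an encoded column chunk from another source without decoding it.
    ///
    /// This can be used for efficiently concatenating or projecting Parquet
    /// data, or encoding Parquet data to temporary in-memory buffers. The
    /// column chunk bytes are copied verbatim from `reader`, and the chunk's
    /// metadata is rewritten so that its page offsets point at the new
    /// locations in this file.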
    pub fn append_column<R: ChunkReader>(
        &mut self,
        reader: &R,
        mut close: ColumnCloseResult,
    ) -> Result<()> {
        self.assert_previous_writer_closed()?;
        let desc = self
            .next_column_desc()
            .ok_or_else(|| general_err!("exhausted columns in SerializedRowGroupWriter"))?;

        let metadata = close.metadata;

        if metadata.column_descr() != desc.as_ref() {
            return Err(general_err!(
                "column descriptor mismatch, expected {:?} got {:?}",
                desc,
                metadata.column_descr()
            ));
        }

        let src_dictionary_offset = metadata.dictionary_page_offset();
        let src_data_offset = metadata.data_page_offset();
        let src_offset = src_dictionary_offset.unwrap_or(src_data_offset);
        let src_length = metadata.compressed_size();

        let write_offset = self.buf.bytes_written();
        let mut read = reader.get_read(src_offset as _)?.take(src_length as _);
        let write_length = std::io::copy(&mut read, &mut self.buf)?;

        if src_length as u64 != write_length {
            return Err(general_err!(
                "Failed to splice column data, expected {src_length} got {write_length}"
            ));
        }

        let map_offset = |x| x - src_offset + write_offset as i64;
        let mut builder = ColumnChunkMetaData::builder(metadata.column_descr_ptr())
            .set_compression(metadata.compression())
            .set_encodings_mask(*metadata.encodings_mask())
            .set_total_compressed_size(metadata.compressed_size())
            .set_total_uncompressed_size(metadata.uncompressed_size())
            .set_num_values(metadata.num_values())
            .set_data_page_offset(map_offset(src_data_offset))
            .set_dictionary_page_offset(src_dictionary_offset.map(map_offset))
            .set_unencoded_byte_array_data_bytes(metadata.unencoded_byte_array_data_bytes());

        if let Some(rep_hist) = metadata.repetition_level_histogram() {
            builder = builder.set_repetition_level_histogram(Some(rep_hist.clone()))
        }
        if let Some(def_hist) = metadata.definition_level_histogram() {
            builder = builder.set_definition_level_histogram(Some(def_hist.clone()))
        }
        if let Some(statistics) = metadata.statistics() {
            builder = builder.set_statistics(statistics.clone())
        }
        if let Some(geo_statistics) = metadata.geo_statistics() {
            builder = builder.set_geo_statistics(Box::new(geo_statistics.clone()))
        }
        if let Some(page_encoding_stats) = metadata.page_encoding_stats() {
            builder = builder.set_page_encoding_stats(page_encoding_stats.clone())
        }
        builder = self.set_column_crypto_metadata(builder, &metadata);
        close.metadata = builder.build()?;

        if let Some(offsets) = close.offset_index.as_mut() {
            for location in &mut offsets.page_locations {
                location.offset = map_offset(location.offset)
            }
        }

        let (_, on_close) = self.get_on_close();
        on_close(close)
    }

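    /// Closes this row group writer, returning the row group metadata.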
    pub fn close(mut self) -> Result<RowGroupMetaDataPtr> {
        if self.row_group_metadata.is_none() {
            self.assert_previous_writer_closed()?;

            let column_chunks = std::mem::take(&mut self.column_chunks);
            let row_group_metadata = RowGroupMetaData::builder(self.descr.clone())
                .set_column_metadata(column_chunks)
                .set_total_byte_size(self.total_uncompressed_bytes)
                .set_num_rows(self.total_rows_written.unwrap_or(0) as i64)
                .set_sorting_columns(self.props.sorting_columns().cloned())
                .set_ordinal(self.row_group_index)
                .set_file_offset(self.file_offset)
                .build()?;

            self.row_group_metadata = Some(Arc::new(row_group_metadata.clone()));

            if let Some(on_close) = self.on_close.take() {
                on_close(
                    self.buf,
                    row_group_metadata,
                    self.bloom_filters,
                    self.column_indexes,
                    self.offset_indexes,
                )?
            }
        }

        let metadata = self.row_group_metadata.as_ref().unwrap().clone();
        Ok(metadata)
    }

    #[cfg(feature = "encryption")]
    fn set_column_crypto_metadata(
        &self,
        builder: ColumnChunkMetaDataBuilder,
        metadata: &ColumnChunkMetaData,
    ) -> ColumnChunkMetaDataBuilder {
        if let Some(file_encryptor) = self.file_encryptor.as_ref() {
            builder.set_column_crypto_metadata(get_column_crypto_metadata(
                file_encryptor.properties(),
                &metadata.column_descr_ptr(),
            ))
        } else {
            builder
        }
    }

    #[cfg(feature = "encryption")]
    fn get_page_encryptor_context(&self) -> PageEncryptorContext {
        PageEncryptorContext {
            file_encryptor: self.file_encryptor.clone(),
            row_group_index: self.row_group_index as usize,
            column_index: self.column_index,
        }
    }

    #[cfg(feature = "encryption")]
    fn set_page_writer_encryptor<'b>(
        column: &ColumnDescPtr,
        context: PageEncryptorContext,
        page_writer: SerializedPageWriter<'b, W>,
    ) -> Result<SerializedPageWriter<'b, W>> {
        let page_encryptor = PageEncryptor::create_if_column_encrypted(
            &context.file_encryptor,
            context.row_group_index,
            context.column_index,
            &column.path().string(),
        )?;

        Ok(page_writer.with_page_encryptor(page_encryptor))
    }

    #[cfg(not(feature = "encryption"))]
    fn set_column_crypto_metadata(
        &self,
        builder: ColumnChunkMetaDataBuilder,
        _metadata: &ColumnChunkMetaData,
    ) -> ColumnChunkMetaDataBuilder {
        builder
    }

    #[cfg(not(feature = "encryption"))]
    fn get_page_encryptor_context(&self) -> PageEncryptorContext {
        PageEncryptorContext {}
    }

    #[cfg(not(feature = "encryption"))]
    fn set_page_writer_encryptor<'b>(
        _column: &ColumnDescPtr,
        _context: PageEncryptorContext,
        page_writer: SerializedPageWriter<'b, W>,
    ) -> Result<SerializedPageWriter<'b, W>> {
        Ok(page_writer)
    }

    #[inline]
    fn assert_previous_writer_closed(&self) -> Result<()> {
        if self.column_index != self.column_chunks.len() {
            Err(general_err!("Previous column writer was not closed"))
        } else {
            Ok(())
        }
    }
}

/// Context needed to create a [`PageEncryptor`] for a column chunk.
#[cfg(feature = "encryption")]
struct PageEncryptorContext {
    file_encryptor: Option<Arc<FileEncryptor>>,
    row_group_index: usize,
    column_index: usize,
}

#[cfg(not(feature = "encryption"))]
struct PageEncryptorContext {}

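/// A wrapper around a [`ColumnWriter`] that invokes a callback on [`Self::close`].
///
/// A short sketch of typed access inside a row group (hedged; it assumes an
/// open `SerializedRowGroupWriter` named `row_group` and an INT32 column):
///
/// ```no_run
/// # use parquet::data_type::Int32Type;
/// # use parquet::errors::Result;
/// # use parquet::file::writer::SerializedRowGroupWriter;
/// # fn example<W: std::io::Write + Send>(row_group: &mut SerializedRowGroupWriter<W>) -> Result<()> {
/// while let Some(mut col_writer) = row_group.next_column()? {
///     // `typed` returns a strongly typed writer for the column's physical type
///     col_writer
///         .typed::<Int32Type>()
///         .write_batch(&[1, 2, 3], None, None)?;
///     col_writer.close()?;
/// }
/// # Ok(())
/// # }
/// ```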
pub struct SerializedColumnWriter<'a> {
    inner: ColumnWriter<'a>,
    on_close: Option<OnCloseColumnChunk<'a>>,
}

impl<'a> SerializedColumnWriter<'a> {
    /// Creates a new [`SerializedColumnWriter`] from a [`ColumnWriter`] and an
    /// optional callback to be invoked on [`Self::close`].
    pub fn new(inner: ColumnWriter<'a>, on_close: Option<OnCloseColumnChunk<'a>>) -> Self {
        Self { inner, on_close }
    }

    /// Returns a reference to the untyped [`ColumnWriter`].
    pub fn untyped(&mut self) -> &mut ColumnWriter<'a> {
        &mut self.inner
    }

    /// Returns a [`ColumnWriterImpl`] for the column's physical type `T`.
    ///
    /// Panics if `T` does not match the physical type of the column.
    pub fn typed<T: DataType>(&mut self) -> &mut ColumnWriterImpl<'a, T> {
        get_typed_column_writer_mut(&mut self.inner)
    }

    /// Closes this column writer, invoking the `on_close` callback if provided.
    pub fn close(mut self) -> Result<()> {
        let r = self.inner.close()?;
        if let Some(on_close) = self.on_close.take() {
            on_close(r)?
        }

        Ok(())
    }
}

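/// A [`PageWriter`] for a single column chunk that serializes pages to an
/// underlying [`TrackedWrite`] sink.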
pub struct SerializedPageWriter<'a, W: Write> {
    sink: &'a mut TrackedWrite<W>,
    #[cfg(feature = "encryption")]
    page_encryptor: Option<PageEncryptor>,
}

impl<'a, W: Write> SerializedPageWriter<'a, W> {
    /// Creates a new page writer that writes to `sink`.
    pub fn new(sink: &'a mut TrackedWrite<W>) -> Self {
        Self {
            sink,
            #[cfg(feature = "encryption")]
            page_encryptor: None,
        }
    }

    /// Serializes a page header into the sink as a Thrift structure,
    /// returning the number of bytes written.
    #[inline]
    fn serialize_page_header(&mut self, header: PageHeader) -> Result<usize> {
        let start_pos = self.sink.bytes_written();
        match self.page_encryptor_and_sink_mut() {
            Some((page_encryptor, sink)) => {
                page_encryptor.encrypt_page_header(&header, sink)?;
            }
            None => {
                let mut protocol = ThriftCompactOutputProtocol::new(&mut self.sink);
                header.write_thrift(&mut protocol)?;
            }
        }
        Ok(self.sink.bytes_written() - start_pos)
    }
}

#[cfg(feature = "encryption")]
impl<'a, W: Write> SerializedPageWriter<'a, W> {
    fn with_page_encryptor(mut self, page_encryptor: Option<PageEncryptor>) -> Self {
        self.page_encryptor = page_encryptor;
        self
    }

    fn page_encryptor_mut(&mut self) -> Option<&mut PageEncryptor> {
        self.page_encryptor.as_mut()
    }

    fn page_encryptor_and_sink_mut(
        &mut self,
    ) -> Option<(&mut PageEncryptor, &mut &'a mut TrackedWrite<W>)> {
        self.page_encryptor.as_mut().map(|pe| (pe, &mut self.sink))
    }
}

#[cfg(not(feature = "encryption"))]
impl<'a, W: Write> SerializedPageWriter<'a, W> {
    fn page_encryptor_mut(&mut self) -> Option<&mut PageEncryptor> {
        None
    }

    fn page_encryptor_and_sink_mut(
        &mut self,
    ) -> Option<(&mut PageEncryptor, &mut &'a mut TrackedWrite<W>)> {
        None
    }
}

impl<W: Write + Send> PageWriter for SerializedPageWriter<'_, W> {
    fn write_page(&mut self, page: CompressedPage) -> Result<PageWriteSpec> {
        let page = match self.page_encryptor_mut() {
            Some(page_encryptor) => page_encryptor.encrypt_compressed_page(page)?,
            None => page,
        };

        let page_type = page.page_type();
        let start_pos = self.sink.bytes_written() as u64;

        let page_header = page.to_thrift_header()?;
        let header_size = self.serialize_page_header(page_header)?;

        self.sink.write_all(page.data())?;

        let mut spec = PageWriteSpec::new();
        spec.page_type = page_type;
        spec.uncompressed_size = page.uncompressed_size() + header_size;
        spec.compressed_size = page.compressed_size() + header_size;
        spec.offset = start_pos;
        spec.bytes_written = self.sink.bytes_written() as u64 - start_pos;
        spec.num_values = page.num_values();

        if let Some(page_encryptor) = self.page_encryptor_mut() {
            if page.compressed_page().is_data_page() {
                page_encryptor.increment_page();
            }
        }
        Ok(spec)
    }

    fn close(&mut self) -> Result<()> {
        self.sink.flush()?;
        Ok(())
    }
}

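/// Returns the magic bytes for the start of the file:
/// `PARQUET_MAGIC_ENCR_FOOTER` if the footer is encrypted, otherwise
/// `PARQUET_MAGIC`.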
#[cfg(feature = "encryption")]
pub(crate) fn get_file_magic(
    file_encryption_properties: Option<&Arc<FileEncryptionProperties>>,
) -> &'static [u8; 4] {
    match file_encryption_properties.as_ref() {
        Some(encryption_properties) if encryption_properties.encrypt_footer() => {
            &PARQUET_MAGIC_ENCR_FOOTER
        }
        _ => &PARQUET_MAGIC,
    }
}

#[cfg(not(feature = "encryption"))]
pub(crate) fn get_file_magic() -> &'static [u8; 4] {
    &PARQUET_MAGIC
}

#[cfg(test)]
mod tests {
    use super::*;

    #[cfg(feature = "arrow")]
    use arrow_array::RecordBatchReader;
    use bytes::Bytes;
    use std::fs::File;

    #[cfg(feature = "arrow")]
    use crate::arrow::ArrowWriter;
    #[cfg(feature = "arrow")]
    use crate::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
    use crate::basic::{
        ColumnOrder, Compression, ConvertedType, Encoding, LogicalType, Repetition, SortOrder, Type,
    };
    use crate::column::page::{Page, PageReader};
    use crate::column::reader::get_typed_column_reader;
    use crate::compression::{Codec, CodecOptionsBuilder, create_codec};
    use crate::data_type::{BoolType, ByteArrayType, Int32Type};
    use crate::file::page_index::column_index::ColumnIndexMetaData;
    use crate::file::properties::EnabledStatistics;
    use crate::file::serialized_reader::ReadOptionsBuilder;
    use crate::file::statistics::{from_thrift_page_stats, page_stats_to_thrift};
    use crate::file::{
        properties::{ReaderProperties, WriterProperties, WriterVersion},
        reader::{FileReader, SerializedFileReader, SerializedPageReader},
        statistics::Statistics,
    };
    use crate::record::{Row, RowAccessor};
    use crate::schema::parser::parse_message_type;
    use crate::schema::types;
    use crate::schema::types::{ColumnDescriptor, ColumnPath};
    use crate::util::test_common::rand_gen::RandGen;

    #[test]
    fn test_row_group_writer_error_not_all_columns_written() {
        let file = tempfile::tempfile().unwrap();
        let schema = Arc::new(
            types::Type::group_type_builder("schema")
                .with_fields(vec![Arc::new(
                    types::Type::primitive_type_builder("col1", Type::INT32)
                        .build()
                        .unwrap(),
                )])
                .build()
                .unwrap(),
        );
        let props = Default::default();
        let mut writer = SerializedFileWriter::new(file, schema, props).unwrap();
        let row_group_writer = writer.next_row_group().unwrap();
        let res = row_group_writer.close();
        assert!(res.is_err());
        if let Err(err) = res {
            assert_eq!(
                format!("{err}"),
                "Parquet error: Column length mismatch: 1 != 0"
            );
        }
    }

    #[test]
    fn test_row_group_writer_num_records_mismatch() {
        let file = tempfile::tempfile().unwrap();
        let schema = Arc::new(
            types::Type::group_type_builder("schema")
                .with_fields(vec![
                    Arc::new(
                        types::Type::primitive_type_builder("col1", Type::INT32)
                            .with_repetition(Repetition::REQUIRED)
                            .build()
                            .unwrap(),
                    ),
                    Arc::new(
                        types::Type::primitive_type_builder("col2", Type::INT32)
                            .with_repetition(Repetition::REQUIRED)
                            .build()
                            .unwrap(),
                    ),
                ])
                .build()
                .unwrap(),
        );
        let props = Default::default();
        let mut writer = SerializedFileWriter::new(file, schema, props).unwrap();
        let mut row_group_writer = writer.next_row_group().unwrap();

        let mut col_writer = row_group_writer.next_column().unwrap().unwrap();
        col_writer
            .typed::<Int32Type>()
            .write_batch(&[1, 2, 3], None, None)
            .unwrap();
        col_writer.close().unwrap();

        let mut col_writer = row_group_writer.next_column().unwrap().unwrap();
        col_writer
            .typed::<Int32Type>()
            .write_batch(&[1, 2], None, None)
            .unwrap();

        let err = col_writer.close().unwrap_err();
        assert_eq!(
            err.to_string(),
            "Parquet error: Incorrect number of rows, expected 3 != 2 rows"
        );
    }

    #[test]
    fn test_file_writer_empty_file() {
        let file = tempfile::tempfile().unwrap();

        let schema = Arc::new(
            types::Type::group_type_builder("schema")
                .with_fields(vec![Arc::new(
                    types::Type::primitive_type_builder("col1", Type::INT32)
                        .build()
                        .unwrap(),
                )])
                .build()
                .unwrap(),
        );
        let props = Default::default();
        let writer = SerializedFileWriter::new(file.try_clone().unwrap(), schema, props).unwrap();
        writer.close().unwrap();

        let reader = SerializedFileReader::new(file).unwrap();
        assert_eq!(reader.get_row_iter(None).unwrap().count(), 0);
    }

    #[test]
    fn test_file_writer_column_orders_populated() {
        let file = tempfile::tempfile().unwrap();

        let schema = Arc::new(
            types::Type::group_type_builder("schema")
                .with_fields(vec![
                    Arc::new(
                        types::Type::primitive_type_builder("col1", Type::INT32)
                            .build()
                            .unwrap(),
                    ),
                    Arc::new(
                        types::Type::primitive_type_builder("col2", Type::FIXED_LEN_BYTE_ARRAY)
                            .with_converted_type(ConvertedType::INTERVAL)
                            .with_length(12)
                            .build()
                            .unwrap(),
                    ),
                    Arc::new(
                        types::Type::group_type_builder("nested")
                            .with_repetition(Repetition::REQUIRED)
                            .with_fields(vec![
                                Arc::new(
                                    types::Type::primitive_type_builder(
                                        "col3",
                                        Type::FIXED_LEN_BYTE_ARRAY,
                                    )
                                    .with_logical_type(Some(LogicalType::Float16))
                                    .with_length(2)
                                    .build()
                                    .unwrap(),
                                ),
                                Arc::new(
                                    types::Type::primitive_type_builder("col4", Type::BYTE_ARRAY)
                                        .with_logical_type(Some(LogicalType::String))
                                        .build()
                                        .unwrap(),
                                ),
                            ])
                            .build()
                            .unwrap(),
                    ),
                ])
                .build()
                .unwrap(),
        );

        let props = Default::default();
        let writer = SerializedFileWriter::new(file.try_clone().unwrap(), schema, props).unwrap();
        writer.close().unwrap();

        let reader = SerializedFileReader::new(file).unwrap();

        let expected = vec![
            // col1: INT32 sorts as a signed integer
            ColumnOrder::TYPE_DEFINED_ORDER(SortOrder::SIGNED),
            // col2: INTERVAL has no defined sort order
            ColumnOrder::TYPE_DEFINED_ORDER(SortOrder::UNDEFINED),
            // col3: Float16 sorts as signed
            ColumnOrder::TYPE_DEFINED_ORDER(SortOrder::SIGNED),
            // col4: String sorts as unsigned (byte-wise comparison)
            ColumnOrder::TYPE_DEFINED_ORDER(SortOrder::UNSIGNED),
        ];
        let actual = reader.metadata().file_metadata().column_orders();

        assert!(actual.is_some());
        let actual = actual.unwrap();
        assert_eq!(*actual, expected);
    }

    #[test]
    fn test_file_writer_with_metadata() {
        let file = tempfile::tempfile().unwrap();

        let schema = Arc::new(
            types::Type::group_type_builder("schema")
                .with_fields(vec![Arc::new(
                    types::Type::primitive_type_builder("col1", Type::INT32)
                        .build()
                        .unwrap(),
                )])
                .build()
                .unwrap(),
        );
        let props = Arc::new(
            WriterProperties::builder()
                .set_key_value_metadata(Some(vec![KeyValue::new(
                    "key".to_string(),
                    "value".to_string(),
                )]))
                .build(),
        );
        let writer = SerializedFileWriter::new(file.try_clone().unwrap(), schema, props).unwrap();
        writer.close().unwrap();

        let reader = SerializedFileReader::new(file).unwrap();
        assert_eq!(
            reader
                .metadata()
                .file_metadata()
                .key_value_metadata()
                .to_owned()
                .unwrap()
                .len(),
            1
        );
    }

    #[test]
    fn test_file_writer_v2_with_metadata() {
        let file = tempfile::tempfile().unwrap();
        let field_logical_type = Some(LogicalType::Integer {
            bit_width: 8,
            is_signed: false,
        });
        let field = Arc::new(
            types::Type::primitive_type_builder("col1", Type::INT32)
                .with_logical_type(field_logical_type.clone())
                .with_converted_type(field_logical_type.into())
                .build()
                .unwrap(),
        );
        let schema = Arc::new(
            types::Type::group_type_builder("schema")
                .with_fields(vec![field.clone()])
                .build()
                .unwrap(),
        );
        let props = Arc::new(
            WriterProperties::builder()
                .set_key_value_metadata(Some(vec![KeyValue::new(
                    "key".to_string(),
                    "value".to_string(),
                )]))
                .set_writer_version(WriterVersion::PARQUET_2_0)
                .build(),
        );
        let writer = SerializedFileWriter::new(file.try_clone().unwrap(), schema, props).unwrap();
        writer.close().unwrap();

        let reader = SerializedFileReader::new(file).unwrap();

        assert_eq!(
            reader
                .metadata()
                .file_metadata()
                .key_value_metadata()
                .to_owned()
                .unwrap()
                .len(),
            1
        );

        let fields = reader.metadata().file_metadata().schema().get_fields();
        assert_eq!(fields.len(), 1);
        assert_eq!(fields[0], field);
    }

    #[test]
    fn test_file_writer_with_sorting_columns_metadata() {
        let file = tempfile::tempfile().unwrap();

        let schema = Arc::new(
            types::Type::group_type_builder("schema")
                .with_fields(vec![
                    Arc::new(
                        types::Type::primitive_type_builder("col1", Type::INT32)
                            .build()
                            .unwrap(),
                    ),
                    Arc::new(
                        types::Type::primitive_type_builder("col2", Type::INT32)
                            .build()
                            .unwrap(),
                    ),
                ])
                .build()
                .unwrap(),
        );
        let expected_result = Some(vec![SortingColumn {
            column_idx: 0,
            descending: false,
            nulls_first: true,
        }]);
        let props = Arc::new(
            WriterProperties::builder()
                .set_key_value_metadata(Some(vec![KeyValue::new(
                    "key".to_string(),
                    "value".to_string(),
                )]))
                .set_sorting_columns(expected_result.clone())
                .build(),
        );
        let mut writer =
            SerializedFileWriter::new(file.try_clone().unwrap(), schema, props).unwrap();
        let mut row_group_writer = writer.next_row_group().expect("get row group writer");

        let col_writer = row_group_writer.next_column().unwrap().unwrap();
        col_writer.close().unwrap();

        let col_writer = row_group_writer.next_column().unwrap().unwrap();
        col_writer.close().unwrap();

        row_group_writer.close().unwrap();
        writer.close().unwrap();

        let reader = SerializedFileReader::new(file).unwrap();
        let result: Vec<Option<&Vec<SortingColumn>>> = reader
            .metadata()
            .row_groups()
            .iter()
            .map(|f| f.sorting_columns())
            .collect();
        assert_eq!(expected_result.as_ref(), result[0]);
    }

    #[test]
    fn test_file_writer_empty_row_groups() {
        let file = tempfile::tempfile().unwrap();
        test_file_roundtrip(file, vec![]);
    }

    #[test]
    fn test_file_writer_single_row_group() {
        let file = tempfile::tempfile().unwrap();
        test_file_roundtrip(file, vec![vec![1, 2, 3, 4, 5]]);
    }

    #[test]
    fn test_file_writer_multiple_row_groups() {
        let file = tempfile::tempfile().unwrap();
        test_file_roundtrip(
            file,
            vec![
                vec![1, 2, 3, 4, 5],
                vec![1, 2, 3],
                vec![1],
                vec![1, 2, 3, 4, 5, 6],
            ],
        );
    }

    #[test]
    fn test_file_writer_multiple_large_row_groups() {
        let file = tempfile::tempfile().unwrap();
        test_file_roundtrip(
            file,
            vec![vec![123; 1024], vec![124; 1000], vec![125; 15], vec![]],
        );
    }

    #[test]
    fn test_page_writer_data_pages() {
        let pages = vec![
            Page::DataPage {
                buf: Bytes::from(vec![1, 2, 3, 4, 5, 6, 7, 8]),
                num_values: 10,
                encoding: Encoding::DELTA_BINARY_PACKED,
                def_level_encoding: Encoding::RLE,
                rep_level_encoding: Encoding::RLE,
                statistics: Some(Statistics::int32(Some(1), Some(3), None, Some(7), true)),
            },
            Page::DataPageV2 {
                buf: Bytes::from(vec![4; 128]),
                num_values: 10,
                encoding: Encoding::DELTA_BINARY_PACKED,
                num_nulls: 2,
                num_rows: 12,
                def_levels_byte_len: 24,
                rep_levels_byte_len: 32,
                is_compressed: false,
                statistics: Some(Statistics::int32(Some(1), Some(3), None, Some(7), true)),
            },
        ];

        test_page_roundtrip(&pages[..], Compression::SNAPPY, Type::INT32);
        test_page_roundtrip(&pages[..], Compression::UNCOMPRESSED, Type::INT32);
    }

    #[test]
    fn test_page_writer_dict_pages() {
        let pages = vec![
            Page::DictionaryPage {
                buf: Bytes::from(vec![1, 2, 3, 4, 5]),
                num_values: 5,
                encoding: Encoding::RLE_DICTIONARY,
                is_sorted: false,
            },
            Page::DataPage {
                buf: Bytes::from(vec![1, 2, 3, 4, 5, 6, 7, 8]),
                num_values: 10,
                encoding: Encoding::DELTA_BINARY_PACKED,
                def_level_encoding: Encoding::RLE,
                rep_level_encoding: Encoding::RLE,
                statistics: Some(Statistics::int32(Some(1), Some(3), None, Some(7), true)),
            },
            Page::DataPageV2 {
                buf: Bytes::from(vec![4; 128]),
                num_values: 10,
                encoding: Encoding::DELTA_BINARY_PACKED,
                num_nulls: 2,
                num_rows: 12,
                def_levels_byte_len: 24,
                rep_levels_byte_len: 32,
                is_compressed: false,
                statistics: None,
            },
        ];

        test_page_roundtrip(&pages[..], Compression::SNAPPY, Type::INT32);
        test_page_roundtrip(&pages[..], Compression::UNCOMPRESSED, Type::INT32);
    }

    /// Tests writing and reading pages.
    ///
    /// The physical type is for statistics only.
    fn test_page_roundtrip(pages: &[Page], codec: Compression, physical_type: Type) {
        let mut compressed_pages = vec![];
        let mut total_num_values = 0i64;
        let codec_options = CodecOptionsBuilder::default()
            .set_backward_compatible_lz4(false)
            .build();
        let mut compressor = create_codec(codec, &codec_options).unwrap();

        for page in pages {
            let uncompressed_len = page.buffer().len();

            let compressed_page = match *page {
                Page::DataPage {
                    ref buf,
                    num_values,
                    encoding,
                    def_level_encoding,
                    rep_level_encoding,
                    ref statistics,
                } => {
                    total_num_values += num_values as i64;
                    let output_buf = compress_helper(compressor.as_mut(), buf);

                    Page::DataPage {
                        buf: Bytes::from(output_buf),
                        num_values,
                        encoding,
                        def_level_encoding,
                        rep_level_encoding,
                        statistics: from_thrift_page_stats(
                            physical_type,
                            page_stats_to_thrift(statistics.as_ref()),
                        )
                        .unwrap(),
                    }
                }
                Page::DataPageV2 {
                    ref buf,
                    num_values,
                    encoding,
                    num_nulls,
                    num_rows,
                    def_levels_byte_len,
                    rep_levels_byte_len,
                    ref statistics,
                    ..
                } => {
                    total_num_values += num_values as i64;
                    let offset = (def_levels_byte_len + rep_levels_byte_len) as usize;
                    let cmp_buf = compress_helper(compressor.as_mut(), &buf[offset..]);
                    let mut output_buf = Vec::from(&buf[..offset]);
                    output_buf.extend_from_slice(&cmp_buf[..]);

                    Page::DataPageV2 {
                        buf: Bytes::from(output_buf),
                        num_values,
                        encoding,
                        num_nulls,
                        num_rows,
                        def_levels_byte_len,
                        rep_levels_byte_len,
                        is_compressed: compressor.is_some(),
                        statistics: from_thrift_page_stats(
                            physical_type,
                            page_stats_to_thrift(statistics.as_ref()),
                        )
                        .unwrap(),
                    }
                }
                Page::DictionaryPage {
                    ref buf,
                    num_values,
                    encoding,
                    is_sorted,
                } => {
                    let output_buf = compress_helper(compressor.as_mut(), buf);

                    Page::DictionaryPage {
                        buf: Bytes::from(output_buf),
                        num_values,
                        encoding,
                        is_sorted,
                    }
                }
            };

            let compressed_page = CompressedPage::new(compressed_page, uncompressed_len);
            compressed_pages.push(compressed_page);
        }

        let mut buffer: Vec<u8> = vec![];
        let mut result_pages: Vec<Page> = vec![];
        {
            let mut writer = TrackedWrite::new(&mut buffer);
            let mut page_writer = SerializedPageWriter::new(&mut writer);

            for page in compressed_pages {
                page_writer.write_page(page).unwrap();
            }
            page_writer.close().unwrap();
        }
        {
            let reader = bytes::Bytes::from(buffer);

            let t = types::Type::primitive_type_builder("t", physical_type)
                .build()
                .unwrap();

            let desc = ColumnDescriptor::new(Arc::new(t), 0, 0, ColumnPath::new(vec![]));
            let meta = ColumnChunkMetaData::builder(Arc::new(desc))
                .set_compression(codec)
                .set_total_compressed_size(reader.len() as i64)
                .set_num_values(total_num_values)
                .build()
                .unwrap();

            let props = ReaderProperties::builder()
                .set_backward_compatible_lz4(false)
                .set_read_page_statistics(true)
                .build();
            let mut page_reader = SerializedPageReader::new_with_properties(
                Arc::new(reader),
                &meta,
                total_num_values as usize,
                None,
                Arc::new(props),
            )
            .unwrap();

            while let Some(page) = page_reader.get_next_page().unwrap() {
                result_pages.push(page);
            }
        }

        assert_eq!(result_pages.len(), pages.len());
        for i in 0..result_pages.len() {
            assert_page(&result_pages[i], &pages[i]);
        }
    }

    /// Helper function to compress a slice.
    fn compress_helper(compressor: Option<&mut Box<dyn Codec>>, data: &[u8]) -> Vec<u8> {
        let mut output_buf = vec![];
        if let Some(cmpr) = compressor {
            cmpr.compress(data, &mut output_buf).unwrap();
        } else {
            output_buf.extend_from_slice(data);
        }
        output_buf
    }

    /// Checks that two pages match.
    fn assert_page(left: &Page, right: &Page) {
        assert_eq!(left.page_type(), right.page_type());
        assert_eq!(&left.buffer(), &right.buffer());
        assert_eq!(left.num_values(), right.num_values());
        assert_eq!(left.encoding(), right.encoding());
        assert_eq!(
            page_stats_to_thrift(left.statistics()),
            page_stats_to_thrift(right.statistics())
        );
    }

    fn test_roundtrip_i32<W, R>(
        file: W,
        data: Vec<Vec<i32>>,
        compression: Compression,
    ) -> ParquetMetaData
    where
        W: Write + Send,
        R: ChunkReader + From<W> + 'static,
    {
        test_roundtrip::<W, R, Int32Type, _>(file, data, |r| r.get_int(0).unwrap(), compression)
    }

    /// File write-read roundtrip.
    ///
    /// `data` consists of arrays of values for each row group.
    fn test_roundtrip<W, R, D, F>(
        mut file: W,
        data: Vec<Vec<D::T>>,
        value: F,
        compression: Compression,
    ) -> ParquetMetaData
    where
        W: Write + Send,
        R: ChunkReader + From<W> + 'static,
        D: DataType,
        F: Fn(Row) -> D::T,
    {
        let schema = Arc::new(
            types::Type::group_type_builder("schema")
                .with_fields(vec![Arc::new(
                    types::Type::primitive_type_builder("col1", D::get_physical_type())
                        .with_repetition(Repetition::REQUIRED)
                        .build()
                        .unwrap(),
                )])
                .build()
                .unwrap(),
        );
        let props = Arc::new(
            WriterProperties::builder()
                .set_compression(compression)
                .build(),
        );
        let mut file_writer = SerializedFileWriter::new(&mut file, schema, props).unwrap();
        let mut rows: i64 = 0;

        for (idx, subset) in data.iter().enumerate() {
            let row_group_file_offset = file_writer.buf.bytes_written();
            let mut row_group_writer = file_writer.next_row_group().unwrap();
            if let Some(mut writer) = row_group_writer.next_column().unwrap() {
                rows += writer
                    .typed::<D>()
                    .write_batch(&subset[..], None, None)
                    .unwrap() as i64;
                writer.close().unwrap();
            }
            let last_group = row_group_writer.close().unwrap();
            let flushed = file_writer.flushed_row_groups();
            assert_eq!(flushed.len(), idx + 1);
            assert_eq!(Some(idx as i16), last_group.ordinal());
            assert_eq!(Some(row_group_file_offset as i64), last_group.file_offset());
            assert_eq!(&flushed[idx], last_group.as_ref());
        }
        let file_metadata = file_writer.close().unwrap();

        let reader = SerializedFileReader::new(R::from(file)).unwrap();
        assert_eq!(reader.num_row_groups(), data.len());
        assert_eq!(
            reader.metadata().file_metadata().num_rows(),
            rows,
            "row count in metadata not equal to number of rows written"
        );
        for (i, item) in data.iter().enumerate().take(reader.num_row_groups()) {
            let row_group_reader = reader.get_row_group(i).unwrap();
            let iter = row_group_reader.get_row_iter(None).unwrap();
            let res: Vec<_> = iter.map(|row| row.unwrap()).map(&value).collect();
            let row_group_size = row_group_reader.metadata().total_byte_size();
            let uncompressed_size: i64 = row_group_reader
                .metadata()
                .columns()
                .iter()
                .map(|v| v.uncompressed_size())
                .sum();
            assert_eq!(row_group_size, uncompressed_size);
            assert_eq!(res, *item);
        }
        file_metadata
    }

    /// File write-read roundtrip for `i32` data, returning the file metadata.
    fn test_file_roundtrip(file: File, data: Vec<Vec<i32>>) -> ParquetMetaData {
        test_roundtrip_i32::<File, File>(file, data, Compression::UNCOMPRESSED)
    }

    #[test]
    fn test_bytes_writer_empty_row_groups() {
        test_bytes_roundtrip(vec![], Compression::UNCOMPRESSED);
    }

    #[test]
    fn test_bytes_writer_single_row_group() {
        test_bytes_roundtrip(vec![vec![1, 2, 3, 4, 5]], Compression::UNCOMPRESSED);
    }

    #[test]
    fn test_bytes_writer_multiple_row_groups() {
        test_bytes_roundtrip(
            vec![
                vec![1, 2, 3, 4, 5],
                vec![1, 2, 3],
                vec![1],
                vec![1, 2, 3, 4, 5, 6],
            ],
            Compression::UNCOMPRESSED,
        );
    }

    #[test]
    fn test_bytes_writer_single_row_group_compressed() {
        test_bytes_roundtrip(vec![vec![1, 2, 3, 4, 5]], Compression::SNAPPY);
    }

    #[test]
    fn test_bytes_writer_multiple_row_groups_compressed() {
        test_bytes_roundtrip(
            vec![
                vec![1, 2, 3, 4, 5],
                vec![1, 2, 3],
                vec![1],
                vec![1, 2, 3, 4, 5, 6],
            ],
            Compression::SNAPPY,
        );
    }

    fn test_bytes_roundtrip(data: Vec<Vec<i32>>, compression: Compression) {
        test_roundtrip_i32::<Vec<u8>, Bytes>(Vec::with_capacity(1024), data, compression);
    }

    #[test]
    fn test_boolean_roundtrip() {
        let my_bool_values: Vec<_> = (0..2049).map(|idx| idx % 2 == 0).collect();
        test_roundtrip::<Vec<u8>, Bytes, BoolType, _>(
            Vec::with_capacity(1024),
            vec![my_bool_values],
            |r| r.get_bool(0).unwrap(),
            Compression::UNCOMPRESSED,
        );
    }

    #[test]
    fn test_boolean_compressed_roundtrip() {
        let my_bool_values: Vec<_> = (0..2049).map(|idx| idx % 2 == 0).collect();
        test_roundtrip::<Vec<u8>, Bytes, BoolType, _>(
            Vec::with_capacity(1024),
            vec![my_bool_values],
            |r| r.get_bool(0).unwrap(),
            Compression::SNAPPY,
        );
    }

    #[test]
    fn test_column_offset_index_file() {
        let file = tempfile::tempfile().unwrap();
        let file_metadata = test_file_roundtrip(file, vec![vec![1, 2, 3, 4, 5]]);
        file_metadata.row_groups().iter().for_each(|row_group| {
            row_group.columns().iter().for_each(|column_chunk| {
                assert!(column_chunk.column_index_offset().is_some());
                assert!(column_chunk.column_index_length().is_some());
                assert!(column_chunk.offset_index_offset().is_some());
                assert!(column_chunk.offset_index_length().is_some());
            })
        });
    }

    fn test_kv_metadata(initial_kv: Option<Vec<KeyValue>>, final_kv: Option<Vec<KeyValue>>) {
        let schema = Arc::new(
            types::Type::group_type_builder("schema")
                .with_fields(vec![Arc::new(
                    types::Type::primitive_type_builder("col1", Type::INT32)
                        .with_repetition(Repetition::REQUIRED)
                        .build()
                        .unwrap(),
                )])
                .build()
                .unwrap(),
        );
        let mut out = Vec::with_capacity(1024);
        let props = Arc::new(
            WriterProperties::builder()
                .set_key_value_metadata(initial_kv.clone())
                .build(),
        );
        let mut writer = SerializedFileWriter::new(&mut out, schema, props).unwrap();
        let mut row_group_writer = writer.next_row_group().unwrap();
        let column = row_group_writer.next_column().unwrap().unwrap();
        column.close().unwrap();
        row_group_writer.close().unwrap();
        if let Some(kvs) = &final_kv {
            for kv in kvs {
                writer.append_key_value_metadata(kv.clone())
            }
        }
        writer.close().unwrap();

        let reader = SerializedFileReader::new(Bytes::from(out)).unwrap();
        let metadata = reader.metadata().file_metadata();
        let keys = metadata.key_value_metadata();

        match (initial_kv, final_kv) {
            (Some(a), Some(b)) => {
                let keys = keys.unwrap();
                assert_eq!(keys.len(), a.len() + b.len());
                assert_eq!(&keys[..a.len()], a.as_slice());
                assert_eq!(&keys[a.len()..], b.as_slice());
            }
            (Some(v), None) => assert_eq!(keys.unwrap(), &v),
            (None, Some(v)) if !v.is_empty() => assert_eq!(keys.unwrap(), &v),
            _ => assert!(keys.is_none()),
        }
    }

    #[test]
    fn test_append_metadata() {
        let kv1 = KeyValue::new("cupcakes".to_string(), "awesome".to_string());
        let kv2 = KeyValue::new("bingo".to_string(), "bongo".to_string());

        test_kv_metadata(None, None);
        test_kv_metadata(Some(vec![kv1.clone()]), None);
        test_kv_metadata(None, Some(vec![kv2.clone()]));
        test_kv_metadata(Some(vec![kv1.clone()]), Some(vec![kv2.clone()]));
        test_kv_metadata(Some(vec![]), Some(vec![kv2]));
        test_kv_metadata(Some(vec![]), Some(vec![]));
        test_kv_metadata(Some(vec![kv1]), Some(vec![]));
        test_kv_metadata(None, Some(vec![]));
    }

    #[test]
    fn test_backwards_compatible_statistics() {
        let message_type = "
            message test_schema {
                REQUIRED INT32 decimal1 (DECIMAL(8,2));
                REQUIRED INT32 i32 (INTEGER(32,true));
                REQUIRED INT32 u32 (INTEGER(32,false));
            }
        ";

        let schema = Arc::new(parse_message_type(message_type).unwrap());
        let props = Default::default();
        let mut writer = SerializedFileWriter::new(vec![], schema, props).unwrap();
        let mut row_group_writer = writer.next_row_group().unwrap();

        for _ in 0..3 {
            let mut writer = row_group_writer.next_column().unwrap().unwrap();
            writer
                .typed::<Int32Type>()
                .write_batch(&[1, 2, 3], None, None)
                .unwrap();
            writer.close().unwrap();
        }
        let metadata = row_group_writer.close().unwrap();
        writer.close().unwrap();

        // decimal1: both the deprecated and the new min/max fields are written
        let s = page_stats_to_thrift(metadata.column(0).statistics()).unwrap();
        assert_eq!(s.min.as_deref(), Some(1_i32.to_le_bytes().as_ref()));
        assert_eq!(s.max.as_deref(), Some(3_i32.to_le_bytes().as_ref()));
        assert_eq!(s.min_value.as_deref(), Some(1_i32.to_le_bytes().as_ref()));
        assert_eq!(s.max_value.as_deref(), Some(3_i32.to_le_bytes().as_ref()));

        // i32: both the deprecated and the new min/max fields are written
        let s = page_stats_to_thrift(metadata.column(1).statistics()).unwrap();
        assert_eq!(s.min.as_deref(), Some(1_i32.to_le_bytes().as_ref()));
        assert_eq!(s.max.as_deref(), Some(3_i32.to_le_bytes().as_ref()));
        assert_eq!(s.min_value.as_deref(), Some(1_i32.to_le_bytes().as_ref()));
        assert_eq!(s.max_value.as_deref(), Some(3_i32.to_le_bytes().as_ref()));

        // u32: the deprecated min/max are not written for an unsigned sort order
        let s = page_stats_to_thrift(metadata.column(2).statistics()).unwrap();
        assert_eq!(s.min.as_deref(), None);
        assert_eq!(s.max.as_deref(), None);
        assert_eq!(s.min_value.as_deref(), Some(1_i32.to_le_bytes().as_ref()));
        assert_eq!(s.max_value.as_deref(), Some(3_i32.to_le_bytes().as_ref()));
    }

    #[test]
    fn test_spliced_write() {
        let message_type = "
            message test_schema {
                REQUIRED INT32 i32 (INTEGER(32,true));
                REQUIRED INT32 u32 (INTEGER(32,false));
            }
        ";
        let schema = Arc::new(parse_message_type(message_type).unwrap());
        let props = Arc::new(WriterProperties::builder().build());

        let mut file = Vec::with_capacity(1024);
        let mut file_writer = SerializedFileWriter::new(&mut file, schema, props.clone()).unwrap();

        let columns = file_writer.descr.columns();
        let mut column_state: Vec<(_, Option<ColumnCloseResult>)> = columns
            .iter()
            .map(|_| (TrackedWrite::new(Vec::with_capacity(1024)), None))
            .collect();

        let mut column_state_slice = column_state.as_mut_slice();
        let mut column_writers = Vec::with_capacity(columns.len());
        for c in columns {
            let ((buf, out), tail) = column_state_slice.split_first_mut().unwrap();
            column_state_slice = tail;

            let page_writer = Box::new(SerializedPageWriter::new(buf));
            let col_writer = get_column_writer(c.clone(), props.clone(), page_writer);
            column_writers.push(SerializedColumnWriter::new(
                col_writer,
                Some(Box::new(|on_close| {
                    *out = Some(on_close);
                    Ok(())
                })),
            ));
        }

        let column_data = [[1, 2, 3, 4], [7, 3, 7, 3]];

        // Write the column data to the standalone column writers
        for (writer, batch) in column_writers.iter_mut().zip(column_data) {
            let writer = writer.typed::<Int32Type>();
            writer.write_batch(&batch, None, None).unwrap();
        }

        // Close the column writers, capturing their close results
        for writer in column_writers {
            writer.close().unwrap()
        }

        // Splice the serialized column chunks into a row group
        let mut row_group_writer = file_writer.next_row_group().unwrap();
        for (write, close) in column_state {
            let buf = Bytes::from(write.into_inner().unwrap());
            row_group_writer
                .append_column(&buf, close.unwrap())
                .unwrap();
        }
        row_group_writer.close().unwrap();
        file_writer.close().unwrap();

        // Read back the written file
        let file = Bytes::from(file);
        let test_read = |reader: SerializedFileReader<Bytes>| {
            let row_group = reader.get_row_group(0).unwrap();

            let mut out = Vec::with_capacity(4);
            let c1 = row_group.get_column_reader(0).unwrap();
            let mut c1 = get_typed_column_reader::<Int32Type>(c1);
            c1.read_records(4, None, None, &mut out).unwrap();
            assert_eq!(out, column_data[0]);

            out.clear();

            let c2 = row_group.get_column_reader(1).unwrap();
            let mut c2 = get_typed_column_reader::<Int32Type>(c2);
            c2.read_records(4, None, None, &mut out).unwrap();
            assert_eq!(out, column_data[1]);
        };

        let reader = SerializedFileReader::new(file.clone()).unwrap();
        test_read(reader);

        let options = ReadOptionsBuilder::new().with_page_index().build();
        let reader = SerializedFileReader::new_with_options(file, options).unwrap();
        test_read(reader);
    }

2024 #[test]
2025 fn test_disabled_statistics() {
2026 let message_type = "
2027 message test_schema {
2028 REQUIRED INT32 a;
2029 REQUIRED INT32 b;
2030 }
2031 ";
2032 let schema = Arc::new(parse_message_type(message_type).unwrap());
2033 let props = WriterProperties::builder()
2034 .set_statistics_enabled(EnabledStatistics::None)
2035 .set_column_statistics_enabled("a".into(), EnabledStatistics::Page)
2036 .set_offset_index_disabled(true) .build();
2038 let mut file = Vec::with_capacity(1024);
2039 let mut file_writer =
2040 SerializedFileWriter::new(&mut file, schema, Arc::new(props)).unwrap();
2041
2042 let mut row_group_writer = file_writer.next_row_group().unwrap();
2043 let mut a_writer = row_group_writer.next_column().unwrap().unwrap();
2044 let col_writer = a_writer.typed::<Int32Type>();
2045 col_writer.write_batch(&[1, 2, 3], None, None).unwrap();
2046 a_writer.close().unwrap();
2047
2048 let mut b_writer = row_group_writer.next_column().unwrap().unwrap();
2049 let col_writer = b_writer.typed::<Int32Type>();
2050 col_writer.write_batch(&[4, 5, 6], None, None).unwrap();
2051 b_writer.close().unwrap();
2052 row_group_writer.close().unwrap();
2053
2054 let metadata = file_writer.finish().unwrap();
2055 assert_eq!(metadata.num_row_groups(), 1);
2056 let row_group = metadata.row_group(0);
2057 assert_eq!(row_group.num_columns(), 2);
2058 assert!(row_group.column(0).offset_index_offset().is_some());
2060 assert!(row_group.column(0).column_index_offset().is_some());
2061 assert!(row_group.column(1).offset_index_offset().is_some());
2063 assert!(row_group.column(1).column_index_offset().is_none());
2064
2065 let err = file_writer.next_row_group().err().unwrap().to_string();
2066 assert_eq!(err, "Parquet error: SerializedFileWriter already finished");
2067
2068 drop(file_writer);
2069
2070 let options = ReadOptionsBuilder::new().with_page_index().build();
2071 let reader = SerializedFileReader::new_with_options(Bytes::from(file), options).unwrap();
2072
2073 let offset_index = reader.metadata().offset_index().unwrap();
2074 assert_eq!(offset_index.len(), 1); assert_eq!(offset_index[0].len(), 2); let column_index = reader.metadata().column_index().unwrap();
2078 assert_eq!(column_index.len(), 1); assert_eq!(column_index[0].len(), 2); let a_idx = &column_index[0][0];
2082 assert!(matches!(a_idx, ColumnIndexMetaData::INT32(_)), "{a_idx:?}");
2083 let b_idx = &column_index[0][1];
2084 assert!(matches!(b_idx, ColumnIndexMetaData::NONE), "{b_idx:?}");
2085 }
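
    // Size statistics for a nullable BYTE_ARRAY column: the definition level
    // histogram and unencoded byte sizes should be present in the column chunk
    // metadata and in the page indexes, and survive a read round trip.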
    #[test]
    fn test_byte_array_size_statistics() {
        let message_type = "
            message test_schema {
                OPTIONAL BYTE_ARRAY a (UTF8);
            }
        ";
        let schema = Arc::new(parse_message_type(message_type).unwrap());
        let data = ByteArrayType::gen_vec(32, 7);
        let def_levels = [1, 1, 1, 1, 0, 1, 0, 1, 0, 1]; // 7 values, 3 nulls
        let unenc_size: i64 = data.iter().map(|x| x.len() as i64).sum();
        let file: File = tempfile::tempfile().unwrap();
        let props = Arc::new(
            WriterProperties::builder()
                .set_statistics_enabled(EnabledStatistics::Page)
                .build(),
        );

        let mut writer = SerializedFileWriter::new(&file, schema, props).unwrap();
        let mut row_group_writer = writer.next_row_group().unwrap();

        let mut col_writer = row_group_writer.next_column().unwrap().unwrap();
        col_writer
            .typed::<ByteArrayType>()
            .write_batch(&data, Some(&def_levels), None)
            .unwrap();
        col_writer.close().unwrap();
        row_group_writer.close().unwrap();
        let file_metadata = writer.close().unwrap();

        assert_eq!(file_metadata.num_row_groups(), 1);
        assert_eq!(file_metadata.row_group(0).num_columns(), 1);

        // 3 nulls at definition level 0, 7 values at definition level 1
        let check_def_hist = |def_hist: &[i64]| {
            assert_eq!(def_hist.len(), 2);
            assert_eq!(def_hist[0], 3);
            assert_eq!(def_hist[1], 7);
        };

        let meta_data = file_metadata.row_group(0).column(0);

        assert!(meta_data.repetition_level_histogram().is_none());
        assert!(meta_data.definition_level_histogram().is_some());
        assert!(meta_data.unencoded_byte_array_data_bytes().is_some());
        assert_eq!(
            unenc_size,
            meta_data.unencoded_byte_array_data_bytes().unwrap()
        );
        check_def_hist(meta_data.definition_level_histogram().unwrap().values());

        // check that the read metadata is also correct
        let options = ReadOptionsBuilder::new().with_page_index().build();
        let reader = SerializedFileReader::new_with_options(file, options).unwrap();

        let rfile_metadata = reader.metadata().file_metadata();
        assert_eq!(
            rfile_metadata.num_rows(),
            file_metadata.file_metadata().num_rows()
        );
        assert_eq!(reader.num_row_groups(), 1);
        let rowgroup = reader.get_row_group(0).unwrap();
        assert_eq!(rowgroup.num_columns(), 1);
        let column = rowgroup.metadata().column(0);
        assert!(column.definition_level_histogram().is_some());
        assert!(column.repetition_level_histogram().is_none());
        assert!(column.unencoded_byte_array_data_bytes().is_some());
        check_def_hist(column.definition_level_histogram().unwrap().values());
        assert_eq!(
            unenc_size,
            column.unencoded_byte_array_data_bytes().unwrap()
        );

        // the definition level histogram should also appear in the column index
        assert!(reader.metadata().column_index().is_some());
        let column_index = reader.metadata().column_index().unwrap();
        assert_eq!(column_index.len(), 1);
        assert_eq!(column_index[0].len(), 1);
        let col_idx = if let ColumnIndexMetaData::BYTE_ARRAY(index) = &column_index[0][0] {
            assert_eq!(index.num_pages(), 1);
            index
        } else {
            unreachable!()
        };

        assert!(col_idx.repetition_level_histogram(0).is_none());
        assert!(col_idx.definition_level_histogram(0).is_some());
        check_def_hist(col_idx.definition_level_histogram(0).unwrap());

        // and the per-page unencoded sizes should appear in the offset index
        assert!(reader.metadata().offset_index().is_some());
        let offset_index = reader.metadata().offset_index().unwrap();
        assert_eq!(offset_index.len(), 1);
        assert_eq!(offset_index[0].len(), 1);
        assert!(offset_index[0][0].unencoded_byte_array_data_bytes.is_some());
        let page_sizes = offset_index[0][0]
            .unencoded_byte_array_data_bytes
            .as_ref()
            .unwrap();
        assert_eq!(page_sizes.len(), 1);
        assert_eq!(page_sizes[0], unenc_size);
    }
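
    // The number of row groups per file is capped at i16::MAX (32767), since
    // row group ordinals are stored as 16-bit integers in the footer metadata.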
    #[test]
    fn test_too_many_rowgroups() {
        let message_type = "
            message test_schema {
                REQUIRED BYTE_ARRAY a (UTF8);
            }
        ";
        let schema = Arc::new(parse_message_type(message_type).unwrap());
        let file: File = tempfile::tempfile().unwrap();
        let props = Arc::new(
            WriterProperties::builder()
                .set_statistics_enabled(EnabledStatistics::None)
                .set_max_row_group_size(1)
                .build(),
        );
        let mut writer = SerializedFileWriter::new(&file, schema, props).unwrap();

        for i in 0..0x8001 {
            // only the last attempt (i == 0x8000) should fail
            match writer.next_row_group() {
                Ok(mut row_group_writer) => {
                    assert_ne!(i, 0x8000);
                    let col_writer = row_group_writer.next_column().unwrap().unwrap();
                    col_writer.close().unwrap();
                    row_group_writer.close().unwrap();
                }
                Err(e) => {
                    assert_eq!(i, 0x8000);
                    assert_eq!(
                        e.to_string(),
                        "Parquet error: Parquet does not support more than 32767 row groups per file (currently: 32768)"
                    );
                }
            }
        }
        writer.close().unwrap();
    }
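
    // Size statistics for a repeated, nullable INT32 list column: both level
    // histograms should be written, while unencoded byte sizes (tracked for
    // BYTE_ARRAY columns only) should be absent everywhere.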
    #[test]
    fn test_size_statistics_with_repetition_and_nulls() {
        let message_type = "
            message test_schema {
                OPTIONAL group i32_list (LIST) {
                    REPEATED group list {
                        OPTIONAL INT32 element;
                    }
                }
            }
        ";
        let schema = Arc::new(parse_message_type(message_type).unwrap());
        // column is:
        // row 0: [1, 2]
        // row 1: NULL
        // row 2: [4, NULL]
        // row 3: []
        // row 4: [7, 8, 9, 10]
        let data = [1, 2, 4, 7, 8, 9, 10];
        let def_levels = [3, 3, 0, 3, 2, 1, 3, 3, 3, 3];
        let rep_levels = [0, 1, 0, 0, 1, 0, 0, 1, 1, 1];
        let file = tempfile::tempfile().unwrap();
        let props = Arc::new(
            WriterProperties::builder()
                .set_statistics_enabled(EnabledStatistics::Page)
                .build(),
        );
        let mut writer = SerializedFileWriter::new(&file, schema, props).unwrap();
        let mut row_group_writer = writer.next_row_group().unwrap();

        let mut col_writer = row_group_writer.next_column().unwrap().unwrap();
        col_writer
            .typed::<Int32Type>()
            .write_batch(&data, Some(&def_levels), Some(&rep_levels))
            .unwrap();
        col_writer.close().unwrap();
        row_group_writer.close().unwrap();
        let file_metadata = writer.close().unwrap();

        assert_eq!(file_metadata.num_row_groups(), 1);
        assert_eq!(file_metadata.row_group(0).num_columns(), 1);

        // def level 0: null list, 1: empty list, 2: null element, 3: value
        let check_def_hist = |def_hist: &[i64]| {
            assert_eq!(def_hist.len(), 4);
            assert_eq!(def_hist[0], 1);
            assert_eq!(def_hist[1], 1);
            assert_eq!(def_hist[2], 1);
            assert_eq!(def_hist[3], 7);
        };

        // rep level 0: start of a row (5 rows), 1: continuation of a list
        let check_rep_hist = |rep_hist: &[i64]| {
            assert_eq!(rep_hist.len(), 2);
            assert_eq!(rep_hist[0], 5);
            assert_eq!(rep_hist[1], 5);
        };

        let meta_data = file_metadata.row_group(0).column(0);

        // check that histograms are set properly in the column chunk metadata
        assert!(meta_data.repetition_level_histogram().is_some());
        assert!(meta_data.definition_level_histogram().is_some());
        assert!(meta_data.unencoded_byte_array_data_bytes().is_none());
        check_def_hist(meta_data.definition_level_histogram().unwrap().values());
        check_rep_hist(meta_data.repetition_level_histogram().unwrap().values());

        // check that the read metadata is also correct
        let options = ReadOptionsBuilder::new().with_page_index().build();
        let reader = SerializedFileReader::new_with_options(file, options).unwrap();

        let rfile_metadata = reader.metadata().file_metadata();
        assert_eq!(
            rfile_metadata.num_rows(),
            file_metadata.file_metadata().num_rows()
        );
        assert_eq!(reader.num_row_groups(), 1);
        let rowgroup = reader.get_row_group(0).unwrap();
        assert_eq!(rowgroup.num_columns(), 1);
        let column = rowgroup.metadata().column(0);
        assert!(column.definition_level_histogram().is_some());
        assert!(column.repetition_level_histogram().is_some());
        assert!(column.unencoded_byte_array_data_bytes().is_none());
        check_def_hist(column.definition_level_histogram().unwrap().values());
        check_rep_hist(column.repetition_level_histogram().unwrap().values());

        // histograms should also appear in the column index
        assert!(reader.metadata().column_index().is_some());
        let column_index = reader.metadata().column_index().unwrap();
        assert_eq!(column_index.len(), 1);
        assert_eq!(column_index[0].len(), 1);
        let col_idx = if let ColumnIndexMetaData::INT32(index) = &column_index[0][0] {
            assert_eq!(index.num_pages(), 1);
            index
        } else {
            unreachable!()
        };

        check_def_hist(col_idx.definition_level_histogram(0).unwrap());
        check_rep_hist(col_idx.repetition_level_histogram(0).unwrap());

        // no unencoded byte sizes in the offset index for a non-BYTE_ARRAY column
        assert!(reader.metadata().offset_index().is_some());
        let offset_index = reader.metadata().offset_index().unwrap();
        assert_eq!(offset_index.len(), 1);
        assert_eq!(offset_index[0].len(), 1);
        assert!(offset_index[0][0].unencoded_byte_array_data_bytes.is_none());
    }
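
    // Round trip parquet-testing's byte_stream_split_extended file: re-encode
    // the *_byte_stream_split columns with BYTE_STREAM_SPLIT and check each
    // against its plain twin column holding the same values.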
    #[test]
    #[cfg(feature = "arrow")]
    fn test_byte_stream_split_extended_roundtrip() {
        let path = format!(
            "{}/byte_stream_split_extended.gzip.parquet",
            arrow::util::test_util::parquet_test_data(),
        );
        let file = File::open(path).unwrap();

        let parquet_reader = ParquetRecordBatchReaderBuilder::try_new(file)
            .expect("parquet open")
            .build()
            .expect("parquet open");

        // rewrite the file, forcing BYTE_STREAM_SPLIT on the split columns
        let file = tempfile::tempfile().unwrap();
        let props = WriterProperties::builder()
            .set_dictionary_enabled(false)
            .set_column_encoding(
                ColumnPath::from("float16_byte_stream_split"),
                Encoding::BYTE_STREAM_SPLIT,
            )
            .set_column_encoding(
                ColumnPath::from("float_byte_stream_split"),
                Encoding::BYTE_STREAM_SPLIT,
            )
            .set_column_encoding(
                ColumnPath::from("double_byte_stream_split"),
                Encoding::BYTE_STREAM_SPLIT,
            )
            .set_column_encoding(
                ColumnPath::from("int32_byte_stream_split"),
                Encoding::BYTE_STREAM_SPLIT,
            )
            .set_column_encoding(
                ColumnPath::from("int64_byte_stream_split"),
                Encoding::BYTE_STREAM_SPLIT,
            )
            .set_column_encoding(
                ColumnPath::from("flba5_byte_stream_split"),
                Encoding::BYTE_STREAM_SPLIT,
            )
            .set_column_encoding(
                ColumnPath::from("decimal_byte_stream_split"),
                Encoding::BYTE_STREAM_SPLIT,
            )
            .build();

        let mut parquet_writer = ArrowWriter::try_new(
            file.try_clone().expect("cannot open file"),
            parquet_reader.schema(),
            Some(props),
        )
        .expect("create arrow writer");

        for maybe_batch in parquet_reader {
            let batch = maybe_batch.expect("reading batch");
            parquet_writer.write(&batch).expect("writing data");
        }

        parquet_writer.close().expect("finalizing file");

        let reader = SerializedFileReader::new(file).expect("Failed to create reader");
        let filemeta = reader.metadata();

        let check_encoding = |x: usize, filemeta: &ParquetMetaData| {
            // the column must have been written with BYTE_STREAM_SPLIT
            assert!(
                filemeta
                    .row_group(0)
                    .column(x)
                    .encodings()
                    .collect::<Vec<_>>()
                    .contains(&Encoding::BYTE_STREAM_SPLIT)
            );
        };

        // the odd-numbered columns are the *_byte_stream_split variants
        check_encoding(1, filemeta);
        check_encoding(3, filemeta);
        check_encoding(5, filemeta);
        check_encoding(7, filemeta);
        check_encoding(9, filemeta);
        check_encoding(11, filemeta);
        check_encoding(13, filemeta);

        // check that the data matches the plain-encoded twin columns
        let mut iter = reader
            .get_row_iter(None)
            .expect("Failed to create row iterator");

        let mut start = 0;
        let end = reader.metadata().file_metadata().num_rows();

        let check_row = |row: Result<Row, ParquetError>| {
            assert!(row.is_ok());
            let r = row.unwrap();
            assert_eq!(r.get_float16(0).unwrap(), r.get_float16(1).unwrap());
            assert_eq!(r.get_float(2).unwrap(), r.get_float(3).unwrap());
            assert_eq!(r.get_double(4).unwrap(), r.get_double(5).unwrap());
            assert_eq!(r.get_int(6).unwrap(), r.get_int(7).unwrap());
            assert_eq!(r.get_long(8).unwrap(), r.get_long(9).unwrap());
            assert_eq!(r.get_bytes(10).unwrap(), r.get_bytes(11).unwrap());
            assert_eq!(r.get_decimal(12).unwrap(), r.get_decimal(13).unwrap());
        };

        while start < end {
            match iter.next() {
                Some(row) => check_row(row),
                None => break,
            };
            start += 1;
        }
    }
}