use crate::column::chunker::ContentDefinedChunker;

use bytes::Bytes;
use std::io::{Read, Write};
use std::iter::Peekable;
use std::slice::Iter;
use std::sync::{Arc, Mutex};
use std::vec::IntoIter;

use arrow_array::cast::AsArray;
use arrow_array::types::*;
use arrow_array::{ArrayRef, Int32Array, RecordBatch, RecordBatchWriter};
use arrow_schema::{
    ArrowError, DataType as ArrowDataType, Field, IntervalUnit, SchemaRef, TimeUnit,
};

use super::schema::{add_encoded_arrow_schema_to_metadata, decimal_length_from_precision};

use crate::arrow::ArrowSchemaConverter;
use crate::arrow::arrow_writer::byte_array::ByteArrayEncoder;
use crate::column::page::{CompressedPage, PageWriteSpec, PageWriter};
use crate::column::page_encryption::PageEncryptor;
use crate::column::writer::encoder::ColumnValueEncoder;
use crate::column::writer::{
    ColumnCloseResult, ColumnWriter, GenericColumnWriter, get_column_writer,
};
use crate::data_type::{ByteArray, FixedLenByteArray};
#[cfg(feature = "encryption")]
use crate::encryption::encrypt::FileEncryptor;
use crate::errors::{ParquetError, Result};
use crate::file::metadata::{KeyValue, ParquetMetaData, RowGroupMetaData};
use crate::file::properties::{WriterProperties, WriterPropertiesPtr};
use crate::file::reader::{ChunkReader, Length};
use crate::file::writer::{SerializedFileWriter, SerializedRowGroupWriter};
use crate::parquet_thrift::{ThriftCompactOutputProtocol, WriteThrift};
use crate::schema::types::{ColumnDescPtr, SchemaDescPtr, SchemaDescriptor};
use levels::{ArrayLevels, calculate_array_levels};

mod byte_array;
mod levels;

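/// Encodes [`RecordBatch`]es into the Parquet format and writes them to an
/// underlying [`Write`] sink.
///
/// A minimal usage sketch (illustrative; writes to an in-memory `Vec<u8>`
/// rather than a file):
///
/// ```no_run
/// # use std::sync::Arc;
/// # use arrow_array::{ArrayRef, Int64Array, RecordBatch};
/// # use parquet::arrow::arrow_writer::ArrowWriter;
/// let col = Arc::new(Int64Array::from_iter_values([1, 2, 3])) as ArrayRef;
/// let batch = RecordBatch::try_from_iter([("col", col)]).unwrap();
///
/// let mut buffer = Vec::new();
/// let mut writer = ArrowWriter::try_new(&mut buffer, batch.schema(), None).unwrap();
/// writer.write(&batch).unwrap();
/// // `close` flushes the in-progress row group and writes the file footer
/// writer.close().unwrap();
/// ```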
pub struct ArrowWriter<W: Write> {
    /// Underlying Parquet writer
    writer: SerializedFileWriter<W>,

    /// The in-progress row group, if any
    in_progress: Option<ArrowRowGroupWriter>,

    /// A copy of the Arrow schema being written
    arrow_schema: SchemaRef,

    /// Creates new [`ArrowRowGroupWriter`] instances as required
    row_group_writer_factory: ArrowRowGroupWriterFactory,

    /// The maximum number of rows in a row group
    max_row_group_row_count: Option<usize>,

    /// The maximum estimated size of a row group in bytes
    max_row_group_bytes: Option<usize>,

    /// One content-defined chunker per leaf column, if enabled
    cdc_chunkers: Option<Vec<ContentDefinedChunker>>,
}

impl<W: Write + Send> std::fmt::Debug for ArrowWriter<W> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let buffered_memory = self.in_progress_size();
        f.debug_struct("ArrowWriter")
            .field("writer", &self.writer)
            .field("in_progress_size", &format_args!("{buffered_memory} bytes"))
            .field("in_progress_rows", &self.in_progress_rows())
            .field("arrow_schema", &self.arrow_schema)
            .field("max_row_group_row_count", &self.max_row_group_row_count)
            .field("max_row_group_bytes", &self.max_row_group_bytes)
            .finish()
    }
}

impl<W: Write + Send> ArrowWriter<W> {
    /// Try to create a new Arrow writer with default options
    pub fn try_new(
        writer: W,
        arrow_schema: SchemaRef,
        props: Option<WriterProperties>,
    ) -> Result<Self> {
        let options = ArrowWriterOptions::new().with_properties(props.unwrap_or_default());
        Self::try_new_with_options(writer, arrow_schema, options)
    }

    /// Try to create a new Arrow writer with the provided [`ArrowWriterOptions`]
    pub fn try_new_with_options(
        writer: W,
        arrow_schema: SchemaRef,
        options: ArrowWriterOptions,
    ) -> Result<Self> {
        let mut props = options.properties;

        let schema = if let Some(parquet_schema) = options.schema_descr {
            parquet_schema.clone()
        } else {
            let mut converter = ArrowSchemaConverter::new().with_coerce_types(props.coerce_types());
            if let Some(schema_root) = &options.schema_root {
                converter = converter.schema_root(schema_root);
            }

            converter.convert(&arrow_schema)?
        };

        if !options.skip_arrow_metadata {
            // add serialized arrow schema to the file metadata
            add_encoded_arrow_schema_to_metadata(&arrow_schema, &mut props);
        }

        let max_row_group_row_count = props.max_row_group_row_count();
        let max_row_group_bytes = props.max_row_group_bytes();

        let props_ptr = Arc::new(props);
        let file_writer =
            SerializedFileWriter::new(writer, schema.root_schema_ptr(), Arc::clone(&props_ptr))?;

        let row_group_writer_factory =
            ArrowRowGroupWriterFactory::new(&file_writer, arrow_schema.clone());

        let cdc_chunkers = props_ptr
            .content_defined_chunking()
            .map(|opts| {
                file_writer
                    .schema_descr()
                    .columns()
                    .iter()
                    .map(|desc| ContentDefinedChunker::new(desc, opts))
                    .collect::<Result<Vec<_>>>()
            })
            .transpose()?;

        Ok(Self {
            writer: file_writer,
            in_progress: None,
            arrow_schema,
            row_group_writer_factory,
            max_row_group_row_count,
            max_row_group_bytes,
            cdc_chunkers,
        })
    }

    /// Returns metadata for any flushed row groups
    pub fn flushed_row_groups(&self) -> &[RowGroupMetaData] {
        self.writer.flushed_row_groups()
    }

    /// Estimated memory usage, in bytes, of the in-progress row group
    pub fn memory_size(&self) -> usize {
        match &self.in_progress {
            Some(in_progress) => in_progress.writers.iter().map(|x| x.memory_size()).sum(),
            None => 0,
        }
    }

    /// Anticipated encoded size of the in-progress row group
    pub fn in_progress_size(&self) -> usize {
        match &self.in_progress {
            Some(in_progress) => in_progress
                .writers
                .iter()
                .map(|x| x.get_estimated_total_bytes())
                .sum(),
            None => 0,
        }
    }

    /// Returns the number of rows buffered in the in-progress row group
    pub fn in_progress_rows(&self) -> usize {
        self.in_progress
            .as_ref()
            .map(|x| x.buffered_rows)
            .unwrap_or_default()
    }

    /// Returns the number of bytes written by this instance
    pub fn bytes_written(&self) -> usize {
        self.writer.bytes_written()
    }

    /// Encodes the provided [`RecordBatch`], splitting it across row groups
    /// as needed to respect the configured row count and byte size limits
    pub fn write(&mut self, batch: &RecordBatch) -> Result<()> {
        if batch.num_rows() == 0 {
            return Ok(());
        }

        let in_progress = match &mut self.in_progress {
            Some(in_progress) => in_progress,
            x => x.insert(
                self.row_group_writer_factory
                    .create_row_group_writer(self.writer.flushed_row_groups().len())?,
            ),
        };

        // If this batch would push the row group past the row count limit,
        // split it and write the two halves recursively
        if let Some(max_rows) = self.max_row_group_row_count {
            if in_progress.buffered_rows + batch.num_rows() > max_rows {
                let to_write = max_rows - in_progress.buffered_rows;
                let a = batch.slice(0, to_write);
                let b = batch.slice(to_write, batch.num_rows() - to_write);
                self.write(&a)?;
                return self.write(&b);
            }
        }

        // If a byte limit is configured, estimate from the average encoded row
        // size so far how many of the incoming rows still fit, and split there
        if let Some(max_bytes) = self.max_row_group_bytes {
            if in_progress.buffered_rows > 0 {
                let current_bytes = in_progress.get_estimated_total_bytes();

                if current_bytes >= max_bytes {
                    self.flush()?;
                    return self.write(batch);
                }

                let avg_row_bytes = current_bytes / in_progress.buffered_rows;
                if avg_row_bytes > 0 {
                    let remaining_bytes = max_bytes - current_bytes;
                    let rows_that_fit = remaining_bytes / avg_row_bytes;

                    if batch.num_rows() > rows_that_fit {
                        if rows_that_fit > 0 {
                            let a = batch.slice(0, rows_that_fit);
                            let b = batch.slice(rows_that_fit, batch.num_rows() - rows_that_fit);
                            self.write(&a)?;
                            return self.write(&b);
                        } else {
                            self.flush()?;
                            return self.write(batch);
                        }
                    }
                }
            }
        }

        match self.cdc_chunkers.as_mut() {
            Some(chunkers) => in_progress.write_with_chunkers(batch, chunkers)?,
            None => in_progress.write(batch)?,
        }

        let should_flush = self
            .max_row_group_row_count
            .is_some_and(|max| in_progress.buffered_rows >= max)
            || self
                .max_row_group_bytes
                .is_some_and(|max| in_progress.get_estimated_total_bytes() >= max);

        if should_flush {
            self.flush()?
        }
        Ok(())
    }

    /// Writes the given buffer directly via the underlying writer
    pub fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> {
        self.writer.write_all(buf)
    }

    /// Flushes the underlying writer
    pub fn sync(&mut self) -> std::io::Result<()> {
        self.writer.flush()
    }

    /// Flushes all buffered rows into a new row group
    pub fn flush(&mut self) -> Result<()> {
        let in_progress = match self.in_progress.take() {
            Some(in_progress) => in_progress,
            None => return Ok(()),
        };

        let mut row_group_writer = self.writer.next_row_group()?;
        for chunk in in_progress.close()? {
            chunk.append_to_row_group(&mut row_group_writer)?;
        }
        row_group_writer.close()?;
        Ok(())
    }

    /// Appends [`KeyValue`] metadata to be written when the file is closed
    pub fn append_key_value_metadata(&mut self, kv_metadata: KeyValue) {
        self.writer.append_key_value_metadata(kv_metadata)
    }

    /// Returns a reference to the underlying writer
    pub fn inner(&self) -> &W {
        self.writer.inner()
    }

    /// Returns a mutable reference to the underlying writer
    pub fn inner_mut(&mut self) -> &mut W {
        self.writer.inner_mut()
    }

    /// Flushes any outstanding data and returns the underlying writer
    pub fn into_inner(mut self) -> Result<W> {
        self.flush()?;
        self.writer.into_inner()
    }

    /// Flushes any outstanding data and writes the file footer, returning the
    /// file metadata; unlike [`Self::close`] this does not consume `self`
    pub fn finish(&mut self) -> Result<ParquetMetaData> {
        self.flush()?;
        self.writer.finish()
    }

    /// Flushes any outstanding data, writes the file footer, and consumes `self`
    pub fn close(mut self) -> Result<ParquetMetaData> {
        self.finish()
    }

    #[deprecated(
        since = "56.2.0",
        note = "Use `ArrowRowGroupWriterFactory` instead, see `ArrowColumnWriter` for an example"
    )]
    pub fn get_column_writers(&mut self) -> Result<Vec<ArrowColumnWriter>> {
        self.flush()?;
        let in_progress = self
            .row_group_writer_factory
            .create_row_group_writer(self.writer.flushed_row_groups().len())?;
        Ok(in_progress.writers)
    }

    #[deprecated(
        since = "56.2.0",
        note = "Use `SerializedFileWriter` directly instead, see `ArrowColumnWriter` for an example"
    )]
    pub fn append_row_group(&mut self, chunks: Vec<ArrowColumnChunk>) -> Result<()> {
        let mut row_group_writer = self.writer.next_row_group()?;
        for chunk in chunks {
            chunk.append_to_row_group(&mut row_group_writer)?;
        }
        row_group_writer.close()?;
        Ok(())
    }

    /// Converts this writer into a [`SerializedFileWriter`] and an
    /// [`ArrowRowGroupWriterFactory`], flushing any in-progress row group first
    pub fn into_serialized_writer(
        mut self,
    ) -> Result<(SerializedFileWriter<W>, ArrowRowGroupWriterFactory)> {
        self.flush()?;
        Ok((self.writer, self.row_group_writer_factory))
    }
}

impl<W: Write + Send> RecordBatchWriter for ArrowWriter<W> {
    fn write(&mut self, batch: &RecordBatch) -> Result<(), ArrowError> {
        self.write(batch).map_err(|e| e.into())
    }

    fn close(self) -> std::result::Result<(), ArrowError> {
        self.close()?;
        Ok(())
    }
}

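/// Builder-style options for configuring an [`ArrowWriter`], consumed by
/// [`ArrowWriter::try_new_with_options`]. An illustrative sketch:
///
/// ```no_run
/// # use parquet::arrow::arrow_writer::ArrowWriterOptions;
/// # use parquet::file::properties::WriterProperties;
/// let options = ArrowWriterOptions::new()
///     .with_properties(WriterProperties::builder().build())
///     .with_skip_arrow_metadata(true);
/// ```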
#[derive(Debug, Clone, Default)]
pub struct ArrowWriterOptions {
    properties: WriterProperties,
    skip_arrow_metadata: bool,
    schema_root: Option<String>,
    schema_descr: Option<SchemaDescriptor>,
}

impl ArrowWriterOptions {
    /// Creates a new [`ArrowWriterOptions`] with the default settings
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the [`WriterProperties`] for writing
    pub fn with_properties(self, properties: WriterProperties) -> Self {
        Self { properties, ..self }
    }

    /// Skip encoding the Arrow schema in the file metadata (defaults to `false`)
    pub fn with_skip_arrow_metadata(self, skip_arrow_metadata: bool) -> Self {
        Self {
            skip_arrow_metadata,
            ..self
        }
    }

    /// Overrides the name of the root parquet schema element
    pub fn with_schema_root(self, schema_root: String) -> Self {
        Self {
            schema_root: Some(schema_root),
            ..self
        }
    }

    /// Uses the provided [`SchemaDescriptor`] instead of converting the Arrow schema
    pub fn with_parquet_schema(self, schema_descr: SchemaDescriptor) -> Self {
        Self {
            schema_descr: Some(schema_descr),
            ..self
        }
    }
}

/// The in-memory data for a single column chunk
#[derive(Default)]
struct ArrowColumnChunkData {
    length: usize,
    data: Vec<Bytes>,
}

impl Length for ArrowColumnChunkData {
    fn len(&self) -> u64 {
        self.length as _
    }
}

impl ChunkReader for ArrowColumnChunkData {
    type T = ArrowColumnChunkReader;

    fn get_read(&self, start: u64) -> Result<Self::T> {
        assert_eq!(start, 0); // The chunk is always read from the beginning
        Ok(ArrowColumnChunkReader(
            self.data.clone().into_iter().peekable(),
        ))
    }

    fn get_bytes(&self, _start: u64, _length: usize) -> Result<Bytes> {
        unimplemented!()
    }
}

/// A [`Read`] over an iterator of [`Bytes`]
struct ArrowColumnChunkReader(Peekable<IntoIter<Bytes>>);

impl Read for ArrowColumnChunkReader {
    fn read(&mut self, out: &mut [u8]) -> std::io::Result<usize> {
        let buffer = loop {
            match self.0.peek_mut() {
                Some(b) if b.is_empty() => {
                    self.0.next();
                    continue;
                }
                Some(b) => break b,
                None => return Ok(0),
            }
        };

        let len = buffer.len().min(out.len());
        let b = buffer.split_to(len);
        out[..len].copy_from_slice(&b);
        Ok(len)
    }
}

type SharedColumnChunk = Arc<Mutex<ArrowColumnChunkData>>;

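/// A [`PageWriter`] that appends compressed pages to an in-memory
/// [`SharedColumnChunk`] instead of a file; the buffered chunk is later
/// appended to a row group via [`ArrowColumnChunk::append_to_row_group`]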
#[derive(Default)]
struct ArrowPageWriter {
    buffer: SharedColumnChunk,
    #[cfg(feature = "encryption")]
    page_encryptor: Option<PageEncryptor>,
}

impl ArrowPageWriter {
    #[cfg(feature = "encryption")]
    pub fn with_encryptor(mut self, page_encryptor: Option<PageEncryptor>) -> Self {
        self.page_encryptor = page_encryptor;
        self
    }

    #[cfg(feature = "encryption")]
    fn page_encryptor_mut(&mut self) -> Option<&mut PageEncryptor> {
        self.page_encryptor.as_mut()
    }

    #[cfg(not(feature = "encryption"))]
    fn page_encryptor_mut(&mut self) -> Option<&mut PageEncryptor> {
        None
    }
}

impl PageWriter for ArrowPageWriter {
    fn write_page(&mut self, page: CompressedPage) -> Result<PageWriteSpec> {
        let page = match self.page_encryptor_mut() {
            Some(page_encryptor) => page_encryptor.encrypt_compressed_page(page)?,
            None => page,
        };

        let page_header = page.to_thrift_header()?;
        let header = {
            let mut header = Vec::with_capacity(1024);

            match self.page_encryptor_mut() {
                Some(page_encryptor) => {
                    page_encryptor.encrypt_page_header(&page_header, &mut header)?;
                    if page.compressed_page().is_data_page() {
                        page_encryptor.increment_page();
                    }
                }
                None => {
                    let mut protocol = ThriftCompactOutputProtocol::new(&mut header);
                    page_header.write_thrift(&mut protocol)?;
                }
            };

            Bytes::from(header)
        };

        let mut buf = self.buffer.try_lock().unwrap();

        let data = page.compressed_page().buffer().clone();
        let compressed_size = data.len() + header.len();

        let mut spec = PageWriteSpec::new();
        spec.page_type = page.page_type();
        spec.num_values = page.num_values();
        spec.uncompressed_size = page.uncompressed_size() + header.len();
        spec.offset = buf.length as u64;
        spec.compressed_size = compressed_size;
        spec.bytes_written = compressed_size as u64;

        buf.length += compressed_size;
        buf.data.push(header);
        buf.data.push(data);

        Ok(spec)
    }

    fn close(&mut self) -> Result<()> {
        Ok(())
    }
}

/// The data for a single leaf column, computed from a potentially nested [`ArrayRef`]
#[derive(Debug)]
pub struct ArrowLeafColumn(ArrayLevels);

/// Computes the [`ArrowLeafColumn`]s for a potentially nested [`Field`] and
/// its associated array
pub fn compute_leaves(field: &Field, array: &ArrayRef) -> Result<Vec<ArrowLeafColumn>> {
    let levels = calculate_array_levels(array, field)?;
    Ok(levels.into_iter().map(ArrowLeafColumn).collect())
}

/// The data for a single column chunk, produced by [`ArrowColumnWriter::close`]
pub struct ArrowColumnChunk {
    data: ArrowColumnChunkData,
    close: ColumnCloseResult,
}

impl std::fmt::Debug for ArrowColumnChunk {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ArrowColumnChunk")
            .field("length", &self.data.length)
            .finish_non_exhaustive()
    }
}

impl ArrowColumnChunk {
    /// Returns a reference to the [`ColumnCloseResult`] for this chunk
    pub fn close(&self) -> &ColumnCloseResult {
        &self.close
    }

    /// Returns a mutable reference to the [`ColumnCloseResult`] for this chunk
    pub fn close_mut(&mut self) -> &mut ColumnCloseResult {
        &mut self.close
    }

    /// Appends this column chunk to the given row group writer
    pub fn append_to_row_group<W: Write + Send>(
        self,
        writer: &mut SerializedRowGroupWriter<'_, W>,
    ) -> Result<()> {
        writer.append_column(&self.data, self.close)
    }
}

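/// Encodes [`ArrowLeafColumn`]s produced by [`compute_leaves`] into an
/// in-memory parquet column chunk.
///
/// A hedged sketch of the low-level workflow (error handling elided; the
/// `factory` and `batch` bindings are assumed to exist, see
/// [`ArrowRowGroupWriterFactory`]):
///
/// ```ignore
/// // One writer per parquet leaf column of row group 0
/// let mut writers = factory.create_column_writers(0)?;
/// let mut iter = writers.iter_mut();
/// // Feed each arrow column's leaves to the corresponding writer; this
/// // per-leaf split is what enables encoding columns on separate threads
/// for (field, column) in batch.schema().fields().iter().zip(batch.columns()) {
///     for leaf in compute_leaves(field.as_ref(), column)? {
///         iter.next().unwrap().write(&leaf)?;
///     }
/// }
/// // Closing each writer yields an ArrowColumnChunk for the row group
/// let chunks: Vec<ArrowColumnChunk> = writers
///     .into_iter()
///     .map(|w| w.close())
///     .collect::<Result<_>>()?;
/// ```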
pub struct ArrowColumnWriter {
    writer: ArrowColumnWriterImpl,
    chunk: SharedColumnChunk,
}

impl std::fmt::Debug for ArrowColumnWriter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ArrowColumnWriter").finish_non_exhaustive()
    }
}

enum ArrowColumnWriterImpl {
    ByteArray(GenericColumnWriter<'static, ByteArrayEncoder>),
    Column(ColumnWriter<'static>),
}

impl ArrowColumnWriter {
    /// Writes an [`ArrowLeafColumn`] to this writer
    pub fn write(&mut self, col: &ArrowLeafColumn) -> Result<()> {
        self.write_internal(&col.0)
    }

    /// Writes an [`ArrowLeafColumn`], splitting it into data pages at the
    /// boundaries determined by the provided content-defined chunker
    fn write_with_chunker(
        &mut self,
        col: &ArrowLeafColumn,
        chunker: &mut ContentDefinedChunker,
    ) -> Result<()> {
        let levels = &col.0;
        let chunks = chunker.get_arrow_chunks(
            levels.def_level_data().as_ref(),
            levels.rep_level_data().as_ref(),
            levels.array(),
        )?;

        let num_chunks = chunks.len();
        for (i, chunk) in chunks.iter().enumerate() {
            let chunk_levels = levels.slice_for_chunk(chunk);
            self.write_internal(&chunk_levels)?;

            // Start a new data page at each chunk boundary, except after the
            // final chunk, which may still grow as more data arrives
            if i + 1 < num_chunks {
                match &mut self.writer {
                    ArrowColumnWriterImpl::Column(c) => c.add_data_page()?,
                    ArrowColumnWriterImpl::ByteArray(c) => c.add_data_page()?,
                }
            }
        }
        Ok(())
    }

    fn write_internal(&mut self, levels: &ArrayLevels) -> Result<()> {
        match &mut self.writer {
            ArrowColumnWriterImpl::Column(c) => {
                let leaf = levels.array();
                match leaf.as_any_dictionary_opt() {
                    Some(dictionary) => {
                        // Materialize the dictionary values before writing
                        let materialized =
                            arrow_select::take::take(dictionary.values(), dictionary.keys(), None)?;
                        write_leaf(c, &materialized, levels)?
                    }
                    None => write_leaf(c, leaf, levels)?,
                };
            }
            ArrowColumnWriterImpl::ByteArray(c) => {
                write_primitive(c, levels.array().as_ref(), levels)?;
            }
        }
        Ok(())
    }

    /// Closes this writer, returning the encoded [`ArrowColumnChunk`]
    pub fn close(self) -> Result<ArrowColumnChunk> {
        let close = match self.writer {
            ArrowColumnWriterImpl::ByteArray(c) => c.close()?,
            ArrowColumnWriterImpl::Column(c) => c.close()?,
        };
        let chunk = Arc::try_unwrap(self.chunk).ok().unwrap();
        let data = chunk.into_inner().unwrap();
        Ok(ArrowColumnChunk { data, close })
    }

    /// Returns the estimated total memory usage of this column writer
    pub fn memory_size(&self) -> usize {
        match &self.writer {
            ArrowColumnWriterImpl::ByteArray(c) => c.memory_size(),
            ArrowColumnWriterImpl::Column(c) => c.memory_size(),
        }
    }

    /// Returns the estimated total encoded bytes for this column writer
    pub fn get_estimated_total_bytes(&self) -> usize {
        match &self.writer {
            ArrowColumnWriterImpl::ByteArray(c) => c.get_estimated_total_bytes() as _,
            ArrowColumnWriterImpl::Column(c) => c.get_estimated_total_bytes() as _,
        }
    }
}

/// Encodes [`RecordBatch`]es into an in-progress row group
#[derive(Debug)]
struct ArrowRowGroupWriter {
    writers: Vec<ArrowColumnWriter>,
    schema: SchemaRef,
    buffered_rows: usize,
}

impl ArrowRowGroupWriter {
    fn new(writers: Vec<ArrowColumnWriter>, arrow: &SchemaRef) -> Self {
        Self {
            writers,
            schema: arrow.clone(),
            buffered_rows: 0,
        }
    }

    fn write(&mut self, batch: &RecordBatch) -> Result<()> {
        self.buffered_rows += batch.num_rows();
        let mut writers = self.writers.iter_mut();
        for (field, column) in self.schema.fields().iter().zip(batch.columns()) {
            for leaf in compute_leaves(field.as_ref(), column)? {
                writers.next().unwrap().write(&leaf)?;
            }
        }
        Ok(())
    }

    fn write_with_chunkers(
        &mut self,
        batch: &RecordBatch,
        chunkers: &mut [ContentDefinedChunker],
    ) -> Result<()> {
        self.buffered_rows += batch.num_rows();
        let mut writers = self.writers.iter_mut();
        let mut chunkers = chunkers.iter_mut();
        for (field, column) in self.schema.fields().iter().zip(batch.columns()) {
            for leaf in compute_leaves(field.as_ref(), column)? {
                writers
                    .next()
                    .unwrap()
                    .write_with_chunker(&leaf, chunkers.next().unwrap())?;
            }
        }
        Ok(())
    }

    fn get_estimated_total_bytes(&self) -> usize {
        self.writers
            .iter()
            .map(|x| x.get_estimated_total_bytes())
            .sum()
    }

    fn close(self) -> Result<Vec<ArrowColumnChunk>> {
        self.writers
            .into_iter()
            .map(|writer| writer.close())
            .collect()
    }
}

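/// Creates [`ArrowColumnWriter`]s for the row groups of a new parquet file.
///
/// An illustrative sketch of the intended flow, assuming `arrow_writer` is an
/// existing [`ArrowWriter`]:
///
/// ```ignore
/// let (mut file_writer, factory) = arrow_writer.into_serialized_writer()?;
/// let writers = factory.create_column_writers(0)?;
/// // ... encode leaves with the writers, possibly in parallel ...
/// let mut row_group = file_writer.next_row_group()?;
/// for writer in writers {
///     writer.close()?.append_to_row_group(&mut row_group)?;
/// }
/// row_group.close()?;
/// ```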
#[derive(Debug)]
pub struct ArrowRowGroupWriterFactory {
    schema: SchemaDescPtr,
    arrow_schema: SchemaRef,
    props: WriterPropertiesPtr,
    #[cfg(feature = "encryption")]
    file_encryptor: Option<Arc<FileEncryptor>>,
}

impl ArrowRowGroupWriterFactory {
    /// Creates a new [`ArrowRowGroupWriterFactory`] from the given file writer
    pub fn new<W: Write + Send>(
        file_writer: &SerializedFileWriter<W>,
        arrow_schema: SchemaRef,
    ) -> Self {
        let schema = Arc::clone(file_writer.schema_descr_ptr());
        let props = Arc::clone(file_writer.properties());
        Self {
            schema,
            arrow_schema,
            props,
            #[cfg(feature = "encryption")]
            file_encryptor: file_writer.file_encryptor(),
        }
    }

    fn create_row_group_writer(&self, row_group_index: usize) -> Result<ArrowRowGroupWriter> {
        let writers = self.create_column_writers(row_group_index)?;
        Ok(ArrowRowGroupWriter::new(writers, &self.arrow_schema))
    }

    /// Creates the column writers for the row group with the given index
    pub fn create_column_writers(&self, row_group_index: usize) -> Result<Vec<ArrowColumnWriter>> {
        let mut writers = Vec::with_capacity(self.arrow_schema.fields.len());
        let mut leaves = self.schema.columns().iter();
        let column_factory = self.column_writer_factory(row_group_index);
        for field in &self.arrow_schema.fields {
            column_factory.get_arrow_column_writer(
                field.data_type(),
                &self.props,
                &mut leaves,
                &mut writers,
            )?;
        }
        Ok(writers)
    }

    #[cfg(feature = "encryption")]
    fn column_writer_factory(&self, row_group_idx: usize) -> ArrowColumnWriterFactory {
        ArrowColumnWriterFactory::new()
            .with_file_encryptor(row_group_idx, self.file_encryptor.clone())
    }

    #[cfg(not(feature = "encryption"))]
    fn column_writer_factory(&self, _row_group_idx: usize) -> ArrowColumnWriterFactory {
        ArrowColumnWriterFactory::new()
    }
}

#[deprecated(since = "57.0.0", note = "Use `ArrowRowGroupWriterFactory` instead")]
pub fn get_column_writers(
    parquet: &SchemaDescriptor,
    props: &WriterPropertiesPtr,
    arrow: &SchemaRef,
) -> Result<Vec<ArrowColumnWriter>> {
    let mut writers = Vec::with_capacity(arrow.fields.len());
    let mut leaves = parquet.columns().iter();
    let column_factory = ArrowColumnWriterFactory::new();
    for field in &arrow.fields {
        column_factory.get_arrow_column_writer(
            field.data_type(),
            props,
            &mut leaves,
            &mut writers,
        )?;
    }
    Ok(writers)
}

/// Creates [`ArrowColumnWriter`]s for the leaf columns of a parquet schema
struct ArrowColumnWriterFactory {
    #[cfg(feature = "encryption")]
    row_group_index: usize,
    #[cfg(feature = "encryption")]
    file_encryptor: Option<Arc<FileEncryptor>>,
}

impl ArrowColumnWriterFactory {
    pub fn new() -> Self {
        Self {
            #[cfg(feature = "encryption")]
            row_group_index: 0,
            #[cfg(feature = "encryption")]
            file_encryptor: None,
        }
    }

    #[cfg(feature = "encryption")]
    pub fn with_file_encryptor(
        mut self,
        row_group_index: usize,
        file_encryptor: Option<Arc<FileEncryptor>>,
    ) -> Self {
        self.row_group_index = row_group_index;
        self.file_encryptor = file_encryptor;
        self
    }

    #[cfg(feature = "encryption")]
    fn create_page_writer(
        &self,
        column_descriptor: &ColumnDescPtr,
        column_index: usize,
    ) -> Result<Box<ArrowPageWriter>> {
        let column_path = column_descriptor.path().string();
        let page_encryptor = PageEncryptor::create_if_column_encrypted(
            &self.file_encryptor,
            self.row_group_index,
            column_index,
            &column_path,
        )?;
        Ok(Box::new(
            ArrowPageWriter::default().with_encryptor(page_encryptor),
        ))
    }

    #[cfg(not(feature = "encryption"))]
    fn create_page_writer(
        &self,
        _column_descriptor: &ColumnDescPtr,
        _column_index: usize,
    ) -> Result<Box<ArrowPageWriter>> {
        Ok(Box::<ArrowPageWriter>::default())
    }

    /// Gets the [`ArrowColumnWriter`]s for the given `data_type`, consuming
    /// the corresponding leaf columns from `leaves`
    fn get_arrow_column_writer(
        &self,
        data_type: &ArrowDataType,
        props: &WriterPropertiesPtr,
        leaves: &mut Iter<'_, ColumnDescPtr>,
        out: &mut Vec<ArrowColumnWriter>,
    ) -> Result<()> {
        // Instantiates a writer for a primitive column, buffering pages in memory
        let col = |desc: &ColumnDescPtr| -> Result<ArrowColumnWriter> {
            let page_writer = self.create_page_writer(desc, out.len())?;
            let chunk = page_writer.buffer.clone();
            let writer = get_column_writer(desc.clone(), props.clone(), page_writer);
            Ok(ArrowColumnWriter {
                chunk,
                writer: ArrowColumnWriterImpl::Column(writer),
            })
        };

        // Instantiates a writer using the specialized byte array encoder
        let bytes = |desc: &ColumnDescPtr| -> Result<ArrowColumnWriter> {
            let page_writer = self.create_page_writer(desc, out.len())?;
            let chunk = page_writer.buffer.clone();
            let writer = GenericColumnWriter::new(desc.clone(), props.clone(), page_writer);
            Ok(ArrowColumnWriter {
                chunk,
                writer: ArrowColumnWriterImpl::ByteArray(writer),
            })
        };

        match data_type {
            _ if data_type.is_primitive() => out.push(col(leaves.next().unwrap())?),
            ArrowDataType::FixedSizeBinary(_) | ArrowDataType::Boolean | ArrowDataType::Null => {
                out.push(col(leaves.next().unwrap())?)
            }
            ArrowDataType::LargeBinary
            | ArrowDataType::Binary
            | ArrowDataType::Utf8
            | ArrowDataType::LargeUtf8
            | ArrowDataType::BinaryView
            | ArrowDataType::Utf8View => out.push(bytes(leaves.next().unwrap())?),
            ArrowDataType::List(f)
            | ArrowDataType::LargeList(f)
            | ArrowDataType::FixedSizeList(f, _)
            | ArrowDataType::ListView(f)
            | ArrowDataType::LargeListView(f) => {
                self.get_arrow_column_writer(f.data_type(), props, leaves, out)?
            }
            ArrowDataType::Struct(fields) => {
                for field in fields {
                    self.get_arrow_column_writer(field.data_type(), props, leaves, out)?
                }
            }
            ArrowDataType::Map(f, _) => match f.data_type() {
                ArrowDataType::Struct(f) => {
                    self.get_arrow_column_writer(f[0].data_type(), props, leaves, out)?;
                    self.get_arrow_column_writer(f[1].data_type(), props, leaves, out)?
                }
                _ => unreachable!("invalid map type"),
            },
            ArrowDataType::Dictionary(_, value_type) => match value_type.as_ref() {
                ArrowDataType::Utf8
                | ArrowDataType::LargeUtf8
                | ArrowDataType::Binary
                | ArrowDataType::LargeBinary => out.push(bytes(leaves.next().unwrap())?),
                ArrowDataType::Utf8View | ArrowDataType::BinaryView => {
                    out.push(bytes(leaves.next().unwrap())?)
                }
                ArrowDataType::FixedSizeBinary(_) => out.push(bytes(leaves.next().unwrap())?),
                _ => out.push(col(leaves.next().unwrap())?),
            },
            _ => {
                return Err(ParquetError::NYI(format!(
                    "Attempting to write an Arrow type {data_type} to parquet that is not yet implemented"
                )));
            }
        }
        Ok(())
    }
}

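// Dispatches a single arrow leaf array to the matching typed parquet column
// writer, coercing the arrow representation to the column's physical type
// where the schema conversion requires it (e.g. Int8 -> INT32, or small
// precision decimals -> INT32/INT64)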
fn write_leaf(
    writer: &mut ColumnWriter<'_>,
    column: &dyn arrow_array::Array,
    levels: &ArrayLevels,
) -> Result<usize> {
    let indices = levels.non_null_indices();

    match writer {
        ColumnWriter::Int32ColumnWriter(typed) => {
            match column.data_type() {
                ArrowDataType::Null => {
                    let array = Int32Array::new_null(column.len());
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Int8 => {
                    let array: Int32Array = column.as_primitive::<Int8Type>().unary(|x| x as i32);
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Int16 => {
                    let array: Int32Array = column.as_primitive::<Int16Type>().unary(|x| x as i32);
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Int32 => {
                    write_primitive(typed, column.as_primitive::<Int32Type>().values(), levels)
                }
                ArrowDataType::UInt8 => {
                    let array: Int32Array = column.as_primitive::<UInt8Type>().unary(|x| x as i32);
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::UInt16 => {
                    let array: Int32Array = column.as_primitive::<UInt16Type>().unary(|x| x as i32);
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::UInt32 => {
                    // Reinterpret the u32 bits as i32 (e.g. u32::MAX becomes -1);
                    // the unsigned logical type preserves the intended interpretation
                    let array = column.as_primitive::<UInt32Type>();
                    write_primitive(typed, array.values().inner().typed_data(), levels)
                }
                ArrowDataType::Date32 => {
                    let array = column.as_primitive::<Date32Type>();
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Time32(TimeUnit::Second) => {
                    let array = column.as_primitive::<Time32SecondType>();
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Time32(TimeUnit::Millisecond) => {
                    let array = column.as_primitive::<Time32MillisecondType>();
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Date64 => {
                    // Convert Date64 milliseconds to days for a DATE (INT32) column
                    let array: Int32Array = column
                        .as_primitive::<Date64Type>()
                        .unary(|x| (x / 86_400_000) as _);

                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Decimal32(_, _) => {
                    let array = column
                        .as_primitive::<Decimal32Type>()
                        .unary::<_, Int32Type>(|v| v);
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Decimal64(_, _) => {
                    // Narrowing is safe here: the schema conversion only maps
                    // sufficiently small precisions to INT32
                    let array = column
                        .as_primitive::<Decimal64Type>()
                        .unary::<_, Int32Type>(|v| v as i32);
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Decimal128(_, _) => {
                    let array = column
                        .as_primitive::<Decimal128Type>()
                        .unary::<_, Int32Type>(|v| v as i32);
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Decimal256(_, _) => {
                    let array = column
                        .as_primitive::<Decimal256Type>()
                        .unary::<_, Int32Type>(|v| v.as_i128() as i32);
                    write_primitive(typed, array.values(), levels)
                }
                d => Err(ParquetError::General(format!("Cannot coerce {d} to I32"))),
            }
        }
        ColumnWriter::BoolColumnWriter(typed) => {
            let array = column.as_boolean();
            let values = get_bool_array_slice(array, indices);
            typed.write_batch_internal(
                values.as_slice(),
                None,
                levels.def_level_data().as_ref(),
                levels.rep_level_data().as_ref(),
                None,
                None,
                None,
            )
        }
        ColumnWriter::Int64ColumnWriter(typed) => {
            match column.data_type() {
                ArrowDataType::Date64 => {
                    let array = column
                        .as_primitive::<Date64Type>()
                        .reinterpret_cast::<Int64Type>();

                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Int64 => {
                    let array = column.as_primitive::<Int64Type>();
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::UInt64 => {
                    // Reinterpret the u64 bits as i64; the unsigned logical type
                    // preserves the intended interpretation
                    let values = column.as_primitive::<UInt64Type>().values();
                    let array = values.inner().typed_data::<i64>();
                    write_primitive(typed, array, levels)
                }
                ArrowDataType::Time64(TimeUnit::Microsecond) => {
                    let array = column.as_primitive::<Time64MicrosecondType>();
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Time64(TimeUnit::Nanosecond) => {
                    let array = column.as_primitive::<Time64NanosecondType>();
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Timestamp(unit, _) => match unit {
                    TimeUnit::Second => {
                        let array = column.as_primitive::<TimestampSecondType>();
                        write_primitive(typed, array.values(), levels)
                    }
                    TimeUnit::Millisecond => {
                        let array = column.as_primitive::<TimestampMillisecondType>();
                        write_primitive(typed, array.values(), levels)
                    }
                    TimeUnit::Microsecond => {
                        let array = column.as_primitive::<TimestampMicrosecondType>();
                        write_primitive(typed, array.values(), levels)
                    }
                    TimeUnit::Nanosecond => {
                        let array = column.as_primitive::<TimestampNanosecondType>();
                        write_primitive(typed, array.values(), levels)
                    }
                },
                ArrowDataType::Duration(unit) => match unit {
                    TimeUnit::Second => {
                        let array = column.as_primitive::<DurationSecondType>();
                        write_primitive(typed, array.values(), levels)
                    }
                    TimeUnit::Millisecond => {
                        let array = column.as_primitive::<DurationMillisecondType>();
                        write_primitive(typed, array.values(), levels)
                    }
                    TimeUnit::Microsecond => {
                        let array = column.as_primitive::<DurationMicrosecondType>();
                        write_primitive(typed, array.values(), levels)
                    }
                    TimeUnit::Nanosecond => {
                        let array = column.as_primitive::<DurationNanosecondType>();
                        write_primitive(typed, array.values(), levels)
                    }
                },
                ArrowDataType::Decimal64(_, _) => {
                    let array = column
                        .as_primitive::<Decimal64Type>()
                        .reinterpret_cast::<Int64Type>();
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Decimal128(_, _) => {
                    let array = column
                        .as_primitive::<Decimal128Type>()
                        .unary::<_, Int64Type>(|v| v as i64);
                    write_primitive(typed, array.values(), levels)
                }
                ArrowDataType::Decimal256(_, _) => {
                    let array = column
                        .as_primitive::<Decimal256Type>()
                        .unary::<_, Int64Type>(|v| v.as_i128() as i64);
                    write_primitive(typed, array.values(), levels)
                }
                d => Err(ParquetError::General(format!("Cannot coerce {d} to I64"))),
            }
        }
        ColumnWriter::Int96ColumnWriter(_typed) => {
            unreachable!("Currently unreachable because data type not supported")
        }
        ColumnWriter::FloatColumnWriter(typed) => {
            let array = column.as_primitive::<Float32Type>();
            write_primitive(typed, array.values(), levels)
        }
        ColumnWriter::DoubleColumnWriter(typed) => {
            let array = column.as_primitive::<Float64Type>();
            write_primitive(typed, array.values(), levels)
        }
        ColumnWriter::ByteArrayColumnWriter(_) => {
            unreachable!("should use ByteArrayWriter")
        }
        ColumnWriter::FixedLenByteArrayColumnWriter(typed) => {
            let bytes = match column.data_type() {
                ArrowDataType::Interval(interval_unit) => match interval_unit {
                    IntervalUnit::YearMonth => {
                        let array = column.as_primitive::<IntervalYearMonthType>();
                        get_interval_ym_array_slice(array, indices)
                    }
                    IntervalUnit::DayTime => {
                        let array = column.as_primitive::<IntervalDayTimeType>();
                        get_interval_dt_array_slice(array, indices)
                    }
                    _ => {
                        return Err(ParquetError::NYI(format!(
                            "Attempting to write an Arrow interval type {interval_unit:?} to parquet that is not yet implemented"
                        )));
                    }
                },
                ArrowDataType::FixedSizeBinary(_) => {
                    let array = column.as_fixed_size_binary();
                    get_fsb_array_slice(array, indices)
                }
                ArrowDataType::Decimal32(_, _) => {
                    let array = column.as_primitive::<Decimal32Type>();
                    get_decimal_32_array_slice(array, indices)
                }
                ArrowDataType::Decimal64(_, _) => {
                    let array = column.as_primitive::<Decimal64Type>();
                    get_decimal_64_array_slice(array, indices)
                }
                ArrowDataType::Decimal128(_, _) => {
                    let array = column.as_primitive::<Decimal128Type>();
                    get_decimal_128_array_slice(array, indices)
                }
                ArrowDataType::Decimal256(_, _) => {
                    let array = column.as_primitive::<Decimal256Type>();
                    get_decimal_256_array_slice(array, indices)
                }
                ArrowDataType::Float16 => {
                    let array = column.as_primitive::<Float16Type>();
                    get_float_16_array_slice(array, indices)
                }
                _ => {
                    return Err(ParquetError::NYI(
                        "Attempting to write an Arrow type that is not yet implemented".to_string(),
                    ));
                }
            };
            typed.write_batch_internal(
                bytes.as_slice(),
                None,
                levels.def_level_data().as_ref(),
                levels.rep_level_data().as_ref(),
                None,
                None,
                None,
            )
        }
    }
}

fn write_primitive<E: ColumnValueEncoder>(
    writer: &mut GenericColumnWriter<E>,
    values: &E::Values,
    levels: &ArrayLevels,
) -> Result<usize> {
    writer.write_batch_internal(
        values,
        Some(levels.non_null_indices()),
        levels.def_level_data().as_ref(),
        levels.rep_level_data().as_ref(),
        None,
        None,
        None,
    )
}

fn get_bool_array_slice(array: &arrow_array::BooleanArray, indices: &[usize]) -> Vec<bool> {
    let mut values = Vec::with_capacity(indices.len());
    for i in indices {
        values.push(array.value(*i))
    }
    values
}

/// Returns 12-byte values representing 3 values of months, days and milliseconds (4-bytes each).
/// An Arrow YearMonth interval only stores months, thus only the first 4 bytes are populated.
fn get_interval_ym_array_slice(
    array: &arrow_array::IntervalYearMonthArray,
    indices: &[usize],
) -> Vec<FixedLenByteArray> {
    let mut values = Vec::with_capacity(indices.len());
    for i in indices {
        let mut value = array.value(*i).to_le_bytes().to_vec();
        let mut suffix = vec![0; 8];
        value.append(&mut suffix);
        values.push(FixedLenByteArray::from(ByteArray::from(value)))
    }
    values
}

/// Returns 12-byte values representing 3 values of months, days and milliseconds (4-bytes each).
/// An Arrow DayTime interval only stores days and milliseconds, thus the first 4 bytes
/// (months) are left as zero.
fn get_interval_dt_array_slice(
    array: &arrow_array::IntervalDayTimeArray,
    indices: &[usize],
) -> Vec<FixedLenByteArray> {
    let mut values = Vec::with_capacity(indices.len());
    for i in indices {
        let mut out = [0; 12];
        let value = array.value(*i);
        out[4..8].copy_from_slice(&value.days.to_le_bytes());
        out[8..12].copy_from_slice(&value.milliseconds.to_le_bytes());
        values.push(FixedLenByteArray::from(ByteArray::from(out.to_vec())));
    }
    values
}

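// Parquet encodes FIXED_LEN_BYTE_ARRAY decimals as big-endian two's-complement
// values truncated to the minimal byte width for the declared precision. For
// example, precision 5 fits in 3 bytes, so only the 3 low-order bytes of an
// i32's 4 big-endian bytes are kept below.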
fn get_decimal_32_array_slice(
    array: &arrow_array::Decimal32Array,
    indices: &[usize],
) -> Vec<FixedLenByteArray> {
    let mut values = Vec::with_capacity(indices.len());
    let size = decimal_length_from_precision(array.precision());
    for i in indices {
        let as_be_bytes = array.value(*i).to_be_bytes();
        let resized_value = as_be_bytes[(4 - size)..].to_vec();
        values.push(FixedLenByteArray::from(ByteArray::from(resized_value)));
    }
    values
}

fn get_decimal_64_array_slice(
    array: &arrow_array::Decimal64Array,
    indices: &[usize],
) -> Vec<FixedLenByteArray> {
    let mut values = Vec::with_capacity(indices.len());
    let size = decimal_length_from_precision(array.precision());
    for i in indices {
        let as_be_bytes = array.value(*i).to_be_bytes();
        let resized_value = as_be_bytes[(8 - size)..].to_vec();
        values.push(FixedLenByteArray::from(ByteArray::from(resized_value)));
    }
    values
}

fn get_decimal_128_array_slice(
    array: &arrow_array::Decimal128Array,
    indices: &[usize],
) -> Vec<FixedLenByteArray> {
    let mut values = Vec::with_capacity(indices.len());
    let size = decimal_length_from_precision(array.precision());
    for i in indices {
        let as_be_bytes = array.value(*i).to_be_bytes();
        let resized_value = as_be_bytes[(16 - size)..].to_vec();
        values.push(FixedLenByteArray::from(ByteArray::from(resized_value)));
    }
    values
}

fn get_decimal_256_array_slice(
    array: &arrow_array::Decimal256Array,
    indices: &[usize],
) -> Vec<FixedLenByteArray> {
    let mut values = Vec::with_capacity(indices.len());
    let size = decimal_length_from_precision(array.precision());
    for i in indices {
        let as_be_bytes = array.value(*i).to_be_bytes();
        let resized_value = as_be_bytes[(32 - size)..].to_vec();
        values.push(FixedLenByteArray::from(ByteArray::from(resized_value)));
    }
    values
}

fn get_float_16_array_slice(
    array: &arrow_array::Float16Array,
    indices: &[usize],
) -> Vec<FixedLenByteArray> {
    let mut values = Vec::with_capacity(indices.len());
    for i in indices {
        let value = array.value(*i).to_le_bytes().to_vec();
        values.push(FixedLenByteArray::from(ByteArray::from(value)));
    }
    values
}

fn get_fsb_array_slice(
    array: &arrow_array::FixedSizeBinaryArray,
    indices: &[usize],
) -> Vec<FixedLenByteArray> {
    let mut values = Vec::with_capacity(indices.len());
    for i in indices {
        let value = array.value(*i).to_vec();
        values.push(FixedLenByteArray::from(ByteArray::from(value)))
    }
    values
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;

    use std::fs::File;

    use crate::arrow::arrow_reader::{ParquetRecordBatchReader, ParquetRecordBatchReaderBuilder};
    use crate::arrow::{ARROW_SCHEMA_META_KEY, PARQUET_FIELD_ID_META_KEY};
    use crate::column::page::{Page, PageReader};
    use crate::file::metadata::thrift::PageHeader;
    use crate::file::page_index::column_index::ColumnIndexMetaData;
    use crate::file::reader::SerializedPageReader;
    use crate::parquet_thrift::{ReadThrift, ThriftSliceInputProtocol};
    use crate::schema::types::ColumnPath;
    use arrow::datatypes::ToByteSlice;
    use arrow::datatypes::{DataType, Schema};
    use arrow::error::Result as ArrowResult;
    use arrow::util::data_gen::create_random_array;
    use arrow::util::pretty::pretty_format_batches;
    use arrow::{array::*, buffer::Buffer};
    use arrow_buffer::{IntervalDayTime, IntervalMonthDayNano, NullBuffer, OffsetBuffer, i256};
    use arrow_schema::Fields;
    use half::f16;
    use num_traits::{FromPrimitive, ToPrimitive};
    use tempfile::tempfile;

    use crate::basic::Encoding;
    use crate::data_type::AsBytes;
    use crate::file::metadata::{ColumnChunkMetaData, ParquetMetaData, ParquetMetaDataReader};
    use crate::file::properties::{
        BloomFilterPosition, EnabledStatistics, ReaderProperties, WriterVersion,
    };
    use crate::file::serialized_reader::ReadOptionsBuilder;
    use crate::file::{
        reader::{FileReader, SerializedFileReader},
        statistics::Statistics,
    };

    #[test]
    fn arrow_writer() {
        let schema = Schema::new(vec![
            Field::new("a", DataType::Int32, false),
            Field::new("b", DataType::Int32, true),
        ]);

        let a = Int32Array::from(vec![1, 2, 3, 4, 5]);
        let b = Int32Array::from(vec![Some(1), None, None, Some(4), Some(5)]);

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a), Arc::new(b)]).unwrap();

        roundtrip(batch, Some(SMALL_SIZE / 2));
    }

    fn get_bytes_after_close(schema: SchemaRef, expected_batch: &RecordBatch) -> Vec<u8> {
        let mut buffer = vec![];

        let mut writer = ArrowWriter::try_new(&mut buffer, schema, None).unwrap();
        writer.write(expected_batch).unwrap();
        writer.close().unwrap();

        buffer
    }

    fn get_bytes_by_into_inner(schema: SchemaRef, expected_batch: &RecordBatch) -> Vec<u8> {
        let mut writer = ArrowWriter::try_new(Vec::new(), schema, None).unwrap();
        writer.write(expected_batch).unwrap();
        writer.into_inner().unwrap()
    }

    #[test]
    fn roundtrip_bytes() {
        let schema = Arc::new(Schema::new(vec![
            Field::new("a", DataType::Int32, false),
            Field::new("b", DataType::Int32, true),
        ]));

        let a = Int32Array::from(vec![1, 2, 3, 4, 5]);
        let b = Int32Array::from(vec![Some(1), None, None, Some(4), Some(5)]);

        let expected_batch =
            RecordBatch::try_new(schema.clone(), vec![Arc::new(a), Arc::new(b)]).unwrap();

        for buffer in [
            get_bytes_after_close(schema.clone(), &expected_batch),
            get_bytes_by_into_inner(schema, &expected_batch),
        ] {
            let cursor = Bytes::from(buffer);
            let mut record_batch_reader = ParquetRecordBatchReader::try_new(cursor, 1024).unwrap();

            let actual_batch = record_batch_reader
                .next()
                .expect("No batch found")
                .expect("Unable to get batch");

            assert_eq!(expected_batch.schema(), actual_batch.schema());
            assert_eq!(expected_batch.num_columns(), actual_batch.num_columns());
            assert_eq!(expected_batch.num_rows(), actual_batch.num_rows());
            for i in 0..expected_batch.num_columns() {
                let expected_data = expected_batch.column(i).to_data();
                let actual_data = actual_batch.column(i).to_data();

                assert_eq!(expected_data, actual_data);
            }
        }
    }

    #[test]
    fn arrow_writer_non_null() {
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);

        let a = Int32Array::from(vec![1, 2, 3, 4, 5]);

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();

        roundtrip(batch, Some(SMALL_SIZE / 2));
    }

    #[test]
    fn arrow_writer_list() {
        let schema = Schema::new(vec![Field::new(
            "a",
            DataType::List(Arc::new(Field::new_list_field(DataType::Int32, false))),
            true,
        )]);

        let a_values = Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);

        let a_value_offsets = arrow::buffer::Buffer::from([0, 1, 3, 3, 6, 10].to_byte_slice());

        let a_list_data = ArrayData::builder(DataType::List(Arc::new(Field::new_list_field(
            DataType::Int32,
            false,
        ))))
        .len(5)
        .add_buffer(a_value_offsets)
        .add_child_data(a_values.into_data())
        .null_bit_buffer(Some(Buffer::from([0b00011011])))
        .build()
        .unwrap();
        let a = ListArray::from(a_list_data);

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();

        assert_eq!(batch.column(0).null_count(), 1);

        roundtrip(batch, None);
    }

    #[test]
    fn arrow_writer_list_non_null() {
        let schema = Schema::new(vec![Field::new(
            "a",
            DataType::List(Arc::new(Field::new_list_field(DataType::Int32, false))),
            false,
        )]);

        let a_values = Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);

        let a_value_offsets = arrow::buffer::Buffer::from([0, 1, 3, 3, 6, 10].to_byte_slice());

        let a_list_data = ArrayData::builder(DataType::List(Arc::new(Field::new_list_field(
            DataType::Int32,
            false,
        ))))
        .len(5)
        .add_buffer(a_value_offsets)
        .add_child_data(a_values.into_data())
        .build()
        .unwrap();
        let a = ListArray::from(a_list_data);

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();

        assert_eq!(batch.column(0).null_count(), 0);

        roundtrip(batch, None);
    }

    #[test]
    fn arrow_writer_list_view() {
        let list_field = Arc::new(Field::new_list_field(DataType::Int32, false));
        let schema = Schema::new(vec![Field::new(
            "a",
            DataType::ListView(list_field.clone()),
            true,
        )]);

        let a = ListViewArray::new(
            list_field,
            vec![0, 1, 0, 3, 6].into(),
            vec![1, 2, 0, 3, 4].into(),
            Arc::new(Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10])),
            Some(vec![true, true, false, true, true].into()),
        );

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();

        assert_eq!(batch.column(0).null_count(), 1);

        roundtrip(batch, None);
    }

    #[test]
    fn arrow_writer_list_view_non_null() {
        let list_field = Arc::new(Field::new_list_field(DataType::Int32, false));
        let schema = Schema::new(vec![Field::new(
            "a",
            DataType::ListView(list_field.clone()),
            false,
        )]);

        let a = ListViewArray::new(
            list_field,
            vec![0, 1, 0, 3, 6].into(),
            vec![1, 2, 0, 3, 4].into(),
            Arc::new(Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10])),
            None,
        );

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();

        assert_eq!(batch.column(0).null_count(), 0);

        roundtrip(batch, None);
    }

    #[test]
    fn arrow_writer_list_view_out_of_order() {
        let list_field = Arc::new(Field::new_list_field(DataType::Int32, false));
        let schema = Schema::new(vec![Field::new(
            "a",
            DataType::ListView(list_field.clone()),
            false,
        )]);

        let a = ListViewArray::new(
            list_field,
            vec![0, 1, 0, 6, 3].into(),
            vec![1, 2, 0, 4, 3].into(),
            Arc::new(Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10])),
            None,
        );

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();

        roundtrip(batch, None);
    }

    #[test]
    fn arrow_writer_large_list_view() {
        let list_field = Arc::new(Field::new_list_field(DataType::Int32, false));
        let schema = Schema::new(vec![Field::new(
            "a",
            DataType::LargeListView(list_field.clone()),
            true,
        )]);

        let a = LargeListViewArray::new(
            list_field,
            vec![0i64, 1, 0, 3, 6].into(),
            vec![1i64, 2, 0, 3, 4].into(),
            Arc::new(Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10])),
            Some(vec![true, true, false, true, true].into()),
        );

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();

        assert_eq!(batch.column(0).null_count(), 1);

        roundtrip(batch, None);
    }

    #[test]
    fn arrow_writer_list_view_with_struct() {
        let struct_fields = Fields::from(vec![
            Field::new("id", DataType::Int32, false),
            Field::new("name", DataType::Utf8, false),
        ]);
        let struct_type = DataType::Struct(struct_fields.clone());
        let list_field = Arc::new(Field::new("item", struct_type.clone(), false));

        let schema = Schema::new(vec![Field::new(
            "a",
            DataType::ListView(list_field.clone()),
            true,
        )]);

        let id_array = Int32Array::from(vec![1, 2, 3, 4, 5]);
        let name_array = StringArray::from(vec!["a", "b", "c", "d", "e"]);
        let struct_array = StructArray::new(
            struct_fields,
            vec![Arc::new(id_array), Arc::new(name_array)],
            None,
        );

        let list_view = ListViewArray::new(
            list_field,
            vec![0, 2, 2].into(),
            vec![2, 0, 3].into(),
            Arc::new(struct_array),
            Some(vec![true, false, true].into()),
        );

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(list_view)]).unwrap();

        roundtrip(batch, None);
    }

2040 #[test]
2041 fn arrow_writer_binary() {
2042 let string_field = Field::new("a", DataType::Utf8, false);
2043 let binary_field = Field::new("b", DataType::Binary, false);
2044 let schema = Schema::new(vec![string_field, binary_field]);
2045
2046 let raw_string_values = vec!["foo", "bar", "baz", "quux"];
2047 let raw_binary_values = [
2048 b"foo".to_vec(),
2049 b"bar".to_vec(),
2050 b"baz".to_vec(),
2051 b"quux".to_vec(),
2052 ];
2053 let raw_binary_value_refs = raw_binary_values
2054 .iter()
2055 .map(|x| x.as_slice())
2056 .collect::<Vec<_>>();
2057
2058 let string_values = StringArray::from(raw_string_values.clone());
2059 let binary_values = BinaryArray::from(raw_binary_value_refs);
2060 let batch = RecordBatch::try_new(
2061 Arc::new(schema),
2062 vec![Arc::new(string_values), Arc::new(binary_values)],
2063 )
2064 .unwrap();
2065
2066 roundtrip(batch, Some(SMALL_SIZE / 2));
2067 }
2068
2069 #[test]
2070 fn arrow_writer_binary_view() {
2071 let string_field = Field::new("a", DataType::Utf8View, false);
2072 let binary_field = Field::new("b", DataType::BinaryView, false);
2073 let nullable_string_field = Field::new("a", DataType::Utf8View, true);
2074 let schema = Schema::new(vec![string_field, binary_field, nullable_string_field]);
2075
2076 let raw_string_values = vec!["foo", "bar", "large payload over 12 bytes", "lulu"];
2077 let raw_binary_values = vec![
2078 b"foo".to_vec(),
2079 b"bar".to_vec(),
2080 b"large payload over 12 bytes".to_vec(),
2081 b"lulu".to_vec(),
2082 ];
2083 let nullable_string_values =
2084 vec![Some("foo"), None, Some("large payload over 12 bytes"), None];
2085
2086 let string_view_values = StringViewArray::from(raw_string_values);
2087 let binary_view_values = BinaryViewArray::from_iter_values(raw_binary_values);
2088 let nullable_string_view_values = StringViewArray::from(nullable_string_values);
2089 let batch = RecordBatch::try_new(
2090 Arc::new(schema),
2091 vec![
2092 Arc::new(string_view_values),
2093 Arc::new(binary_view_values),
2094 Arc::new(nullable_string_view_values),
2095 ],
2096 )
2097 .unwrap();
2098
2099 roundtrip(batch.clone(), Some(SMALL_SIZE / 2));
2100 roundtrip(batch, None);
2101 }
2102
2103 #[test]
2104 fn arrow_writer_binary_view_long_value() {
2105 let string_field = Field::new("a", DataType::Utf8View, false);
2106 let binary_field = Field::new("b", DataType::BinaryView, false);
2107 let schema = Schema::new(vec![string_field, binary_field]);
2108
2109 let long = "a".repeat(128);
2113 let raw_string_values = vec!["foo", long.as_str(), "bar"];
2114 let raw_binary_values = vec![b"foo".to_vec(), long.as_bytes().to_vec(), b"bar".to_vec()];
2115
2116 let string_view_values: ArrayRef = Arc::new(StringViewArray::from(raw_string_values));
2117 let binary_view_values: ArrayRef =
2118 Arc::new(BinaryViewArray::from_iter_values(raw_binary_values));
2119
2120 one_column_roundtrip(Arc::clone(&string_view_values), false);
2121 one_column_roundtrip(Arc::clone(&binary_view_values), false);
2122
2123 let batch = RecordBatch::try_new(
2124 Arc::new(schema),
2125 vec![string_view_values, binary_view_values],
2126 )
2127 .unwrap();
2128
2129 for version in [WriterVersion::PARQUET_1_0, WriterVersion::PARQUET_2_0] {
2131 let props = WriterProperties::builder()
2132 .set_writer_version(version)
2133 .set_dictionary_enabled(false)
2134 .build();
2135 roundtrip_opts(&batch, props);
2136 }
2137 }
2138
2139 fn get_decimal_batch(precision: u8, scale: i8) -> RecordBatch {
2140 let decimal_field = Field::new("a", DataType::Decimal128(precision, scale), false);
2141 let schema = Schema::new(vec![decimal_field]);
2142
2143 let decimal_values = vec![10_000, 50_000, 0, -100]
2144 .into_iter()
2145 .map(Some)
2146 .collect::<Decimal128Array>()
2147 .with_precision_and_scale(precision, scale)
2148 .unwrap();
2149
2150 RecordBatch::try_new(Arc::new(schema), vec![Arc::new(decimal_values)]).unwrap()
2151 }
2152
2153 #[test]
2154 fn arrow_writer_decimal() {
2155 let batch_int32_decimal = get_decimal_batch(5, 2);
2157 roundtrip(batch_int32_decimal, Some(SMALL_SIZE / 2));
2158 let batch_int64_decimal = get_decimal_batch(12, 2);
2160 roundtrip(batch_int64_decimal, Some(SMALL_SIZE / 2));
2161 let batch_fixed_len_byte_array_decimal = get_decimal_batch(30, 2);
2163 roundtrip(batch_fixed_len_byte_array_decimal, Some(SMALL_SIZE / 2));
2164 }
2165
2166 #[test]
2167 fn arrow_writer_complex() {
2168 let struct_field_d = Arc::new(Field::new("d", DataType::Float64, true));
2170 let struct_field_f = Arc::new(Field::new("f", DataType::Float32, true));
2171 let struct_field_g = Arc::new(Field::new_list(
2172 "g",
2173 Field::new_list_field(DataType::Int16, true),
2174 false,
2175 ));
2176 let struct_field_h = Arc::new(Field::new_list(
2177 "h",
2178 Field::new_list_field(DataType::Int16, false),
2179 true,
2180 ));
2181 let struct_field_e = Arc::new(Field::new_struct(
2182 "e",
2183 vec![
2184 struct_field_f.clone(),
2185 struct_field_g.clone(),
2186 struct_field_h.clone(),
2187 ],
2188 false,
2189 ));
2190 let schema = Schema::new(vec![
2191 Field::new("a", DataType::Int32, false),
2192 Field::new("b", DataType::Int32, true),
2193 Field::new_struct(
2194 "c",
2195 vec![struct_field_d.clone(), struct_field_e.clone()],
2196 false,
2197 ),
2198 ]);
2199
2200 let a = Int32Array::from(vec![1, 2, 3, 4, 5]);
2202 let b = Int32Array::from(vec![Some(1), None, None, Some(4), Some(5)]);
2203 let d = Float64Array::from(vec![None, None, None, Some(1.0), None]);
2204 let f = Float32Array::from(vec![Some(0.0), None, Some(333.3), None, Some(5.25)]);
2205
2206 let g_value = Int16Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
2207
        let g_value_offsets = arrow::buffer::Buffer::from([0, 1, 3, 3, 6, 10].to_byte_slice());

        let g_list_data = ArrayData::builder(struct_field_g.data_type().clone())
            .len(5)
            .add_buffer(g_value_offsets.clone())
            .add_child_data(g_value.to_data())
            .build()
            .unwrap();
        let g = ListArray::from(g_list_data);
        let h_list_data = ArrayData::builder(struct_field_h.data_type().clone())
            .len(5)
            .add_buffer(g_value_offsets)
            .add_child_data(g_value.to_data())
            .null_bit_buffer(Some(Buffer::from([0b00011011])))
            .build()
            .unwrap();
        let h = ListArray::from(h_list_data);

        let e = StructArray::from(vec![
            (struct_field_f, Arc::new(f) as ArrayRef),
            (struct_field_g, Arc::new(g) as ArrayRef),
            (struct_field_h, Arc::new(h) as ArrayRef),
        ]);

        let c = StructArray::from(vec![
            (struct_field_d, Arc::new(d) as ArrayRef),
            (struct_field_e, Arc::new(e) as ArrayRef),
        ]);

        let batch = RecordBatch::try_new(
            Arc::new(schema),
            vec![Arc::new(a), Arc::new(b), Arc::new(c)],
        )
        .unwrap();

        roundtrip(batch.clone(), Some(SMALL_SIZE / 2));
        roundtrip(batch, Some(SMALL_SIZE / 3));
    }

    #[test]
    fn arrow_writer_complex_mixed() {
        let offset_field = Arc::new(Field::new("offset", DataType::Int32, false));
        let partition_field = Arc::new(Field::new("partition", DataType::Int64, true));
        let topic_field = Arc::new(Field::new("topic", DataType::Utf8, true));
        let schema = Schema::new(vec![Field::new(
            "some_nested_object",
            DataType::Struct(Fields::from(vec![
                offset_field.clone(),
                partition_field.clone(),
                topic_field.clone(),
            ])),
            false,
        )]);

        let offset = Int32Array::from(vec![1, 2, 3, 4, 5]);
        let partition = Int64Array::from(vec![Some(1), None, None, Some(4), Some(5)]);
        let topic = StringArray::from(vec![Some("A"), None, Some("A"), Some(""), None]);

        let some_nested_object = StructArray::from(vec![
            (offset_field, Arc::new(offset) as ArrayRef),
            (partition_field, Arc::new(partition) as ArrayRef),
            (topic_field, Arc::new(topic) as ArrayRef),
        ]);

        let batch =
            RecordBatch::try_new(Arc::new(schema), vec![Arc::new(some_nested_object)]).unwrap();

        roundtrip(batch, Some(SMALL_SIZE / 2));
    }

    #[test]
    fn arrow_writer_map() {
        let json_content = r#"
        {"stocks":{"long": "$AAA", "short": "$BBB"}}
        {"stocks":{"long": null, "long": "$CCC", "short": null}}
        {"stocks":{"hedged": "$YYY", "long": null, "short": "$D"}}
        "#;
        let entries_struct_type = DataType::Struct(Fields::from(vec![
            Field::new("key", DataType::Utf8, false),
            Field::new("value", DataType::Utf8, true),
        ]));
        let stocks_field = Field::new(
            "stocks",
            DataType::Map(
                Arc::new(Field::new("entries", entries_struct_type, false)),
                false,
            ),
            true,
        );
        let schema = Arc::new(Schema::new(vec![stocks_field]));
        let builder = arrow::json::ReaderBuilder::new(schema).with_batch_size(64);
        let mut reader = builder.build(std::io::Cursor::new(json_content)).unwrap();

        let batch = reader.next().unwrap().unwrap();
        roundtrip(batch, None);
    }

    #[test]
    fn arrow_writer_2_level_struct() {
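        // tests writing <struct<struct<primitive>>>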
        let field_c = Field::new("c", DataType::Int32, true);
        let field_b = Field::new("b", DataType::Struct(vec![field_c].into()), true);
        let type_a = DataType::Struct(vec![field_b.clone()].into());
        let field_a = Field::new("a", type_a, true);
        let schema = Schema::new(vec![field_a.clone()]);

        let c = Int32Array::from(vec![Some(1), None, Some(3), None, None, Some(6)]);
        let b_data = ArrayDataBuilder::new(field_b.data_type().clone())
            .len(6)
            .null_bit_buffer(Some(Buffer::from([0b00100111])))
            .add_child_data(c.into_data())
            .build()
            .unwrap();
        let b = StructArray::from(b_data);
        let a_data = ArrayDataBuilder::new(field_a.data_type().clone())
            .len(6)
            .null_bit_buffer(Some(Buffer::from([0b00101111])))
            .add_child_data(b.into_data())
            .build()
            .unwrap();
        let a = StructArray::from(a_data);

        assert_eq!(a.null_count(), 1);
        assert_eq!(a.column(0).null_count(), 2);

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();

        roundtrip(batch, Some(SMALL_SIZE / 2));
    }

    #[test]
    fn arrow_writer_2_level_struct_non_null() {
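        // tests writing <struct<struct<primitive>>> where none of the fields are nullable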
        let field_c = Field::new("c", DataType::Int32, false);
        let type_b = DataType::Struct(vec![field_c].into());
        let field_b = Field::new("b", type_b.clone(), false);
        let type_a = DataType::Struct(vec![field_b].into());
        let field_a = Field::new("a", type_a.clone(), false);
        let schema = Schema::new(vec![field_a]);

        let c = Int32Array::from(vec![1, 2, 3, 4, 5, 6]);
        let b_data = ArrayDataBuilder::new(type_b)
            .len(6)
            .add_child_data(c.into_data())
            .build()
            .unwrap();
        let b = StructArray::from(b_data);
        let a_data = ArrayDataBuilder::new(type_a)
            .len(6)
            .add_child_data(b.into_data())
            .build()
            .unwrap();
        let a = StructArray::from(a_data);

        assert_eq!(a.null_count(), 0);
        assert_eq!(a.column(0).null_count(), 0);

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();

        roundtrip(batch, Some(SMALL_SIZE / 2));
    }

    #[test]
    fn arrow_writer_2_level_struct_mixed_null() {
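        // tests writing <struct<struct<primitive>>> where only the inner struct is nullable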
        let field_c = Field::new("c", DataType::Int32, false);
        let type_b = DataType::Struct(vec![field_c].into());
        let field_b = Field::new("b", type_b.clone(), true);
        let type_a = DataType::Struct(vec![field_b].into());
        let field_a = Field::new("a", type_a.clone(), false);
        let schema = Schema::new(vec![field_a]);

        let c = Int32Array::from(vec![1, 2, 3, 4, 5, 6]);
        let b_data = ArrayDataBuilder::new(type_b)
            .len(6)
            .null_bit_buffer(Some(Buffer::from([0b00100111])))
            .add_child_data(c.into_data())
            .build()
            .unwrap();
        let b = StructArray::from(b_data);
        let a_data = ArrayDataBuilder::new(type_a)
            .len(6)
            .add_child_data(b.into_data())
            .build()
            .unwrap();
        let a = StructArray::from(a_data);

        assert_eq!(a.null_count(), 0);
        assert_eq!(a.column(0).null_count(), 2);

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();

        roundtrip(batch, Some(SMALL_SIZE / 2));
    }

    #[test]
    fn arrow_writer_2_level_struct_mixed_null_2() {
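        // tests writing <struct<struct<primitive, binary, dictionary>>> where only the
        // outer struct is nullable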
        let field_c = Field::new("c", DataType::Int32, false);
        let field_d = Field::new("d", DataType::FixedSizeBinary(4), false);
        let field_e = Field::new(
            "e",
            DataType::Dictionary(Box::new(DataType::Int32), Box::new(DataType::Utf8)),
            false,
        );

        let field_b = Field::new(
            "b",
            DataType::Struct(vec![field_c, field_d, field_e].into()),
            false,
        );
        let type_a = DataType::Struct(vec![field_b.clone()].into());
        let field_a = Field::new("a", type_a, true);
        let schema = Schema::new(vec![field_a.clone()]);

        let c = Int32Array::from_iter_values(0..6);
        let d = FixedSizeBinaryArray::try_from_iter(
            ["aaaa", "bbbb", "cccc", "dddd", "eeee", "ffff"].into_iter(),
        )
        .expect("four byte values");
        let e = Int32DictionaryArray::from_iter(["one", "two", "three", "four", "five", "one"]);
        let b_data = ArrayDataBuilder::new(field_b.data_type().clone())
            .len(6)
            .add_child_data(c.into_data())
            .add_child_data(d.into_data())
            .add_child_data(e.into_data())
            .build()
            .unwrap();
        let b = StructArray::from(b_data);
        let a_data = ArrayDataBuilder::new(field_a.data_type().clone())
            .len(6)
            .null_bit_buffer(Some(Buffer::from([0b00100101])))
            .add_child_data(b.into_data())
            .build()
            .unwrap();
        let a = StructArray::from(a_data);

        assert_eq!(a.null_count(), 3);
        assert_eq!(a.column(0).null_count(), 0);

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();

        roundtrip(batch, Some(SMALL_SIZE / 2));
    }

    #[test]
    fn test_fixed_size_binary_in_dict() {
        fn test_fixed_size_binary_in_dict_inner<K>()
        where
            K: ArrowDictionaryKeyType,
            K::Native: FromPrimitive + ToPrimitive + TryFrom<u8>,
            <<K as arrow_array::ArrowPrimitiveType>::Native as TryFrom<u8>>::Error: std::fmt::Debug,
        {
            let field = Field::new(
                "a",
                DataType::Dictionary(
                    Box::new(K::DATA_TYPE),
                    Box::new(DataType::FixedSizeBinary(4)),
                ),
                false,
            );
            let schema = Schema::new(vec![field]);

            let keys: Vec<K::Native> = vec![
                K::Native::try_from(0u8).unwrap(),
                K::Native::try_from(0u8).unwrap(),
                K::Native::try_from(1u8).unwrap(),
            ];
            let keys = PrimitiveArray::<K>::from_iter_values(keys);
            let values = FixedSizeBinaryArray::try_from_iter(
                vec![vec![0, 0, 0, 0], vec![1, 1, 1, 1]].into_iter(),
            )
            .unwrap();

            let data = DictionaryArray::<K>::new(keys, Arc::new(values));
            let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(data)]).unwrap();
            roundtrip(batch, None);
        }

        test_fixed_size_binary_in_dict_inner::<UInt8Type>();
        test_fixed_size_binary_in_dict_inner::<UInt16Type>();
        test_fixed_size_binary_in_dict_inner::<UInt32Type>();
        test_fixed_size_binary_in_dict_inner::<UInt64Type>();
        test_fixed_size_binary_in_dict_inner::<Int8Type>();
        test_fixed_size_binary_in_dict_inner::<Int16Type>();
        test_fixed_size_binary_in_dict_inner::<Int32Type>();
        test_fixed_size_binary_in_dict_inner::<Int64Type>();
    }

    #[test]
    fn test_empty_dict() {
        let struct_fields = Fields::from(vec![Field::new(
            "dict",
            DataType::Dictionary(Box::new(DataType::Int32), Box::new(DataType::Utf8)),
            false,
        )]);

        let schema = Schema::new(vec![Field::new_struct(
            "struct",
            struct_fields.clone(),
            true,
        )]);
        let dictionary = Arc::new(DictionaryArray::new(
            Int32Array::new_null(5),
            Arc::new(StringArray::new_null(0)),
        ));

        let s = StructArray::new(
            struct_fields,
            vec![dictionary],
            Some(NullBuffer::new_null(5)),
        );

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(s)]).unwrap();
        roundtrip(batch, None);
    }

    #[test]
    fn arrow_writer_page_size() {
        let schema = Arc::new(Schema::new(vec![Field::new("col", DataType::Utf8, false)]));

        let mut builder = StringBuilder::with_capacity(100, 329 * 10_000);

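        // Generate 10 distinct ten-character strings, one per row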
        for i in 0..10 {
            let value = i
                .to_string()
                .repeat(10)
                .chars()
                .take(10)
                .collect::<String>();

            builder.append_value(value);
        }

        let array = Arc::new(builder.finish());

        let batch = RecordBatch::try_new(schema, vec![array]).unwrap();

        let file = tempfile::tempfile().unwrap();

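        // Set all size limits to 1 so every value is flushed to its own page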
        let props = WriterProperties::builder()
            .set_data_page_size_limit(1)
            .set_dictionary_page_size_limit(1)
            .set_write_batch_size(1)
            .build();

        let mut writer =
            ArrowWriter::try_new(file.try_clone().unwrap(), batch.schema(), Some(props))
                .expect("Unable to write file");
        writer.write(&batch).unwrap();
        writer.close().unwrap();

        let options = ReadOptionsBuilder::new().with_page_index().build();
        let reader =
            SerializedFileReader::new_with_options(file.try_clone().unwrap(), options).unwrap();

        let columns = reader.metadata().row_group(0).columns();

        assert_eq!(columns.len(), 1);

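        // A dictionary page should still have been written before the writer
        // fell back to plain-encoded data pages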
        assert!(
            columns[0].dictionary_page_offset().is_some(),
            "Expected a dictionary page"
        );

        assert!(reader.metadata().offset_index().is_some());
        let offset_indexes = &reader.metadata().offset_index().unwrap()[0];

        let page_locations = offset_indexes[0].page_locations.clone();

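        // With a write batch size of 1 and a 1-byte data page limit, every row
        // should land in its own page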
        assert_eq!(
            page_locations.len(),
            10,
            "Expected 10 pages but got {page_locations:#?}"
        );
    }

    #[test]
    fn arrow_writer_float_nans() {
        let f16_field = Field::new("a", DataType::Float16, false);
        let f32_field = Field::new("b", DataType::Float32, false);
        let f64_field = Field::new("c", DataType::Float64, false);
        let schema = Schema::new(vec![f16_field, f32_field, f64_field]);

        let f16_values = (0..MEDIUM_SIZE)
            .map(|i| {
                Some(if i % 2 == 0 {
                    f16::NAN
                } else {
                    f16::from_f32(i as f32)
                })
            })
            .collect::<Float16Array>();

        let f32_values = (0..MEDIUM_SIZE)
            .map(|i| Some(if i % 2 == 0 { f32::NAN } else { i as f32 }))
            .collect::<Float32Array>();

        let f64_values = (0..MEDIUM_SIZE)
            .map(|i| Some(if i % 2 == 0 { f64::NAN } else { i as f64 }))
            .collect::<Float64Array>();

        let batch = RecordBatch::try_new(
            Arc::new(schema),
            vec![
                Arc::new(f16_values),
                Arc::new(f32_values),
                Arc::new(f64_values),
            ],
        )
        .unwrap();

        roundtrip(batch, None);
    }

    const SMALL_SIZE: usize = 7;
    const MEDIUM_SIZE: usize = 63;

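    /// Writes `expected_batch` with both writer versions and asserts the data
    /// round-trips, returning the serialized bytes for each version.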
    fn roundtrip(expected_batch: RecordBatch, max_row_group_size: Option<usize>) -> Vec<Bytes> {
        let mut files = vec![];
        for version in [WriterVersion::PARQUET_1_0, WriterVersion::PARQUET_2_0] {
            let mut props = WriterProperties::builder().set_writer_version(version);

            if let Some(size) = max_row_group_size {
                props = props.set_max_row_group_row_count(Some(size))
            }

            let props = props.build();
            files.push(roundtrip_opts(&expected_batch, props))
        }
        files
    }

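    /// Like `roundtrip_opts`, but lets the caller validate each column's
    /// `ArrayData` with a custom comparison.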
    fn roundtrip_opts_with_array_validation<F>(
        expected_batch: &RecordBatch,
        props: WriterProperties,
        validate: F,
    ) -> Bytes
    where
        F: Fn(&ArrayData, &ArrayData),
    {
        let mut file = vec![];

        let mut writer = ArrowWriter::try_new(&mut file, expected_batch.schema(), Some(props))
            .expect("Unable to write file");
        writer.write(expected_batch).unwrap();
        writer.close().unwrap();

        let file = Bytes::from(file);
        let mut record_batch_reader =
            ParquetRecordBatchReader::try_new(file.clone(), 1024).unwrap();

        let actual_batch = record_batch_reader
            .next()
            .expect("No batch found")
            .expect("Unable to get batch");

        assert_eq!(expected_batch.schema(), actual_batch.schema());
        assert_eq!(expected_batch.num_columns(), actual_batch.num_columns());
        assert_eq!(expected_batch.num_rows(), actual_batch.num_rows());
        for i in 0..expected_batch.num_columns() {
            let expected_data = expected_batch.column(i).to_data();
            let actual_data = actual_batch.column(i).to_data();
            validate(&expected_data, &actual_data);
        }

        file
    }

    fn roundtrip_opts(expected_batch: &RecordBatch, props: WriterProperties) -> Bytes {
        roundtrip_opts_with_array_validation(expected_batch, props, |a, b| {
            a.validate_full().expect("valid expected data");
            b.validate_full().expect("valid actual data");
            assert_eq!(a, b)
        })
    }

    struct RoundTripOptions {
        values: ArrayRef,
        schema: SchemaRef,
        bloom_filter: bool,
        bloom_filter_ndv: Option<u64>,
        bloom_filter_position: BloomFilterPosition,
    }

    impl RoundTripOptions {
        fn new(values: ArrayRef, nullable: bool) -> Self {
            let data_type = values.data_type().clone();
            let schema = Schema::new(vec![Field::new("col", data_type, nullable)]);
            Self {
                values,
                schema: Arc::new(schema),
                bloom_filter: false,
                bloom_filter_ndv: None,
                bloom_filter_position: BloomFilterPosition::AfterRowGroup,
            }
        }
    }

    fn one_column_roundtrip(values: ArrayRef, nullable: bool) -> Vec<Bytes> {
        one_column_roundtrip_with_options(RoundTripOptions::new(values, nullable))
    }

    fn one_column_roundtrip_with_schema(values: ArrayRef, schema: SchemaRef) -> Vec<Bytes> {
        let mut options = RoundTripOptions::new(values, false);
        options.schema = schema;
        one_column_roundtrip_with_options(options)
    }

    fn one_column_roundtrip_with_options(options: RoundTripOptions) -> Vec<Bytes> {
        let RoundTripOptions {
            values,
            schema,
            bloom_filter,
            bloom_filter_ndv,
            bloom_filter_position,
        } = options;

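        // Exercise every encoding supported for the value's data type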
        let encodings = match values.data_type() {
            DataType::Utf8 | DataType::LargeUtf8 | DataType::Binary | DataType::LargeBinary => {
                vec![
                    Encoding::PLAIN,
                    Encoding::DELTA_BYTE_ARRAY,
                    Encoding::DELTA_LENGTH_BYTE_ARRAY,
                ]
            }
            DataType::Int64
            | DataType::Int32
            | DataType::Int16
            | DataType::Int8
            | DataType::UInt64
            | DataType::UInt32
            | DataType::UInt16
            | DataType::UInt8 => vec![
                Encoding::PLAIN,
                Encoding::DELTA_BINARY_PACKED,
                Encoding::BYTE_STREAM_SPLIT,
            ],
            DataType::Float32 | DataType::Float64 => {
                vec![Encoding::PLAIN, Encoding::BYTE_STREAM_SPLIT]
            }
            _ => vec![Encoding::PLAIN],
        };

        let expected_batch = RecordBatch::try_new(schema, vec![values]).unwrap();

        let row_group_sizes = [1024, SMALL_SIZE, SMALL_SIZE / 2, SMALL_SIZE / 2 + 1, 10];

        let mut files = vec![];
        for dictionary_size in [0, 1, 1024] {
            for encoding in &encodings {
                for version in [WriterVersion::PARQUET_1_0, WriterVersion::PARQUET_2_0] {
                    for row_group_size in row_group_sizes {
                        let mut builder = WriterProperties::builder()
                            .set_writer_version(version)
                            .set_max_row_group_row_count(Some(row_group_size))
                            .set_dictionary_enabled(dictionary_size != 0)
                            .set_dictionary_page_size_limit(dictionary_size.max(1))
                            .set_encoding(*encoding)
                            .set_bloom_filter_enabled(bloom_filter)
                            .set_bloom_filter_position(bloom_filter_position);
                        if let Some(ndv) = bloom_filter_ndv {
                            builder = builder.set_bloom_filter_max_ndv(ndv);
                        }
                        let props = builder.build();

                        files.push(roundtrip_opts(&expected_batch, props))
                    }
                }
            }
        }
        files
    }

    fn values_required<A, I>(iter: I) -> Vec<Bytes>
    where
        A: From<Vec<I::Item>> + Array + 'static,
        I: IntoIterator,
    {
        let raw_values: Vec<_> = iter.into_iter().collect();
        let values = Arc::new(A::from(raw_values));
        one_column_roundtrip(values, false)
    }

    fn values_optional<A, I>(iter: I) -> Vec<Bytes>
    where
        A: From<Vec<Option<I::Item>>> + Array + 'static,
        I: IntoIterator,
    {
        let optional_raw_values: Vec<_> = iter
            .into_iter()
            .enumerate()
            .map(|(i, v)| if i % 2 == 0 { None } else { Some(v) })
            .collect();
        let optional_values = Arc::new(A::from(optional_raw_values));
        one_column_roundtrip(optional_values, true)
    }

    fn required_and_optional<A, I>(iter: I)
    where
        A: From<Vec<I::Item>> + From<Vec<Option<I::Item>>> + Array + 'static,
        I: IntoIterator + Clone,
    {
        values_required::<A, I>(iter.clone());
        values_optional::<A, I>(iter);
    }

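    /// Asserts that every value in `positive_values` hits and every value in
    /// `negative_values` misses the bloom filter of `file_column`.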
    fn check_bloom_filter<T: AsBytes>(
        files: Vec<Bytes>,
        file_column: String,
        positive_values: Vec<T>,
        negative_values: Vec<T>,
    ) {
        files.into_iter().take(1).for_each(|file| {
            let file_reader = SerializedFileReader::new_with_options(
                file,
                ReadOptionsBuilder::new()
                    .with_reader_properties(
                        ReaderProperties::builder()
                            .set_read_bloom_filter(true)
                            .build(),
                    )
                    .build(),
            )
            .expect("Unable to open file as Parquet");
            let metadata = file_reader.metadata();

            let mut bloom_filters: Vec<_> = vec![];
            for (ri, row_group) in metadata.row_groups().iter().enumerate() {
                if let Some((column_index, _)) = row_group
                    .columns()
                    .iter()
                    .enumerate()
                    .find(|(_, column)| column.column_path().string() == file_column)
                {
                    let row_group_reader = file_reader
                        .get_row_group(ri)
                        .expect("Unable to read row group");
                    if let Some(sbbf) = row_group_reader.get_column_bloom_filter(column_index) {
                        bloom_filters.push(sbbf.clone());
                    } else {
                        panic!("No bloom filter for column named {file_column} found");
                    }
                } else {
                    panic!("No column named {file_column} found");
                }
            }

            positive_values.iter().for_each(|value| {
                let found = bloom_filters.iter().find(|sbbf| sbbf.check(value));
                assert!(
                    found.is_some(),
                    "Value {:?} should be in bloom filter",
                    value.as_bytes()
                );
            });

            negative_values.iter().for_each(|value| {
                let found = bloom_filters.iter().find(|sbbf| sbbf.check(value));
                assert!(
                    found.is_none(),
                    "Value {:?} should not be in bloom filter",
                    value.as_bytes()
                );
            });
        });
    }

    #[test]
    fn all_null_primitive_single_column() {
        let values = Arc::new(Int32Array::from(vec![None; SMALL_SIZE]));
        one_column_roundtrip(values, true);
    }

    #[test]
    fn null_single_column() {
        let values = Arc::new(NullArray::new(SMALL_SIZE));
        one_column_roundtrip(values, true);
    }

    #[test]
    fn bool_single_column() {
        required_and_optional::<BooleanArray, _>(
            [true, false].iter().cycle().copied().take(SMALL_SIZE),
        );
    }

    #[test]
    fn bool_large_single_column() {
        let values = Arc::new(
            [None, Some(true), Some(false)]
                .iter()
                .cycle()
                .copied()
                .take(200_000)
                .collect::<BooleanArray>(),
        );
        let schema = Schema::new(vec![Field::new("col", values.data_type().clone(), true)]);
        let expected_batch = RecordBatch::try_new(Arc::new(schema), vec![values]).unwrap();
        let file = tempfile::tempfile().unwrap();

        let mut writer =
            ArrowWriter::try_new(file.try_clone().unwrap(), expected_batch.schema(), None)
                .expect("Unable to write file");
        writer.write(&expected_batch).unwrap();
        writer.close().unwrap();
    }

    #[test]
    fn check_page_offset_index_with_nan() {
        let values = Arc::new(Float64Array::from(vec![f64::NAN; 10]));
        let schema = Schema::new(vec![Field::new("col", DataType::Float64, true)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![values]).unwrap();

        let mut out = Vec::with_capacity(1024);
        let mut writer =
            ArrowWriter::try_new(&mut out, batch.schema(), None).expect("Unable to write file");
        writer.write(&batch).unwrap();
        let file_meta_data = writer.close().unwrap();
        for row_group in file_meta_data.row_groups() {
            for column in row_group.columns() {
                assert!(column.offset_index_offset().is_some());
                assert!(column.offset_index_length().is_some());
                assert!(column.column_index_offset().is_none());
                assert!(column.column_index_length().is_none());
            }
        }
    }

    #[test]
    fn i8_single_column() {
        required_and_optional::<Int8Array, _>(0..SMALL_SIZE as i8);
    }

    #[test]
    fn i16_single_column() {
        required_and_optional::<Int16Array, _>(0..SMALL_SIZE as i16);
    }

    #[test]
    fn i32_single_column() {
        required_and_optional::<Int32Array, _>(0..SMALL_SIZE as i32);
    }

    #[test]
    fn i64_single_column() {
        required_and_optional::<Int64Array, _>(0..SMALL_SIZE as i64);
    }

    #[test]
    fn u8_single_column() {
        required_and_optional::<UInt8Array, _>(0..SMALL_SIZE as u8);
    }

    #[test]
    fn u16_single_column() {
        required_and_optional::<UInt16Array, _>(0..SMALL_SIZE as u16);
    }

    #[test]
    fn u32_single_column() {
        required_and_optional::<UInt32Array, _>(0..SMALL_SIZE as u32);
    }

    #[test]
    fn u64_single_column() {
        required_and_optional::<UInt64Array, _>(0..SMALL_SIZE as u64);
    }

    #[test]
    fn f32_single_column() {
        required_and_optional::<Float32Array, _>((0..SMALL_SIZE).map(|i| i as f32));
    }

    #[test]
    fn f64_single_column() {
        required_and_optional::<Float64Array, _>((0..SMALL_SIZE).map(|i| i as f64));
    }

    #[test]
    fn timestamp_second_single_column() {
        let raw_values: Vec<_> = (0..SMALL_SIZE as i64).collect();
        let values = Arc::new(TimestampSecondArray::from(raw_values));

        one_column_roundtrip(values, false);
    }

    #[test]
    fn timestamp_millisecond_single_column() {
        let raw_values: Vec<_> = (0..SMALL_SIZE as i64).collect();
        let values = Arc::new(TimestampMillisecondArray::from(raw_values));

        one_column_roundtrip(values, false);
    }

    #[test]
    fn timestamp_microsecond_single_column() {
        let raw_values: Vec<_> = (0..SMALL_SIZE as i64).collect();
        let values = Arc::new(TimestampMicrosecondArray::from(raw_values));

        one_column_roundtrip(values, false);
    }

    #[test]
    fn timestamp_nanosecond_single_column() {
        let raw_values: Vec<_> = (0..SMALL_SIZE as i64).collect();
        let values = Arc::new(TimestampNanosecondArray::from(raw_values));

        one_column_roundtrip(values, false);
    }

    #[test]
    fn date32_single_column() {
        required_and_optional::<Date32Array, _>(0..SMALL_SIZE as i32);
    }

    #[test]
    fn date64_single_column() {
        required_and_optional::<Date64Array, _>(
            (0..(SMALL_SIZE as i64 * 86400000)).step_by(86400000),
        );
    }

    #[test]
    fn time32_second_single_column() {
        required_and_optional::<Time32SecondArray, _>(0..SMALL_SIZE as i32);
    }

    #[test]
    fn time32_millisecond_single_column() {
        required_and_optional::<Time32MillisecondArray, _>(0..SMALL_SIZE as i32);
    }

    #[test]
    fn time64_microsecond_single_column() {
        required_and_optional::<Time64MicrosecondArray, _>(0..SMALL_SIZE as i64);
    }

    #[test]
    fn time64_nanosecond_single_column() {
        required_and_optional::<Time64NanosecondArray, _>(0..SMALL_SIZE as i64);
    }

    #[test]
    fn duration_second_single_column() {
        required_and_optional::<DurationSecondArray, _>(0..SMALL_SIZE as i64);
    }

    #[test]
    fn duration_millisecond_single_column() {
        required_and_optional::<DurationMillisecondArray, _>(0..SMALL_SIZE as i64);
    }

    #[test]
    fn duration_microsecond_single_column() {
        required_and_optional::<DurationMicrosecondArray, _>(0..SMALL_SIZE as i64);
    }

    #[test]
    fn duration_nanosecond_single_column() {
        required_and_optional::<DurationNanosecondArray, _>(0..SMALL_SIZE as i64);
    }

    #[test]
    fn interval_year_month_single_column() {
        required_and_optional::<IntervalYearMonthArray, _>(0..SMALL_SIZE as i32);
    }

    #[test]
    fn interval_day_time_single_column() {
        required_and_optional::<IntervalDayTimeArray, _>(vec![
            IntervalDayTime::new(0, 1),
            IntervalDayTime::new(0, 3),
            IntervalDayTime::new(3, -2),
            IntervalDayTime::new(-200, 4),
        ]);
    }

    #[test]
    #[should_panic(
        expected = "Attempting to write an Arrow interval type MonthDayNano to parquet that is not yet implemented"
    )]
    fn interval_month_day_nano_single_column() {
        required_and_optional::<IntervalMonthDayNanoArray, _>(vec![
            IntervalMonthDayNano::new(0, 1, 5),
            IntervalMonthDayNano::new(0, 3, 2),
            IntervalMonthDayNano::new(3, -2, -5),
            IntervalMonthDayNano::new(-200, 4, -1),
        ]);
    }

    #[test]
    fn binary_single_column() {
        let one_vec: Vec<u8> = (0..SMALL_SIZE as u8).collect();
        let many_vecs: Vec<_> = std::iter::repeat_n(one_vec, SMALL_SIZE).collect();
        let many_vecs_iter = many_vecs.iter().map(|v| v.as_slice());

        values_required::<BinaryArray, _>(many_vecs_iter);
    }

    #[test]
    fn binary_view_single_column() {
        let one_vec: Vec<u8> = (0..SMALL_SIZE as u8).collect();
        let many_vecs: Vec<_> = std::iter::repeat_n(one_vec, SMALL_SIZE).collect();
        let many_vecs_iter = many_vecs.iter().map(|v| v.as_slice());

        values_required::<BinaryViewArray, _>(many_vecs_iter);
    }

    #[test]
    fn i32_column_bloom_filter_at_end() {
        let array = Arc::new(Int32Array::from_iter(0..SMALL_SIZE as i32));
        let mut options = RoundTripOptions::new(array, false);
        options.bloom_filter = true;
        options.bloom_filter_position = BloomFilterPosition::End;

        let files = one_column_roundtrip_with_options(options);
        check_bloom_filter(
            files,
            "col".to_string(),
            (0..SMALL_SIZE as i32).collect(),
            (SMALL_SIZE as i32 + 1..SMALL_SIZE as i32 + 10).collect(),
        );
    }

    #[test]
    fn i32_column_bloom_filter() {
        let array = Arc::new(Int32Array::from_iter(0..SMALL_SIZE as i32));
        let mut options = RoundTripOptions::new(array, false);
        options.bloom_filter = true;

        let files = one_column_roundtrip_with_options(options);
        check_bloom_filter(
            files,
            "col".to_string(),
            (0..SMALL_SIZE as i32).collect(),
            (SMALL_SIZE as i32 + 1..SMALL_SIZE as i32 + 10).collect(),
        );
    }

    #[test]
    fn i32_column_bloom_filter_fixed_ndv() {
        let array = Arc::new(Int32Array::from_iter(0..SMALL_SIZE as i32));

        let mut options = RoundTripOptions::new(array.clone(), false);
        options.bloom_filter = true;
        options.bloom_filter_ndv = Some(1_000_000);

        let files = one_column_roundtrip_with_options(options);
        check_bloom_filter(
            files,
            "col".to_string(),
            (0..SMALL_SIZE as i32).collect(),
            (SMALL_SIZE as i32 + 1..SMALL_SIZE as i32 + 10).collect(),
        );

        let mut options = RoundTripOptions::new(array, false);
        options.bloom_filter = true;
        options.bloom_filter_ndv = Some(3);

        let files = one_column_roundtrip_with_options(options);
        check_bloom_filter(
            files,
            "col".to_string(),
            (0..SMALL_SIZE as i32).collect(),
            (SMALL_SIZE as i32 + 1..SMALL_SIZE as i32 + 10).collect(),
        );
    }

    #[test]
    fn binary_column_bloom_filter() {
        let one_vec: Vec<u8> = (0..SMALL_SIZE as u8).collect();
        let many_vecs: Vec<_> = std::iter::repeat_n(one_vec, SMALL_SIZE).collect();
        let many_vecs_iter = many_vecs.iter().map(|v| v.as_slice());

        let array = Arc::new(BinaryArray::from_iter_values(many_vecs_iter));
        let mut options = RoundTripOptions::new(array, false);
        options.bloom_filter = true;

        let files = one_column_roundtrip_with_options(options);
        check_bloom_filter(
            files,
            "col".to_string(),
            many_vecs,
            vec![vec![(SMALL_SIZE + 1) as u8]],
        );
    }

    #[test]
    fn empty_string_null_column_bloom_filter() {
        let raw_values: Vec<_> = (0..SMALL_SIZE).map(|i| i.to_string()).collect();
        let raw_strs = raw_values.iter().map(|s| s.as_str());

        let array = Arc::new(StringArray::from_iter_values(raw_strs));
        let mut options = RoundTripOptions::new(array, false);
        options.bloom_filter = true;

        let files = one_column_roundtrip_with_options(options);

        let optional_raw_values: Vec<_> = raw_values
            .iter()
            .enumerate()
            .filter_map(|(i, v)| if i % 2 == 0 { None } else { Some(v.as_str()) })
            .collect();
        check_bloom_filter(files, "col".to_string(), optional_raw_values, vec![""]);
    }

    #[test]
    fn large_binary_single_column() {
        let one_vec: Vec<u8> = (0..SMALL_SIZE as u8).collect();
        let many_vecs: Vec<_> = std::iter::repeat_n(one_vec, SMALL_SIZE).collect();
        let many_vecs_iter = many_vecs.iter().map(|v| v.as_slice());

        values_required::<LargeBinaryArray, _>(many_vecs_iter);
    }

    #[test]
    fn fixed_size_binary_single_column() {
        let mut builder = FixedSizeBinaryBuilder::new(4);
        builder.append_value(b"0123").unwrap();
        builder.append_null();
        builder.append_value(b"8910").unwrap();
        builder.append_value(b"1112").unwrap();
        let array = Arc::new(builder.finish());

        one_column_roundtrip(array, true);
    }

    #[test]
    fn string_single_column() {
        let raw_values: Vec<_> = (0..SMALL_SIZE).map(|i| i.to_string()).collect();
        let raw_strs = raw_values.iter().map(|s| s.as_str());

        required_and_optional::<StringArray, _>(raw_strs);
    }

    #[test]
    fn large_string_single_column() {
        let raw_values: Vec<_> = (0..SMALL_SIZE).map(|i| i.to_string()).collect();
        let raw_strs = raw_values.iter().map(|s| s.as_str());

        required_and_optional::<LargeStringArray, _>(raw_strs);
    }

    #[test]
    fn string_view_single_column() {
        let raw_values: Vec<_> = (0..SMALL_SIZE).map(|i| i.to_string()).collect();
        let raw_strs = raw_values.iter().map(|s| s.as_str());

        required_and_optional::<StringViewArray, _>(raw_strs);
    }

    #[test]
    fn null_list_single_column() {
        let null_field = Field::new_list_field(DataType::Null, true);
        let list_field = Field::new("emptylist", DataType::List(Arc::new(null_field)), true);

        let schema = Schema::new(vec![list_field]);

        let a_values = NullArray::new(2);
        let a_value_offsets = arrow::buffer::Buffer::from([0, 0, 0, 2].to_byte_slice());
        let a_list_data = ArrayData::builder(DataType::List(Arc::new(Field::new_list_field(
            DataType::Null,
            true,
        ))))
        .len(3)
        .add_buffer(a_value_offsets)
        .null_bit_buffer(Some(Buffer::from([0b00000101])))
        .add_child_data(a_values.into_data())
        .build()
        .unwrap();

        let a = ListArray::from(a_list_data);

        assert!(a.is_valid(0));
        assert!(!a.is_valid(1));
        assert!(a.is_valid(2));

        assert_eq!(a.value(0).len(), 0);
        assert_eq!(a.value(2).len(), 2);
        assert_eq!(a.value(2).logical_nulls().unwrap().null_count(), 2);

        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();
        roundtrip(batch, None);
    }

    #[test]
    fn list_single_column() {
        let a_values = Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
        let a_value_offsets = arrow::buffer::Buffer::from([0, 1, 3, 3, 6, 10].to_byte_slice());
        let a_list_data = ArrayData::builder(DataType::List(Arc::new(Field::new_list_field(
            DataType::Int32,
            false,
        ))))
        .len(5)
        .add_buffer(a_value_offsets)
        .null_bit_buffer(Some(Buffer::from([0b00011011])))
        .add_child_data(a_values.into_data())
        .build()
        .unwrap();

        assert_eq!(a_list_data.null_count(), 1);

        let a = ListArray::from(a_list_data);
        let values = Arc::new(a);

        one_column_roundtrip(values, true);
    }

    #[test]
    fn large_list_single_column() {
        let a_values = Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
        let a_value_offsets = arrow::buffer::Buffer::from([0i64, 1, 3, 3, 6, 10].to_byte_slice());
        let a_list_data = ArrayData::builder(DataType::LargeList(Arc::new(Field::new(
            "large_item",
            DataType::Int32,
            true,
        ))))
        .len(5)
        .add_buffer(a_value_offsets)
        .add_child_data(a_values.into_data())
        .null_bit_buffer(Some(Buffer::from([0b00011011])))
        .build()
        .unwrap();

        assert_eq!(a_list_data.null_count(), 1);

        let a = LargeListArray::from(a_list_data);
        let values = Arc::new(a);

        one_column_roundtrip(values, true);
    }

    #[test]
    fn list_nested_nulls() {
        use arrow::datatypes::Int32Type;
        let data = vec![
            Some(vec![Some(1)]),
            Some(vec![Some(2), Some(3)]),
            None,
            Some(vec![Some(4), Some(5), None]),
            Some(vec![None]),
            Some(vec![Some(6), Some(7)]),
        ];

        let list = ListArray::from_iter_primitive::<Int32Type, _, _>(data.clone());
        one_column_roundtrip(Arc::new(list), true);

        let list = LargeListArray::from_iter_primitive::<Int32Type, _, _>(data);
        one_column_roundtrip(Arc::new(list), true);
    }

    #[test]
    fn struct_single_column() {
        let a_values = Int32Array::from(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
        let struct_field_a = Arc::new(Field::new("f", DataType::Int32, false));
        let s = StructArray::from(vec![(struct_field_a, Arc::new(a_values) as ArrayRef)]);

        let values = Arc::new(s);
        one_column_roundtrip(values, false);
    }

    #[test]
    fn list_and_map_coerced_names() {
        let list_field =
            Field::new_list("my_list", Field::new("item", DataType::Int32, false), false);
        let map_field = Field::new_map(
            "my_map",
            "entries",
            Field::new("keys", DataType::Int32, false),
            Field::new("values", DataType::Int32, true),
            false,
            true,
        );

        let list_array = create_random_array(&list_field, 100, 0.0, 0.0).unwrap();
        let map_array = create_random_array(&map_field, 100, 0.0, 0.0).unwrap();

        let arrow_schema = Arc::new(Schema::new(vec![list_field, map_field]));

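        // Write with coerce_types enabled so nested field names are normalized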
        let props = Some(WriterProperties::builder().set_coerce_types(true).build());
        let file = tempfile::tempfile().unwrap();
        let mut writer =
            ArrowWriter::try_new(file.try_clone().unwrap(), arrow_schema.clone(), props).unwrap();

        let batch = RecordBatch::try_new(arrow_schema, vec![list_array, map_array]).unwrap();
        writer.write(&batch).unwrap();
        let file_metadata = writer.close().unwrap();

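        // The coerced names should appear in the file schema:
        // "item" -> "element", "entries" -> "key_value", "keys" -> "key", "values" -> "value"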
        let schema = file_metadata.file_metadata().schema();
        let list_field = &schema.get_fields()[0].get_fields()[0];
        assert_eq!(list_field.get_fields()[0].name(), "element");

        let map_field = &schema.get_fields()[1].get_fields()[0];
        assert_eq!(map_field.name(), "key_value");
        assert_eq!(map_field.get_fields()[0].name(), "key");
        assert_eq!(map_field.get_fields()[1].name(), "value");

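        // Double-check the coerced names after reading the file back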
        let reader = SerializedFileReader::new(file).unwrap();
        let file_schema = reader.metadata().file_metadata().schema();
        let fields = file_schema.get_fields();
        let list_field = &fields[0].get_fields()[0];
        assert_eq!(list_field.get_fields()[0].name(), "element");
        let map_field = &fields[1].get_fields()[0];
        assert_eq!(map_field.name(), "key_value");
        assert_eq!(map_field.get_fields()[0].name(), "key");
        assert_eq!(map_field.get_fields()[1].name(), "value");
    }

    #[test]
    fn fallback_flush_data_page() {
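        // Small page-size and write-batch limits force frequent data page flushes
        // for the delta byte array encoders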
        let raw_values: Vec<_> = (0..MEDIUM_SIZE).map(|i| i.to_string()).collect();
        let values = Arc::new(StringArray::from(raw_values));
        let encodings = vec![
            Encoding::DELTA_BYTE_ARRAY,
            Encoding::DELTA_LENGTH_BYTE_ARRAY,
        ];
        let data_type = values.data_type().clone();
        let schema = Arc::new(Schema::new(vec![Field::new("col", data_type, false)]));
        let expected_batch = RecordBatch::try_new(schema, vec![values]).unwrap();

        let row_group_sizes = [1024, SMALL_SIZE, SMALL_SIZE / 2, SMALL_SIZE / 2 + 1, 10];
        let data_page_size_limit: usize = 32;
        let write_batch_size: usize = 16;

        for encoding in &encodings {
            for row_group_size in row_group_sizes {
                let props = WriterProperties::builder()
                    .set_writer_version(WriterVersion::PARQUET_2_0)
                    .set_max_row_group_row_count(Some(row_group_size))
                    .set_dictionary_enabled(false)
                    .set_encoding(*encoding)
                    .set_data_page_size_limit(data_page_size_limit)
                    .set_write_batch_size(write_batch_size)
                    .build();

                roundtrip_opts_with_array_validation(&expected_batch, props, |a, b| {
                    let string_array_a = StringArray::from(a.clone());
                    let string_array_b = StringArray::from(b.clone());
                    let vec_a: Vec<&str> = string_array_a.iter().map(|v| v.unwrap()).collect();
                    let vec_b: Vec<&str> = string_array_b.iter().map(|v| v.unwrap()).collect();
                    assert_eq!(
                        vec_a, vec_b,
                        "failed for encoder: {encoding:?} and row_group_size: {row_group_size:?}"
                    );
                });
            }
        }
    }

    #[test]
    fn arrow_writer_string_dictionary() {
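        // define a dictionary schema (using the deprecated dict id API)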
        #[allow(deprecated)]
        let schema = Arc::new(Schema::new(vec![Field::new_dict(
            "dictionary",
            DataType::Dictionary(Box::new(DataType::Int32), Box::new(DataType::Utf8)),
            true,
            42,
            true,
        )]));

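        // create some data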
        let d: Int32DictionaryArray = [Some("alpha"), None, Some("beta"), Some("alpha")]
            .iter()
            .copied()
            .collect();

        one_column_roundtrip_with_schema(Arc::new(d), schema);
    }

    #[test]
    fn arrow_writer_test_type_compatibility() {
        fn ensure_compatible_write<T1, T2>(array1: T1, array2: T2, expected_result: T1)
        where
            T1: Array + 'static,
            T2: Array + 'static,
        {
            let schema1 = Arc::new(Schema::new(vec![Field::new(
                "a",
                array1.data_type().clone(),
                false,
            )]));

            let file = tempfile().unwrap();
            let mut writer =
                ArrowWriter::try_new(file.try_clone().unwrap(), schema1.clone(), None).unwrap();

            let rb1 = RecordBatch::try_new(schema1.clone(), vec![Arc::new(array1)]).unwrap();
            writer.write(&rb1).unwrap();

            let schema2 = Arc::new(Schema::new(vec![Field::new(
                "a",
                array2.data_type().clone(),
                false,
            )]));
            let rb2 = RecordBatch::try_new(schema2, vec![Arc::new(array2)]).unwrap();
            writer.write(&rb2).unwrap();

            writer.close().unwrap();

            let mut record_batch_reader =
                ParquetRecordBatchReader::try_new(file.try_clone().unwrap(), 1024).unwrap();
            let actual_batch = record_batch_reader.next().unwrap().unwrap();

            let expected_batch =
                RecordBatch::try_new(schema1, vec![Arc::new(expected_result)]).unwrap();
            assert_eq!(actual_batch, expected_batch);
        }

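        // The first batch's Arrow type wins: later batches may use any compatible
        // string/binary/dictionary representation and still read back as the first type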
        ensure_compatible_write(
            DictionaryArray::new(
                UInt8Array::from_iter_values(vec![0]),
                Arc::new(StringArray::from_iter_values(vec!["parquet"])),
            ),
            StringArray::from_iter_values(vec!["barquet"]),
            DictionaryArray::new(
                UInt8Array::from_iter_values(vec![0, 1]),
                Arc::new(StringArray::from_iter_values(vec!["parquet", "barquet"])),
            ),
        );

        ensure_compatible_write(
            StringArray::from_iter_values(vec!["parquet"]),
            DictionaryArray::new(
                UInt8Array::from_iter_values(vec![0]),
                Arc::new(StringArray::from_iter_values(vec!["barquet"])),
            ),
            StringArray::from_iter_values(vec!["parquet", "barquet"]),
        );

        ensure_compatible_write(
            DictionaryArray::new(
                UInt8Array::from_iter_values(vec![0]),
                Arc::new(StringArray::from_iter_values(vec!["parquet"])),
            ),
            DictionaryArray::new(
                UInt16Array::from_iter_values(vec![0]),
                Arc::new(StringArray::from_iter_values(vec!["barquet"])),
            ),
            DictionaryArray::new(
                UInt8Array::from_iter_values(vec![0, 1]),
                Arc::new(StringArray::from_iter_values(vec!["parquet", "barquet"])),
            ),
        );

        ensure_compatible_write(
            DictionaryArray::new(
                UInt8Array::from_iter_values(vec![0]),
                Arc::new(StringArray::from_iter_values(vec!["parquet"])),
            ),
            DictionaryArray::new(
                UInt8Array::from_iter_values(vec![0]),
                Arc::new(LargeStringArray::from_iter_values(vec!["barquet"])),
            ),
            DictionaryArray::new(
                UInt8Array::from_iter_values(vec![0, 1]),
                Arc::new(StringArray::from_iter_values(vec!["parquet", "barquet"])),
            ),
        );

        ensure_compatible_write(
            DictionaryArray::new(
                UInt8Array::from_iter_values(vec![0]),
                Arc::new(StringArray::from_iter_values(vec!["parquet"])),
            ),
            LargeStringArray::from_iter_values(vec!["barquet"]),
            DictionaryArray::new(
                UInt8Array::from_iter_values(vec![0, 1]),
                Arc::new(StringArray::from_iter_values(vec!["parquet", "barquet"])),
            ),
        );

        ensure_compatible_write(
            StringArray::from_iter_values(vec!["parquet"]),
            LargeStringArray::from_iter_values(vec!["barquet"]),
            StringArray::from_iter_values(vec!["parquet", "barquet"]),
        );

        ensure_compatible_write(
            LargeStringArray::from_iter_values(vec!["parquet"]),
            StringArray::from_iter_values(vec!["barquet"]),
            LargeStringArray::from_iter_values(vec!["parquet", "barquet"]),
        );

        ensure_compatible_write(
            StringArray::from_iter_values(vec!["parquet"]),
            StringViewArray::from_iter_values(vec!["barquet"]),
            StringArray::from_iter_values(vec!["parquet", "barquet"]),
        );

        ensure_compatible_write(
            StringViewArray::from_iter_values(vec!["parquet"]),
            StringArray::from_iter_values(vec!["barquet"]),
            StringViewArray::from_iter_values(vec!["parquet", "barquet"]),
        );

        ensure_compatible_write(
            LargeStringArray::from_iter_values(vec!["parquet"]),
            StringViewArray::from_iter_values(vec!["barquet"]),
            LargeStringArray::from_iter_values(vec!["parquet", "barquet"]),
        );

        ensure_compatible_write(
            StringViewArray::from_iter_values(vec!["parquet"]),
            LargeStringArray::from_iter_values(vec!["barquet"]),
            StringViewArray::from_iter_values(vec!["parquet", "barquet"]),
        );

        ensure_compatible_write(
            BinaryArray::from_iter_values(vec![b"parquet"]),
            LargeBinaryArray::from_iter_values(vec![b"barquet"]),
            BinaryArray::from_iter_values(vec![b"parquet", b"barquet"]),
        );

        ensure_compatible_write(
            LargeBinaryArray::from_iter_values(vec![b"parquet"]),
            BinaryArray::from_iter_values(vec![b"barquet"]),
            LargeBinaryArray::from_iter_values(vec![b"parquet", b"barquet"]),
        );

        ensure_compatible_write(
            BinaryArray::from_iter_values(vec![b"parquet"]),
            BinaryViewArray::from_iter_values(vec![b"barquet"]),
            BinaryArray::from_iter_values(vec![b"parquet", b"barquet"]),
        );

        ensure_compatible_write(
            BinaryViewArray::from_iter_values(vec![b"parquet"]),
            BinaryArray::from_iter_values(vec![b"barquet"]),
            BinaryViewArray::from_iter_values(vec![b"parquet", b"barquet"]),
        );

        ensure_compatible_write(
            BinaryViewArray::from_iter_values(vec![b"parquet"]),
            LargeBinaryArray::from_iter_values(vec![b"barquet"]),
            BinaryViewArray::from_iter_values(vec![b"parquet", b"barquet"]),
        );

        ensure_compatible_write(
            LargeBinaryArray::from_iter_values(vec![b"parquet"]),
            BinaryViewArray::from_iter_values(vec![b"barquet"]),
            LargeBinaryArray::from_iter_values(vec![b"parquet", b"barquet"]),
        );

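        // Lists whose field metadata differs (here a Parquet field id) should also
        // be considered compatible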
        let list_field_metadata = HashMap::from_iter(vec![(
            PARQUET_FIELD_ID_META_KEY.to_string(),
            "1".to_string(),
        )]);
        let list_field = Field::new_list_field(DataType::Int32, false);

        let values1 = Arc::new(Int32Array::from(vec![0, 1, 2, 3, 4]));
        let offsets1 = OffsetBuffer::new(vec![0, 2, 5].into());

        let values2 = Arc::new(Int32Array::from(vec![5, 6, 7, 8, 9]));
        let offsets2 = OffsetBuffer::new(vec![0, 3, 5].into());

        let values_expected = Arc::new(Int32Array::from(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]));
        let offsets_expected = OffsetBuffer::new(vec![0, 2, 5, 8, 10].into());

        ensure_compatible_write(
            ListArray::try_new(
                Arc::new(
                    list_field
                        .clone()
                        .with_metadata(list_field_metadata.clone()),
                ),
                offsets1,
                values1,
                None,
            )
            .unwrap(),
            ListArray::try_new(Arc::new(list_field.clone()), offsets2, values2, None).unwrap(),
            ListArray::try_new(
                Arc::new(
                    list_field
                        .clone()
                        .with_metadata(list_field_metadata.clone()),
                ),
                offsets_expected,
                values_expected,
                None,
            )
            .unwrap(),
        );
    }

    #[test]
    fn arrow_writer_primitive_dictionary() {
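        // define a dictionary schema (using the deprecated dict id API)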
        #[allow(deprecated)]
        let schema = Arc::new(Schema::new(vec![Field::new_dict(
            "dictionary",
            DataType::Dictionary(Box::new(DataType::UInt8), Box::new(DataType::UInt32)),
            true,
            42,
            true,
        )]));

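        // create some data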
        let mut builder = PrimitiveDictionaryBuilder::<UInt8Type, UInt32Type>::new();
        builder.append(12345678).unwrap();
        builder.append_null();
        builder.append(22345678).unwrap();
        builder.append(12345678).unwrap();
        let d = builder.finish();

        one_column_roundtrip_with_schema(Arc::new(d), schema);
    }

    #[test]
    fn arrow_writer_decimal32_dictionary() {
        let integers = vec![12345, 56789, 34567];

        let keys = UInt8Array::from(vec![Some(0), None, Some(1), Some(2), Some(1)]);

        let values = Decimal32Array::from(integers.clone())
            .with_precision_and_scale(5, 2)
            .unwrap();

        let array = DictionaryArray::new(keys, Arc::new(values));
        one_column_roundtrip(Arc::new(array.clone()), true);

        let values = Decimal32Array::from(integers)
            .with_precision_and_scale(9, 2)
            .unwrap();

        let array = array.with_values(Arc::new(values));
        one_column_roundtrip(Arc::new(array), true);
    }

    #[test]
    fn arrow_writer_decimal64_dictionary() {
        let integers = vec![12345, 56789, 34567];

        let keys = UInt8Array::from(vec![Some(0), None, Some(1), Some(2), Some(1)]);

        let values = Decimal64Array::from(integers.clone())
            .with_precision_and_scale(5, 2)
            .unwrap();

        let array = DictionaryArray::new(keys, Arc::new(values));
        one_column_roundtrip(Arc::new(array.clone()), true);

        let values = Decimal64Array::from(integers)
            .with_precision_and_scale(12, 2)
            .unwrap();

        let array = array.with_values(Arc::new(values));
        one_column_roundtrip(Arc::new(array), true);
    }

    #[test]
    fn arrow_writer_decimal128_dictionary() {
        let integers = vec![12345, 56789, 34567];

        let keys = UInt8Array::from(vec![Some(0), None, Some(1), Some(2), Some(1)]);

        let values = Decimal128Array::from(integers.clone())
            .with_precision_and_scale(5, 2)
            .unwrap();

        let array = DictionaryArray::new(keys, Arc::new(values));
        one_column_roundtrip(Arc::new(array.clone()), true);

        let values = Decimal128Array::from(integers)
            .with_precision_and_scale(12, 2)
            .unwrap();

        let array = array.with_values(Arc::new(values));
        one_column_roundtrip(Arc::new(array), true);
    }

    #[test]
    fn arrow_writer_decimal256_dictionary() {
        let integers = vec![
            i256::from_i128(12345),
            i256::from_i128(56789),
            i256::from_i128(34567),
        ];

        let keys = UInt8Array::from(vec![Some(0), None, Some(1), Some(2), Some(1)]);

        let values = Decimal256Array::from(integers.clone())
            .with_precision_and_scale(5, 2)
            .unwrap();

        let array = DictionaryArray::new(keys, Arc::new(values));
        one_column_roundtrip(Arc::new(array.clone()), true);

        let values = Decimal256Array::from(integers)
            .with_precision_and_scale(12, 2)
            .unwrap();

        let array = array.with_values(Arc::new(values));
        one_column_roundtrip(Arc::new(array), true);
    }

    #[test]
    fn arrow_writer_string_dictionary_unsigned_index() {
        #[allow(deprecated)]
        let schema = Arc::new(Schema::new(vec![Field::new_dict(
            "dictionary",
            DataType::Dictionary(Box::new(DataType::UInt8), Box::new(DataType::Utf8)),
            true,
            42,
            true,
        )]));

        let d: UInt8DictionaryArray = [Some("alpha"), None, Some("beta"), Some("alpha")]
            .iter()
            .copied()
            .collect();

        one_column_roundtrip_with_schema(Arc::new(d), schema);
    }

    #[test]
    fn u32_min_max() {
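        // Parquet stores u32 as Int32, so min/max statistics must be
        // reinterpreted as unsigned when checked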
        let src = [
            u32::MIN,
            u32::MIN + 1,
            (i32::MAX as u32) - 1,
            i32::MAX as u32,
            (i32::MAX as u32) + 1,
            u32::MAX - 1,
            u32::MAX,
        ];
        let values = Arc::new(UInt32Array::from_iter_values(src.iter().cloned()));
        let files = one_column_roundtrip(values, false);

        for file in files {
            let reader = SerializedFileReader::new(file).unwrap();
            let metadata = reader.metadata();

            let mut row_offset = 0;
            for row_group in metadata.row_groups() {
                assert_eq!(row_group.num_columns(), 1);
                let column = row_group.column(0);

                let num_values = column.num_values() as usize;
                let src_slice = &src[row_offset..row_offset + num_values];
                row_offset += column.num_values() as usize;

                let stats = column.statistics().unwrap();
                if let Statistics::Int32(stats) = stats {
                    assert_eq!(
                        *stats.min_opt().unwrap() as u32,
                        *src_slice.iter().min().unwrap()
                    );
                    assert_eq!(
                        *stats.max_opt().unwrap() as u32,
                        *src_slice.iter().max().unwrap()
                    );
                } else {
                    panic!("Statistics::Int32 missing")
                }
            }
        }
    }

    #[test]
    fn u64_min_max() {
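        // Parquet stores u64 as Int64, so min/max statistics must be
        // reinterpreted as unsigned when checked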
        let src = [
            u64::MIN,
            u64::MIN + 1,
            (i64::MAX as u64) - 1,
            i64::MAX as u64,
            (i64::MAX as u64) + 1,
            u64::MAX - 1,
            u64::MAX,
        ];
        let values = Arc::new(UInt64Array::from_iter_values(src.iter().cloned()));
        let files = one_column_roundtrip(values, false);

        for file in files {
            let reader = SerializedFileReader::new(file).unwrap();
            let metadata = reader.metadata();

            let mut row_offset = 0;
            for row_group in metadata.row_groups() {
                assert_eq!(row_group.num_columns(), 1);
                let column = row_group.column(0);

                let num_values = column.num_values() as usize;
                let src_slice = &src[row_offset..row_offset + num_values];
                row_offset += column.num_values() as usize;

                let stats = column.statistics().unwrap();
                if let Statistics::Int64(stats) = stats {
                    assert_eq!(
                        *stats.min_opt().unwrap() as u64,
                        *src_slice.iter().min().unwrap()
                    );
                    assert_eq!(
                        *stats.max_opt().unwrap() as u64,
                        *src_slice.iter().max().unwrap()
                    );
                } else {
                    panic!("Statistics::Int64 missing")
                }
            }
        }
    }

    #[test]
    fn statistics_null_counts_only_nulls() {
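        // check that null counts are correct when the column contains only nulls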
        let values = Arc::new(UInt64Array::from(vec![None, None]));
        let files = one_column_roundtrip(values, true);

        for file in files {
            let reader = SerializedFileReader::new(file).unwrap();
            let metadata = reader.metadata();
            assert_eq!(metadata.num_row_groups(), 1);
            let row_group = metadata.row_group(0);
            assert_eq!(row_group.num_columns(), 1);
            let column = row_group.column(0);
            let stats = column.statistics().unwrap();
            assert_eq!(stats.null_count_opt(), Some(2));
        }
    }

    #[test]
    fn test_list_of_struct_roundtrip() {
        let int_field = Field::new("a", DataType::Int32, true);
        let int_field2 = Field::new("b", DataType::Int32, true);

        let int_builder = Int32Builder::with_capacity(10);
        let int_builder2 = Int32Builder::with_capacity(10);

        let struct_builder = StructBuilder::new(
            vec![int_field, int_field2],
            vec![Box::new(int_builder), Box::new(int_builder2)],
        );
        let mut list_builder = ListBuilder::new(struct_builder);

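        // [{a: 1, b: 2}]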
        let values = list_builder.values();
        values
            .field_builder::<Int32Builder>(0)
            .unwrap()
            .append_value(1);
        values
            .field_builder::<Int32Builder>(1)
            .unwrap()
            .append_value(2);
        values.append(true);
        list_builder.append(true);

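        // []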
        list_builder.append(true);

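        // null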
        list_builder.append(false);

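        // [null, null]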
        let values = list_builder.values();
        values
            .field_builder::<Int32Builder>(0)
            .unwrap()
            .append_null();
        values
            .field_builder::<Int32Builder>(1)
            .unwrap()
            .append_null();
        values.append(false);
        values
            .field_builder::<Int32Builder>(0)
            .unwrap()
            .append_null();
        values
            .field_builder::<Int32Builder>(1)
            .unwrap()
            .append_null();
        values.append(false);
        list_builder.append(true);

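        // [{a: null, b: 3}]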
        let values = list_builder.values();
        values
            .field_builder::<Int32Builder>(0)
            .unwrap()
            .append_null();
        values
            .field_builder::<Int32Builder>(1)
            .unwrap()
            .append_value(3);
        values.append(true);
        list_builder.append(true);

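        // [{a: 2, b: null}]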
        let values = list_builder.values();
        values
            .field_builder::<Int32Builder>(0)
            .unwrap()
            .append_value(2);
        values
            .field_builder::<Int32Builder>(1)
            .unwrap()
            .append_null();
        values.append(true);
        list_builder.append(true);

        let array = Arc::new(list_builder.finish());

        one_column_roundtrip(array, true);
    }

    fn row_group_sizes(metadata: &ParquetMetaData) -> Vec<i64> {
        metadata.row_groups().iter().map(|x| x.num_rows()).collect()
    }

    #[test]
    fn test_aggregates_records() {
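        // Batches of 100, 50 and 300 rows should be re-chunked into row groups
        // of at most 200 rows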
        let arrays = [
            Int32Array::from((0..100).collect::<Vec<_>>()),
            Int32Array::from((0..50).collect::<Vec<_>>()),
            Int32Array::from((200..500).collect::<Vec<_>>()),
        ];

        let schema = Arc::new(Schema::new(vec![Field::new(
            "int",
            ArrowDataType::Int32,
            false,
        )]));

        let file = tempfile::tempfile().unwrap();

        let props = WriterProperties::builder()
            .set_max_row_group_row_count(Some(200))
            .build();

        let mut writer =
            ArrowWriter::try_new(file.try_clone().unwrap(), schema.clone(), Some(props)).unwrap();

        for array in arrays {
            let batch = RecordBatch::try_new(schema.clone(), vec![Arc::new(array)]).unwrap();
            writer.write(&batch).unwrap();
        }

        writer.close().unwrap();

        let builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
        assert_eq!(&row_group_sizes(builder.metadata()), &[200, 200, 50]);

        let batches = builder
            .with_batch_size(100)
            .build()
            .unwrap()
            .collect::<ArrowResult<Vec<_>>>()
            .unwrap();

        assert_eq!(batches.len(), 5);
        assert!(batches.iter().all(|x| x.num_columns() == 1));

        let batch_sizes: Vec<_> = batches.iter().map(|x| x.num_rows()).collect();

        assert_eq!(&batch_sizes, &[100, 100, 100, 100, 50]);

        let values: Vec<_> = batches
            .iter()
            .flat_map(|x| {
                x.column(0)
                    .as_any()
                    .downcast_ref::<Int32Array>()
                    .unwrap()
                    .values()
                    .iter()
                    .cloned()
            })
            .collect();

        let expected_values: Vec<_> = [0..100, 0..50, 200..500].into_iter().flatten().collect();
        assert_eq!(&values, &expected_values)
    }

    #[test]
    fn complex_aggregate() {
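        // Tests aggregation of nested list-of-struct data across multiple batches
        // with a small row group limit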
        let field_a = Arc::new(Field::new("leaf_a", DataType::Int32, false));
        let field_b = Arc::new(Field::new("leaf_b", DataType::Int32, true));
        let struct_a = Arc::new(Field::new(
            "struct_a",
            DataType::Struct(vec![field_a.clone(), field_b.clone()].into()),
            true,
        ));

        let list_a = Arc::new(Field::new("list", DataType::List(struct_a), true));
        let struct_b = Arc::new(Field::new(
            "struct_b",
            DataType::Struct(vec![list_a.clone()].into()),
            false,
        ));

        let schema = Arc::new(Schema::new(vec![struct_b]));

4189 let field_a_array = Int32Array::from(vec![1, 2, 3, 4, 5, 6]);
4191 let field_b_array =
4192 Int32Array::from_iter(vec![Some(1), None, Some(2), None, None, Some(6)]);
4193
4194 let struct_a_array = StructArray::from(vec![
4195 (field_a.clone(), Arc::new(field_a_array) as ArrayRef),
4196 (field_b.clone(), Arc::new(field_b_array) as ArrayRef),
4197 ]);
4198
4199 let list_data = ArrayDataBuilder::new(list_a.data_type().clone())
4200 .len(5)
4201 .add_buffer(Buffer::from_iter(vec![
4202 0_i32, 1_i32, 1_i32, 3_i32, 3_i32, 5_i32,
4203 ]))
4204 .null_bit_buffer(Some(Buffer::from_iter(vec![
4205 true, false, true, false, true,
4206 ])))
4207 .child_data(vec![struct_a_array.into_data()])
4208 .build()
4209 .unwrap();
4210
4211 let list_a_array = Arc::new(ListArray::from(list_data)) as ArrayRef;
4212 let struct_b_array = StructArray::from(vec![(list_a.clone(), list_a_array)]);
4213
4214 let batch1 =
4215 RecordBatch::try_from_iter(vec![("struct_b", Arc::new(struct_b_array) as ArrayRef)])
4216 .unwrap();
4217
4218 let field_a_array = Int32Array::from(vec![6, 7, 8, 9, 10]);
4219 let field_b_array = Int32Array::from_iter(vec![None, None, None, Some(1), None]);
4220
4221 let struct_a_array = StructArray::from(vec![
4222 (field_a, Arc::new(field_a_array) as ArrayRef),
4223 (field_b, Arc::new(field_b_array) as ArrayRef),
4224 ]);
4225
4226 let list_data = ArrayDataBuilder::new(list_a.data_type().clone())
4227 .len(2)
4228 .add_buffer(Buffer::from_iter(vec![0_i32, 4_i32, 5_i32]))
4229 .child_data(vec![struct_a_array.into_data()])
4230 .build()
4231 .unwrap();
4232
4233 let list_a_array = Arc::new(ListArray::from(list_data)) as ArrayRef;
4234 let struct_b_array = StructArray::from(vec![(list_a, list_a_array)]);
4235
4236 let batch2 =
4237 RecordBatch::try_from_iter(vec![("struct_b", Arc::new(struct_b_array) as ArrayRef)])
4238 .unwrap();
4239
4240 let batches = &[batch1, batch2];
4241
4242 let expected = r#"
4245 +-------------------------------------------------------------------------------------------------------+
4246 | struct_b |
4247 +-------------------------------------------------------------------------------------------------------+
4248 | {list: [{leaf_a: 1, leaf_b: 1}]} |
4249 | {list: } |
4250 | {list: [{leaf_a: 2, leaf_b: }, {leaf_a: 3, leaf_b: 2}]} |
4251 | {list: } |
4252 | {list: [{leaf_a: 4, leaf_b: }, {leaf_a: 5, leaf_b: }]} |
4253 | {list: [{leaf_a: 6, leaf_b: }, {leaf_a: 7, leaf_b: }, {leaf_a: 8, leaf_b: }, {leaf_a: 9, leaf_b: 1}]} |
4254 | {list: [{leaf_a: 10, leaf_b: }]} |
4255 +-------------------------------------------------------------------------------------------------------+
4256 "#.trim().split('\n').map(|x| x.trim()).collect::<Vec<_>>().join("\n");
4257
4258 let actual = pretty_format_batches(batches).unwrap().to_string();
4259 assert_eq!(actual, expected);
4260
4261 let file = tempfile::tempfile().unwrap();
4263 let props = WriterProperties::builder()
4264 .set_max_row_group_row_count(Some(6))
4265 .build();
4266
4267 let mut writer =
4268 ArrowWriter::try_new(file.try_clone().unwrap(), schema, Some(props)).unwrap();
4269
4270 for batch in batches {
4271 writer.write(batch).unwrap();
4272 }
4273 writer.close().unwrap();
4274
4275 let builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();
4280 assert_eq!(&row_group_sizes(builder.metadata()), &[6, 1]);
4281
4282 let batches = builder
4283 .with_batch_size(2)
4284 .build()
4285 .unwrap()
4286 .collect::<ArrowResult<Vec<_>>>()
4287 .unwrap();
4288
4289 assert_eq!(batches.len(), 4);
4290 let batch_counts: Vec<_> = batches.iter().map(|x| x.num_rows()).collect();
4291 assert_eq!(&batch_counts, &[2, 2, 2, 1]);
4292
4293 let actual = pretty_format_batches(&batches).unwrap().to_string();
4294 assert_eq!(actual, expected);
4295 }

    #[test]
    fn test_arrow_writer_metadata() {
        let batch_schema = Schema::new(vec![Field::new("int32", DataType::Int32, false)]);
        let file_schema = batch_schema.clone().with_metadata(
            vec![("foo".to_string(), "bar".to_string())]
                .into_iter()
                .collect(),
        );

        let batch = RecordBatch::try_new(
            Arc::new(batch_schema),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as _],
        )
        .unwrap();

        let mut buf = Vec::with_capacity(1024);
        let mut writer = ArrowWriter::try_new(&mut buf, Arc::new(file_schema), None).unwrap();
        writer.write(&batch).unwrap();
        writer.close().unwrap();
    }

    #[test]
    fn test_arrow_writer_nullable() {
        let batch_schema = Schema::new(vec![Field::new("int32", DataType::Int32, false)]);
        let file_schema = Schema::new(vec![Field::new("int32", DataType::Int32, true)]);
        let file_schema = Arc::new(file_schema);

        let batch = RecordBatch::try_new(
            Arc::new(batch_schema),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as _],
        )
        .unwrap();

        let mut buf = Vec::with_capacity(1024);
        let mut writer = ArrowWriter::try_new(&mut buf, file_schema.clone(), None).unwrap();
        writer.write(&batch).unwrap();
        writer.close().unwrap();

        let mut read = ParquetRecordBatchReader::try_new(Bytes::from(buf), 1024).unwrap();
        let back = read.next().unwrap().unwrap();
        assert_eq!(back.schema(), file_schema);
        assert_ne!(back.schema(), batch.schema());
        assert_eq!(back.column(0).as_ref(), batch.column(0).as_ref());
    }

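    // `in_progress_size` estimates the encoded size of the buffered row group, while
    // `memory_size` tracks the heap memory it currently holds; the estimate should
    // never exceed the actual memory usage.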
    #[test]
    fn in_progress_accounting() {
        // define schema
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);

        // create some data
        let a = Int32Array::from(vec![1, 2, 3, 4, 5]);

        // build a record batch
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)]).unwrap();

        let mut writer = ArrowWriter::try_new(vec![], batch.schema(), None).unwrap();

        // starts empty
        assert_eq!(writer.in_progress_size(), 0);
        assert_eq!(writer.in_progress_rows(), 0);
        assert_eq!(writer.memory_size(), 0);
        assert_eq!(writer.bytes_written(), 4); // Initial header
        writer.write(&batch).unwrap();

        // updated on write
        let initial_size = writer.in_progress_size();
        assert!(initial_size > 0);
        assert_eq!(writer.in_progress_rows(), 5);
        let initial_memory = writer.memory_size();
        assert!(initial_memory > 0);
        // The estimated encoded size should not exceed the buffered memory
        assert!(
            initial_size <= initial_memory,
            "{initial_size} <= {initial_memory}"
        );

        // updated on second write
        writer.write(&batch).unwrap();
        assert!(writer.in_progress_size() > initial_size);
        assert_eq!(writer.in_progress_rows(), 10);
        assert!(writer.memory_size() > initial_memory);
        assert!(
            writer.in_progress_size() <= writer.memory_size(),
            "in_progress_size {} <= memory_size {}",
            writer.in_progress_size(),
            writer.memory_size()
        );

        // in-progress tracking is cleared on flush, but bytes_written advances
        let pre_flush_bytes_written = writer.bytes_written();
        writer.flush().unwrap();
        assert_eq!(writer.in_progress_size(), 0);
        assert_eq!(writer.memory_size(), 0);
        assert!(writer.bytes_written() > pre_flush_bytes_written);

        writer.close().unwrap();
    }

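    // An all-null column should still get an offset index entry and a single data page.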
    #[test]
    fn test_writer_all_null() {
        let a = Int32Array::from(vec![1, 2, 3, 4, 5]);
        let b = Int32Array::new(vec![0; 5].into(), Some(NullBuffer::new_null(5)));
        let batch = RecordBatch::try_from_iter(vec![
            ("a", Arc::new(a) as ArrayRef),
            ("b", Arc::new(b) as ArrayRef),
        ])
        .unwrap();

        let mut buf = Vec::with_capacity(1024);
        let mut writer = ArrowWriter::try_new(&mut buf, batch.schema(), None).unwrap();
        writer.write(&batch).unwrap();
        writer.close().unwrap();

        let bytes = Bytes::from(buf);
        let options = ReadOptionsBuilder::new().with_page_index().build();
        let reader = SerializedFileReader::new_with_options(bytes, options).unwrap();
        let index = reader.metadata().offset_index().unwrap();

        assert_eq!(index.len(), 1); // 1 row group
        assert_eq!(index[0].len(), 2); // 2 columns
        assert_eq!(index[0][0].page_locations().len(), 1); // 1 page
        assert_eq!(index[0][1].page_locations().len(), 1); // 1 page
    }

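    // `EnabledStatistics::Page` produces both a column index and an offset index for
    // the column, while columns left at `EnabledStatistics::None` still get an offset
    // index but no column index.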
    #[test]
    fn test_disabled_statistics_with_page() {
        let file_schema = Schema::new(vec![
            Field::new("a", DataType::Utf8, true),
            Field::new("b", DataType::Utf8, true),
        ]);
        let file_schema = Arc::new(file_schema);

        let batch = RecordBatch::try_new(
            file_schema.clone(),
            vec![
                Arc::new(StringArray::from(vec!["a", "b", "c", "d"])) as _,
                Arc::new(StringArray::from(vec!["w", "x", "y", "z"])) as _,
            ],
        )
        .unwrap();

        let props = WriterProperties::builder()
            .set_statistics_enabled(EnabledStatistics::None)
            .set_column_statistics_enabled("a".into(), EnabledStatistics::Page)
            .build();

        let mut buf = Vec::with_capacity(1024);
        let mut writer = ArrowWriter::try_new(&mut buf, file_schema.clone(), Some(props)).unwrap();
        writer.write(&batch).unwrap();

        let metadata = writer.close().unwrap();
        assert_eq!(metadata.num_row_groups(), 1);
        let row_group = metadata.row_group(0);
        assert_eq!(row_group.num_columns(), 2);
        // Column "a" has both an offset index and a column index
        assert!(row_group.column(0).offset_index_offset().is_some());
        assert!(row_group.column(0).column_index_offset().is_some());
        // Column "b" has only an offset index
        assert!(row_group.column(1).offset_index_offset().is_some());
        assert!(row_group.column(1).column_index_offset().is_none());

        let options = ReadOptionsBuilder::new().with_page_index().build();
        let reader = SerializedFileReader::new_with_options(Bytes::from(buf), options).unwrap();

        let row_group = reader.get_row_group(0).unwrap();
        let a_col = row_group.metadata().column(0);
        let b_col = row_group.metadata().column(1);

        // Column chunk of column "a" should have chunk level statistics
        if let Statistics::ByteArray(byte_array_stats) = a_col.statistics().unwrap() {
            let min = byte_array_stats.min_opt().unwrap();
            let max = byte_array_stats.max_opt().unwrap();

            assert_eq!(min.as_bytes(), b"a");
            assert_eq!(max.as_bytes(), b"d");
        } else {
            panic!("expecting Statistics::ByteArray");
        }

        // Column "b" should not have any statistics
        assert!(b_col.statistics().is_none());

        let offset_index = reader.metadata().offset_index().unwrap();
        assert_eq!(offset_index.len(), 1); // 1 row group
        assert_eq!(offset_index[0].len(), 2); // 2 columns

        let column_index = reader.metadata().column_index().unwrap();
        assert_eq!(column_index.len(), 1); // 1 row group
        assert_eq!(column_index[0].len(), 2); // 2 columns

        // Only column "a" should have a column index
        let a_idx = &column_index[0][0];
        assert!(
            matches!(a_idx, ColumnIndexMetaData::BYTE_ARRAY(_)),
            "{a_idx:?}"
        );
        let b_idx = &column_index[0][1];
        assert!(matches!(b_idx, ColumnIndexMetaData::NONE), "{b_idx:?}");
    }

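    // Same shape as the test above, but with chunk-level statistics: min/max land in
    // the column chunk metadata only, and no column index is written at all.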
    #[test]
    fn test_disabled_statistics_with_chunk() {
        let file_schema = Schema::new(vec![
            Field::new("a", DataType::Utf8, true),
            Field::new("b", DataType::Utf8, true),
        ]);
        let file_schema = Arc::new(file_schema);

        let batch = RecordBatch::try_new(
            file_schema.clone(),
            vec![
                Arc::new(StringArray::from(vec!["a", "b", "c", "d"])) as _,
                Arc::new(StringArray::from(vec!["w", "x", "y", "z"])) as _,
            ],
        )
        .unwrap();

        let props = WriterProperties::builder()
            .set_statistics_enabled(EnabledStatistics::None)
            .set_column_statistics_enabled("a".into(), EnabledStatistics::Chunk)
            .build();

        let mut buf = Vec::with_capacity(1024);
        let mut writer = ArrowWriter::try_new(&mut buf, file_schema.clone(), Some(props)).unwrap();
        writer.write(&batch).unwrap();

        let metadata = writer.close().unwrap();
        assert_eq!(metadata.num_row_groups(), 1);
        let row_group = metadata.row_group(0);
        assert_eq!(row_group.num_columns(), 2);
        // With chunk-level statistics, neither column gets a column index
        assert!(row_group.column(0).offset_index_offset().is_some());
        assert!(row_group.column(0).column_index_offset().is_none());
        assert!(row_group.column(1).offset_index_offset().is_some());
        assert!(row_group.column(1).column_index_offset().is_none());

        let options = ReadOptionsBuilder::new().with_page_index().build();
        let reader = SerializedFileReader::new_with_options(Bytes::from(buf), options).unwrap();

        let row_group = reader.get_row_group(0).unwrap();
        let a_col = row_group.metadata().column(0);
        let b_col = row_group.metadata().column(1);

        // Column chunk of column "a" should have chunk level statistics
        if let Statistics::ByteArray(byte_array_stats) = a_col.statistics().unwrap() {
            let min = byte_array_stats.min_opt().unwrap();
            let max = byte_array_stats.max_opt().unwrap();

            assert_eq!(min.as_bytes(), b"a");
            assert_eq!(max.as_bytes(), b"d");
        } else {
            panic!("expecting Statistics::ByteArray");
        }

        // Column "b" should not have any statistics
        assert!(b_col.statistics().is_none());

        let column_index = reader.metadata().column_index().unwrap();
        assert_eq!(column_index.len(), 1); // 1 row group
        assert_eq!(column_index[0].len(), 2); // 2 columns

        // Neither column should have a column index
        let a_idx = &column_index[0][0];
        assert!(matches!(a_idx, ColumnIndexMetaData::NONE), "{a_idx:?}");
        let b_idx = &column_index[0][1];
        assert!(matches!(b_idx, ColumnIndexMetaData::NONE), "{b_idx:?}");
    }

    #[test]
    fn test_arrow_writer_skip_metadata() {
        let batch_schema = Schema::new(vec![Field::new("int32", DataType::Int32, false)]);
        let file_schema = Arc::new(batch_schema.clone());

        let batch = RecordBatch::try_new(
            Arc::new(batch_schema),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as _],
        )
        .unwrap();
        let skip_options = ArrowWriterOptions::new().with_skip_arrow_metadata(true);

        let mut buf = Vec::with_capacity(1024);
        let mut writer =
            ArrowWriter::try_new_with_options(&mut buf, file_schema.clone(), skip_options).unwrap();
        writer.write(&batch).unwrap();
        writer.close().unwrap();

        let bytes = Bytes::from(buf);
        let reader_builder = ParquetRecordBatchReaderBuilder::try_new(bytes).unwrap();
        assert_eq!(file_schema, *reader_builder.schema());
        if let Some(key_value_metadata) = reader_builder
            .metadata()
            .file_metadata()
            .key_value_metadata()
        {
            assert!(
                !key_value_metadata
                    .iter()
                    .any(|kv| kv.key.as_str() == ARROW_SCHEMA_META_KEY)
            );
        }
    }

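    // Writing with `set_write_path_in_schema(false)` should produce a smaller file
    // than the default settings; the test below only asserts on relative file size.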
    #[test]
    fn test_arrow_writer_skip_path_in_schema() {
        let batch_schema = Schema::new(vec![Field::new("int32", DataType::Int32, false)]);
        let file_schema = Arc::new(batch_schema.clone());

        let batch = RecordBatch::try_new(
            Arc::new(batch_schema),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as _],
        )
        .unwrap();

        // Write once with the default options
        let default_options = ArrowWriterOptions::new();

        let mut buf = Vec::with_capacity(1024);
        let mut writer =
            ArrowWriter::try_new_with_options(&mut buf, file_schema.clone(), default_options)
                .unwrap();
        writer.write(&batch).unwrap();
        writer.close().unwrap();

        // Write again with path_in_schema disabled
        let skip_options = ArrowWriterOptions::new().with_properties(
            WriterProperties::builder()
                .set_write_path_in_schema(false)
                .build(),
        );

        let mut buf2 = Vec::with_capacity(1024);
        let mut writer =
            ArrowWriter::try_new_with_options(&mut buf2, file_schema.clone(), skip_options)
                .unwrap();
        writer.write(&batch).unwrap();
        writer.close().unwrap();

        // The file written without the column paths should be smaller
        assert!(buf.len() > buf2.len());
    }

    #[test]
    fn mismatched_schemas() {
        let batch_schema = Schema::new(vec![Field::new("count", DataType::Int32, false)]);
        let file_schema = Arc::new(Schema::new(vec![Field::new(
            "temperature",
            DataType::Float64,
            false,
        )]));

        let batch = RecordBatch::try_new(
            Arc::new(batch_schema),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as _],
        )
        .unwrap();

        let mut buf = Vec::with_capacity(1024);
        let mut writer = ArrowWriter::try_new(&mut buf, file_schema.clone(), None).unwrap();

        let err = writer.write(&batch).unwrap_err().to_string();
        assert_eq!(
            err,
            "Arrow: Incompatible type. Field 'temperature' has type Float64, array has type Int32"
        );
    }

    #[test]
    fn test_roundtrip_empty_schema() {
        // Create a record batch with no columns and no rows
        let empty_batch = RecordBatch::try_new_with_options(
            Arc::new(Schema::empty()),
            vec![],
            &RecordBatchOptions::default().with_row_count(Some(0)),
        )
        .unwrap();

        // Write it to a parquet file
        let mut parquet_bytes: Vec<u8> = Vec::new();
        let mut writer =
            ArrowWriter::try_new(&mut parquet_bytes, empty_batch.schema(), None).unwrap();
        writer.write(&empty_batch).unwrap();
        writer.close().unwrap();

        // Read it back: the schema roundtrips and no batches are produced
        let bytes = Bytes::from(parquet_bytes);
        let reader = ParquetRecordBatchReaderBuilder::try_new(bytes).unwrap();
        assert_eq!(reader.schema(), &empty_batch.schema());
        let batches: Vec<_> = reader
            .build()
            .unwrap()
            .collect::<ArrowResult<Vec<_>>>()
            .unwrap();
        assert_eq!(batches.len(), 0);
    }

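    // The next few tests decode raw page headers with the thrift reader. With
    // dictionary encoding disabled and no compression, the first data page header
    // starts immediately after the 4-byte "PAR1" magic at the start of the file.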
    #[test]
    fn test_page_stats_not_written_by_default() {
        let string_field = Field::new("a", DataType::Utf8, false);
        let schema = Schema::new(vec![string_field]);
        let raw_string_values = vec!["Blart Versenwald III"];
        let string_values = StringArray::from(raw_string_values.clone());
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(string_values)]).unwrap();

        let props = WriterProperties::builder()
            .set_statistics_enabled(EnabledStatistics::Page)
            .set_dictionary_enabled(false)
            .set_encoding(Encoding::PLAIN)
            .set_compression(crate::basic::Compression::UNCOMPRESSED)
            .build();

        let file = roundtrip_opts(&batch, props);

        // Skip the 4-byte "PAR1" magic and decode the first page header
        let first_page = &file[4..];
        let mut prot = ThriftSliceInputProtocol::new(first_page);
        let hdr = PageHeader::read_thrift(&mut prot).unwrap();
        let stats = hdr.data_page_header.unwrap().statistics;

        // Statistics are not written to the page header unless
        // `set_write_page_header_statistics` is enabled
        assert!(stats.is_none());
    }

    #[test]
    fn test_page_stats_when_enabled() {
        let string_field = Field::new("a", DataType::Utf8, false);
        let schema = Schema::new(vec![string_field]);
        let raw_string_values = vec!["Blart Versenwald III", "Andrew Lamb"];
        let string_values = StringArray::from(raw_string_values.clone());
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(string_values)]).unwrap();

        let props = WriterProperties::builder()
            .set_statistics_enabled(EnabledStatistics::Page)
            .set_dictionary_enabled(false)
            .set_encoding(Encoding::PLAIN)
            .set_write_page_header_statistics(true)
            .set_compression(crate::basic::Compression::UNCOMPRESSED)
            .build();

        let file = roundtrip_opts(&batch, props);

        // Skip the 4-byte "PAR1" magic and decode the first page header
        let first_page = &file[4..];
        let mut prot = ThriftSliceInputProtocol::new(first_page);
        let hdr = PageHeader::read_thrift(&mut prot).unwrap();
        let stats = hdr.data_page_header.unwrap().statistics;

        let stats = stats.unwrap();
        assert!(stats.is_max_value_exact.unwrap());
        assert!(stats.is_min_value_exact.unwrap());
        assert_eq!(stats.max_value.unwrap(), "Blart Versenwald III".as_bytes());
        assert_eq!(stats.min_value.unwrap(), "Andrew Lamb".as_bytes());
    }

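    // With `set_statistics_truncate_length(Some(2))`, min/max values in the page
    // header are cut to two bytes: the min is truncated ("Blart..." -> "Bl"), while
    // the max is truncated and then incremented ("Bl" -> "Bm") so it still upper
    // bounds the real values; both are flagged as inexact.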
    #[test]
    fn test_page_stats_truncation() {
        let string_field = Field::new("a", DataType::Utf8, false);
        let binary_field = Field::new("b", DataType::Binary, false);
        let schema = Schema::new(vec![string_field, binary_field]);

        let raw_string_values = vec!["Blart Versenwald III"];
        let raw_binary_values = [b"Blart Versenwald III".to_vec()];
        let raw_binary_value_refs = raw_binary_values
            .iter()
            .map(|x| x.as_slice())
            .collect::<Vec<_>>();

        let string_values = StringArray::from(raw_string_values.clone());
        let binary_values = BinaryArray::from(raw_binary_value_refs);
        let batch = RecordBatch::try_new(
            Arc::new(schema),
            vec![Arc::new(string_values), Arc::new(binary_values)],
        )
        .unwrap();

        let props = WriterProperties::builder()
            .set_statistics_truncate_length(Some(2))
            .set_dictionary_enabled(false)
            .set_encoding(Encoding::PLAIN)
            .set_write_page_header_statistics(true)
            .set_compression(crate::basic::Compression::UNCOMPRESSED)
            .build();

        let file = roundtrip_opts(&batch, props);

        // Skip the 4-byte "PAR1" magic and decode the first page header
        let first_page = &file[4..];
        let mut prot = ThriftSliceInputProtocol::new(first_page);
        let hdr = PageHeader::read_thrift(&mut prot).unwrap();
        let stats = hdr.data_page_header.unwrap().statistics;
        assert!(stats.is_some());
        let stats = stats.unwrap();
        assert!(!stats.is_max_value_exact.unwrap());
        assert!(!stats.is_min_value_exact.unwrap());
        assert_eq!(stats.max_value.unwrap(), "Bm".as_bytes());
        assert_eq!(stats.min_value.unwrap(), "Bl".as_bytes());

        // The second page header starts right after the first page's data
        let second_page = &prot.as_slice()[hdr.compressed_page_size as usize..];
        let mut prot = ThriftSliceInputProtocol::new(second_page);
        let hdr = PageHeader::read_thrift(&mut prot).unwrap();
        let stats = hdr.data_page_header.unwrap().statistics;
        assert!(stats.is_some());
        let stats = stats.unwrap();
        assert!(!stats.is_max_value_exact.unwrap());
        assert!(!stats.is_min_value_exact.unwrap());
        assert_eq!(stats.max_value.unwrap(), "Bm".as_bytes());
        assert_eq!(stats.min_value.unwrap(), "Bl".as_bytes());
    }

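    // `with_encoding_stats_as_mask(false)` appears to make the reader decode the
    // full page encoding stats rather than a compact mask, so they can be compared
    // directly with what the writer reported at close.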
    #[test]
    fn test_page_encoding_statistics_roundtrip() {
        let batch_schema = Schema::new(vec![Field::new(
            "int32",
            arrow_schema::DataType::Int32,
            false,
        )]);

        let batch = RecordBatch::try_new(
            Arc::new(batch_schema.clone()),
            vec![Arc::new(Int32Array::from(vec![1, 2, 3, 4])) as _],
        )
        .unwrap();

        let mut file: File = tempfile::tempfile().unwrap();
        let mut writer = ArrowWriter::try_new(&mut file, Arc::new(batch_schema), None).unwrap();
        writer.write(&batch).unwrap();
        let file_metadata = writer.close().unwrap();

        assert_eq!(file_metadata.num_row_groups(), 1);
        assert_eq!(file_metadata.row_group(0).num_columns(), 1);
        assert!(
            file_metadata
                .row_group(0)
                .column(0)
                .page_encoding_stats()
                .is_some()
        );
        let chunk_page_stats = file_metadata
            .row_group(0)
            .column(0)
            .page_encoding_stats()
            .unwrap();

        // check that the read metadata matches what was written
        let options = ReadOptionsBuilder::new()
            .with_page_index()
            .with_encoding_stats_as_mask(false)
            .build();
        let reader = SerializedFileReader::new_with_options(file, options).unwrap();

        let rowgroup = reader.get_row_group(0).expect("row group missing");
        assert_eq!(rowgroup.num_columns(), 1);
        let column = rowgroup.metadata().column(0);
        assert!(column.page_encoding_stats().is_some());
        let file_page_stats = column.page_encoding_stats().unwrap();
        assert_eq!(chunk_page_stats, file_page_stats);
    }

    #[test]
    fn test_different_dict_page_size_limit() {
        let array = Arc::new(Int64Array::from_iter(0..1024 * 1024));
        let schema = Arc::new(Schema::new(vec![
            Field::new("col0", arrow_schema::DataType::Int64, false),
            Field::new("col1", arrow_schema::DataType::Int64, false),
        ]));
        let batch =
            arrow_array::RecordBatch::try_new(schema.clone(), vec![array.clone(), array]).unwrap();

        let props = WriterProperties::builder()
            .set_dictionary_page_size_limit(1024 * 1024)
            .set_column_dictionary_page_size_limit(ColumnPath::from("col1"), 1024 * 1024 * 4)
            .build();
        let mut writer = ArrowWriter::try_new(Vec::new(), schema, Some(props)).unwrap();
        writer.write(&batch).unwrap();
        let data = Bytes::from(writer.into_inner().unwrap());

        let mut metadata = ParquetMetaDataReader::new();
        metadata.try_parse(&data).unwrap();
        let metadata = metadata.finish().unwrap();
        let col0_meta = metadata.row_group(0).column(0);
        let col1_meta = metadata.row_group(0).column(1);

        let get_dict_page_size = move |meta: &ColumnChunkMetaData| {
            let mut reader =
                SerializedPageReader::new(Arc::new(data.clone()), meta, 0, None).unwrap();
            let page = reader.get_next_page().unwrap().unwrap();
            match page {
                Page::DictionaryPage { buf, .. } => buf.len(),
                _ => panic!("expected DictionaryPage"),
            }
        };

        // The per-column override for "col1" takes precedence over the default limit
        assert_eq!(get_dict_page_size(col0_meta), 1024 * 1024);
        assert_eq!(get_dict_page_size(col1_meta), 1024 * 1024 * 4);
    }

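    /// Shape of the data produced by [`write_batches`]: `num_batches` record batches
    /// of `rows_per_batch` strings, each zero-padded to `row_size` characters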
    struct WriteBatchesShape {
        num_batches: usize,
        rows_per_batch: usize,
        row_size: usize,
    }

    /// Writes the described batches with `props` and returns a reader builder over
    /// the resulting file
    fn write_batches(
        WriteBatchesShape {
            num_batches,
            rows_per_batch,
            row_size,
        }: WriteBatchesShape,
        props: WriterProperties,
    ) -> ParquetRecordBatchReaderBuilder<File> {
        let schema = Arc::new(Schema::new(vec![Field::new(
            "str",
            ArrowDataType::Utf8,
            false,
        )]));
        let file = tempfile::tempfile().unwrap();
        let mut writer =
            ArrowWriter::try_new(file.try_clone().unwrap(), schema.clone(), Some(props)).unwrap();

        for batch_idx in 0..num_batches {
            let strings: Vec<String> = (0..rows_per_batch)
                .map(|i| format!("{:0>width$}", batch_idx * 10 + i, width = row_size))
                .collect();
            let array = StringArray::from(strings);
            let batch = RecordBatch::try_new(schema.clone(), vec![Arc::new(array)]).unwrap();
            writer.write(&batch).unwrap();
        }
        writer.close().unwrap();
        ParquetRecordBatchReaderBuilder::try_new(file).unwrap()
    }

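    // The tests below exercise the interaction of `set_max_row_group_row_count` and
    // `set_max_row_group_bytes`: whichever limit is reached first closes the
    // in-progress row group.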
    #[test]
    fn test_row_group_limit_none_writes_single_row_group() {
        // With both limits disabled, all rows land in a single row group
        let props = WriterProperties::builder()
            .set_max_row_group_row_count(None)
            .set_max_row_group_bytes(None)
            .build();

        let builder = write_batches(
            WriteBatchesShape {
                num_batches: 1,
                rows_per_batch: 1000,
                row_size: 4,
            },
            props,
        );

        assert_eq!(
            &row_group_sizes(builder.metadata()),
            &[1000],
            "With no limits, all rows should be in a single row group"
        );
    }

    #[test]
    fn test_row_group_limit_rows_only() {
        let props = WriterProperties::builder()
            .set_max_row_group_row_count(Some(300))
            .set_max_row_group_bytes(None)
            .build();

        let builder = write_batches(
            WriteBatchesShape {
                num_batches: 1,
                rows_per_batch: 1000,
                row_size: 4,
            },
            props,
        );

        assert_eq!(
            &row_group_sizes(builder.metadata()),
            &[300, 300, 300, 100],
            "Row groups should be split by row count"
        );
    }

    #[test]
    fn test_row_group_limit_bytes_only() {
        let props = WriterProperties::builder()
            .set_max_row_group_row_count(None)
            .set_max_row_group_bytes(Some(3500))
            .build();

        let builder = write_batches(
            WriteBatchesShape {
                num_batches: 10,
                rows_per_batch: 10,
                row_size: 100,
            },
            props,
        );

        let sizes = row_group_sizes(builder.metadata());

        assert!(
            sizes.len() > 1,
            "Should have multiple row groups due to byte limit, got {sizes:?}",
        );

        let total_rows: i64 = sizes.iter().sum();
        assert_eq!(total_rows, 100, "Total rows should be preserved");
    }

    #[test]
    fn test_row_group_limit_bytes_flushes_when_current_group_already_too_large() {
        let schema = Arc::new(Schema::new(vec![Field::new(
            "str",
            ArrowDataType::Utf8,
            false,
        )]));
        let file = tempfile::tempfile().unwrap();

        // Start with no byte limit so the first batch stays buffered
        let props = WriterProperties::builder()
            .set_max_row_group_row_count(None)
            .set_max_row_group_bytes(None)
            .build();
        let mut writer =
            ArrowWriter::try_new(file.try_clone().unwrap(), schema.clone(), Some(props)).unwrap();

        let first_array = StringArray::from(
            (0..10)
                .map(|i| format!("{:0>100}", i))
                .collect::<Vec<String>>(),
        );
        let first_batch =
            RecordBatch::try_new(schema.clone(), vec![Arc::new(first_array)]).unwrap();
        writer.write(&first_batch).unwrap();
        assert_eq!(writer.in_progress_rows(), 10);

        // Lower the byte limit so the buffered row group is already oversized
        writer.max_row_group_bytes = Some(1);

        let second_array = StringArray::from(vec!["x".to_string()]);
        let second_batch =
            RecordBatch::try_new(schema.clone(), vec![Arc::new(second_array)]).unwrap();
        writer.write(&second_batch).unwrap();
        writer.close().unwrap();
        let builder = ParquetRecordBatchReaderBuilder::try_new(file).unwrap();

        assert_eq!(
            &row_group_sizes(builder.metadata()),
            &[10, 1],
            "The second write should flush an oversized in-progress row group first",
        );
    }

    #[test]
    fn test_row_group_limit_both_row_wins_single_batch() {
        let props = WriterProperties::builder()
            .set_max_row_group_row_count(Some(200)) // row limit hit first
            .set_max_row_group_bytes(Some(1024 * 1024)) // byte limit too large to matter
            .build();

        let builder = write_batches(
            WriteBatchesShape {
                num_batches: 1,
                row_size: 4,
                rows_per_batch: 1000,
            },
            props,
        );

        assert_eq!(
            &row_group_sizes(builder.metadata()),
            &[200, 200, 200, 200, 200],
            "Row limit should trigger before byte limit"
        );
    }

    #[test]
    fn test_row_group_limit_both_row_wins_multiple_batches() {
        let props = WriterProperties::builder()
            .set_max_row_group_row_count(Some(5)) // row limit hit first
            .set_max_row_group_bytes(Some(9999)) // byte limit too large to matter
            .build();

        let builder = write_batches(
            WriteBatchesShape {
                num_batches: 10,
                rows_per_batch: 10,
                row_size: 100,
            },
            props,
        );

        assert_eq!(
            &row_group_sizes(builder.metadata()),
            &[5; 20],
            "Row limit should trigger before byte limit"
        );
    }

    #[test]
    fn test_row_group_limit_both_bytes_wins() {
        let props = WriterProperties::builder()
            .set_max_row_group_row_count(Some(1000)) // row limit too large to matter
            .set_max_row_group_bytes(Some(3500)) // byte limit hit first
            .build();

        let builder = write_batches(
            WriteBatchesShape {
                num_batches: 10,
                rows_per_batch: 10,
                row_size: 100,
            },
            props,
        );

        let sizes = row_group_sizes(builder.metadata());

        assert!(
            sizes.len() > 1,
            "Byte limit should trigger before row limit, got {sizes:?}",
        );

        assert!(
            sizes.iter().all(|&s| s < 1000),
            "No row group should hit the row limit"
        );

        let total_rows: i64 = sizes.iter().sum();
        assert_eq!(total_rows, 100, "Total rows should be preserved");
    }

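    // The test below exercises `close_mut`, which exposes the underlying
    // `ColumnCloseResult` mutably so the column index can be dropped before the
    // chunk is appended to a row group.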
    #[test]
    fn arrow_column_chunk_close_mut_drops_column_index() {
        use crate::arrow::ArrowSchemaConverter;
        use crate::file::writer::SerializedFileWriter;

        let schema = Arc::new(Schema::new(vec![Field::new("i", DataType::Int32, false)]));
        let props = Arc::new(
            WriterProperties::builder()
                .set_statistics_enabled(EnabledStatistics::Page)
                .build(),
        );
        let parquet_schema = ArrowSchemaConverter::new()
            .with_coerce_types(props.coerce_types())
            .convert(&schema)
            .unwrap();

        let mut buf = Vec::with_capacity(1024);
        let mut writer =
            SerializedFileWriter::new(&mut buf, parquet_schema.root_schema_ptr(), props.clone())
                .unwrap();

        let factory = ArrowRowGroupWriterFactory::new(&writer, Arc::clone(&schema));
        let mut col_writers = factory.create_column_writers(0).unwrap();
        let arr: ArrayRef = Arc::new(Int32Array::from_iter_values(0..64));
        for leaves in compute_leaves(schema.field(0), &arr).unwrap() {
            col_writers[0].write(&leaves).unwrap();
        }
        let mut chunk = col_writers.pop().unwrap().close().unwrap();

        // With page statistics enabled, the closed chunk carries a column index
        assert!(
            chunk.close().column_index.is_some(),
            "EnabledStatistics::Page should produce a column_index"
        );

        // Drop it through the mutable accessor
        chunk.close_mut().column_index = None;
        assert!(chunk.close().column_index.is_none());

        let mut rg = writer.next_row_group().unwrap();
        chunk.append_to_row_group(&mut rg).unwrap();
        rg.close().unwrap();
        let file_meta = writer.close().unwrap();

        // The written file should contain no column index for the chunk
        let cc = file_meta.row_group(0).column(0);
        assert!(cc.column_index_range().is_none());
    }
}