// parquet_concat/parquet-concat.rs
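//! Concatenates the row groups of one or more Parquet files that share the
//! same schema into a single output file.
//!
//! Column chunks are copied over byte-for-byte via `append_column`, so no
//! data is decoded or re-encoded: the output simply contains every input's
//! row groups, in order. Row groups are never merged.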

use clap::Parser;
use parquet::bloom_filter::Sbbf;
use parquet::column::writer::ColumnCloseResult;
use parquet::errors::{ParquetError, Result};
use parquet::file::metadata::{ColumnChunkMetaData, PageIndexPolicy, ParquetMetaDataReader};
use parquet::file::properties::WriterProperties;
use parquet::file::reader::ChunkReader;
use parquet::file::writer::SerializedFileWriter;
use std::fs::File;
use std::sync::Arc;

#[derive(Debug, Parser)]
#[clap(author, version)]
struct Args {
    /// Path to output
    output: String,

    /// Path to input files
    input: Vec<String>,
}

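// Example invocation (paths are illustrative): the first positional argument
// is the output file and every remaining argument is an input:
//
//     parquet-concat merged.parquet part1.parquet part2.parquet
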
/// Attempts to read the bloom filter for a column chunk, returning `None`
/// if the chunk has no bloom filter or it cannot be read.
fn read_bloom_filter<R: ChunkReader>(column: &ColumnChunkMetaData, input: &R) -> Option<Sbbf> {
    Sbbf::read_from_column_chunk(column, input).ok().flatten()
}

impl Args {
    fn run(&self) -> Result<()> {
        if self.input.is_empty() {
            return Err(ParquetError::General(
                "Must provide at least one input file".into(),
            ));
        }

        let output = File::create(&self.output)?;

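        // Open each input up front and parse its footer metadata. The page
        // index is loaded when present, but `PageIndexPolicy::Optional` means
        // files written without one are still accepted.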
        let inputs = self
            .input
            .iter()
            .map(|x| {
                let reader = File::open(x)?;
                let metadata = ParquetMetaDataReader::new()
                    .with_page_index_policy(PageIndexPolicy::Optional)
                    .parse_and_finish(&reader)?;
                Ok((reader, metadata))
            })
            .collect::<Result<Vec<_>>>()?;

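        // All inputs must have an identical schema; mismatches are rejected
        // outright rather than coerced.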
        let expected = inputs[0].1.file_metadata().schema();
        for (_, metadata) in inputs.iter().skip(1) {
            let actual = metadata.file_metadata().schema();
            if expected != actual {
                return Err(ParquetError::General(format!(
                    "inputs must have the same schema, {expected:#?} vs {actual:#?}"
                )));
            }
        }

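        // Default writer properties suffice here: because column chunks are
        // copied verbatim below, each chunk keeps the compression and
        // encodings of its source file regardless of these settings.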
        let props = Arc::new(WriterProperties::builder().build());
        let schema = inputs[0].1.file_metadata().schema_descr().root_schema_ptr();
        let mut writer = SerializedFileWriter::new(output, schema, props)?;

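        // Copy every row group of every input into the output.
        // `append_column` splices the raw column chunk bytes into the new
        // file, so nothing is decompressed or decoded along the way.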
        for (input, metadata) in inputs {
            let column_indexes = metadata.column_index();
            let offset_indexes = metadata.offset_index();

            for (rg_idx, rg) in metadata.row_groups().iter().enumerate() {
                let rg_column_indexes = column_indexes.and_then(|ci| ci.get(rg_idx));
                let rg_offset_indexes = offset_indexes.and_then(|oi| oi.get(rg_idx));
                let mut rg_out = writer.next_row_group()?;
                for (col_idx, column) in rg.columns().iter().enumerate() {
                    // Carry over the bloom filter and page indexes, if any.
                    let bloom_filter = read_bloom_filter(column, &input);
                    let column_index = rg_column_indexes.and_then(|row| row.get(col_idx)).cloned();
                    let offset_index = rg_offset_indexes.and_then(|row| row.get(col_idx)).cloned();

                    // Describe the existing chunk so the writer can account
                    // for it without rewriting the data.
                    let result = ColumnCloseResult {
                        bytes_written: column.compressed_size() as _,
                        rows_written: rg.num_rows() as _,
                        metadata: column.clone(),
                        bloom_filter,
                        column_index,
                        offset_index,
                    };
                    rg_out.append_column(&input, result)?;
                }
                rg_out.close()?;
            }
        }

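        // Closing the writer finalizes the file: the footer is written with
        // the row-group metadata, page indexes, and bloom filters collected
        // above.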
        writer.close()?;

        Ok(())
    }
}

fn main() -> Result<()> {
    Args::parse().run()
}
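
// To build and run from a checkout of arrow-rs (the `--features=cli` flag is
// an assumption, based on how the parquet crate gates its other binaries):
//
//     cargo run --bin parquet-concat --features=cli -- out.parquet a.parquet b.parquet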