parquet_concat/parquet-concat.rs
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Binary that concatenates the column data of one or more parquet files
//!
//! # Install
//!
//! `parquet-concat` can be installed using `cargo`:
//! ```
//! cargo install parquet --features=cli
//! ```
//! After this `parquet-concat` should be available:
//! ```
//! parquet-concat out.parquet a.parquet b.parquet
//! ```
//!
//! The binary can also be built from the source code and run as follows:
//! ```
//! cargo run --features=cli --bin parquet-concat out.parquet a.parquet b.parquet
//! ```
//!
//! Note: this does not currently support preserving the page index or bloom filters
//!
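//! Note: all inputs must share the same Parquet schema; files with a
//! mismatched schema are rejected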

use clap::Parser;
use parquet::column::writer::ColumnCloseResult;
use parquet::errors::{ParquetError, Result};
use parquet::file::metadata::ParquetMetaDataReader;
use parquet::file::properties::WriterProperties;
use parquet::file::writer::SerializedFileWriter;
use std::fs::File;
use std::sync::Arc;

#[derive(Debug, Parser)]
#[clap(author, version)]
/// Concatenates one or more parquet files
struct Args {
    /// Path to output
    output: String,

    /// Path to input files
    input: Vec<String>,
}

impl Args {
    fn run(&self) -> Result<()> {
        if self.input.is_empty() {
            return Err(ParquetError::General(
                "Must provide at least one input file".into(),
            ));
        }

        let output = File::create(&self.output)?;

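        // Open every input and parse its footer metadata up front, so that
        // unreadable or malformed files are rejected before any data is written.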
        let inputs = self
            .input
            .iter()
            .map(|x| {
                let reader = File::open(x)?;
                let metadata = ParquetMetaDataReader::new().parse_and_finish(&reader)?;
                Ok((reader, metadata))
            })
            .collect::<Result<Vec<_>>>()?;

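        // All inputs must share an identical Parquet schema; compare each
        // against the first and bail out on the first mismatch.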
        let expected = inputs[0].1.file_metadata().schema();
        for (_, metadata) in inputs.iter().skip(1) {
            let actual = metadata.file_metadata().schema();
            if expected != actual {
                return Err(ParquetError::General(format!(
                    "inputs must have the same schema, {expected:#?} vs {actual:#?}"
                )));
            }
        }

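        // Reuse the first input's root schema for the output. The writer
        // properties only affect newly encoded data; appended column chunks
        // are copied as-is, keeping their original encodings and compression.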
        let props = Arc::new(WriterProperties::builder().build());
        let schema = inputs[0].1.file_metadata().schema_descr().root_schema_ptr();
        let mut writer = SerializedFileWriter::new(output, schema, props)?;

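        // Copy each row group of each input into the output, splicing the raw
        // column chunk bytes across without decoding them.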
        for (input, metadata) in inputs {
            for rg in metadata.row_groups() {
                let mut rg_out = writer.next_row_group()?;
                for column in rg.columns() {
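                    // Synthesize the close result a freshly written column
                    // would report, reusing the existing chunk metadata.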
                    let result = ColumnCloseResult {
                        bytes_written: column.compressed_size() as _,
                        rows_written: rg.num_rows() as _,
                        metadata: column.clone(),
                        bloom_filter: None,
                        column_index: None,
                        offset_index: None,
                    };
                    rg_out.append_column(&input, result)?;
                }
                rg_out.close()?;
            }
        }

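        // Write the combined footer metadata and finish the file.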
        writer.close()?;

        Ok(())
    }
}

fn main() -> Result<()> {
    Args::parse().run()
}