parquet-concat.rs

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

//! Binary that concatenates the column data of one or more parquet files
//!
//! # Install
//!
//! `parquet-concat` can be installed using `cargo`:
//! ```
//! cargo install parquet --features=cli
//! ```
//! After this `parquet-concat` should be available:
//! ```
//! parquet-concat out.parquet a.parquet b.parquet
//! ```
//!
//! The binary can also be built from the source code and run as follows:
//! ```
//! cargo run --features=cli --bin parquet-concat out.parquet a.parquet b.parquet
//! ```
//!
//! Note: page indexes and bloom filters present in the inputs are preserved in the output
//!

use clap::Parser;
use parquet::bloom_filter::Sbbf;
use parquet::column::writer::ColumnCloseResult;
use parquet::errors::{ParquetError, Result};
use parquet::file::metadata::{ColumnChunkMetaData, PageIndexPolicy, ParquetMetaDataReader};
use parquet::file::properties::WriterProperties;
use parquet::file::reader::ChunkReader;
use parquet::file::writer::SerializedFileWriter;
use std::fs::File;
use std::sync::Arc;

#[derive(Debug, Parser)]
#[clap(author, version)]
/// Concatenates one or more parquet files
struct Args {
    /// Path to output
    output: String,

    /// Path to input files
    input: Vec<String>,
}

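/// Reads the bloom filter for a column chunk, returning `None` if it is absent
/// or could not be read.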
fn read_bloom_filter<R: ChunkReader>(column: &ColumnChunkMetaData, input: &R) -> Option<Sbbf> {
    Sbbf::read_from_column_chunk(column, input).ok().flatten()
}

impl Args {
    fn run(&self) -> Result<()> {
        if self.input.is_empty() {
            return Err(ParquetError::General(
                "Must provide at least one input file".into(),
            ));
        }

        let output = File::create(&self.output)?;

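        // Open every input file and decode its footer metadata up front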
        let inputs = self
            .input
            .iter()
            .map(|x| {
                let reader = File::open(x)?;
                // Enable reading page indexes if present
                let metadata = ParquetMetaDataReader::new()
                    .with_page_index_policy(PageIndexPolicy::Optional)
                    .parse_and_finish(&reader)?;
                Ok((reader, metadata))
            })
            .collect::<Result<Vec<_>>>()?;

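        // All inputs must share the same schema for their row groups to be
        // combined into a single output file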
        let expected = inputs[0].1.file_metadata().schema();
        for (_, metadata) in inputs.iter().skip(1) {
            let actual = metadata.file_metadata().schema();
            if expected != actual {
                return Err(ParquetError::General(format!(
                    "inputs must have the same schema, {expected:#?} vs {actual:#?}"
                )));
            }
        }

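        // Write the output using the schema of the first input and default
        // writer properties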
        let props = Arc::new(WriterProperties::builder().build());
        let schema = inputs[0].1.file_metadata().schema_descr().root_schema_ptr();
        let mut writer = SerializedFileWriter::new(output, schema, props)?;

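        // Copy each row group from every input into the output, appending the
        // existing column chunks together with their metadata, bloom filters,
        // and page index entries instead of decoding and re-encoding the data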
        for (input, metadata) in inputs {
            let column_indexes = metadata.column_index();
            let offset_indexes = metadata.offset_index();

            for (rg_idx, rg) in metadata.row_groups().iter().enumerate() {
                let rg_column_indexes = column_indexes.and_then(|ci| ci.get(rg_idx));
                let rg_offset_indexes = offset_indexes.and_then(|oi| oi.get(rg_idx));
                let mut rg_out = writer.next_row_group()?;
                for (col_idx, column) in rg.columns().iter().enumerate() {
                    let bloom_filter = read_bloom_filter(column, &input);
                    let column_index = rg_column_indexes.and_then(|row| row.get(col_idx)).cloned();
                    let offset_index = rg_offset_indexes.and_then(|row| row.get(col_idx)).cloned();

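                    // Describe the copied chunk so its metadata, bloom filter,
                    // and page indexes are recorded in the output footer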
                    let result = ColumnCloseResult {
                        bytes_written: column.compressed_size() as _,
                        rows_written: rg.num_rows() as _,
                        metadata: column.clone(),
                        bloom_filter,
                        column_index,
                        offset_index,
                    };
                    rg_out.append_column(&input, result)?;
                }
                rg_out.close()?;
            }
        }

        writer.close()?;

        Ok(())
    }
}

fn main() -> Result<()> {
    Args::parse().run()
}