GH-48467: [C++][Parquet] Add configure to limit the row group size in bytes #48468
```diff
@@ -442,12 +442,8 @@ class FileWriterImpl : public FileWriter {
     return Status::OK();
   }

-  // Max number of rows allowed in a row group.
-  const int64_t max_row_group_length = this->properties().max_row_group_length();
-
   // Initialize a new buffered row group writer if necessary.
-  if (row_group_writer_ == nullptr || !row_group_writer_->buffered() ||
-      row_group_writer_->num_rows() >= max_row_group_length) {
+  if (row_group_writer_ == nullptr || !row_group_writer_->buffered()) {
     RETURN_NOT_OK(NewBufferedRowGroup());
   }
```
|
```diff
@@ -480,17 +476,28 @@ class FileWriterImpl : public FileWriter {
     return Status::OK();
   };

+  // Max number of rows allowed in a row group.
+  const int64_t max_row_group_length = this->properties().max_row_group_length();
+
+  // Max number of bytes allowed in a row group.
+  const int64_t max_row_group_bytes = this->properties().max_row_group_bytes();
+
   int64_t offset = 0;
   while (offset < batch.num_rows()) {
-    const int64_t batch_size =
-        std::min(max_row_group_length - row_group_writer_->num_rows(),
-                 batch.num_rows() - offset);
-    RETURN_NOT_OK(WriteBatch(offset, batch_size));
-    offset += batch_size;
-
-    // Flush current row group writer and create a new writer if it is full.
-    if (row_group_writer_->num_rows() >= max_row_group_length &&
-        offset < batch.num_rows()) {
+    int64_t group_rows = row_group_writer_->num_rows();
+    int64_t batch_size =
+        std::min(max_row_group_length - group_rows, batch.num_rows() - offset);
+    if (group_rows > 0) {
+      int64_t buffered_bytes = row_group_writer_->current_buffered_bytes();
+      double avg_row_bytes = buffered_bytes * 1.0 / group_rows;
+      batch_size = std::min(
+          batch_size,
+          static_cast<int64_t>((max_row_group_bytes - buffered_bytes) / avg_row_bytes));
+    }
+    if (batch_size > 0) {
+      RETURN_NOT_OK(WriteBatch(offset, batch_size));
+      offset += batch_size;
+    } else if (offset < batch.num_rows()) {
+      // Current row group is full, write remaining rows in a new group.
```
**Member:** Will it cause an infinite loop at this line if `batch_size` keeps being computed as 0?

**Contributor (author):** It would cause an infinite loop only when the configured `max_row_group_bytes` is too small to hold even a single row. We could return an error for that case:

```cpp
if (batch_size == 0 && row_group_writer_->num_rows() == 0) {
  return Status::Invalid(
      "Configured max_row_group_bytes is too small to hold a single row");
}
```

**Member:** We cannot accept an infinite loop, so perhaps we have to set the minimum batch size to 1 in this case?

**Contributor (author):** Setting the minimum batch size to 1 is not reasonable: when `buffered_bytes > max_row_group_bytes` we would still set the batch size to 1, and then we would continually append one row to the active row group and never create a new one. Returning an invalid status might be more intuitive.

**Member:** Shouldn't we check the row group size after writing each batch? If a large per-row size leads to a batch size of 1, we just end up checking the row group size after writing every row.

**Contributor (author):** We do not check the row group size after writing a batch; the current write logic works as in the diff above, applying the size caps when computing the batch size and creating a new row group only when rows remain. This way we don't need to check the size after a write, since it is guaranteed in step 1, and we won't leave a possibly empty row group after the final batch, which is guaranteed in step 3.

**Member:** How do you achieve the step 4 above ("For each loop iteration: …")?
```diff
       RETURN_NOT_OK(NewBufferedRowGroup());
     }
   }
```
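To make the byte-capping arithmetic in this loop concrete, here is a small self-contained sketch. The numbers (a 128 MiB cap with 96 MiB already buffered across one million rows, and 500,000 rows left in the batch) are invented for illustration; only the formula mirrors the diff.

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical state of the active buffered row group (numbers invented):
  const int64_t max_row_group_bytes = 128LL * 1024 * 1024;  // byte cap
  const int64_t buffered_bytes = 96LL * 1024 * 1024;        // already buffered
  const int64_t group_rows = 1000000;                       // rows buffered so far
  const int64_t remaining_rows = 500000;                    // rows left in the batch

  // Same estimate the diff uses: average bytes per row observed so far.
  const double avg_row_bytes = buffered_bytes * 1.0 / group_rows;  // ~100.7 B/row
  // Translate the remaining byte budget into a row budget and clamp the batch.
  const int64_t batch_size = std::min(
      remaining_rows,
      static_cast<int64_t>((max_row_group_bytes - buffered_bytes) / avg_row_bytes));
  std::printf("batch_size = %lld\n", static_cast<long long>(batch_size));  // 333333
  return 0;
}
```

Because the cap extrapolates from the average row size seen so far, a run of unusually large rows can still overshoot the byte limit slightly; the bound is approximate by design.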
```diff
@@ -68,6 +68,12 @@ int64_t RowGroupWriter::total_compressed_bytes_written() const {
   return contents_->total_compressed_bytes_written();
 }

+int64_t RowGroupWriter::current_buffered_bytes() const {
```
**Member:** The function name is a little misleading, because readers may think it is the same as …

**Contributor (author):** Rename to …
```diff
+  return contents_->total_compressed_bytes() +
+         contents_->total_compressed_bytes_written() +
+         contents_->estimated_buffered_value_bytes();
+}
+
 bool RowGroupWriter::buffered() const { return contents_->buffered(); }

 int RowGroupWriter::current_column() { return contents_->current_column(); }
```
```diff
@@ -195,6 +201,20 @@ class RowGroupSerializer : public RowGroupWriter::Contents {
     return total_compressed_bytes_written;
   }

+  int64_t estimated_buffered_value_bytes() const override {
+    if (closed_) {
+      return 0;
+    }
+    int64_t estimated_buffered_value_bytes = 0;
+    for (size_t i = 0; i < column_writers_.size(); i++) {
+      if (column_writers_[i]) {
+        estimated_buffered_value_bytes +=
+            column_writers_[i]->estimated_buffered_value_bytes();
+      }
+    }
+    return estimated_buffered_value_bytes;
+  }
+
   bool buffered() const override { return buffered_row_group_; }

   void Close() override {
```
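For readers puzzling over the naming discussion above: `current_buffered_bytes()` sums three buckets of bytes that can coexist in a buffered row group. The sketch below restates that accounting with invented numbers; the comments describe the accessors only as far as this PR's code suggests.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Invented snapshot of a buffered row group writer:
  const int64_t compressed_held = 8LL << 20;     // total_compressed_bytes():
                                                 // pages compressed, still in memory
  const int64_t compressed_written = 40LL << 20; // total_compressed_bytes_written():
                                                 // pages already written out, if any
  const int64_t value_estimate = 2LL << 20;      // estimated_buffered_value_bytes():
                                                 // values not yet encoded into pages
  // current_buffered_bytes() returns the sum of all three buckets.
  const int64_t estimate = compressed_held + compressed_written + value_estimate;
  std::printf("estimate = %lld MiB\n", static_cast<long long>(estimate >> 20));  // 50
  return 0;
}
```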
```diff
@@ -160,6 +160,7 @@ static constexpr bool DEFAULT_IS_DICTIONARY_ENABLED = true;
 static constexpr int64_t DEFAULT_DICTIONARY_PAGE_SIZE_LIMIT = kDefaultDataPageSize;
 static constexpr int64_t DEFAULT_WRITE_BATCH_SIZE = 1024;
 static constexpr int64_t DEFAULT_MAX_ROW_GROUP_LENGTH = 1024 * 1024;
+static constexpr int64_t DEFAULT_MAX_ROW_GROUP_BYTES = 128 * 1024 * 1024;
```
**Member:** Is there a particular reason for this value? AFAIK some Parquet implementations (is it Parquet Rust? @alamb) write a single row group per file by default. I also feel the HDFS-related reasons in the Parquet docs are completely outdated (who cares about HDFS?).

**Member:** I think smaller row groups are still useful when pruning is essential. https://www.firebolt.io/blog/unlocking-faster-iceberg-queries-the-writer-optimizations-you-are-missing is a good read.

**Member:** Right, but the value is not easy to devise. For example, if you have 10,000 columns, this will make for some very short columns.

**Member:** In this case, we could set a very large value as the default to keep the current behavior. Some engines are smart enough to derive a good threshold (from history or whatever source).

**Contributor:** The default row group size in the Rust writer is 1M rows (1024 * 1024), not bytes. I looked through and didn't find any setting for max row group size in bytes. I believe at least at some point in the past the DuckDB Parquet writer wrote a single large row group; I am not sure whether that is still the current behavior.

**Member:** I think Apache Impala also produces a single row group in its Parquet writer. However, limiting row group size in bytes is still useful in some cases. For example, there is a table property in Iceberg: https://github.com/apache/iceberg/blob/73a26fc1f49e6749656a273b2e4d78eb9e64f19e/docs/docs/configuration.md?plain=1#L46. As iceberg-cpp depends on the Parquet writer here, it is nice to support this feature.
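A back-of-the-envelope sketch of the wide-table concern raised above; the 10,000-column figure comes from the comment, everything else is plain arithmetic.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // The proposed 128 MiB default spread evenly across a hypothetical
  // 10,000-column table leaves very small column chunks per row group.
  const int64_t default_bytes = 128LL * 1024 * 1024;  // proposed default cap
  const int64_t num_columns = 10000;                  // hypothetical wide table
  // ~13 KiB per column chunk, before encoding and compression effects.
  std::printf("avg chunk bytes: %lld\n",
              static_cast<long long>(default_bytes / num_columns));  // 13421
  return 0;
}
```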
```diff
 static constexpr bool DEFAULT_ARE_STATISTICS_ENABLED = true;
 static constexpr int64_t DEFAULT_MAX_STATISTICS_SIZE = 4096;
 static constexpr Encoding::type DEFAULT_ENCODING = Encoding::UNKNOWN;

@@ -293,6 +294,7 @@ class PARQUET_EXPORT WriterProperties {
         dictionary_pagesize_limit_(DEFAULT_DICTIONARY_PAGE_SIZE_LIMIT),
         write_batch_size_(DEFAULT_WRITE_BATCH_SIZE),
         max_row_group_length_(DEFAULT_MAX_ROW_GROUP_LENGTH),
+        max_row_group_bytes_(DEFAULT_MAX_ROW_GROUP_BYTES),
         pagesize_(kDefaultDataPageSize),
         max_rows_per_page_(kDefaultMaxRowsPerPage),
         version_(ParquetVersion::PARQUET_2_6),

@@ -309,6 +311,7 @@ class PARQUET_EXPORT WriterProperties {
         dictionary_pagesize_limit_(properties.dictionary_pagesize_limit()),
         write_batch_size_(properties.write_batch_size()),
         max_row_group_length_(properties.max_row_group_length()),
+        max_row_group_bytes_(properties.max_row_group_bytes()),
         pagesize_(properties.data_pagesize()),
         max_rows_per_page_(properties.max_rows_per_page()),
         version_(properties.version()),

@@ -418,6 +421,13 @@ class PARQUET_EXPORT WriterProperties {
     return this;
   }

+  /// Specify the max number of bytes to put in a single row group.
+  /// Default 128MB.
+  Builder* max_row_group_bytes(int64_t max_row_group_bytes) {
+    max_row_group_bytes_ = max_row_group_bytes;
+    return this;
+  }
+
   /// Specify the data page size.
   /// Default 1MB.
   Builder* data_pagesize(int64_t pg_size) {
```
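Assuming the builder method lands as shown in this diff, opting into the byte cap follows the usual `WriterProperties::Builder` chaining; a minimal sketch (the 64 MiB value is arbitrary):

```cpp
#include <memory>

#include "parquet/properties.h"

std::shared_ptr<parquet::WriterProperties> MakeWriterProperties() {
  parquet::WriterProperties::Builder builder;
  return builder
      .max_row_group_length(1024 * 1024)         // existing row-count cap
      ->max_row_group_bytes(64LL * 1024 * 1024)  // byte cap added by this PR
      ->build();
}
```

The byte cap works alongside `max_row_group_length`: the write loop above takes the minimum of the two remaining budgets, so whichever limit is reached first closes the row group.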
```diff
@@ -779,11 +789,12 @@ class PARQUET_EXPORT WriterProperties {
     return std::shared_ptr<WriterProperties>(new WriterProperties(
         pool_, dictionary_pagesize_limit_, write_batch_size_, max_row_group_length_,
-        pagesize_, max_rows_per_page_, version_, created_by_, page_checksum_enabled_,
-        size_statistics_level_, std::move(file_encryption_properties_),
-        default_column_properties_, column_properties, data_page_version_,
-        store_decimal_as_integer_, std::move(sorting_columns_),
-        content_defined_chunking_enabled_, content_defined_chunking_options_));
+        max_row_group_bytes_, pagesize_, max_rows_per_page_, version_, created_by_,
+        page_checksum_enabled_, size_statistics_level_,
+        std::move(file_encryption_properties_), default_column_properties_,
+        column_properties, data_page_version_, store_decimal_as_integer_,
+        std::move(sorting_columns_), content_defined_chunking_enabled_,
+        content_defined_chunking_options_));
   }
```
```diff
 private:
@@ -793,6 +804,7 @@ class PARQUET_EXPORT WriterProperties {
   int64_t dictionary_pagesize_limit_;
   int64_t write_batch_size_;
   int64_t max_row_group_length_;
+  int64_t max_row_group_bytes_;
   int64_t pagesize_;
   int64_t max_rows_per_page_;
   ParquetVersion::type version_;

@@ -828,6 +840,8 @@ class PARQUET_EXPORT WriterProperties {
   inline int64_t max_row_group_length() const { return max_row_group_length_; }

+  inline int64_t max_row_group_bytes() const { return max_row_group_bytes_; }
+
   inline int64_t data_pagesize() const { return pagesize_; }

   inline int64_t max_rows_per_page() const { return max_rows_per_page_; }

@@ -946,9 +960,10 @@ class PARQUET_EXPORT WriterProperties {
 private:
   explicit WriterProperties(
       MemoryPool* pool, int64_t dictionary_pagesize_limit, int64_t write_batch_size,
-      int64_t max_row_group_length, int64_t pagesize, int64_t max_rows_per_page,
-      ParquetVersion::type version, const std::string& created_by,
-      bool page_write_checksum_enabled, SizeStatisticsLevel size_statistics_level,
+      int64_t max_row_group_length, int64_t max_row_group_bytes, int64_t pagesize,
+      int64_t max_rows_per_page, ParquetVersion::type version,
+      const std::string& created_by, bool page_write_checksum_enabled,
+      SizeStatisticsLevel size_statistics_level,
       std::shared_ptr<FileEncryptionProperties> file_encryption_properties,
       const ColumnProperties& default_column_properties,
       const std::unordered_map<std::string, ColumnProperties>& column_properties,

@@ -959,6 +974,7 @@ class PARQUET_EXPORT WriterProperties {
       dictionary_pagesize_limit_(dictionary_pagesize_limit),
       write_batch_size_(write_batch_size),
       max_row_group_length_(max_row_group_length),
+      max_row_group_bytes_(max_row_group_bytes),
       pagesize_(pagesize),
       max_rows_per_page_(max_rows_per_page),
       parquet_data_page_version_(data_page_version),

@@ -978,6 +994,7 @@ class PARQUET_EXPORT WriterProperties {
   int64_t dictionary_pagesize_limit_;
   int64_t write_batch_size_;
   int64_t max_row_group_length_;
+  int64_t max_row_group_bytes_;
   int64_t pagesize_;
   int64_t max_rows_per_page_;
   ParquetDataPageVersion parquet_data_page_version_;
```