Merge pull request #68934 from Faless/net/4.x_http_decompress_fix
[Net] Fix HTTPRequest gzip with high compression ratio.
Commit: dce1602eda
1 changed file with 33 additions and 14 deletions
@@ -276,10 +276,10 @@ bool HTTPRequest::_handle_response(bool *ret_value) {
 	}
 	if (content_encoding == "gzip") {
 		decompressor.instantiate();
-		decompressor->start_decompression(false, get_download_chunk_size() * 2);
+		decompressor->start_decompression(false, get_download_chunk_size());
 	} else if (content_encoding == "deflate") {
 		decompressor.instantiate();
-		decompressor->start_decompression(true, get_download_chunk_size() * 2);
+		decompressor->start_decompression(true, get_download_chunk_size());
 	}
 
 	return false;
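Before this change, the whole compressed chunk was pushed into the decompressor in a single put_data() call and read back once, so everything the chunk inflated to had to fit in the output buffer sized at twice the download chunk size; any compression ratio above 2:1 could overflow it. The rewritten read loop in the next hunk drains output while it feeds input, so the buffer no longer needs the 2x headroom. A rough standalone illustration of the arithmetic (the chunk size and ratio below are assumptions for the example, not values taken from the engine):

// Hypothetical numbers: shows why a fixed 2x output buffer can overflow
// when the compression ratio is high. Not engine code.
#include <cstdio>

int main() {
	const int chunk_size = 65536;          // assumed download chunk size
	const int old_buffer = chunk_size * 2; // pre-fix decompression buffer
	const double ratio = 1000.0;           // DEFLATE can approach ~1000:1 on repetitive data
	const double worst_case = chunk_size * ratio;
	std::printf("buffer %d B vs. worst-case output %.0f B (%.0fx over budget)\n",
			old_buffer, worst_case, worst_case / old_buffer);
	return 0;
}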
@@ -390,20 +390,39 @@ bool HTTPRequest::_update_connection() {
 		return false;
 	}
 
-	PackedByteArray chunk = client->read_response_body_chunk();
-	downloaded.add(chunk.size());
+	PackedByteArray chunk;
+	if (decompressor.is_null()) {
+		// Chunk can be read directly.
+		chunk = client->read_response_body_chunk();
+		downloaded.add(chunk.size());
+	} else {
+		// Chunk is the result of decompression.
+		PackedByteArray compressed = client->read_response_body_chunk();
+		downloaded.add(compressed.size());
 
-	// Decompress chunk if needed.
-	if (decompressor.is_valid()) {
-		Error err = decompressor->put_data(chunk.ptr(), chunk.size());
-		if (err == OK) {
-			chunk.resize(decompressor->get_available_bytes());
-			err = decompressor->get_data(chunk.ptrw(), chunk.size());
-		}
-		if (err != OK) {
-			_defer_done(RESULT_BODY_DECOMPRESS_FAILED, response_code, response_headers, PackedByteArray());
-			return true;
-		}
-	}
+		int pos = 0;
+		int left = compressed.size();
+		while (left) {
+			int w = 0;
+			Error err = decompressor->put_partial_data(compressed.ptr() + pos, left, w);
+			if (err == OK) {
+				PackedByteArray dc;
+				dc.resize(decompressor->get_available_bytes());
+				err = decompressor->get_data(dc.ptrw(), dc.size());
+				chunk.append_array(dc);
+			}
+			if (err != OK) {
+				_defer_done(RESULT_BODY_DECOMPRESS_FAILED, response_code, response_headers, PackedByteArray());
+				return true;
+			}
+			// We need this check here because a "zip bomb" could result in a chunk of few kilos decompressing into gigabytes of data.
+			if (body_size_limit >= 0 && final_body_size.get() + chunk.size() > body_size_limit) {
+				_defer_done(RESULT_BODY_SIZE_LIMIT_EXCEEDED, response_code, response_headers, PackedByteArray());
+				return true;
+			}
+			pos += w;
+			left -= w;
+		}
+	}
 	final_body_size.add(chunk.size());
 
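This loop is the core of the fix: put_partial_data() reports through w how many compressed bytes the decompressor actually accepted, the available output is drained with get_available_bytes()/get_data() and appended to chunk, and the body-size check runs on every iteration so a small compressed chunk cannot silently inflate past body_size_limit. For readers without the engine at hand, here is a standalone sketch of the same consume-and-drain pattern written against raw zlib; the function name, buffer size, and limit parameter are illustrative assumptions, not part of the patch:

// Standalone sketch (raw zlib) of the consume-and-drain loop used above.
#include <zlib.h>
#include <cstdio>
#include <vector>

// Inflate `in`, draining all pending output each step; fail if the
// decompressed size would exceed `out_limit` (the "zip bomb" guard).
static bool decompress_stream(const unsigned char *in, size_t in_len,
		std::vector<unsigned char> &out, size_t out_limit) {
	z_stream zs{}; // zero-init sets zalloc/zfree/opaque to null
	if (inflateInit2(&zs, 32 + MAX_WBITS) != Z_OK) { // 32: auto-detect gzip/zlib
		return false;
	}
	zs.next_in = const_cast<unsigned char *>(in);
	zs.avail_in = static_cast<unsigned int>(in_len);
	unsigned char buf[16384];
	int ret = Z_OK;
	do {
		zs.next_out = buf;
		zs.avail_out = sizeof(buf);
		ret = inflate(&zs, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END) {
			inflateEnd(&zs);
			return false;
		}
		out.insert(out.end(), buf, buf + (sizeof(buf) - zs.avail_out));
		// Check the limit on every iteration, before consuming more input.
		if (out.size() > out_limit) {
			inflateEnd(&zs);
			return false;
		}
	} while (ret != Z_STREAM_END && (zs.avail_in > 0 || zs.avail_out == 0));
	inflateEnd(&zs);
	return true;
}

Called with a complete compressed body, this returns false either on corrupt input or when the output would exceed the limit, mirroring RESULT_BODY_DECOMPRESS_FAILED and RESULT_BODY_SIZE_LIMIT_EXCEEDED respectively.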