Merge pull request #68934 from Faless/net/4.x_http_decompress_fix
[Net] Fix HTTPRequest gzip with high compression ratio.
Commit: dce1602eda
1 changed file with 33 additions and 14 deletions
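Summary of the change, as read from the diff below: previously each downloaded chunk was pushed to the decompressor with a single put_data() call and read back with one get_data() of whatever was available, so a chunk with a high enough compression ratio could produce more output than the decompressor's buffer holds and fail the request. The fix feeds the compressed bytes incrementally with put_partial_data(), draining and appending the decompressed output on every iteration and checking body_size_limit as it goes. Because output is now drained continuously, the decompressor's working buffer can also shrink from get_download_chunk_size() * 2 back to get_download_chunk_size().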
@@ -276,10 +276,10 @@ bool HTTPRequest::_handle_response(bool *ret_value) {
 	}
 	if (content_encoding == "gzip") {
 		decompressor.instantiate();
-		decompressor->start_decompression(false, get_download_chunk_size() * 2);
+		decompressor->start_decompression(false, get_download_chunk_size());
 	} else if (content_encoding == "deflate") {
 		decompressor.instantiate();
-		decompressor->start_decompression(true, get_download_chunk_size() * 2);
+		decompressor->start_decompression(true, get_download_chunk_size());
 	}

 	return false;
@@ -390,19 +390,38 @@ bool HTTPRequest::_update_connection() {
 				return false;
 			}

-			PackedByteArray chunk = client->read_response_body_chunk();
-			downloaded.add(chunk.size());
+			PackedByteArray chunk;
+			if (decompressor.is_null()) {
+				// Chunk can be read directly.
+				chunk = client->read_response_body_chunk();
+				downloaded.add(chunk.size());
+			} else {
+				// Chunk is the result of decompression.
+				PackedByteArray compressed = client->read_response_body_chunk();
+				downloaded.add(compressed.size());

-			// Decompress chunk if needed.
-			if (decompressor.is_valid()) {
-				Error err = decompressor->put_data(chunk.ptr(), chunk.size());
-				if (err == OK) {
-					chunk.resize(decompressor->get_available_bytes());
-					err = decompressor->get_data(chunk.ptrw(), chunk.size());
-				}
-				if (err != OK) {
-					_defer_done(RESULT_BODY_DECOMPRESS_FAILED, response_code, response_headers, PackedByteArray());
-					return true;
-				}
-			}
+				int pos = 0;
+				int left = compressed.size();
+				while (left) {
+					int w = 0;
+					Error err = decompressor->put_partial_data(compressed.ptr() + pos, left, w);
+					if (err == OK) {
+						PackedByteArray dc;
+						dc.resize(decompressor->get_available_bytes());
+						err = decompressor->get_data(dc.ptrw(), dc.size());
+						chunk.append_array(dc);
+					}
+					if (err != OK) {
+						_defer_done(RESULT_BODY_DECOMPRESS_FAILED, response_code, response_headers, PackedByteArray());
+						return true;
+					}
+					// We need this check here because a "zip bomb" could result in a chunk of few kilos decompressing into gigabytes of data.
+					if (body_size_limit >= 0 && final_body_size.get() + chunk.size() > body_size_limit) {
+						_defer_done(RESULT_BODY_SIZE_LIMIT_EXCEEDED, response_code, response_headers, PackedByteArray());
+						return true;
+					}
+					pos += w;
+					left -= w;
+				}
+			}
 			final_body_size.add(chunk.size());
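For readers outside the engine, here is a minimal standalone sketch of the same drain-as-you-go pattern using plain zlib rather than Godot's StreamPeerGZIP; the function name, the 4 KiB buffer, and the size limit are illustrative choices and not part of the PR:

#include <zlib.h>
#include <cstddef>
#include <cstdint>
#include <vector>

// Inflate `compressed` (zlib- or gzip-framed) into `out` through a small
// fixed-size buffer, refusing to grow past `size_limit` bytes, analogous
// to the body_size_limit check in the loop above.
static bool inflate_bounded(const std::vector<uint8_t> &compressed,
		std::vector<uint8_t> &out, size_t size_limit) {
	z_stream zs = {};
	// windowBits 15 + 32: auto-detect zlib ("deflate") or gzip headers,
	// covering both Content-Encoding values handled by the PR.
	if (inflateInit2(&zs, 15 + 32) != Z_OK) {
		return false;
	}
	zs.next_in = const_cast<uint8_t *>(compressed.data());
	zs.avail_in = static_cast<uInt>(compressed.size());
	uint8_t buf[4096]; // Bounded scratch space, like the decompressor's internal buffer.
	int ret = Z_OK;
	do {
		zs.next_out = buf;
		zs.avail_out = sizeof(buf);
		ret = inflate(&zs, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END) {
			inflateEnd(&zs); // Corrupt or truncated stream: the DECOMPRESS_FAILED case.
			return false;
		}
		// Append whatever this step produced, then check the limit before
		// inflating more, so a zip bomb is stopped one buffer at a time.
		out.insert(out.end(), buf, buf + (sizeof(buf) - zs.avail_out));
		if (out.size() > size_limit) {
			inflateEnd(&zs); // The BODY_SIZE_LIMIT_EXCEEDED case.
			return false;
		}
	} while (ret != Z_STREAM_END);
	inflateEnd(&zs);
	return true;
}

The property that matters, and that the old one-shot put_data()/get_data() pair lacked, is that the output size is re-checked every time the bounded buffer is drained, so neither a high compression ratio nor a deliberate zip bomb can force an unbounded allocation.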