Fixes:
- Element collection will only contain valid elements.
- Fixes buffer overflow in the FBX document.
parent ee903becc8
commit 18d1898309

4 changed files with 11 additions and 6 deletions
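In brief: the ASCII tokenizer scanned its input with a strlen-style sentinel loop, but the importer hands it a raw file buffer that carries no NUL terminator, so a malformed or truncated file could make the scan run past the end of the allocation. A minimal sketch of the unsafe shape and the bounded replacement (hypothetical function names, not the parser's code):

#include <cstddef>

// Unsafe shape: advance until a NUL byte. If `buf` points at a raw file
// buffer with no terminator, the loop dereferences past the allocation.
void scan_unsafe(const char *buf) {
	for (const char *cur = buf; *cur; ++cur) {
		// ... consume *cur ...
	}
}

// Fixed shape: the caller supplies the byte count and the loop is bounded.
void scan_bounded(const char *buf, size_t length) {
	for (size_t x = 0; x < length; ++x) {
		const char c = buf[x];
		// ... consume c ...
		(void)c;
	}
}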
@@ -128,7 +128,7 @@ Node *EditorSceneImporterFBX::import_scene(const String &p_path, uint32_t p_flag
 			FBXDocParser::TokenizeBinary(tokens, (const char *)data.write().ptr(), (size_t)data.size());
 		} else {
 			print_verbose("[doc] is ascii");
-			FBXDocParser::Tokenize(tokens, (const char *)data.write().ptr());
+			FBXDocParser::Tokenize(tokens, (const char *)data.write().ptr(), (size_t)data.size());
 		}

 		// The import process explained:
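The ASCII call now mirrors the binary one: both pass `data.size()` alongside the pointer, because a buffer filled straight from disk has no guaranteed NUL terminator. A stand-in for that read path (hypothetical helper, not the engine's file API):

#include <cstdint>
#include <cstdio>
#include <vector>

// Reads a whole file into memory. Note the result is exactly file-sized:
// nothing appends a trailing '\0', so the size must travel with the data.
std::vector<uint8_t> read_all(const char *path) {
	std::vector<uint8_t> data;
	if (FILE *f = fopen(path, "rb")) {
		fseek(f, 0, SEEK_END);
		long size = ftell(f);
		fseek(f, 0, SEEK_SET);
		if (size > 0) {
			data.resize((size_t)size);
			if (fread(data.data(), 1, data.size(), f) != data.size()) {
				data.clear(); // short read: treat as failure
			}
		}
		fclose(f);
	}
	return data;
}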
@@ -182,7 +182,7 @@ MeshGeometry::MeshGeometry(uint64_t id, const ElementPtr element, const std::str

 	// This is stupid, because it means we select them ALL not just the one we want.
 	// but it's fine we can match by id.
-	GetRequiredElement(top, layer_type_name);
 	const ElementCollection &candidates = top->GetCollection(layer_type_name);

 	ElementMap::const_iterator iter;
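For the first fix, context helps: in this parser a scope stores its elements in a multimap keyed by name, so `GetCollection` hands back the whole range of same-named elements (hence "we select them ALL") and the caller narrows the range down by id. A sketch of that shape, assuming the multimap-backed storage (simplified types, not the parser's actual headers):

#include <map>
#include <string>
#include <utility>

struct Element {
	unsigned long long id = 0;
	// ... tokens, children ...
};

typedef std::multimap<std::string, const Element *> ElementMap;
typedef std::pair<ElementMap::const_iterator, ElementMap::const_iterator> ElementCollection;

// Every element sharing a name, e.g. all "LayerElementUV" entries in a scope.
ElementCollection get_collection(const ElementMap &elements, const std::string &name) {
	return elements.equal_range(name);
}

// The caller then picks the one element it actually wants by id.
const Element *find_by_id(const ElementCollection &candidates, unsigned long long id) {
	for (ElementMap::const_iterator it = candidates.first; it != candidates.second; ++it) {
		if (it->second->id == id) {
			return it->second;
		}
	}
	return nullptr;
}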
@@ -142,7 +142,7 @@ void ProcessDataToken(TokenList &output_tokens, const char *&start, const char *
 } // namespace

 // ------------------------------------------------------------------------------------------------
-void Tokenize(TokenList &output_tokens, const char *input) {
+void Tokenize(TokenList &output_tokens, const char *input, size_t length) {
 	// line and column numbers numbers are one-based
 	unsigned int line = 1;
 	unsigned int column = 1;
@@ -152,8 +152,13 @@ void Tokenize(TokenList &output_tokens, const char *input) {
 	bool pending_data_token = false;

 	const char *token_begin = nullptr, *token_end = nullptr;
-	for (const char *cur = input; *cur; column += (*cur == '\t' ? ASSIMP_FBX_TAB_WIDTH : 1), ++cur) {
-		const char c = *cur;
+	// input (starting string), *cur the current string, column +=
+	// modified to fix strlen() and stop buffer overflow
+	for (size_t x = 0; x < length; x++) {
+		const char c = input[x];
+		const char *cur = &input[x];
+		column += (c == '\t' ? ASSIMP_FBX_TAB_WIDTH : 1);

 		if (IsLineEnd(c)) {
 			comment = false;
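The core of the overflow fix is visible above: the sentinel-terminated for-loop becomes an index loop bounded by `length`, with the column bookkeeping moved into the body. A compilable skeleton of the rewritten loop, minus the tokenizer's state machine (the ASSIMP_FBX_TAB_WIDTH value is an assumption):

#include <cstddef>

#define ASSIMP_FBX_TAB_WIDTH 4 // assumption: the real value is defined by the tokenizer

static bool is_line_end(char c) {
	return c == '\n' || c == '\r';
}

void tokenize_skeleton(const char *input, size_t length) {
	unsigned int line = 1;
	unsigned int column = 1;
	for (size_t x = 0; x < length; x++) {
		const char c = input[x]; // current character, fetched by index, never past `length`
		column += (c == '\t' ? ASSIMP_FBX_TAB_WIDTH : 1);
		if (is_line_end(c)) {
			column = 1;
			++line;
		}
		// ... comment, string, and data-token handling goes here ...
	}
	(void)line; // bookkeeping only, in this stripped-down skeleton
}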
@@ -187,7 +187,7 @@ typedef std::vector<TokenPtr> TokenList;
  * @param output_tokens Receives a list of all tokens in the input data.
  * @param input_buffer Textual input buffer to be processed, 0-terminated.
  * @print_error if something goes wrong */
-void Tokenize(TokenList &output_tokens, const char *input);
+void Tokenize(TokenList &output_tokens, const char *input, size_t length);

 /** Tokenizer function for binary FBX files.
  *
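With the header change, the ASCII entry point matches TokenizeBinary in shape. A call-site sketch against the patched declaration (stub types for illustration; it will not link without the parser sources):

#include <cstddef>
#include <string>
#include <vector>

namespace FBXDocParser {
class Token;
typedef Token *TokenPtr;
typedef std::vector<TokenPtr> TokenList;
// Patched declaration: the length now accompanies the pointer.
void Tokenize(TokenList &output_tokens, const char *input, size_t length);
} // namespace FBXDocParser

int main() {
	std::string source = "Objects: { }"; // stand-in for an ASCII FBX document
	FBXDocParser::TokenList tokens;
	FBXDocParser::Tokenize(tokens, source.data(), source.size());
	return 0;
}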