Merge pull request #81112 from akien-mga/3.x-cherrypicks
Cherry-picks for the 3.x branch (future 3.6) - 14th batch
Commit 98fe7f4a30
171 changed files with 16976 additions and 10214 deletions
3	.github/workflows/android_builds.yml	vendored
@@ -1,5 +1,6 @@
name: 🤖 Android Builds
on: [push, pull_request]
on:
workflow_call:

# Global Settings
env:

3	.github/workflows/ios_builds.yml	vendored
@@ -1,5 +1,6 @@
name: 🍏 iOS Builds
on: [push, pull_request]
on:
workflow_call:

# Global Settings
env:

3	.github/workflows/javascript_builds.yml	vendored
@@ -1,5 +1,6 @@
name: 🌐 JavaScript Builds
on: [push, pull_request]
on:
workflow_call:

# Global Settings
env:
9	.github/workflows/linux_builds.yml	vendored
@@ -1,5 +1,6 @@
name: 🐧 Linux Builds
on: [push, pull_request]
on:
workflow_call:

# Global Settings
env:
@@ -63,6 +64,12 @@ jobs:
libdbus-1-dev libudev-dev libxi-dev libxrandr-dev yasm xvfb wget unzip \
libspeechd-dev speech-dispatcher

- name: Free disk space on runner
run: |
echo "Disk usage before:" && df -h
sudo rm -rf /usr/local/lib/android
echo "Disk usage after:" && df -h

- name: Setup Godot build cache
uses: ./.github/actions/godot-cache
with:
3	.github/workflows/macos_builds.yml	vendored
@@ -1,5 +1,6 @@
name: 🍎 macOS Builds
on: [push, pull_request]
on:
workflow_call:

# Global Settings
env:

46	.github/workflows/runner.yml	vendored	Normal file
@@ -0,0 +1,46 @@
name: 🔗 GHA
on: [push, pull_request]

concurrency:
group: ci-${{github.actor}}-${{github.head_ref || github.run_number}}-${{github.ref}}-runner
cancel-in-progress: true

jobs:
static-checks:
name: 📊 Static
uses: ./.github/workflows/static_checks.yml

android-build:
name: 🤖 Android
needs: static-checks
uses: ./.github/workflows/android_builds.yml

ios-build:
name: 🍏 iOS
needs: static-checks
uses: ./.github/workflows/ios_builds.yml

javascript-build:
name: 🌐 JavaScript
needs: static-checks
uses: ./.github/workflows/javascript_builds.yml

linux-build:
name: 🐧 Linux
needs: static-checks
uses: ./.github/workflows/linux_builds.yml

macos-build:
name: 🍎 macOS
needs: static-checks
uses: ./.github/workflows/macos_builds.yml

server-build:
name: ☁ Server
needs: static-checks
uses: ./.github/workflows/server_builds.yml

windows-build:
name: 🏁 Windows
needs: static-checks
uses: ./.github/workflows/windows_builds.yml
3	.github/workflows/server_builds.yml	vendored
@@ -1,5 +1,6 @@
name: ☁ Server Builds
on: [push, pull_request]
on:
workflow_call:

# Global Settings
env:

8	.github/workflows/static_checks.yml	vendored
@@ -1,5 +1,6 @@
name: 📊 Static Checks
on: [push, pull_request]
on:
workflow_call:

concurrency:
group: ci-${{github.actor}}-${{github.head_ref || github.run_number}}-${{github.ref}}-static
@@ -29,6 +30,11 @@ jobs:
sudo update-alternatives --install /usr/bin/clang-format clang-format /usr/bin/clang-format-15 100
sudo pip3 install black==22.3.0 pygments

# This needs to happen before Python and npm execution; it must happen before any extra files are written.
- name: .gitignore checks (gitignore_check.sh)
run: |
bash ./misc/scripts/gitignore_check.sh

- name: File formatting checks (file_format.sh)
run: |
bash ./misc/scripts/file_format.sh
3	.github/workflows/windows_builds.yml	vendored
@@ -1,5 +1,6 @@
name: 🏁 Windows Builds
on: [push, pull_request]
on:
workflow_call:

# Global Settings
# SCONS_CACHE for windows must be set in the build environment
27	.gitignore	vendored
@@ -131,23 +131,9 @@ cppcheck-cppcheck-build-dir/
*.pydevproject
*.launch

# Gcov and Lcov code coverage
*.gcno
# GCOV code coverage
*.gcda
*.gcov.html
*.func.html
*.func-sort-c.html
*index-sort-f.html
*index-sort-l.html
*index.html
godot.info
amber.png
emerald.png
glass.png
ruby.png
snow.png
updown.png
gcov.css
*.gcno

# Geany
*.geany
@@ -246,9 +232,6 @@ xcuserdata/
x64/
x86/

# Do not ignore x86 folders anywhere under thirdparty libraries
!thirdparty/**/x86/

[Ww][Ii][Nn]32/
[Aa][Rr][Mm]/
[Aa][Rr][Mm]64/
@@ -258,6 +241,12 @@ bld/
[Ll]og/
[Ll]ogs/

# Do not ignore arch-specific folders anywhere under thirdparty libraries
!thirdparty/**/x64/
!thirdparty/**/x86/
!thirdparty/**/arm/
!thirdparty/**/arm64/

# Visual Studio 2015/2017 cache/options directory
.vs/
@@ -269,7 +269,7 @@ License: CC0-1.0

Files: ./thirdparty/miniupnpc/
Comment: MiniUPnP Project
Copyright: 2005-2022, Thomas Bernard
Copyright: 2005-2023, Thomas Bernard
License: BSD-3-clause

Files: ./thirdparty/minizip/
@@ -413,7 +413,7 @@ License: Zlib

Files: ./thirdparty/zstd/
Comment: Zstandard
Copyright: 2016-2021, Facebook, Inc.
Copyright: Meta Platforms, Inc. and affiliates.
License: BSD-3-clause
@@ -506,6 +506,7 @@ if selected_platform in platform_list:

if env["werror"]:
env.Append(CCFLAGS=["/WX"])
env.Append(LINKFLAGS=["/WX"])
else: # GCC, Clang
version = methods.get_compiler_version(env) or [-1, -1]
@@ -2477,6 +2477,9 @@ void Image::_repeat_pixel_over_subsequent_memory(uint8_t *p_pixel, int p_pixel_s
}

void Image::fill(const Color &p_color) {
if (data.size() == 0) {
return;
}
ERR_FAIL_COND_MSG(!_can_modify(format), "Cannot fill in compressed or custom image formats.");

lock();
@@ -2495,6 +2498,9 @@ void Image::fill(const Color &p_color) {
}

void Image::fill_rect(const Rect2 &p_rect, const Color &p_color) {
if (data.size() == 0) {
return;
}
ERR_FAIL_COND_MSG(!_can_modify(format), "Cannot fill rect in compressed or custom image formats.");

Rect2i r = Rect2i(0, 0, width, height).clip(p_rect.abs());
26	misc/scripts/gitignore_check.sh	Normal file
@@ -0,0 +1,26 @@
set -uo pipefail
shopt -s globstar

echo -e ".gitignore validation..."

# Get a list of files that exist in the repo but are ignored.

# The --verbose flag also includes files un-ignored via ! prefixes.
# We filter those out with a somewhat awkward `awk` directive.
# (Explanation: Split each line by : delimiters,
# see if the actual gitignore line shown in the third field starts with !,
# if it doesn't, print it.)

# ignorecase for the sake of Windows users.

output=$(git -c core.ignorecase=true check-ignore --verbose --no-index **/* | \
awk -F ':' '{ if ($3 !~ /^!/) print $0 }')

# Then we take this result and return success if it's empty.
if [ -z "$output" ]; then
exit 0
else
# And print the result if it isn't.
echo "$output"
exit 1
fi
@@ -1,5 +1,5 @@
def can_build(env, platform):
return not env["arch"].startswith("rv")
return True


def configure(env):
@@ -40,7 +40,9 @@ static void *_regex_malloc(PCRE2_SIZE size, void *user) {
}

static void _regex_free(void *ptr, void *user) {
memfree(ptr);
if (ptr) {
memfree(ptr);
}
}

int RegExMatch::_find(const Variant &p_name) const {
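The null guard added above mirrors a common pattern when handing a custom allocator pair to PCRE2: the library may invoke the free callback with a null pointer, and the backing allocator (Godot's memfree here) is not required to tolerate that. A minimal stand-alone C sketch of the same idea, using the public PCRE2 API and plain malloc/free as stand-in allocators (the names my_malloc/my_free are illustrative only):

#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
#include <stdlib.h>

/* Allocator pair in the same shape as _regex_malloc/_regex_free above. */
static void *my_malloc(PCRE2_SIZE size, void *user) {
    (void)user;
    return malloc(size);
}

static void my_free(void *ptr, void *user) {
    (void)user;
    if (ptr) {       /* PCRE2 may legitimately pass NULL here. */
        free(ptr);
    }
}

int main(void) {
    pcre2_general_context *gctx =
        pcre2_general_context_create(my_malloc, my_free, NULL);
    /* ... create compile/match contexts from gctx and compile patterns ... */
    pcre2_general_context_free(gctx);
    return 0;
}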
1	platform/android/java/editor/src/.gitignore	vendored	Normal file
@@ -0,0 +1 @@
!/debug
@@ -64,7 +64,7 @@ internal class AssetsDirectoryAccess(context: Context) : DirectoryAccessHandler.
override fun hasDirId(dirId: Int) = dirs.indexOfKey(dirId) >= 0

override fun dirOpen(path: String): Int {
val assetsPath = getAssetsPath(path) ?: return INVALID_DIR_ID
val assetsPath = getAssetsPath(path)
try {
val files = assetManager.list(assetsPath) ?: return INVALID_DIR_ID
// Empty directories don't get added to the 'assets' directory, so
@@ -99,7 +99,7 @@ internal class AssetsDirectoryAccess(context: Context) : DirectoryAccessHandler.
}

override fun fileExists(path: String): Boolean {
val assetsPath = getAssetsPath(path) ?: return false
val assetsPath = getAssetsPath(path)
try {
val files = assetManager.list(assetsPath) ?: return false
// Empty directories don't get added to the 'assets' directory, so
@@ -83,10 +83,10 @@ def configure(env):
env.Append(CCFLAGS=["-arch", "arm64", "-mmacosx-version-min=10.15"])
env.Append(LINKFLAGS=["-arch", "arm64", "-mmacosx-version-min=10.15"])
else:
print("Building for macOS 10.12+, platform x86-64.")
env.Append(ASFLAGS=["-arch", "x86_64", "-mmacosx-version-min=10.12"])
env.Append(CCFLAGS=["-arch", "x86_64", "-mmacosx-version-min=10.12"])
env.Append(LINKFLAGS=["-arch", "x86_64", "-mmacosx-version-min=10.12"])
print("Building for macOS 10.13+, platform x86-64.")
env.Append(ASFLAGS=["-arch", "x86_64", "-mmacosx-version-min=10.13"])
env.Append(CCFLAGS=["-arch", "x86_64", "-mmacosx-version-min=10.13"])
env.Append(LINKFLAGS=["-arch", "x86_64", "-mmacosx-version-min=10.13"])

if not "osxcross" in env: # regular native build
if env["macports_clang"] != "no":
@@ -510,11 +510,7 @@ static NSCursor *cursorFromSelector(SEL selector, SEL fallback = nil) {
trackingArea = nil;
imeInputEventInProgress = false;
[self updateTrackingAreas];
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101400
[self registerForDraggedTypes:[NSArray arrayWithObject:NSPasteboardTypeFileURL]];
#else
[self registerForDraggedTypes:[NSArray arrayWithObject:NSFilenamesPboardType]];
#endif
markedText = [[NSMutableAttributedString alloc] init];
return self;
}
@@ -660,7 +656,6 @@ static const NSRange kEmptyRange = { NSNotFound, 0 };
Vector<String> files;
NSPasteboard *pboard = [sender draggingPasteboard];

#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101400
NSArray *items = pboard.pasteboardItems;
for (NSPasteboardItem *item in items) {
NSString *path = [item stringForType:NSPasteboardTypeFileURL];
@@ -671,16 +666,6 @@ static const NSRange kEmptyRange = { NSNotFound, 0 };
free(utfs);
files.push_back(ret);
}
#else
NSArray *filenames = [pboard propertyListForType:NSFilenamesPboardType];
for (NSString *ns in filenames) {
char *utfs = strdup([ns UTF8String]);
String ret;
ret.parse_utf8(utfs);
free(utfs);
files.push_back(ret);
}
#endif

if (files.size()) {
OS_OSX::singleton->main_loop->drop_files(files, 0);
@@ -51,7 +51,6 @@ def get_flags():


def configure(env):

## Build type

if env["target"] == "release":
@@ -76,6 +75,11 @@ def configure(env):
env.Prepend(CCFLAGS=["-g3"])
env.Append(LINKFLAGS=["-rdynamic"])

if env["debug_symbols"]:
# Adding dwarf-4 explicitly makes stacktraces work with clang builds,
# otherwise addr2line doesn't understand them
env.Append(CCFLAGS=["-gdwarf-4"])

## Architecture

is64 = sys.maxsize > 2**32
@@ -44,6 +44,7 @@
#include <cxxabi.h>
#include <dlfcn.h>
#include <execinfo.h>
#include <link.h>
#include <signal.h>
#include <stdlib.h>

@@ -79,7 +80,33 @@ static void handle_crash(int sig) {
}
print_error(vformat("Dumping the backtrace. %s", msg));
char **strings = backtrace_symbols(bt_buffer, size);
// PIE executable relocation, zero for non-PIE executables
#ifdef __GLIBC__
// This is a glibc only thing apparently.
uintptr_t relocation = _r_debug.r_map->l_addr;
#else
// Non glibc systems apparently don't give PIE relocation info.
uintptr_t relocation = 0;
#endif //__GLIBC__
if (strings) {
List<String> args;
for (size_t i = 0; i < size; i++) {
char str[1024];
snprintf(str, 1024, "%p", (void *)((uintptr_t)bt_buffer[i] - relocation));
args.push_back(str);
}
args.push_back("-e");
args.push_back(_execpath);

// Try to get the file/line number using addr2line
int ret;
String output = "";
Error err = OS::get_singleton()->execute(String("addr2line"), args, true, nullptr, &output, &ret);
Vector<String> addr2line_results;
if (err == OK) {
addr2line_results = output.substr(0, output.length() - 1).split("\n", false);
}

for (size_t i = 1; i < size; i++) {
char fname[1024];
Dl_info info;
@@ -102,24 +129,7 @@ static void handle_crash(int sig) {
}
}

List<String> args;

char str[1024];
snprintf(str, 1024, "%p", bt_buffer[i]);
args.push_back(str);
args.push_back("-e");
args.push_back(_execpath);

String output = "";

// Try to get the file/line number using addr2line
int ret;
Error err = OS::get_singleton()->execute(String("addr2line"), args, true, nullptr, &output, &ret);
if (err == OK) {
output.erase(output.length() - 1, 1);
}

print_error(vformat("[%d] %s (%s)", (int64_t)i, fname, output));
print_error(vformat("[%d] %s (%s)", (int64_t)i, fname, err == OK ? addr2line_results[i] : ""));
}

free(strings);
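The crash-handler change above does two things: it rebases every backtrace address by the PIE relocation taken from glibc's _r_debug, and it resolves all frames with a single addr2line invocation instead of spawning one process per frame. A minimal, stand-alone C sketch of that idea for Linux/glibc follows; the addr2line binary, /proc/self/exe, and the fixed buffer sizes are assumptions of the sketch, not part of the Godot code:

#include <execinfo.h>
#include <link.h>      /* _r_debug (glibc only) */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void) {
    void *bt[256];
    int size = backtrace(bt, 256);

#ifdef __GLIBC__
    uintptr_t relocation = _r_debug.r_map->l_addr;  /* 0 for non-PIE binaries */
#else
    uintptr_t relocation = 0;
#endif

    /* Resolve our own executable path for addr2line's -e option. */
    char exe[1024];
    ssize_t len = readlink("/proc/self/exe", exe, sizeof(exe) - 1);
    if (len <= 0) {
        return 1;
    }
    exe[len] = '\0';

    /* Build one command line containing every rebased frame address. */
    char cmd[8192];
    int n = snprintf(cmd, sizeof(cmd), "addr2line -e %s", exe);
    for (int i = 1; i < size && n < (int)sizeof(cmd) - 32; i++) {
        n += snprintf(cmd + n, sizeof(cmd) - n, " %p",
                      (void *)((uintptr_t)bt[i] - relocation));
    }
    return system(cmd);  /* prints one file:line pair per frame */
}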
@@ -112,6 +112,11 @@ def configure(env):
env.Prepend(CCFLAGS=["-g3"])
env.Append(LINKFLAGS=["-rdynamic"])

if env["debug_symbols"]:
# Adding dwarf-4 explicitly makes stacktraces work with clang builds,
# otherwise addr2line doesn't understand them
env.Append(CCFLAGS=["-gdwarf-4"])

## Architecture

is64 = sys.maxsize > 2**32
@@ -419,7 +424,7 @@ def configure(env):
gnu_ld_version = re.search("^GNU ld [^$]*(\d+\.\d+)$", linker_version_str, re.MULTILINE)
if not gnu_ld_version:
print(
"Warning: Creating template binaries enabled for PCK embedding is currently only supported with GNU ld, not gold or LLD."
"Warning: Creating export template binaries enabled for PCK embedding is currently only supported with GNU ld, not gold, LLD or mold."
)
else:
if float(gnu_ld_version.group(1)) >= 2.30:
19	thirdparty/README.md	vendored
@@ -8,12 +8,13 @@ readability.
## brotli

- Upstream: https://github.com/google/brotli
- Version: git (f4153a09f87cbb9c826d8fc12c74642bb2d879ea, 2022)
- Version: git (ed1995b6bda19244070ab5d331111f16f67c8054, 2023)
- License: MIT

Files extracted from upstream source:

- `common/`, `dec/` and `include/` folders
- `common/`, `dec/` and `include/` folders from `c/`,
  minus the `dictionary.bin*` files
- `LICENSE`

@@ -185,7 +186,7 @@ Files extracted from upstream source:
## libpng

- Upstream: http://libpng.org/pub/png/libpng.html
- Version: 1.6.38 (0a158f3506502dfa23edfc42790dfaed82efba17, 2022)
- Version: 1.6.40 (f135775ad4e5d4408d2e12ffcc71bb36e6b48551, 2023)
- License: libpng/zlib

Files extracted from upstream source:
@@ -308,7 +309,7 @@ Files extracted from upstream repository:
## miniupnpc

- Upstream: https://github.com/miniupnp/miniupnp
- Version: 2.2.4 (7d1d8bc3868b08ad003bad235eee57562b95b76d, 2022)
- Version: 2.2.5 (58837ef586278d18cbebee50be758835ed4be79a, 2023)
- License: BSD-3-Clause

Files extracted from upstream source:
@@ -461,7 +462,7 @@ Files extracted from upstream source:
## pcre2

- Upstream: http://www.pcre.org
- Version: 10.40 (3103b8f20a3b9944b177e812fde29fbfb8b90558, 2022)
- Version: 10.42 (52c08847921a324c804cabf2814549f50bce1265, 2022)
- License: BSD-3-Clause

Files extracted from upstream source:
@@ -493,7 +494,7 @@ Files extracted from upstream source:
## recastnavigation

- Upstream: https://github.com/recastnavigation/recastnavigation
- Version: git (4fef0446609b23d6ac180ed822817571525528a1, 2022)
- Version: 1.6.0 (6dc1667f580357e8a2154c28b7867bea7e8ad3a7, 2023)
- License: zlib

Files extracted from upstream source:
@@ -567,7 +568,7 @@ folder.
## wslay

- Upstream: https://github.com/tatsuhiro-t/wslay
- Version: 1.1.1+git (45d22583b488f79d5a4e598cc7675c191c5ab53f, 2021)
- Version: 1.1.1+git (0e7d106ff89ad6638090fd811a9b2e4c5dda8d40, 2022)
- License: MIT

File extracted from upstream release tarball:
@@ -584,7 +585,7 @@ File extracted from upstream release tarball:
## xatlas

- Upstream: https://github.com/jpcy/xatlas
- Version: git (16ace528acd2cf1f16a7c0dde99c42c486488dbe, 2022)
- Version: git (f700c7790aaa030e794b52ba7791a05c085faf0c, 2022)
- License: MIT

Files extracted from upstream source:
@@ -608,7 +609,7 @@ Files extracted from upstream source:
## zstd

- Upstream: https://github.com/facebook/zstd
- Version: 1.5.2 (e47e674cd09583ff0503f0f6defd6d23d8b718d3, 2022)
- Version: 1.5.5 (63779c798237346c2b245c546c40b72a5a5913fe, 2023)
- License: BSD-3-Clause

Files extracted from upstream source:
3	thirdparty/brotli/common/constants.h	vendored
@@ -12,10 +12,11 @@
#ifndef BROTLI_COMMON_CONSTANTS_H_
#define BROTLI_COMMON_CONSTANTS_H_

#include "platform.h"
#include <brotli/port.h>
#include <brotli/types.h>

#include "platform.h"

/* Specification: 7.3. Encoding of the context map */
#define BROTLI_CONTEXT_MAP_MAX_RLE 16
2	thirdparty/brotli/common/dictionary.c	vendored
@@ -5897,7 +5897,7 @@ static BrotliDictionary kBrotliDictionary = {
#endif
};

const BrotliDictionary* BrotliGetDictionary() {
const BrotliDictionary* BrotliGetDictionary(void) {
return &kBrotliDictionary;
}
3	thirdparty/brotli/common/platform.c	vendored
@@ -6,9 +6,10 @@

#include <stdlib.h>

#include "platform.h"
#include <brotli/types.h>

#include "platform.h"

/* Default brotli_alloc_func */
void* BrotliDefaultAllocFunc(void* opaque, size_t size) {
BROTLI_UNUSED(opaque);
123	thirdparty/brotli/common/platform.h	vendored
@@ -12,9 +12,9 @@
 * BROTLI_BUILD_BIG_ENDIAN forces to use big-endian optimizations
 * BROTLI_BUILD_ENDIAN_NEUTRAL disables endian-aware optimizations
 * BROTLI_BUILD_LITTLE_ENDIAN forces to use little-endian optimizations
 * BROTLI_BUILD_PORTABLE disables dangerous optimizations, like unaligned
   read and overlapping memcpy; this reduces decompression speed by 5%
 * BROTLI_BUILD_NO_RBIT disables "rbit" optimization for ARM CPUs
 * BROTLI_BUILD_NO_UNALIGNED_READ_FAST forces off the fast-unaligned-read
   optimizations (mainly for testing purposes).
 * BROTLI_DEBUG dumps file name and line number when decoder detects stream
   or memory error
 * BROTLI_ENABLE_LOG enables asserts and dumps various state information
@@ -208,15 +208,19 @@ OR:
#define BROTLI_TARGET_RISCV64
#endif

#if defined(BROTLI_TARGET_X64) || defined(BROTLI_TARGET_ARMV8_64) || \
    defined(BROTLI_TARGET_POWERPC64) || defined(BROTLI_TARGET_RISCV64)
#define BROTLI_TARGET_64_BITS 1
#else
#define BROTLI_TARGET_64_BITS 0
#endif

#if defined(BROTLI_BUILD_64_BIT)
#define BROTLI_64_BITS 1
#elif defined(BROTLI_BUILD_32_BIT)
#define BROTLI_64_BITS 0
#elif defined(BROTLI_TARGET_X64) || defined(BROTLI_TARGET_ARMV8_64) || \
    defined(BROTLI_TARGET_POWERPC64) || defined(BROTLI_TARGET_RISCV64)
#define BROTLI_64_BITS 1
#else
#define BROTLI_64_BITS 0
#define BROTLI_64_BITS BROTLI_TARGET_64_BITS
#endif

#if (BROTLI_64_BITS)
@@ -260,18 +264,19 @@ OR:
#undef BROTLI_X_BIG_ENDIAN
#endif

#if defined(BROTLI_BUILD_PORTABLE)
#define BROTLI_ALIGNED_READ (!!1)
#elif defined(BROTLI_TARGET_X86) || defined(BROTLI_TARGET_X64) || \
#if defined(BROTLI_BUILD_NO_UNALIGNED_READ_FAST)
#define BROTLI_UNALIGNED_READ_FAST (!!0)
#elif defined(BROTLI_TARGET_X86) || defined(BROTLI_TARGET_X64) || \
    defined(BROTLI_TARGET_ARMV7) || defined(BROTLI_TARGET_ARMV8_ANY) || \
    defined(BROTLI_TARGET_RISCV64)
/* Allow unaligned read only for white-listed CPUs. */
#define BROTLI_ALIGNED_READ (!!0)
/* These targets are known to generate efficient code for unaligned reads
 * (e.g. a single instruction, not multiple 1-byte loads, shifted and or'd
 * together). */
#define BROTLI_UNALIGNED_READ_FAST (!!1)
#else
#define BROTLI_ALIGNED_READ (!!1)
#define BROTLI_UNALIGNED_READ_FAST (!!0)
#endif

#if BROTLI_ALIGNED_READ
/* Portable unaligned memory access: read / write values via memcpy. */
static BROTLI_INLINE uint16_t BrotliUnalignedRead16(const void* p) {
uint16_t t;
@@ -291,75 +296,6 @@ static BROTLI_INLINE uint64_t BrotliUnalignedRead64(const void* p) {
static BROTLI_INLINE void BrotliUnalignedWrite64(void* p, uint64_t v) {
memcpy(p, &v, sizeof v);
}
#else /* BROTLI_ALIGNED_READ */
/* Unaligned memory access is allowed: just cast pointer to requested type. */
#if BROTLI_SANITIZED
/* Consider we have an unaligned load/store of 4 bytes from address 0x...05.
   AddressSanitizer will treat it as a 3-byte access to the range 05:07 and
   will miss a bug if 08 is the first unaddressable byte.
   ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will
   miss a race between this access and some other accesses to 08.
   MemorySanitizer will correctly propagate the shadow on unaligned stores
   and correctly report bugs on unaligned loads, but it may not properly
   update and report the origin of the uninitialized memory.
   For all three tools, replacing an unaligned access with a tool-specific
   callback solves the problem. */
#if defined(__cplusplus)
extern "C" {
#endif /* __cplusplus */
uint16_t __sanitizer_unaligned_load16(const void* p);
uint32_t __sanitizer_unaligned_load32(const void* p);
uint64_t __sanitizer_unaligned_load64(const void* p);
void __sanitizer_unaligned_store64(void* p, uint64_t v);
#if defined(__cplusplus)
} /* extern "C" */
#endif /* __cplusplus */
#define BrotliUnalignedRead16 __sanitizer_unaligned_load16
#define BrotliUnalignedRead32 __sanitizer_unaligned_load32
#define BrotliUnalignedRead64 __sanitizer_unaligned_load64
#define BrotliUnalignedWrite64 __sanitizer_unaligned_store64
#else /* BROTLI_SANITIZED */
static BROTLI_INLINE uint16_t BrotliUnalignedRead16(const void* p) {
return *(const uint16_t*)p;
}
static BROTLI_INLINE uint32_t BrotliUnalignedRead32(const void* p) {
return *(const uint32_t*)p;
}
#if (BROTLI_64_BITS)
static BROTLI_INLINE uint64_t BrotliUnalignedRead64(const void* p) {
return *(const uint64_t*)p;
}
static BROTLI_INLINE void BrotliUnalignedWrite64(void* p, uint64_t v) {
*(uint64_t*)p = v;
}
#else /* BROTLI_64_BITS */
/* Avoid emitting LDRD / STRD, which require properly aligned address. */
/* If __attribute__(aligned) is available, use that. Otherwise, memcpy. */

#if BROTLI_GNUC_HAS_ATTRIBUTE(aligned, 2, 7, 0)
typedef BROTLI_ALIGNED(1) uint64_t brotli_unaligned_uint64_t;

static BROTLI_INLINE uint64_t BrotliUnalignedRead64(const void* p) {
return (uint64_t) ((const brotli_unaligned_uint64_t*) p)[0];
}
static BROTLI_INLINE void BrotliUnalignedWrite64(void* p, uint64_t v) {
brotli_unaligned_uint64_t* dwords = (brotli_unaligned_uint64_t*) p;
dwords[0] = (brotli_unaligned_uint64_t) v;
}
#else /* BROTLI_GNUC_HAS_ATTRIBUTE(aligned, 2, 7, 0) */
static BROTLI_INLINE uint64_t BrotliUnalignedRead64(const void* p) {
uint64_t v;
memcpy(&v, p, sizeof(uint64_t));
return v;
}

static BROTLI_INLINE void BrotliUnalignedWrite64(void* p, uint64_t v) {
memcpy(p, &v, sizeof(uint64_t));
}
#endif /* BROTLI_GNUC_HAS_ATTRIBUTE(aligned, 2, 7, 0) */
#endif /* BROTLI_64_BITS */
#endif /* BROTLI_SANITIZED */
#endif /* BROTLI_ALIGNED_READ */

#if BROTLI_LITTLE_ENDIAN
/* Straight endianness. Just read / write values. */
@@ -435,6 +371,16 @@ static BROTLI_INLINE void BROTLI_UNALIGNED_STORE64LE(void* p, uint64_t v) {
}
#endif /* BROTLI_LITTLE_ENDIAN */

static BROTLI_INLINE void* BROTLI_UNALIGNED_LOAD_PTR(const void* p) {
void* v;
memcpy(&v, p, sizeof(void*));
return v;
}

static BROTLI_INLINE void BROTLI_UNALIGNED_STORE_PTR(void* p, const void* v) {
memcpy(p, &v, sizeof(void*));
}

/* BROTLI_IS_CONSTANT macros returns true for compile-time constants. */
#if BROTLI_GNUC_HAS_BUILTIN(__builtin_constant_p, 3, 0, 1) || \
    BROTLI_INTEL_VERSION_CHECK(16, 0, 0)
@@ -467,6 +413,8 @@ static BROTLI_INLINE void BrotliDump(const char* f, int l, const char* fn) {
#define BROTLI_DUMP() (void)(0)
#endif

/* BrotliRBit assumes brotli_reg_t fits native CPU register type. */
#if (BROTLI_64_BITS == BROTLI_TARGET_64_BITS)
/* TODO(eustas): add appropriate icc/sunpro/arm/ibm/ti checks. */
#if (BROTLI_GNUC_VERSION_CHECK(3, 0, 0) || defined(__llvm__)) && \
    !defined(BROTLI_BUILD_NO_RBIT)
@@ -480,15 +428,14 @@ static BROTLI_INLINE brotli_reg_t BrotliRBit(brotli_reg_t input) {
#define BROTLI_RBIT(x) BrotliRBit(x)
#endif /* armv7 / armv8 */
#endif /* gcc || clang */
#endif /* brotli_reg_t is native */
#if !defined(BROTLI_RBIT)
static BROTLI_INLINE void BrotliRBit(void) { /* Should break build if used. */ }
#endif /* BROTLI_RBIT */

#define BROTLI_REPEAT(N, X) { \
  if ((N & 1) != 0) {X;} \
  if ((N & 2) != 0) {X; X;} \
  if ((N & 4) != 0) {X; X; X; X;} \
}
#define BROTLI_REPEAT_4(X) {X; X; X; X;}
#define BROTLI_REPEAT_5(X) {X; X; X; X; X;}
#define BROTLI_REPEAT_6(X) {X; X; X; X; X; X;}

#define BROTLI_UNUSED(X) (void)(X)
@@ -553,6 +500,8 @@ BROTLI_UNUSED_FUNCTION void BrotliSuppressUnusedFunctions(void) {
BROTLI_UNUSED(&BROTLI_UNALIGNED_LOAD32LE);
BROTLI_UNUSED(&BROTLI_UNALIGNED_LOAD64LE);
BROTLI_UNUSED(&BROTLI_UNALIGNED_STORE64LE);
BROTLI_UNUSED(&BROTLI_UNALIGNED_LOAD_PTR);
BROTLI_UNUSED(&BROTLI_UNALIGNED_STORE_PTR);
BROTLI_UNUSED(&BrotliRBit);
BROTLI_UNUSED(&brotli_min_double);
BROTLI_UNUSED(&brotli_max_double);
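The platform.h comments above explain the switch from BROTLI_ALIGNED_READ to BROTLI_UNALIGNED_READ_FAST: targets not on the allow list fall back to reading through memcpy, which never dereferences a misaligned pointer but still compiles down to a single load where that is safe. A tiny stand-alone C illustration of that fallback idiom (the function name and test values are mine, not brotli's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Portable unaligned 32-bit read: the compiler may lower the memcpy to one
   plain load on x86/ARMv8, and to byte accesses on strict-alignment targets. */
static uint32_t read32_unaligned(const void *p) {
    uint32_t v;
    memcpy(&v, p, sizeof v);
    return v;
}

int main(void) {
    unsigned char buf[8] = {0x00, 0x78, 0x56, 0x34, 0x12, 0x00, 0x00, 0x00};
    /* &buf[1] is deliberately misaligned. */
    printf("0x%08x\n", read32_unaligned(&buf[1]));  /* 0x12345678 on little-endian */
    return 0;
}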
@@ -9,11 +9,12 @@
#ifndef BROTLI_COMMON_SHARED_DICTIONARY_INTERNAL_H_
#define BROTLI_COMMON_SHARED_DICTIONARY_INTERNAL_H_

#include "dictionary.h"
#include <brotli/shared_dictionary.h>
#include "transform.h"
#include <brotli/types.h>

#include "dictionary.h"
#include "transform.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
5	thirdparty/brotli/dec/bit_reader.c	vendored
@@ -8,9 +8,10 @@

#include "bit_reader.h"

#include "../common/platform.h"
#include <brotli/types.h>

#include "../common/platform.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
@@ -36,7 +37,7 @@ BROTLI_BOOL BrotliWarmupBitReader(BrotliBitReader* const br) {
/* Fixing alignment after unaligned BrotliFillWindow would result accumulator
   overflow. If unalignment is caused by BrotliSafeReadBits, then there is
   enough space in accumulator to fix alignment. */
if (!BROTLI_ALIGNED_READ) {
if (BROTLI_UNALIGNED_READ_FAST) {
aligned_read_mask = 0;
}
if (BrotliGetAvailableBits(br) == 0) {
29	thirdparty/brotli/dec/bit_reader.h	vendored
@@ -11,9 +11,10 @@

#include <string.h>  /* memcpy */

#include <brotli/types.h>

#include "../common/constants.h"
#include "../common/platform.h"
#include <brotli/types.h>

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
@@ -53,8 +54,8 @@ BROTLI_INTERNAL void BrotliInitBitReader(BrotliBitReader* const br);
/* Ensures that accumulator is not empty.
   May consume up to sizeof(brotli_reg_t) - 1 bytes of input.
   Returns BROTLI_FALSE if data is required but there is no input available.
   For BROTLI_ALIGNED_READ this function also prepares bit reader for aligned
   reading. */
   For !BROTLI_UNALIGNED_READ_FAST this function also prepares bit reader for
   aligned reading. */
BROTLI_INTERNAL BROTLI_BOOL BrotliWarmupBitReader(BrotliBitReader* const br);

/* Fallback for BrotliSafeReadBits32. Extracted as noninlined method to unburden
@@ -107,7 +108,8 @@ static BROTLI_INLINE BROTLI_BOOL BrotliCheckInputAmount(
static BROTLI_INLINE void BrotliFillBitWindow(
    BrotliBitReader* const br, uint32_t n_bits) {
#if (BROTLI_64_BITS)
if (!BROTLI_ALIGNED_READ && BROTLI_IS_CONSTANT(n_bits) && (n_bits <= 8)) {
if (BROTLI_UNALIGNED_READ_FAST && BROTLI_IS_CONSTANT(n_bits) &&
    (n_bits <= 8)) {
uint32_t bit_pos = br->bit_pos_;
if (bit_pos >= 56) {
br->val_ =
@@ -117,8 +119,8 @@ static BROTLI_INLINE void BrotliFillBitWindow(
br->avail_in -= 7;
br->next_in += 7;
}
} else if (
    !BROTLI_ALIGNED_READ && BROTLI_IS_CONSTANT(n_bits) && (n_bits <= 16)) {
} else if (BROTLI_UNALIGNED_READ_FAST && BROTLI_IS_CONSTANT(n_bits) &&
    (n_bits <= 16)) {
uint32_t bit_pos = br->bit_pos_;
if (bit_pos >= 48) {
br->val_ =
@@ -140,7 +142,8 @@ static BROTLI_INLINE void BrotliFillBitWindow(
}
}
#else
if (!BROTLI_ALIGNED_READ && BROTLI_IS_CONSTANT(n_bits) && (n_bits <= 8)) {
if (BROTLI_UNALIGNED_READ_FAST && BROTLI_IS_CONSTANT(n_bits) &&
    (n_bits <= 8)) {
uint32_t bit_pos = br->bit_pos_;
if (bit_pos >= 24) {
br->val_ =
@@ -338,6 +341,11 @@ static BROTLI_INLINE BROTLI_BOOL BrotliJumpToByteBoundary(BrotliBitReader* br) {
return TO_BROTLI_BOOL(pad_bits == 0);
}

static BROTLI_INLINE void BrotliDropBytes(BrotliBitReader* br, size_t num) {
br->avail_in -= num;
br->next_in += num;
}

/* Copies remaining input bytes stored in the bit reader to the output. Value
   |num| may not be larger than BrotliGetRemainingBytes. The bit reader must be
   warmed up again after this. */
@@ -349,9 +357,10 @@ static BROTLI_INLINE void BrotliCopyBytes(uint8_t* dest,
++dest;
--num;
}
memcpy(dest, br->next_in, num);
br->avail_in -= num;
br->next_in += num;
if (num > 0) {
memcpy(dest, br->next_in, num);
BrotliDropBytes(br, num);
}
}

#if defined(__cplusplus) || defined(c_plusplus)
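The new zero-length guard in BrotliCopyBytes is worth a note: in C, memcpy requires both pointer arguments to be valid even when the count is zero, and the reader's input pointer can still be unset at that point in a streaming decode. A minimal sketch of the same defensive pattern in isolation (the wrapper name is mine):

#include <stddef.h>
#include <string.h>

/* Only touch memcpy when there is actually something to copy; this avoids
   undefined behaviour when src or dest may be NULL alongside a zero count. */
static void copy_bytes_checked(void *dest, const void *src, size_t num) {
    if (num > 0) {
        memcpy(dest, src, num);
    }
}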
103	thirdparty/brotli/dec/decode.c	vendored
@@ -113,8 +113,9 @@ void BrotliDecoderDestroyInstance(BrotliDecoderState* state) {

/* Saves error code and converts it to BrotliDecoderResult. */
static BROTLI_NOINLINE BrotliDecoderResult SaveErrorCode(
    BrotliDecoderState* s, BrotliDecoderErrorCode e) {
    BrotliDecoderState* s, BrotliDecoderErrorCode e, size_t consumed_input) {
s->error_code = (int)e;
s->used_input += consumed_input;
switch (e) {
case BROTLI_DECODER_SUCCESS:
return BROTLI_DECODER_RESULT_SUCCESS;
@@ -1172,7 +1173,7 @@ static BROTLI_INLINE void DetectTrivialLiteralBlockTypes(
size_t sample = s->context_map[offset];
size_t j;
for (j = 0; j < (1u << BROTLI_LITERAL_CONTEXT_BITS);) {
BROTLI_REPEAT(4, error |= s->context_map[offset + j++] ^ sample;)
BROTLI_REPEAT_4({ error |= s->context_map[offset + j++] ^ sample; })
}
if (error == 0) {
s->trivial_literal_contexts[i >> 5] |= 1u << (i & 31);
@@ -1353,6 +1354,57 @@ static BROTLI_BOOL BROTLI_NOINLINE BrotliEnsureRingBuffer(
return BROTLI_TRUE;
}

static BrotliDecoderErrorCode BROTLI_NOINLINE
SkipMetadataBlock(BrotliDecoderState* s) {
BrotliBitReader* br = &s->br;

if (s->meta_block_remaining_len == 0) {
return BROTLI_DECODER_SUCCESS;
}

BROTLI_DCHECK((BrotliGetAvailableBits(br) & 7) == 0);

/* Drain accumulator. */
if (BrotliGetAvailableBits(br) >= 8) {
uint8_t buffer[8];
int nbytes = (int)(BrotliGetAvailableBits(br)) >> 3;
BROTLI_DCHECK(nbytes <= 8);
if (nbytes > s->meta_block_remaining_len) {
nbytes = s->meta_block_remaining_len;
}
BrotliCopyBytes(buffer, br, (size_t)nbytes);
if (s->metadata_chunk_func) {
s->metadata_chunk_func(s->metadata_callback_opaque, buffer,
    (size_t)nbytes);
}
s->meta_block_remaining_len -= nbytes;
if (s->meta_block_remaining_len == 0) {
return BROTLI_DECODER_SUCCESS;
}
}

/* Direct access to metadata is possible. */
int nbytes = (int)BrotliGetRemainingBytes(br);
if (nbytes > s->meta_block_remaining_len) {
nbytes = s->meta_block_remaining_len;
}
if (nbytes > 0) {
if (s->metadata_chunk_func) {
s->metadata_chunk_func(s->metadata_callback_opaque, br->next_in,
    (size_t)nbytes);
}
BrotliDropBytes(br, (size_t)nbytes);
s->meta_block_remaining_len -= nbytes;
if (s->meta_block_remaining_len == 0) {
return BROTLI_DECODER_SUCCESS;
}
}

BROTLI_DCHECK(BrotliGetRemainingBytes(br) == 0);

return BROTLI_DECODER_NEEDS_MORE_INPUT;
}

static BrotliDecoderErrorCode BROTLI_NOINLINE CopyUncompressedBlockToOutput(
    size_t* available_out, uint8_t** next_out, size_t* total_out,
    BrotliDecoderState* s) {
@@ -2243,6 +2295,9 @@ BrotliDecoderResult BrotliDecoderDecompressStream(
    size_t* available_out, uint8_t** next_out, size_t* total_out) {
BrotliDecoderErrorCode result = BROTLI_DECODER_SUCCESS;
BrotliBitReader* br = &s->br;
size_t input_size = *available_in;
#define BROTLI_SAVE_ERROR_CODE(code) \
    SaveErrorCode(s, (code), input_size - *available_in)
/* Ensure that |total_out| is set, even if no data will ever be pushed out. */
if (total_out) {
*total_out = s->partial_pos_out;
@@ -2252,8 +2307,8 @@ BrotliDecoderResult BrotliDecoderDecompressStream(
return BROTLI_DECODER_RESULT_ERROR;
}
if (*available_out && (!next_out || !*next_out)) {
return SaveErrorCode(
    s, BROTLI_FAILURE(BROTLI_DECODER_ERROR_INVALID_ARGUMENTS));
return BROTLI_SAVE_ERROR_CODE(
    BROTLI_FAILURE(BROTLI_DECODER_ERROR_INVALID_ARGUMENTS));
}
if (!*available_out) next_out = 0;
if (s->buffer_length == 0) {  /* Just connect bit reader to input stream. */
@@ -2410,6 +2465,10 @@ BrotliDecoderResult BrotliDecoderDecompressStream(
}
if (s->is_metadata) {
s->state = BROTLI_STATE_METADATA;
if (s->metadata_start_func) {
s->metadata_start_func(s->metadata_callback_opaque,
    (size_t)s->meta_block_remaining_len);
}
break;
}
if (s->meta_block_remaining_len == 0) {
@@ -2502,17 +2561,11 @@ BrotliDecoderResult BrotliDecoderDecompressStream(
}

case BROTLI_STATE_METADATA:
for (; s->meta_block_remaining_len > 0; --s->meta_block_remaining_len) {
uint32_t bits;
/* Read one byte and ignore it. */
if (!BrotliSafeReadBits(br, 8, &bits)) {
result = BROTLI_DECODER_NEEDS_MORE_INPUT;
break;
}
}
if (result == BROTLI_DECODER_SUCCESS) {
s->state = BROTLI_STATE_METABLOCK_DONE;
result = SkipMetadataBlock(s);
if (result != BROTLI_DECODER_SUCCESS) {
break;
}
s->state = BROTLI_STATE_METABLOCK_DONE;
break;

case BROTLI_STATE_METABLOCK_HEADER_2: {
@@ -2586,7 +2639,7 @@ BrotliDecoderResult BrotliDecoderDecompressStream(
    s, &s->distance_hgroup, distance_alphabet_size_max,
    distance_alphabet_size_limit, s->num_dist_htrees);
if (!allocation_success) {
return SaveErrorCode(s,
return BROTLI_SAVE_ERROR_CODE(
    BROTLI_FAILURE(BROTLI_DECODER_ERROR_ALLOC_TREE_GROUPS));
}
s->loop_counter = 0;
@@ -2600,7 +2653,7 @@ BrotliDecoderResult BrotliDecoderDecompressStream(
case 0: hgroup = &s->literal_hgroup; break;
case 1: hgroup = &s->insert_copy_hgroup; break;
case 2: hgroup = &s->distance_hgroup; break;
default: return SaveErrorCode(s, BROTLI_FAILURE(
default: return BROTLI_SAVE_ERROR_CODE(BROTLI_FAILURE(
    BROTLI_DECODER_ERROR_UNREACHABLE));  /* COV_NF_LINE */
}
result = HuffmanTreeGroupDecode(hgroup, s);
@@ -2710,10 +2763,11 @@ BrotliDecoderResult BrotliDecoderDecompressStream(
break;
}
}
return SaveErrorCode(s, result);
return BROTLI_SAVE_ERROR_CODE(result);
}
}
return SaveErrorCode(s, result);
return BROTLI_SAVE_ERROR_CODE(result);
#undef BROTLI_SAVE_ERROR_CODE
}

BROTLI_BOOL BrotliDecoderHasMoreOutput(const BrotliDecoderState* s) {
@@ -2743,7 +2797,7 @@ const uint8_t* BrotliDecoderTakeOutput(BrotliDecoderState* s, size_t* size) {
} else {
/* ... or stream is broken. Normally this should be caught by
   BrotliDecoderDecompressStream, this is just a safeguard. */
if ((int)status < 0) SaveErrorCode(s, status);
if ((int)status < 0) SaveErrorCode(s, status, 0);
*size = 0;
result = 0;
}
@@ -2776,10 +2830,19 @@ const char* BrotliDecoderErrorString(BrotliDecoderErrorCode c) {
}
}

uint32_t BrotliDecoderVersion() {
uint32_t BrotliDecoderVersion(void) {
return BROTLI_VERSION;
}

void BrotliDecoderSetMetadataCallbacks(
    BrotliDecoderState* state,
    brotli_decoder_metadata_start_func start_func,
    brotli_decoder_metadata_chunk_func chunk_func, void* opaque) {
state->metadata_start_func = start_func;
state->metadata_chunk_func = chunk_func;
state->metadata_callback_opaque = opaque;
}

/* Escalate internal functions visibility; for testing purposes only. */
#if defined(BROTLI_TEST)
BROTLI_BOOL SafeReadSymbolForTest(
9	thirdparty/brotli/dec/huffman.c	vendored
@@ -10,9 +10,10 @@

#include <string.h>  /* memcpy, memset */

#include <brotli/types.h>

#include "../common/constants.h"
#include "../common/platform.h"
#include <brotli/types.h>

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
@@ -117,11 +118,13 @@ void BrotliBuildCodeLengthsHuffmanTable(HuffmanCode* table,
int bits_count;
BROTLI_DCHECK(BROTLI_HUFFMAN_MAX_CODE_LENGTH_CODE_LENGTH <=
    BROTLI_REVERSE_BITS_MAX);
BROTLI_DCHECK(BROTLI_HUFFMAN_MAX_CODE_LENGTH_CODE_LENGTH == 5);

/* Generate offsets into sorted symbol table by code length. */
symbol = -1;
bits = 1;
BROTLI_REPEAT(BROTLI_HUFFMAN_MAX_CODE_LENGTH_CODE_LENGTH, {
/* BROTLI_HUFFMAN_MAX_CODE_LENGTH_CODE_LENGTH == 5 */
BROTLI_REPEAT_5({
symbol += count[bits];
offset[bits] = symbol;
bits++;
@@ -132,7 +135,7 @@ void BrotliBuildCodeLengthsHuffmanTable(HuffmanCode* table,
/* Sort symbols by length, by symbol order within each length. */
symbol = BROTLI_CODE_LENGTH_CODES;
do {
BROTLI_REPEAT(6, {
BROTLI_REPEAT_6({
symbol--;
sorted[offset[code_lengths[symbol]]--] = symbol;
});
3	thirdparty/brotli/dec/huffman.h	vendored
@@ -9,9 +9,10 @@
#ifndef BROTLI_DEC_HUFFMAN_H_
#define BROTLI_DEC_HUFFMAN_H_

#include "../common/platform.h"
#include <brotli/types.h>

#include "../common/platform.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
3	thirdparty/brotli/dec/prefix.h	vendored
@@ -10,9 +10,10 @@
#ifndef BROTLI_DEC_PREFIX_H_
#define BROTLI_DEC_PREFIX_H_

#include "../common/constants.h"
#include <brotli/types.h>

#include "../common/constants.h"

typedef struct CmdLutElement {
uint8_t insert_len_extra_bits;
uint8_t copy_len_extra_bits;
20	thirdparty/brotli/dec/state.c	vendored
@@ -8,8 +8,9 @@

#include <stdlib.h>  /* free, malloc */

#include "../common/dictionary.h"
#include <brotli/types.h>

#include "../common/dictionary.h"
#include "huffman.h"

#if defined(__cplusplus) || defined(c_plusplus)
@@ -43,6 +44,7 @@ BROTLI_BOOL BrotliDecoderStateInit(BrotliDecoderState* s,
s->pos = 0;
s->rb_roundtrips = 0;
s->partial_pos_out = 0;
s->used_input = 0;

s->block_type_trees = NULL;
s->block_len_trees = NULL;
@@ -87,6 +89,10 @@ BROTLI_BOOL BrotliDecoderStateInit(BrotliDecoderState* s,
    BrotliSharedDictionaryCreateInstance(alloc_func, free_func, opaque);
if (!s->dictionary) return BROTLI_FALSE;

s->metadata_start_func = NULL;
s->metadata_chunk_func = NULL;
s->metadata_callback_opaque = 0;

return BROTLI_TRUE;
}

@@ -129,9 +135,21 @@ void BrotliDecoderStateCleanupAfterMetablock(BrotliDecoderState* s) {
BROTLI_DECODER_FREE(s, s->distance_hgroup.htrees);
}

#ifdef BROTLI_REPORTING
/* When BROTLI_REPORTING is defined extra reporting module have to be linked. */
void BrotliDecoderOnFinish(const BrotliDecoderState* s);
#define BROTLI_DECODER_ON_FINISH(s) BrotliDecoderOnFinish(s);
#else
#if !defined(BROTLI_DECODER_ON_FINISH)
#define BROTLI_DECODER_ON_FINISH(s) (void)(s);
#endif
#endif

void BrotliDecoderStateCleanup(BrotliDecoderState* s) {
BrotliDecoderStateCleanupAfterMetablock(s);

BROTLI_DECODER_ON_FINISH(s);

BROTLI_DECODER_FREE(s, s->compound_dictionary);
BrotliSharedDictionaryDestroyInstance(s->dictionary);
s->dictionary = NULL;
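The BROTLI_REPORTING hook above means a build can link an extra translation unit that defines BrotliDecoderOnFinish and gets called once per decoder teardown, with the new used_input counter available on the state. A hedged C sketch of such a module, assuming the decoder is compiled with -DBROTLI_REPORTING and that the include path to the internal dec/state.h matches your tree layout:

#include "state.h"   /* internal brotli header; adjust the path to your build */
#include <stdio.h>

/* Called from BrotliDecoderStateCleanup() when BROTLI_REPORTING is defined. */
void BrotliDecoderOnFinish(const BrotliDecoderState* s) {
    fprintf(stderr, "brotli: consumed %llu input bytes\n",
            (unsigned long long)s->used_input);
}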
13	thirdparty/brotli/dec/state.h	vendored
@@ -9,12 +9,14 @@
#ifndef BROTLI_DEC_STATE_H_
#define BROTLI_DEC_STATE_H_

#include <brotli/decode.h>
#include <brotli/shared_dictionary.h>
#include <brotli/types.h>

#include "../common/constants.h"
#include "../common/dictionary.h"
#include "../common/platform.h"
#include <brotli/shared_dictionary.h>
#include "../common/transform.h"
#include <brotli/types.h>
#include "bit_reader.h"
#include "huffman.h"
@@ -321,6 +323,13 @@ struct BrotliDecoderStateStruct {

/* Less used attributes are at the end of this struct. */

brotli_decoder_metadata_start_func metadata_start_func;
brotli_decoder_metadata_chunk_func metadata_chunk_func;
void* metadata_callback_opaque;

/* For reporting. */
uint64_t used_input;  /* how many bytes of input are consumed */

/* States inside function calls. */
BrotliRunningMetablockHeaderState substate_metablock_header;
BrotliRunningUncompressedState substate_uncompressed;
41	thirdparty/brotli/include/brotli/decode.h	vendored
@@ -361,6 +361,47 @@ BROTLI_DEC_API const char* BrotliDecoderErrorString(BrotliDecoderErrorCode c);
 */
BROTLI_DEC_API uint32_t BrotliDecoderVersion(void);

/**
 * Callback to fire on metadata block start.
 *
 * After this callback is fired, if @p size is not @c 0, it is followed by
 * ::brotli_decoder_metadata_chunk_func as more metadata block contents become
 * accessible.
 *
 * @param opaque callback handle
 * @param size size of metadata block
 */
typedef void (*brotli_decoder_metadata_start_func)(void* opaque, size_t size);

/**
 * Callback to fire on metadata block chunk becomes available.
 *
 * This function can be invoked multiple times per metadata block; block should
 * be considered finished when sum of @p size matches the announced metadata
 * block size. Chunks contents pointed by @p data are transient and shouln not
 * be accessed after leaving the callback.
 *
 * @param opaque callback handle
 * @param data pointer to metadata contents
 * @param size size of metadata block chunk, at least @c 1
 */
typedef void (*brotli_decoder_metadata_chunk_func)(void* opaque,
                                                   const uint8_t* data,
                                                   size_t size);

/**
 * Sets callback for receiving metadata blocks.
 *
 * @param state decoder instance
 * @param start_func callback on metadata block start
 * @param chunk_func callback on metadata block chunk
 * @param opaque callback handle
 */
BROTLI_DEC_API void BrotliDecoderSetMetadataCallbacks(
    BrotliDecoderState* state,
    brotli_decoder_metadata_start_func start_func,
    brotli_decoder_metadata_chunk_func chunk_func, void* opaque);

#if defined(__cplusplus) || defined(c_plusplus)
}  /* extern "C" */
#endif
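The new public API above (BrotliDecoderSetMetadataCallbacks plus the two callback typedefs) lets a caller observe metadata meta-blocks during a streaming decode instead of having them silently skipped. A hedged C sketch of a client using it; the input file name, buffer sizes, and minimal error handling are placeholders, while the brotli functions used here are the documented streaming API:

#include <brotli/decode.h>
#include <stdio.h>

static void on_meta_start(void *opaque, size_t size) {
    (void)opaque;
    fprintf(stderr, "metadata block announced: %zu bytes\n", size);
}

static void on_meta_chunk(void *opaque, const uint8_t *data, size_t size) {
    (void)opaque;
    (void)data;  /* transient: copy it out here if it must outlive the callback */
    fprintf(stderr, "  received %zu metadata bytes\n", size);
}

int main(void) {
    uint8_t in_buf[4096], out_buf[4096];
    FILE *f = fopen("input.br", "rb");   /* placeholder input stream */
    if (!f) return 1;

    BrotliDecoderState *s = BrotliDecoderCreateInstance(NULL, NULL, NULL);
    BrotliDecoderSetMetadataCallbacks(s, on_meta_start, on_meta_chunk, NULL);

    BrotliDecoderResult r = BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT;
    size_t avail_in = 0;
    const uint8_t *next_in = in_buf;
    while (r == BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT ||
           r == BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT) {
        if (r == BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT) {
            avail_in = fread(in_buf, 1, sizeof(in_buf), f);
            next_in = in_buf;
            if (avail_in == 0) break;  /* truncated stream */
        }
        size_t avail_out = sizeof(out_buf);
        uint8_t *next_out = out_buf;
        r = BrotliDecoderDecompressStream(s, &avail_in, &next_in,
                                          &avail_out, &next_out, NULL);
        /* decompressed payload is in out_buf[0 .. sizeof(out_buf) - avail_out) */
    }

    BrotliDecoderDestroyInstance(s);
    fclose(f);
    return r == BROTLI_DECODER_RESULT_SUCCESS ? 0 : 1;
}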
2	thirdparty/brotli/include/brotli/encode.h	vendored
@@ -453,7 +453,7 @@ BROTLI_ENC_API BROTLI_BOOL BrotliEncoderHasMoreOutput(
 *
 * This method is used to make language bindings easier and more efficient:
 *  -# push data to ::BrotliEncoderCompressStream,
 *     until ::BrotliEncoderHasMoreOutput returns BROTL_TRUE
 *     until ::BrotliEncoderHasMoreOutput returns BROTLI_TRUE
 *  -# use ::BrotliEncoderTakeOutput to peek bytes and copy to language-specific
 *     entity
 *
8	thirdparty/brotli/include/brotli/port.h	vendored
@@ -224,14 +224,6 @@
#define BROTLI_HAS_FEATURE(feature) (0)
#endif

#if defined(ADDRESS_SANITIZER) || BROTLI_HAS_FEATURE(address_sanitizer) || \
    defined(THREAD_SANITIZER) || BROTLI_HAS_FEATURE(thread_sanitizer) || \
    defined(MEMORY_SANITIZER) || BROTLI_HAS_FEATURE(memory_sanitizer)
#define BROTLI_SANITIZED 1
#else
#define BROTLI_SANITIZED 0
#endif

#if defined(_WIN32) || defined(__CYGWIN__)
#define BROTLI_PUBLIC
#elif BROTLI_GNUC_VERSION_CHECK(3, 3, 0) || \
4	thirdparty/libpng/LICENSE	vendored
@@ -4,8 +4,8 @@ COPYRIGHT NOTICE, DISCLAIMER, and LICENSE
PNG Reference Library License version 2
---------------------------------------

 * Copyright (c) 1995-2022 The PNG Reference Library Authors.
 * Copyright (c) 2018-2022 Cosmin Truta.
 * Copyright (c) 1995-2023 The PNG Reference Library Authors.
 * Copyright (c) 2018-2023 Cosmin Truta.
 * Copyright (c) 2000-2002, 2004, 2006-2018 Glenn Randers-Pehrson.
 * Copyright (c) 1996-1997 Andreas Dilger.
 * Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
8	thirdparty/libpng/png.c	vendored
@@ -1,7 +1,7 @@

/* png.c - location for general purpose libpng functions
 *
 * Copyright (c) 2018-2022 Cosmin Truta
 * Copyright (c) 2018-2023 Cosmin Truta
 * Copyright (c) 1998-2002,2004,2006-2018 Glenn Randers-Pehrson
 * Copyright (c) 1996-1997 Andreas Dilger
 * Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
@@ -14,7 +14,7 @@
#include "pngpriv.h"

/* Generate a compiler error if there is an old png.h in the search path. */
typedef png_libpng_version_1_6_39 Your_png_h_is_not_version_1_6_39;
typedef png_libpng_version_1_6_40 Your_png_h_is_not_version_1_6_40;

#ifdef __GNUC__
/* The version tests may need to be added to, but the problem warning has
@@ -815,8 +815,8 @@ png_get_copyright(png_const_structrp png_ptr)
return PNG_STRING_COPYRIGHT
#else
return PNG_STRING_NEWLINE \
   "libpng version 1.6.39" PNG_STRING_NEWLINE \
   "Copyright (c) 2018-2022 Cosmin Truta" PNG_STRING_NEWLINE \
   "libpng version 1.6.40" PNG_STRING_NEWLINE \
   "Copyright (c) 2018-2023 Cosmin Truta" PNG_STRING_NEWLINE \
   "Copyright (c) 1998-2002,2004,2006-2018 Glenn Randers-Pehrson" \
   PNG_STRING_NEWLINE \
   "Copyright (c) 1996-1997 Andreas Dilger" PNG_STRING_NEWLINE \
22	thirdparty/libpng/png.h	vendored
@@ -1,9 +1,9 @@

/* png.h - header file for PNG reference library
 *
 * libpng version 1.6.39 - November 20, 2022
 * libpng version 1.6.40
 *
 * Copyright (c) 2018-2022 Cosmin Truta
 * Copyright (c) 2018-2023 Cosmin Truta
 * Copyright (c) 1998-2002,2004,2006-2018 Glenn Randers-Pehrson
 * Copyright (c) 1996-1997 Andreas Dilger
 * Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
@@ -15,7 +15,7 @@
 * libpng versions 0.89, June 1996, through 0.96, May 1997: Andreas Dilger
 * libpng versions 0.97, January 1998, through 1.6.35, July 2018:
 *     Glenn Randers-Pehrson
 * libpng versions 1.6.36, December 2018, through 1.6.39, November 2022:
 * libpng versions 1.6.36, December 2018, through 1.6.40, June 2023:
 *     Cosmin Truta
 * See also "Contributing Authors", below.
 */
@@ -27,8 +27,8 @@
 * PNG Reference Library License version 2
 * ---------------------------------------
 *
 *  * Copyright (c) 1995-2022 The PNG Reference Library Authors.
 *  * Copyright (c) 2018-2022 Cosmin Truta.
 *  * Copyright (c) 1995-2023 The PNG Reference Library Authors.
 *  * Copyright (c) 2018-2023 Cosmin Truta.
 *  * Copyright (c) 2000-2002, 2004, 2006-2018 Glenn Randers-Pehrson.
 *  * Copyright (c) 1996-1997 Andreas Dilger.
 *  * Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
@@ -239,7 +239,7 @@
 *    ...
 *    1.5.30                  15    10530  15.so.15.30[.0]
 *    ...
 *    1.6.39                  16    10639  16.so.16.39[.0]
 *    1.6.40                  16    10640  16.so.16.40[.0]
 *
 *   Henceforth the source version will match the shared-library major and
 *   minor numbers; the shared-library major version number will be used for
@@ -278,8 +278,8 @@
 */

/* Version information for png.h - this should match the version in png.c */
#define PNG_LIBPNG_VER_STRING "1.6.39"
#define PNG_HEADER_VERSION_STRING " libpng version 1.6.39 - November 20, 2022\n"
#define PNG_LIBPNG_VER_STRING "1.6.40"
#define PNG_HEADER_VERSION_STRING " libpng version 1.6.40 - June 21, 2023\n"

#define PNG_LIBPNG_VER_SONUM   16
#define PNG_LIBPNG_VER_DLLNUM  16
@@ -287,7 +287,7 @@
/* These should match the first 3 components of PNG_LIBPNG_VER_STRING: */
#define PNG_LIBPNG_VER_MAJOR   1
#define PNG_LIBPNG_VER_MINOR   6
#define PNG_LIBPNG_VER_RELEASE 39
#define PNG_LIBPNG_VER_RELEASE 40

/* This should be zero for a public release, or non-zero for a
 * development version. [Deprecated]
@@ -318,7 +318,7 @@
 * From version 1.0.1 it is:
 * XXYYZZ, where XX=major, YY=minor, ZZ=release
 */
#define PNG_LIBPNG_VER 10639 /* 1.6.39 */
#define PNG_LIBPNG_VER 10640 /* 1.6.40 */

/* Library configuration: these options cannot be changed after
 * the library has been built.
@@ -428,7 +428,7 @@ extern "C" {
/* This triggers a compiler error in png.c, if png.c and png.h
 * do not agree upon the version number.
 */
typedef char* png_libpng_version_1_6_39;
typedef char* png_libpng_version_1_6_40;

/* Basic control structions.  Read libpng-manual.txt or libpng.3 for more info.
 *
2	thirdparty/libpng/pngconf.h	vendored
@@ -1,7 +1,7 @@

/* pngconf.h - machine-configurable file for libpng
 *
 * libpng version 1.6.39
 * libpng version 1.6.40
 *
 * Copyright (c) 2018-2022 Cosmin Truta
 * Copyright (c) 1998-2002,2004,2006-2016,2018 Glenn Randers-Pehrson
13	thirdparty/libpng/pngget.c	vendored
@@ -1,7 +1,7 @@

/* pngget.c - retrieval of values from info struct
 *
 * Copyright (c) 2018 Cosmin Truta
 * Copyright (c) 2018-2023 Cosmin Truta
 * Copyright (c) 1998-2002,2004,2006-2018 Glenn Randers-Pehrson
 * Copyright (c) 1996-1997 Andreas Dilger
 * Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
@@ -21,7 +21,18 @@ png_get_valid(png_const_structrp png_ptr, png_const_inforp info_ptr,
    png_uint_32 flag)
{
   if (png_ptr != NULL && info_ptr != NULL)
   {
#ifdef PNG_READ_tRNS_SUPPORTED
      /* png_handle_PLTE() may have canceled a valid tRNS chunk but left the
       * 'valid' flag for the detection of duplicate chunks. Do not report a
       * valid tRNS chunk in this case.
       */
      if (flag == PNG_INFO_tRNS && png_ptr->num_trans == 0)
         return(0);
#endif

      return(info_ptr->valid & flag);
   }

   return(0);
}
4 thirdparty/libpng/pnglibconf.h vendored
@@ -1,8 +1,8 @@
/* pnglibconf.h - library build configuration */

/* libpng version 1.6.39 */
/* libpng version 1.6.40 */

/* Copyright (c) 2018-2022 Cosmin Truta */
/* Copyright (c) 2018-2023 Cosmin Truta */
/* Copyright (c) 1998-2002,2004,2006-2018 Glenn Randers-Pehrson */

/* This code is released under the libpng license. */
6 thirdparty/libpng/pngpriv.h vendored
@@ -1,7 +1,7 @@

/* pngpriv.h - private declarations for use inside libpng
*
* Copyright (c) 2018-2022 Cosmin Truta
* Copyright (c) 2018-2023 Cosmin Truta
* Copyright (c) 1998-2002,2004,2006-2018 Glenn Randers-Pehrson
* Copyright (c) 1996-1997 Andreas Dilger
* Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
@@ -626,7 +626,7 @@
#define PNG_BACKGROUND_IS_GRAY 0x800U
#define PNG_HAVE_PNG_SIGNATURE 0x1000U
#define PNG_HAVE_CHUNK_AFTER_IDAT 0x2000U /* Have another chunk after IDAT */
/* 0x4000U (unused) */
#define PNG_WROTE_eXIf 0x4000U
#define PNG_IS_READ_STRUCT 0x8000U /* Else is a write struct */

/* Flags for the transformations the PNG library does on the image data */
@@ -1910,7 +1910,7 @@ PNG_INTERNAL_FUNCTION(void,png_ascii_from_fixed,(png_const_structrp png_ptr,
*/
#define PNG_FP_INVALID 512 /* Available for callers as a distinct value */

/* Result codes for the parser (boolean - true meants ok, false means
/* Result codes for the parser (boolean - true means ok, false means
* not ok yet.)
*/
#define PNG_FP_MAYBE 0 /* The number may be valid in the future */
62 thirdparty/libpng/pngset.c vendored
@@ -1,7 +1,7 @@

/* pngset.c - storage of image information into info struct
*
* Copyright (c) 2018-2022 Cosmin Truta
* Copyright (c) 2018-2023 Cosmin Truta
* Copyright (c) 1998-2018 Glenn Randers-Pehrson
* Copyright (c) 1996-1997 Andreas Dilger
* Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
@@ -137,46 +137,40 @@ png_set_cHRM_XYZ(png_const_structrp png_ptr, png_inforp info_ptr, double red_X,
#ifdef PNG_eXIf_SUPPORTED
void PNGAPI
png_set_eXIf(png_const_structrp png_ptr, png_inforp info_ptr,
png_bytep eXIf_buf)
png_bytep exif)
{
png_warning(png_ptr, "png_set_eXIf does not work; use png_set_eXIf_1");
PNG_UNUSED(info_ptr)
PNG_UNUSED(eXIf_buf)
PNG_UNUSED(exif)
}

void PNGAPI
png_set_eXIf_1(png_const_structrp png_ptr, png_inforp info_ptr,
png_uint_32 num_exif, png_bytep eXIf_buf)
png_uint_32 num_exif, png_bytep exif)
{
int i;
png_bytep new_exif;

png_debug1(1, "in %s storage function", "eXIf");

if (png_ptr == NULL || info_ptr == NULL)
if (png_ptr == NULL || info_ptr == NULL ||
(png_ptr->mode & PNG_WROTE_eXIf) != 0)
return;

if (info_ptr->exif)
{
png_free(png_ptr, info_ptr->exif);
info_ptr->exif = NULL;
}
new_exif = png_voidcast(png_bytep, png_malloc_warn(png_ptr, num_exif));

info_ptr->num_exif = num_exif;

info_ptr->exif = png_voidcast(png_bytep, png_malloc_warn(png_ptr,
info_ptr->num_exif));

if (info_ptr->exif == NULL)
if (new_exif == NULL)
{
png_warning(png_ptr, "Insufficient memory for eXIf chunk data");
return;
}

memcpy(new_exif, exif, (size_t)num_exif);

png_free_data(png_ptr, info_ptr, PNG_FREE_EXIF, 0);

info_ptr->num_exif = num_exif;
info_ptr->exif = new_exif;
info_ptr->free_me |= PNG_FREE_EXIF;

for (i = 0; i < (int) info_ptr->num_exif; i++)
info_ptr->exif[i] = eXIf_buf[i];

info_ptr->valid |= PNG_INFO_eXIf;
}
#endif /* eXIf */
@@ -237,15 +231,13 @@ png_set_hIST(png_const_structrp png_ptr, png_inforp info_ptr,
if (info_ptr->hist == NULL)
{
png_warning(png_ptr, "Insufficient memory for hIST chunk data");

return;
}

info_ptr->free_me |= PNG_FREE_HIST;

for (i = 0; i < info_ptr->num_palette; i++)
info_ptr->hist[i] = hist[i];

info_ptr->free_me |= PNG_FREE_HIST;
info_ptr->valid |= PNG_INFO_hIST;
}
#endif
@@ -367,6 +359,8 @@ png_set_pCAL(png_const_structrp png_ptr, png_inforp info_ptr,

memcpy(info_ptr->pcal_purpose, purpose, length);

info_ptr->free_me |= PNG_FREE_PCAL;

png_debug(3, "storing X0, X1, type, and nparams in info");
info_ptr->pcal_X0 = X0;
info_ptr->pcal_X1 = X1;
@@ -383,7 +377,6 @@ png_set_pCAL(png_const_structrp png_ptr, png_inforp info_ptr,
if (info_ptr->pcal_units == NULL)
{
png_warning(png_ptr, "Insufficient memory for pCAL units");

return;
}

@@ -395,7 +388,6 @@ png_set_pCAL(png_const_structrp png_ptr, png_inforp info_ptr,
if (info_ptr->pcal_params == NULL)
{
png_warning(png_ptr, "Insufficient memory for pCAL params");

return;
}

@@ -413,7 +405,6 @@ png_set_pCAL(png_const_structrp png_ptr, png_inforp info_ptr,
if (info_ptr->pcal_params[i] == NULL)
{
png_warning(png_ptr, "Insufficient memory for pCAL parameter");

return;
}

@@ -421,7 +412,6 @@ png_set_pCAL(png_const_structrp png_ptr, png_inforp info_ptr,
}

info_ptr->valid |= PNG_INFO_pCAL;
info_ptr->free_me |= PNG_FREE_PCAL;
}
#endif

@@ -478,18 +468,17 @@ png_set_sCAL_s(png_const_structrp png_ptr, png_inforp info_ptr,

if (info_ptr->scal_s_height == NULL)
{
png_free (png_ptr, info_ptr->scal_s_width);
png_free(png_ptr, info_ptr->scal_s_width);
info_ptr->scal_s_width = NULL;

png_warning(png_ptr, "Memory allocation failed while processing sCAL");

return;
}

memcpy(info_ptr->scal_s_height, sheight, lengthh);

info_ptr->valid |= PNG_INFO_sCAL;
info_ptr->free_me |= PNG_FREE_SCAL;
info_ptr->valid |= PNG_INFO_sCAL;
}

# ifdef PNG_FLOATING_POINT_SUPPORTED
@@ -625,11 +614,10 @@ png_set_PLTE(png_structrp png_ptr, png_inforp info_ptr,
if (num_palette > 0)
memcpy(png_ptr->palette, palette, (unsigned int)num_palette *
(sizeof (png_color)));

info_ptr->palette = png_ptr->palette;
info_ptr->num_palette = png_ptr->num_palette = (png_uint_16)num_palette;

info_ptr->free_me |= PNG_FREE_PLTE;

info_ptr->valid |= PNG_INFO_PLTE;
}

@@ -1020,8 +1008,8 @@ png_set_tRNS(png_structrp png_ptr, png_inforp info_ptr,
png_malloc(png_ptr, PNG_MAX_PALETTE_LENGTH));
memcpy(info_ptr->trans_alpha, trans_alpha, (size_t)num_trans);

info_ptr->valid |= PNG_INFO_tRNS;
info_ptr->free_me |= PNG_FREE_TRNS;
info_ptr->valid |= PNG_INFO_tRNS;
}
png_ptr->trans_alpha = info_ptr->trans_alpha;
}
@@ -1054,8 +1042,8 @@ png_set_tRNS(png_structrp png_ptr, png_inforp info_ptr,

if (num_trans != 0)
{
info_ptr->valid |= PNG_INFO_tRNS;
info_ptr->free_me |= PNG_FREE_TRNS;
info_ptr->valid |= PNG_INFO_tRNS;
}
}
#endif
@@ -1089,11 +1077,11 @@ png_set_sPLT(png_const_structrp png_ptr,
{
/* Out of memory or too many chunks */
png_chunk_report(png_ptr, "too many sPLT chunks", PNG_CHUNK_WRITE_ERROR);

return;
}

png_free(png_ptr, info_ptr->splt_palettes);

info_ptr->splt_palettes = np;
info_ptr->free_me |= PNG_FREE_SPLT;

@@ -1247,11 +1235,11 @@ png_set_unknown_chunks(png_const_structrp png_ptr,
{
png_chunk_report(png_ptr, "too many unknown chunks",
PNG_CHUNK_WRITE_ERROR);

return;
}

png_free(png_ptr, info_ptr->unknown_chunks);

info_ptr->unknown_chunks = np; /* safe because it is initialized */
info_ptr->free_me |= PNG_FREE_UNKN;
10 thirdparty/libpng/pngwrite.c vendored
@@ -1,7 +1,7 @@

/* pngwrite.c - general routines to write a PNG file
*
* Copyright (c) 2018-2022 Cosmin Truta
* Copyright (c) 2018-2023 Cosmin Truta
* Copyright (c) 1998-2002,2004,2006-2018 Glenn Randers-Pehrson
* Copyright (c) 1996-1997 Andreas Dilger
* Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
@@ -239,7 +239,10 @@ png_write_info(png_structrp png_ptr, png_const_inforp info_ptr)

#ifdef PNG_WRITE_eXIf_SUPPORTED
if ((info_ptr->valid & PNG_INFO_eXIf) != 0)
{
png_write_eXIf(png_ptr, info_ptr->exif, info_ptr->num_exif);
png_ptr->mode |= PNG_WROTE_eXIf;
}
#endif

#ifdef PNG_WRITE_hIST_SUPPORTED
@@ -439,8 +442,9 @@ png_write_end(png_structrp png_ptr, png_inforp info_ptr)
#endif

#ifdef PNG_WRITE_eXIf_SUPPORTED
if ((info_ptr->valid & PNG_INFO_eXIf) != 0)
png_write_eXIf(png_ptr, info_ptr->exif, info_ptr->num_exif);
if ((info_ptr->valid & PNG_INFO_eXIf) != 0 &&
(png_ptr->mode & PNG_WROTE_eXIf) == 0)
png_write_eXIf(png_ptr, info_ptr->exif, info_ptr->num_exif);
#endif

#ifdef PNG_WRITE_UNKNOWN_CHUNKS_SUPPORTED
2 thirdparty/miniupnpc/LICENSE vendored
@@ -1,6 +1,6 @@
BSD 3-Clause License

Copyright (c) 2005-2022, Thomas BERNARD
Copyright (c) 2005-2023, Thomas BERNARD
All rights reserved.

Redistribution and use in source and binary forms, with or without
4 thirdparty/miniupnpc/include/miniupnpc.h vendored
@@ -1,4 +1,4 @@
/* $Id: miniupnpc.h,v 1.61 2022/10/21 21:15:02 nanard Exp $ */
/* $Id: miniupnpc.h,v 1.62 2023/06/11 23:25:46 nanard Exp $ */
/* vim: tabstop=4 shiftwidth=4 noexpandtab
* Project: miniupnp
* http://miniupnp.free.fr/ or https://miniupnp.tuxfamily.org/
@@ -20,7 +20,7 @@
#define UPNPDISCOVER_MEMORY_ERROR (-102)

/* versions : */
#define MINIUPNPC_VERSION "2.2.4"
#define MINIUPNPC_VERSION "2.2.5"
#define MINIUPNPC_API_VERSION 17

/* Source port:
2 thirdparty/miniupnpc/src/miniupnpcstrings.h vendored
@@ -4,7 +4,7 @@
#include "core/version.h"

#define OS_STRING VERSION_NAME "/1.0"
#define MINIUPNPC_VERSION_STRING "2.2.4"
#define MINIUPNPC_VERSION_STRING "2.2.5"

#if 0
/* according to "UPnP Device Architecture 1.0" */
12 thirdparty/pcre2/src/config.h vendored
@@ -236,7 +236,7 @@ sure both macros are undefined; an emulation function will then be used. */
#define PACKAGE_NAME "PCRE2"

/* Define to the full name and version of this package. */
#define PACKAGE_STRING "PCRE2 10.40"
#define PACKAGE_STRING "PCRE2 10.42"

/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "pcre2"
@@ -245,7 +245,7 @@ sure both macros are undefined; an emulation function will then be used. */
#define PACKAGE_URL ""

/* Define to the version of this package. */
#define PACKAGE_VERSION "10.40"
#define PACKAGE_VERSION "10.42"

/* The value of PARENS_NEST_LIMIT specifies the maximum depth of nested
parentheses (of any kind) in a pattern. This limits the amount of system
@@ -438,7 +438,13 @@ sure both macros are undefined; an emulation function will then be used. */
#endif

/* Version number of package */
#define VERSION "10.40"
#define VERSION "10.42"

/* Number of bits in a file offset, on hosts where this is settable. */
/* #undef _FILE_OFFSET_BITS */

/* Define for large files, on AIX-style hosts. */
/* #undef _LARGE_FILES */

/* Define to empty if `const' does not conform to ANSI C. */
/* #undef const */
68 thirdparty/pcre2/src/pcre2.h vendored
@@ -42,9 +42,9 @@ POSSIBILITY OF SUCH DAMAGE.
/* The current PCRE version information. */

#define PCRE2_MAJOR 10
#define PCRE2_MINOR 40
#define PCRE2_MINOR 42
#define PCRE2_PRERELEASE
#define PCRE2_DATE 2022-04-14
#define PCRE2_DATE 2022-12-11

/* When an application links to a PCRE DLL in Windows, the symbols that are
imported have to be identified as such. When building PCRE2, the appropriate
@@ -572,19 +572,19 @@ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION pcre2_config(uint32_t, void *);
/* Functions for manipulating contexts. */

#define PCRE2_GENERAL_CONTEXT_FUNCTIONS \
PCRE2_EXP_DECL pcre2_general_context PCRE2_CALL_CONVENTION \
*pcre2_general_context_copy(pcre2_general_context *); \
PCRE2_EXP_DECL pcre2_general_context PCRE2_CALL_CONVENTION \
*pcre2_general_context_create(void *(*)(PCRE2_SIZE, void *), \
PCRE2_EXP_DECL pcre2_general_context *PCRE2_CALL_CONVENTION \
pcre2_general_context_copy(pcre2_general_context *); \
PCRE2_EXP_DECL pcre2_general_context *PCRE2_CALL_CONVENTION \
pcre2_general_context_create(void *(*)(PCRE2_SIZE, void *), \
void (*)(void *, void *), void *); \
PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \
pcre2_general_context_free(pcre2_general_context *);

#define PCRE2_COMPILE_CONTEXT_FUNCTIONS \
PCRE2_EXP_DECL pcre2_compile_context PCRE2_CALL_CONVENTION \
*pcre2_compile_context_copy(pcre2_compile_context *); \
PCRE2_EXP_DECL pcre2_compile_context PCRE2_CALL_CONVENTION \
*pcre2_compile_context_create(pcre2_general_context *);\
PCRE2_EXP_DECL pcre2_compile_context *PCRE2_CALL_CONVENTION \
pcre2_compile_context_copy(pcre2_compile_context *); \
PCRE2_EXP_DECL pcre2_compile_context *PCRE2_CALL_CONVENTION \
pcre2_compile_context_create(pcre2_general_context *);\
PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \
pcre2_compile_context_free(pcre2_compile_context *); \
PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \
@@ -604,10 +604,10 @@ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \
int (*)(uint32_t, void *), void *);

#define PCRE2_MATCH_CONTEXT_FUNCTIONS \
PCRE2_EXP_DECL pcre2_match_context PCRE2_CALL_CONVENTION \
*pcre2_match_context_copy(pcre2_match_context *); \
PCRE2_EXP_DECL pcre2_match_context PCRE2_CALL_CONVENTION \
*pcre2_match_context_create(pcre2_general_context *); \
PCRE2_EXP_DECL pcre2_match_context *PCRE2_CALL_CONVENTION \
pcre2_match_context_copy(pcre2_match_context *); \
PCRE2_EXP_DECL pcre2_match_context *PCRE2_CALL_CONVENTION \
pcre2_match_context_create(pcre2_general_context *); \
PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \
pcre2_match_context_free(pcre2_match_context *); \
PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \
@@ -631,10 +631,10 @@ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \
void *(*)(PCRE2_SIZE, void *), void (*)(void *, void *), void *);

#define PCRE2_CONVERT_CONTEXT_FUNCTIONS \
PCRE2_EXP_DECL pcre2_convert_context PCRE2_CALL_CONVENTION \
*pcre2_convert_context_copy(pcre2_convert_context *); \
PCRE2_EXP_DECL pcre2_convert_context PCRE2_CALL_CONVENTION \
*pcre2_convert_context_create(pcre2_general_context *); \
PCRE2_EXP_DECL pcre2_convert_context *PCRE2_CALL_CONVENTION \
pcre2_convert_context_copy(pcre2_convert_context *); \
PCRE2_EXP_DECL pcre2_convert_context *PCRE2_CALL_CONVENTION \
pcre2_convert_context_create(pcre2_general_context *); \
PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \
pcre2_convert_context_free(pcre2_convert_context *); \
PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \
@@ -646,15 +646,15 @@ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \
/* Functions concerned with compiling a pattern to PCRE internal code. */

#define PCRE2_COMPILE_FUNCTIONS \
PCRE2_EXP_DECL pcre2_code PCRE2_CALL_CONVENTION \
*pcre2_compile(PCRE2_SPTR, PCRE2_SIZE, uint32_t, int *, PCRE2_SIZE *, \
PCRE2_EXP_DECL pcre2_code *PCRE2_CALL_CONVENTION \
pcre2_compile(PCRE2_SPTR, PCRE2_SIZE, uint32_t, int *, PCRE2_SIZE *, \
pcre2_compile_context *); \
PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \
pcre2_code_free(pcre2_code *); \
PCRE2_EXP_DECL pcre2_code PCRE2_CALL_CONVENTION \
*pcre2_code_copy(const pcre2_code *); \
PCRE2_EXP_DECL pcre2_code PCRE2_CALL_CONVENTION \
*pcre2_code_copy_with_tables(const pcre2_code *);
PCRE2_EXP_DECL pcre2_code *PCRE2_CALL_CONVENTION \
pcre2_code_copy(const pcre2_code *); \
PCRE2_EXP_DECL pcre2_code *PCRE2_CALL_CONVENTION \
pcre2_code_copy_with_tables(const pcre2_code *);


/* Functions that give information about a compiled pattern. */
@@ -670,10 +670,10 @@ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \
/* Functions for running a match and inspecting the result. */

#define PCRE2_MATCH_FUNCTIONS \
PCRE2_EXP_DECL pcre2_match_data PCRE2_CALL_CONVENTION \
*pcre2_match_data_create(uint32_t, pcre2_general_context *); \
PCRE2_EXP_DECL pcre2_match_data PCRE2_CALL_CONVENTION \
*pcre2_match_data_create_from_pattern(const pcre2_code *, \
PCRE2_EXP_DECL pcre2_match_data *PCRE2_CALL_CONVENTION \
pcre2_match_data_create(uint32_t, pcre2_general_context *); \
PCRE2_EXP_DECL pcre2_match_data *PCRE2_CALL_CONVENTION \
pcre2_match_data_create_from_pattern(const pcre2_code *, \
pcre2_general_context *); \
PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \
pcre2_dfa_match(const pcre2_code *, PCRE2_SPTR, PCRE2_SIZE, PCRE2_SIZE, \
@@ -689,8 +689,8 @@ PCRE2_EXP_DECL PCRE2_SIZE PCRE2_CALL_CONVENTION \
pcre2_get_match_data_size(pcre2_match_data *); \
PCRE2_EXP_DECL uint32_t PCRE2_CALL_CONVENTION \
pcre2_get_ovector_count(pcre2_match_data *); \
PCRE2_EXP_DECL PCRE2_SIZE PCRE2_CALL_CONVENTION \
*pcre2_get_ovector_pointer(pcre2_match_data *); \
PCRE2_EXP_DECL PCRE2_SIZE *PCRE2_CALL_CONVENTION \
pcre2_get_ovector_pointer(pcre2_match_data *); \
PCRE2_EXP_DECL PCRE2_SIZE PCRE2_CALL_CONVENTION \
pcre2_get_startchar(pcre2_match_data *);

@@ -770,8 +770,8 @@ PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \
uint32_t, pcre2_match_data *, pcre2_match_context *); \
PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \
pcre2_jit_free_unused_memory(pcre2_general_context *); \
PCRE2_EXP_DECL pcre2_jit_stack PCRE2_CALL_CONVENTION \
*pcre2_jit_stack_create(PCRE2_SIZE, PCRE2_SIZE, pcre2_general_context *); \
PCRE2_EXP_DECL pcre2_jit_stack *PCRE2_CALL_CONVENTION \
pcre2_jit_stack_create(PCRE2_SIZE, PCRE2_SIZE, pcre2_general_context *); \
PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \
pcre2_jit_stack_assign(pcre2_match_context *, pcre2_jit_callback, void *); \
PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \
@@ -783,8 +783,8 @@ PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \
#define PCRE2_OTHER_FUNCTIONS \
PCRE2_EXP_DECL int PCRE2_CALL_CONVENTION \
pcre2_get_error_message(int, PCRE2_UCHAR *, PCRE2_SIZE); \
PCRE2_EXP_DECL const uint8_t PCRE2_CALL_CONVENTION \
*pcre2_maketables(pcre2_general_context *); \
PCRE2_EXP_DECL const uint8_t *PCRE2_CALL_CONVENTION \
pcre2_maketables(pcre2_general_context *); \
PCRE2_EXP_DECL void PCRE2_CALL_CONVENTION \
pcre2_maketables_free(pcre2_general_context *, const uint8_t *);
12 thirdparty/pcre2/src/pcre2_compile.c vendored
@@ -1266,8 +1266,10 @@ PCRE2_SIZE* ref_count;

if (code != NULL)
{
#ifdef SUPPORT_JIT
if (code->executable_jit != NULL)
PRIV(jit_free)(code->executable_jit, &code->memctl);
#endif

if ((code->flags & PCRE2_DEREF_TABLES) != 0)
{
@@ -2687,7 +2689,7 @@ if ((options & PCRE2_EXTENDED_MORE) != 0) options |= PCRE2_EXTENDED;
while (ptr < ptrend)
{
int prev_expect_cond_assert;
uint32_t min_repeat, max_repeat;
uint32_t min_repeat = 0, max_repeat = 0;
uint32_t set, unset, *optset;
uint32_t terminator;
uint32_t prev_meta_quantifier;
@@ -8552,7 +8554,7 @@ do {
op == OP_SCBRA || op == OP_SCBRAPOS)
{
int n = GET2(scode, 1+LINK_SIZE);
int new_map = bracket_map | ((n < 32)? (1u << n) : 1);
unsigned int new_map = bracket_map | ((n < 32)? (1u << n) : 1);
if (!is_startline(scode, new_map, cb, atomcount, inassert)) return FALSE;
}

@@ -10620,4 +10622,10 @@ re = NULL;
goto EXIT;
}

/* These #undefs are here to enable unity builds with CMake. */

#undef NLBLOCK /* Block containing newline information */
#undef PSSTART /* Field containing processed string start */
#undef PSEND /* Field containing processed string end */

/* End of pcre2_compile.c */
12 thirdparty/pcre2/src/pcre2_context.c vendored
@@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language.

Written by Philip Hazel
Original API code Copyright (c) 1997-2012 University of Cambridge
New API code Copyright (c) 2016-2018 University of Cambridge
New API code Copyright (c) 2016-2022 University of Cambridge

-----------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
@@ -443,8 +443,11 @@ mcontext->offset_limit = limit;
return 0;
}

/* This function became obsolete at release 10.30. It is kept as a synonym for
backwards compatibility. */
/* These functions became obsolete at release 10.30. The first is kept as a
synonym for backwards compatibility. The second now does nothing. Exclude both
from coverage reports. */

/* LCOV_EXCL_START */

PCRE2_EXP_DEFN int PCRE2_CALL_CONVENTION
pcre2_set_recursion_limit(pcre2_match_context *mcontext, uint32_t limit)
@@ -464,6 +467,9 @@ pcre2_set_recursion_memory_management(pcre2_match_context *mcontext,
return 0;
}

/* LCOV_EXCL_STOP */


/* ------------ Convert context ------------ */

PCRE2_EXP_DEFN int PCRE2_CALL_CONVENTION
5 thirdparty/pcre2/src/pcre2_convert.c vendored
@@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language.

Written by Philip Hazel
Original API code Copyright (c) 1997-2012 University of Cambridge
New API code Copyright (c) 2016-2018 University of Cambridge
New API code Copyright (c) 2016-2022 University of Cambridge

-----------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
@@ -65,9 +65,8 @@ POSSIBILITY OF SUCH DAMAGE.
#define STR_QUERY_s STR_LEFT_PARENTHESIS STR_QUESTION_MARK STR_s STR_RIGHT_PARENTHESIS
#define STR_STAR_NUL STR_LEFT_PARENTHESIS STR_ASTERISK STR_N STR_U STR_L STR_RIGHT_PARENTHESIS

/* States for range and POSIX processing */
/* States for POSIX processing */

enum { RANGE_NOT_STARTED, RANGE_STARTING, RANGE_STARTED };
enum { POSIX_START_REGEX, POSIX_ANCHORED, POSIX_NOT_BRACKET,
POSIX_CLASS_NOT_STARTED, POSIX_CLASS_STARTING, POSIX_CLASS_STARTED };
12 thirdparty/pcre2/src/pcre2_dfa_match.c vendored
@@ -350,7 +350,7 @@ Returns: the return from the callout
*/

static int
do_callout(PCRE2_SPTR code, PCRE2_SIZE *offsets, PCRE2_SPTR current_subject,
do_callout_dfa(PCRE2_SPTR code, PCRE2_SIZE *offsets, PCRE2_SPTR current_subject,
PCRE2_SPTR ptr, dfa_match_block *mb, PCRE2_SIZE extracode,
PCRE2_SIZE *lengthptr)
{
@@ -2799,7 +2799,7 @@ for (;;)
|| code[LINK_SIZE + 1] == OP_CALLOUT_STR)
{
PCRE2_SIZE callout_length;
rrc = do_callout(code, offsets, current_subject, ptr, mb,
rrc = do_callout_dfa(code, offsets, current_subject, ptr, mb,
1 + LINK_SIZE, &callout_length);
if (rrc < 0) return rrc; /* Abandon */
if (rrc > 0) break; /* Fail this thread */
@@ -3196,7 +3196,7 @@ for (;;)
case OP_CALLOUT_STR:
{
PCRE2_SIZE callout_length;
rrc = do_callout(code, offsets, current_subject, ptr, mb, 0,
rrc = do_callout_dfa(code, offsets, current_subject, ptr, mb, 0,
&callout_length);
if (rrc < 0) return rrc; /* Abandon */
if (rrc == 0)
@@ -4057,4 +4057,10 @@ while (rws->next != NULL)
return rc;
}

/* These #undefs are here to enable unity builds with CMake. */

#undef NLBLOCK /* Block containing newline information */
#undef PSSTART /* Field containing processed string start */
#undef PSEND /* Field containing processed string end */

/* End of pcre2_dfa_match.c */
17 thirdparty/pcre2/src/pcre2_internal.h vendored
@@ -220,18 +220,17 @@ not rely on this. */

#define COMPILE_ERROR_BASE 100

/* The initial frames vector for remembering backtracking points in
pcre2_match() is allocated on the system stack, of this size (bytes). The size
must be a multiple of sizeof(PCRE2_SPTR) in all environments, so making it a
multiple of 8 is best. Typical frame sizes are a few hundred bytes (it depends
on the number of capturing parentheses) so 20KiB handles quite a few frames. A
larger vector on the heap is obtained for patterns that need more frames. The
maximum size of this can be limited. */
/* The initial frames vector for remembering pcre2_match() backtracking points
is allocated on the heap, of this size (bytes) or ten times the frame size if
larger, unless the heap limit is smaller. Typical frame sizes are a few hundred
bytes (it depends on the number of capturing parentheses) so 20KiB handles
quite a few frames. A larger vector on the heap is obtained for matches that
need more frames, subject to the heap limit. */

#define START_FRAMES_SIZE 20480

/* Similarly, for DFA matching, an initial internal workspace vector is
allocated on the stack. */
/* For DFA matching, an initial internal workspace vector is allocated on the
stack. The heap is used only if this turns out to be too small. */

#define DFA_START_RWS_SIZE 30720
34 thirdparty/pcre2/src/pcre2_intmodedep.h vendored
@@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language.

Written by Philip Hazel
Original API code Copyright (c) 1997-2012 University of Cambridge
New API code Copyright (c) 2016-2018 University of Cambridge
New API code Copyright (c) 2016-2022 University of Cambridge

-----------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
@@ -649,19 +649,23 @@ the size varies from call to call. As the maximum number of capturing
subpatterns is 65535 we must allow for 65536 strings to include the overall
match. (See also the heapframe structure below.) */

struct heapframe; /* Forward reference */

typedef struct pcre2_real_match_data {
pcre2_memctl memctl;
const pcre2_real_code *code; /* The pattern used for the match */
PCRE2_SPTR subject; /* The subject that was matched */
PCRE2_SPTR mark; /* Pointer to last mark */
PCRE2_SIZE leftchar; /* Offset to leftmost code unit */
PCRE2_SIZE rightchar; /* Offset to rightmost code unit */
PCRE2_SIZE startchar; /* Offset to starting code unit */
uint8_t matchedby; /* Type of match (normal, JIT, DFA) */
uint8_t flags; /* Various flags */
uint16_t oveccount; /* Number of pairs */
int rc; /* The return code from the match */
PCRE2_SIZE ovector[131072]; /* Must be last in the structure */
pcre2_memctl memctl; /* Memory control fields */
const pcre2_real_code *code; /* The pattern used for the match */
PCRE2_SPTR subject; /* The subject that was matched */
PCRE2_SPTR mark; /* Pointer to last mark */
struct heapframe *heapframes; /* Backtracking frames heap memory */
PCRE2_SIZE heapframes_size; /* Malloc-ed size */
PCRE2_SIZE leftchar; /* Offset to leftmost code unit */
PCRE2_SIZE rightchar; /* Offset to rightmost code unit */
PCRE2_SIZE startchar; /* Offset to starting code unit */
uint8_t matchedby; /* Type of match (normal, JIT, DFA) */
uint8_t flags; /* Various flags */
uint16_t oveccount; /* Number of pairs */
int rc; /* The return code from the match */
PCRE2_SIZE ovector[131072]; /* Must be last in the structure */
} pcre2_real_match_data;


@@ -854,10 +858,6 @@ doing traditional NFA matching (pcre2_match() and friends). */

typedef struct match_block {
pcre2_memctl memctl; /* For general use */
PCRE2_SIZE frame_vector_size; /* Size of a backtracking frame */
heapframe *match_frames; /* Points to vector of frames */
heapframe *match_frames_top; /* Points after the end of the vector */
heapframe *stack_frames; /* The original vector on the stack */
PCRE2_SIZE heap_limit; /* As it says */
uint32_t match_limit; /* As it says */
uint32_t match_limit_depth; /* As it says */
156 thirdparty/pcre2/src/pcre2_jit_compile.c vendored
@ -542,7 +542,7 @@ typedef struct compare_context {
|
|||
#undef CMP
|
||||
|
||||
/* Used for accessing the elements of the stack. */
|
||||
#define STACK(i) ((i) * (int)sizeof(sljit_sw))
|
||||
#define STACK(i) ((i) * SSIZE_OF(sw))
|
||||
|
||||
#ifdef SLJIT_PREF_SHIFT_REG
|
||||
#if SLJIT_PREF_SHIFT_REG == SLJIT_R2
|
||||
|
@ -590,8 +590,8 @@ to characters. The vector data is divided into two groups: the first
|
|||
group contains the start / end character pointers, and the second is
|
||||
the start pointers when the end of the capturing group has not yet reached. */
|
||||
#define OVECTOR_START (common->ovector_start)
|
||||
#define OVECTOR(i) (OVECTOR_START + (i) * (sljit_sw)sizeof(sljit_sw))
|
||||
#define OVECTOR_PRIV(i) (common->cbra_ptr + (i) * (sljit_sw)sizeof(sljit_sw))
|
||||
#define OVECTOR(i) (OVECTOR_START + (i) * SSIZE_OF(sw))
|
||||
#define OVECTOR_PRIV(i) (common->cbra_ptr + (i) * SSIZE_OF(sw))
|
||||
#define PRIVATE_DATA(cc) (common->private_data_ptrs[(cc) - common->start])
|
||||
|
||||
#if PCRE2_CODE_UNIT_WIDTH == 8
|
||||
|
@ -2151,9 +2151,9 @@ while (cc < ccend)
|
|||
{
|
||||
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0));
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -OVECTOR(0));
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
setsom_found = TRUE;
|
||||
}
|
||||
cc += 1;
|
||||
|
@ -2168,9 +2168,9 @@ while (cc < ccend)
|
|||
{
|
||||
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->mark_ptr);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->mark_ptr);
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
setmark_found = TRUE;
|
||||
}
|
||||
cc += 1 + 2 + cc[1];
|
||||
|
@ -2181,27 +2181,27 @@ while (cc < ccend)
|
|||
{
|
||||
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0));
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -OVECTOR(0));
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
setsom_found = TRUE;
|
||||
}
|
||||
if (common->mark_ptr != 0 && !setmark_found)
|
||||
{
|
||||
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->mark_ptr);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->mark_ptr);
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
setmark_found = TRUE;
|
||||
}
|
||||
if (common->capture_last_ptr != 0 && !capture_last_found)
|
||||
{
|
||||
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->capture_last_ptr);
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
capture_last_found = TRUE;
|
||||
}
|
||||
cc += 1 + LINK_SIZE;
|
||||
|
@ -2215,20 +2215,20 @@ while (cc < ccend)
|
|||
{
|
||||
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, -common->capture_last_ptr);
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
capture_last_found = TRUE;
|
||||
}
|
||||
offset = (GET2(cc, 1 + LINK_SIZE)) << 1;
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, SLJIT_IMM, OVECTOR(offset));
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset));
|
||||
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(offset + 1));
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0);
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP2, 0);
|
||||
stackpos -= (int)sizeof(sljit_sw);
|
||||
stackpos -= SSIZE_OF(sw);
|
||||
|
||||
cc += 1 + LINK_SIZE + IMM2_SIZE;
|
||||
break;
|
||||
|
@ -3144,7 +3144,7 @@ static SLJIT_INLINE void allocate_stack(compiler_common *common, int size)
|
|||
DEFINE_COMPILER;
|
||||
|
||||
SLJIT_ASSERT(size > 0);
|
||||
OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * sizeof(sljit_sw));
|
||||
OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * SSIZE_OF(sw));
|
||||
#ifdef DESTROY_REGISTERS
|
||||
OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 12345);
|
||||
OP1(SLJIT_MOV, TMP3, 0, TMP1, 0);
|
||||
|
@ -3160,7 +3160,7 @@ static SLJIT_INLINE void free_stack(compiler_common *common, int size)
|
|||
DEFINE_COMPILER;
|
||||
|
||||
SLJIT_ASSERT(size > 0);
|
||||
OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * sizeof(sljit_sw));
|
||||
OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * SSIZE_OF(sw));
|
||||
}
|
||||
|
||||
static sljit_uw * allocate_read_only_data(compiler_common *common, sljit_uw size)
|
||||
|
@ -3200,12 +3200,12 @@ if (length < 8)
|
|||
}
|
||||
else
|
||||
{
|
||||
if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw)) == SLJIT_SUCCESS)
|
||||
if (sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw)) == SLJIT_SUCCESS)
|
||||
{
|
||||
GET_LOCAL_BASE(SLJIT_R1, 0, OVECTOR_START);
|
||||
OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_IMM, length - 1);
|
||||
loop = LABEL();
|
||||
sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw));
|
||||
sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, SLJIT_R0, SLJIT_MEM1(SLJIT_R1), sizeof(sljit_sw));
|
||||
OP2(SLJIT_SUB | SLJIT_SET_Z, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, 1);
|
||||
JUMPTO(SLJIT_NOT_ZERO, loop);
|
||||
}
|
||||
|
@ -3261,8 +3261,8 @@ OP2(SLJIT_ADD, TMP2, 0, TMP1, 0, SLJIT_IMM, size - uncleared_size);
|
|||
loop = LABEL();
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), 0, src, 0);
|
||||
OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 3 * sizeof(sljit_sw));
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), -2 * (sljit_sw)sizeof(sljit_sw), src, 0);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), -1 * (sljit_sw)sizeof(sljit_sw), src, 0);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), -2 * SSIZE_OF(sw), src, 0);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP1), -1 * SSIZE_OF(sw), src, 0);
|
||||
CMPTO(SLJIT_LESS, TMP1, 0, TMP2, 0, loop);
|
||||
|
||||
if (uncleared_size >= sizeof(sljit_sw))
|
||||
|
@ -3289,12 +3289,12 @@ if (length < 8)
|
|||
}
|
||||
else
|
||||
{
|
||||
if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw)) == SLJIT_SUCCESS)
|
||||
if (sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw)) == SLJIT_SUCCESS)
|
||||
{
|
||||
GET_LOCAL_BASE(TMP2, 0, OVECTOR_START + sizeof(sljit_sw));
|
||||
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_IMM, length - 2);
|
||||
loop = LABEL();
|
||||
sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw));
|
||||
sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_STORE | SLJIT_MEM_PRE, TMP1, SLJIT_MEM1(TMP2), sizeof(sljit_sw));
|
||||
OP2(SLJIT_SUB | SLJIT_SET_Z, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 1);
|
||||
JUMPTO(SLJIT_NOT_ZERO, loop);
|
||||
}
|
||||
|
@ -3386,7 +3386,7 @@ else
|
|||
OP2(SLJIT_ADD, SLJIT_R2, 0, SLJIT_R2, 0, SLJIT_IMM, SLJIT_OFFSETOF(pcre2_match_data, ovector) - sizeof(PCRE2_SIZE));
|
||||
}
|
||||
|
||||
has_pre = sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw)) == SLJIT_SUCCESS;
|
||||
has_pre = sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw)) == SLJIT_SUCCESS;
|
||||
|
||||
GET_LOCAL_BASE(SLJIT_S0, 0, OVECTOR_START - (has_pre ? sizeof(sljit_sw) : 0));
|
||||
OP1(SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(HAS_VIRTUAL_REGISTERS ? SLJIT_R0 : ARGUMENTS), SLJIT_OFFSETOF(jit_arguments, begin));
|
||||
|
@ -3394,7 +3394,7 @@ OP1(SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(HAS_VIRTUAL_REGISTERS ? SLJIT_R0 : ARGUME
|
|||
loop = LABEL();
|
||||
|
||||
if (has_pre)
|
||||
sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw));
|
||||
sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_S1, SLJIT_MEM1(SLJIT_S0), sizeof(sljit_sw));
|
||||
else
|
||||
{
|
||||
OP1(SLJIT_MOV, SLJIT_S1, 0, SLJIT_MEM1(SLJIT_S0), 0);
|
||||
|
@ -3417,14 +3417,14 @@ JUMPTO(SLJIT_NOT_ZERO, loop);
|
|||
/* Calculate the return value, which is the maximum ovector value. */
|
||||
if (topbracket > 1)
|
||||
{
|
||||
if (sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw))) == SLJIT_SUCCESS)
|
||||
if (sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * SSIZE_OF(sw))) == SLJIT_SUCCESS)
|
||||
{
|
||||
GET_LOCAL_BASE(SLJIT_R0, 0, OVECTOR_START + topbracket * 2 * sizeof(sljit_sw));
|
||||
OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, topbracket + 1);
|
||||
|
||||
/* OVECTOR(0) is never equal to SLJIT_S2. */
|
||||
loop = LABEL();
|
||||
sljit_emit_mem(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * (sljit_sw)sizeof(sljit_sw)));
|
||||
sljit_emit_mem_update(compiler, SLJIT_MOV | SLJIT_MEM_PRE, SLJIT_R2, SLJIT_MEM1(SLJIT_R0), -(2 * SSIZE_OF(sw)));
|
||||
OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1);
|
||||
CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop);
|
||||
OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_R1, 0);
|
||||
|
@ -3437,7 +3437,7 @@ if (topbracket > 1)
|
|||
/* OVECTOR(0) is never equal to SLJIT_S2. */
|
||||
loop = LABEL();
|
||||
OP1(SLJIT_MOV, SLJIT_R2, 0, SLJIT_MEM1(SLJIT_R0), 0);
|
||||
OP2(SLJIT_SUB, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 2 * (sljit_sw)sizeof(sljit_sw));
|
||||
OP2(SLJIT_SUB, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 2 * SSIZE_OF(sw));
|
||||
OP2(SLJIT_SUB, SLJIT_R1, 0, SLJIT_R1, 0, SLJIT_IMM, 1);
|
||||
CMPTO(SLJIT_EQUAL, SLJIT_R2, 0, SLJIT_S2, 0, loop);
|
||||
OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_R1, 0);
|
||||
|
@ -4652,8 +4652,8 @@ if (common->nltype != NLTYPE_ANY)
|
|||
/* All newlines are ascii, just skip intermediate octets. */
|
||||
jump[0] = CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0);
|
||||
loop = LABEL();
|
||||
if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, TMP2, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)) == SLJIT_SUCCESS)
|
||||
sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, TMP2, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
|
||||
if (sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, TMP2, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)) == SLJIT_SUCCESS)
|
||||
sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_POST, TMP2, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
|
||||
else
|
||||
{
|
||||
OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0));
|
||||
|
@ -5886,7 +5886,7 @@ static BOOL check_fast_forward_char_pair_simd(compiler_common *common, fast_forw
|
|||
while (j < i)
|
||||
{
|
||||
b_pri = chars[j].last_count;
|
||||
if (b_pri > 2 && a_pri + b_pri >= max_pri)
|
||||
if (b_pri > 2 && (sljit_u32)a_pri + (sljit_u32)b_pri >= max_pri)
|
||||
{
|
||||
b1 = chars[j].chars[0];
|
||||
b2 = chars[j].chars[1];
|
||||
|
@ -6572,21 +6572,21 @@ GET_LOCAL_BASE(TMP1, 0, 0);
|
|||
|
||||
/* Drop frames until we reach STACK_TOP. */
|
||||
mainloop = LABEL();
|
||||
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), -sizeof(sljit_sw));
|
||||
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(STACK_TOP), -SSIZE_OF(sw));
|
||||
jump = CMP(SLJIT_SIG_LESS_EQUAL, TMP2, 0, SLJIT_IMM, 0);
|
||||
|
||||
OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0);
|
||||
if (HAS_VIRTUAL_REGISTERS)
|
||||
{
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(STACK_TOP), -(2 * sizeof(sljit_sw)));
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), sizeof(sljit_sw), SLJIT_MEM1(STACK_TOP), -(3 * sizeof(sljit_sw)));
|
||||
OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 3 * sizeof(sljit_sw));
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(STACK_TOP), -(2 * SSIZE_OF(sw)));
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), sizeof(sljit_sw), SLJIT_MEM1(STACK_TOP), -(3 * SSIZE_OF(sw)));
|
||||
OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 3 * SSIZE_OF(sw));
|
||||
}
|
||||
else
|
||||
{
|
||||
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), -(2 * sizeof(sljit_sw)));
|
||||
OP1(SLJIT_MOV, TMP3, 0, SLJIT_MEM1(STACK_TOP), -(3 * sizeof(sljit_sw)));
|
||||
OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 3 * sizeof(sljit_sw));
|
||||
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(STACK_TOP), -(2 * SSIZE_OF(sw)));
|
||||
OP1(SLJIT_MOV, TMP3, 0, SLJIT_MEM1(STACK_TOP), -(3 * SSIZE_OF(sw)));
|
||||
OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 3 * SSIZE_OF(sw));
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, TMP1, 0);
|
||||
GET_LOCAL_BASE(TMP1, 0, 0);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), sizeof(sljit_sw), TMP3, 0);
|
||||
|
@ -6603,13 +6603,13 @@ OP2(SLJIT_SUB, TMP2, 0, SLJIT_IMM, 0, TMP2, 0);
|
|||
OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0);
|
||||
if (HAS_VIRTUAL_REGISTERS)
|
||||
{
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(STACK_TOP), -(2 * sizeof(sljit_sw)));
|
||||
OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 2 * sizeof(sljit_sw));
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, SLJIT_MEM1(STACK_TOP), -(2 * SSIZE_OF(sw)));
|
||||
OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 2 * SSIZE_OF(sw));
|
||||
}
|
||||
else
|
||||
{
|
||||
OP1(SLJIT_MOV, TMP3, 0, SLJIT_MEM1(STACK_TOP), -(2 * sizeof(sljit_sw)));
|
||||
OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 2 * sizeof(sljit_sw));
|
||||
OP1(SLJIT_MOV, TMP3, 0, SLJIT_MEM1(STACK_TOP), -(2 * SSIZE_OF(sw)));
|
||||
OP2(SLJIT_SUB, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, 2 * SSIZE_OF(sw));
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(TMP2), 0, TMP3, 0);
|
||||
}
|
||||
JUMPTO(SLJIT_JUMP, mainloop);
|
||||
|
@ -7159,11 +7159,11 @@ if (char1_reg == STR_END)
|
|||
OP1(SLJIT_MOV, RETURN_ADDR, 0, char2_reg, 0);
|
||||
}
|
||||
|
||||
if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
|
||||
if (sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
|
||||
{
|
||||
label = LABEL();
|
||||
sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
|
||||
sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
|
||||
sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
|
||||
sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
|
||||
jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0);
|
||||
OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
|
||||
JUMPTO(SLJIT_NOT_ZERO, label);
|
||||
|
@ -7171,14 +7171,14 @@ if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_
|
|||
JUMPHERE(jump);
|
||||
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
|
||||
}
|
||||
else if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
|
||||
else if (sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
|
||||
{
|
||||
OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1));
|
||||
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
|
||||
|
||||
label = LABEL();
|
||||
sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
|
||||
sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
|
||||
sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
|
||||
sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
|
||||
jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0);
|
||||
OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
|
||||
JUMPTO(SLJIT_NOT_ZERO, label);
|
||||
|
@ -7232,9 +7232,9 @@ else
|
|||
lcc_table = TMP3;
|
||||
}
|
||||
|
||||
if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
|
||||
if (sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
|
||||
opt_type = 1;
|
||||
else if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
|
||||
else if (sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS)
|
||||
opt_type = 2;
|
||||
|
||||
sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0);
|
||||
|
@ -7253,8 +7253,8 @@ OP1(SLJIT_MOV, lcc_table, 0, SLJIT_IMM, common->lcc);
|
|||
if (opt_type == 1)
|
||||
{
|
||||
label = LABEL();
|
||||
sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
|
||||
sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
|
||||
sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
|
||||
sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
|
||||
}
|
||||
else if (opt_type == 2)
|
||||
{
|
||||
|
@ -7262,8 +7262,8 @@ else if (opt_type == 2)
|
|||
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
|
||||
|
||||
label = LABEL();
|
||||
sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
|
||||
sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
|
||||
sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1));
|
||||
sljit_emit_mem_update(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -9689,7 +9689,7 @@ BACKTRACK_AS(recurse_backtrack)->matchingpath = LABEL();
|
|||
return cc + 1 + LINK_SIZE;
|
||||
}
|
||||
|
||||
static sljit_s32 SLJIT_FUNC do_callout(struct jit_arguments *arguments, pcre2_callout_block *callout_block, PCRE2_SPTR *jit_ovector)
|
||||
static sljit_s32 SLJIT_FUNC do_callout_jit(struct jit_arguments *arguments, pcre2_callout_block *callout_block, PCRE2_SPTR *jit_ovector)
|
||||
{
|
||||
PCRE2_SPTR begin;
|
||||
PCRE2_SIZE *ovector;
|
||||
|
@ -9756,7 +9756,7 @@ unsigned int callout_length = (*cc == OP_CALLOUT)
|
|||
sljit_sw value1;
|
||||
sljit_sw value2;
|
||||
sljit_sw value3;
|
||||
sljit_uw callout_arg_size = (common->re->top_bracket + 1) * 2 * sizeof(sljit_sw);
|
||||
sljit_uw callout_arg_size = (common->re->top_bracket + 1) * 2 * SSIZE_OF(sw);
|
||||
|
||||
PUSH_BACKTRACK(sizeof(backtrack_common), cc, NULL);
|
||||
|
||||
|
@ -9806,7 +9806,7 @@ OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS0, STR_PTR, 0);
|
|||
/* SLJIT_R0 = arguments */
|
||||
OP1(SLJIT_MOV, SLJIT_R1, 0, STACK_TOP, 0);
|
||||
GET_LOCAL_BASE(SLJIT_R2, 0, OVECTOR_START);
|
||||
sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_ARGS3(32, W, W, W), SLJIT_IMM, SLJIT_FUNC_ADDR(do_callout));
|
||||
sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_ARGS3(32, W, W, W), SLJIT_IMM, SLJIT_FUNC_ADDR(do_callout_jit));
|
||||
OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
|
||||
free_stack(common, callout_arg_size);
|
||||
|
||||
|
@ -11451,7 +11451,7 @@ struct sljit_label *label;
|
|||
int private_data_ptr = PRIVATE_DATA(cc);
|
||||
int base = (private_data_ptr == 0) ? SLJIT_MEM1(STACK_TOP) : SLJIT_MEM1(SLJIT_SP);
|
||||
int offset0 = (private_data_ptr == 0) ? STACK(0) : private_data_ptr;
|
||||
int offset1 = (private_data_ptr == 0) ? STACK(1) : private_data_ptr + (int)sizeof(sljit_sw);
|
||||
int offset1 = (private_data_ptr == 0) ? STACK(1) : private_data_ptr + SSIZE_OF(sw);
|
||||
int tmp_base, tmp_offset;
|
||||
#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH != 32
|
||||
BOOL use_tmp;
|
||||
|
@ -11517,19 +11517,19 @@ if (exact > 1)
|
|||
}
|
||||
}
|
||||
else if (exact == 1)
|
||||
{
|
||||
compile_char1_matchingpath(common, type, cc, &backtrack->topbacktracks, TRUE);
|
||||
|
||||
if (early_fail_type == type_fail_range)
|
||||
{
|
||||
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), early_fail_ptr);
|
||||
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), early_fail_ptr + (int)sizeof(sljit_sw));
|
||||
OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, TMP2, 0);
|
||||
OP2(SLJIT_SUB, TMP2, 0, STR_PTR, 0, TMP2, 0);
|
||||
add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_LESS_EQUAL, TMP2, 0, TMP1, 0));
|
||||
if (early_fail_type == type_fail_range)
|
||||
{
|
||||
/* Range end first, followed by range start. */
|
||||
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), early_fail_ptr);
|
||||
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(SLJIT_SP), early_fail_ptr + SSIZE_OF(sw));
|
||||
OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, TMP2, 0);
|
||||
OP2(SLJIT_SUB, TMP2, 0, STR_PTR, 0, TMP2, 0);
|
||||
add_jump(compiler, &backtrack->topbacktracks, CMP(SLJIT_LESS_EQUAL, TMP2, 0, TMP1, 0));
|
||||
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), early_fail_ptr + (int)sizeof(sljit_sw), STR_PTR, 0);
|
||||
}
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), early_fail_ptr, STR_PTR, 0);
|
||||
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), early_fail_ptr + SSIZE_OF(sw), STR_PTR, 0);
|
||||
}
|
||||
|
||||
switch(opcode)
|
||||
|
@ -12428,7 +12428,7 @@ PCRE2_SPTR end;
|
|||
int private_data_ptr = PRIVATE_DATA(cc);
|
||||
int base = (private_data_ptr == 0) ? SLJIT_MEM1(STACK_TOP) : SLJIT_MEM1(SLJIT_SP);
|
||||
int offset0 = (private_data_ptr == 0) ? STACK(0) : private_data_ptr;
|
||||
int offset1 = (private_data_ptr == 0) ? STACK(1) : private_data_ptr + (int)sizeof(sljit_sw);
|
||||
int offset1 = (private_data_ptr == 0) ? STACK(1) : private_data_ptr + SSIZE_OF(sw);
|
||||
|
||||
cc = get_iterator_parameters(common, cc, &opcode, &type, &max, &exact, &end);
|
||||
|
||||
|
@ -14148,7 +14148,7 @@ quit_label = common->quit_label;
|
|||
if (common->currententry != NULL)
|
||||
{
|
||||
/* A free bit for each private data. */
|
||||
common->recurse_bitset_size = ((private_data_size / (int)sizeof(sljit_sw)) + 7) >> 3;
|
||||
common->recurse_bitset_size = ((private_data_size / SSIZE_OF(sw)) + 7) >> 3;
|
||||
SLJIT_ASSERT(common->recurse_bitset_size > 0);
|
||||
common->recurse_bitset = (sljit_u8*)SLJIT_MALLOC(common->recurse_bitset_size, allocator_data);;
|
||||
|
||||
|
@ -14384,7 +14384,7 @@ pcre2_jit_compile(pcre2_code *code, uint32_t options)
|
|||
pcre2_real_code *re = (pcre2_real_code *)code;
|
||||
#ifdef SUPPORT_JIT
|
||||
executable_functions *functions;
|
||||
static int executable_allocator_is_working = 0;
|
||||
static int executable_allocator_is_working = -1;
|
||||
#endif
|
||||
|
||||
if (code == NULL)
|
||||
|
@@ -14447,23 +14447,21 @@ return PCRE2_ERROR_JIT_BADOPTION;

if ((re->flags & PCRE2_NOJIT) != 0) return 0;

if (executable_allocator_is_working == 0)
if (executable_allocator_is_working == -1)
{
/* Checks whether the executable allocator is working. This check
might run multiple times in multi-threaded environments, but the
result should not be affected by it. */
void *ptr = SLJIT_MALLOC_EXEC(32, NULL);

executable_allocator_is_working = -1;

if (ptr != NULL)
{
SLJIT_FREE_EXEC(((sljit_u8*)(ptr)) + SLJIT_EXEC_OFFSET(ptr), NULL);
executable_allocator_is_working = 1;
}
else executable_allocator_is_working = 0;
}

if (executable_allocator_is_working < 0)
if (!executable_allocator_is_working)
return PCRE2_ERROR_NOMEMORY;

if ((re->overall_options & PCRE2_MATCH_INVALID_UTF) != 0)
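The change above turns the executable-allocator probe into a tri-state flag (-1 unknown, 0 broken, 1 working), so a failed probe is remembered instead of being retried on every compile. A minimal standalone sketch of that lazy tri-state pattern; probe_executable_memory() and the plain malloc/free calls are illustrative stand-ins, not PCRE2 or SLJIT APIs:

#include <stdlib.h>

/* -1 = not probed yet, 0 = known broken, 1 = known working. */
static int allocator_is_working = -1;

/* Hypothetical probe: try one allocation and report the outcome. */
static int probe_executable_memory(void)
{
void *ptr = malloc(32);      /* stand-in for SLJIT_MALLOC_EXEC */
if (ptr != NULL)
  {
  free(ptr);                 /* stand-in for SLJIT_FREE_EXEC */
  return 1;
  }
return 0;
}

int ensure_executable_allocator(void)
{
if (allocator_is_working == -1)
  allocator_is_working = probe_executable_memory();
return allocator_is_working; /* 0 means callers should report no memory */
}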
4
thirdparty/pcre2/src/pcre2_jit_misc.c
vendored
@@ -110,8 +110,10 @@ pcre2_jit_free_unused_memory(pcre2_general_context *gcontext)
(void)gcontext; /* Suppress warning */
#else /* SUPPORT_JIT */
SLJIT_UNUSED_ARG(gcontext);
#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)
sljit_free_unused_memory_exec();
#endif /* SUPPORT_JIT */
#endif /* SLJIT_EXECUTABLE_ALLOCATOR */
#endif /* SUPPORT_JIT */
}
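For context, a hedged usage sketch of the public function this hunk touches, pcre2_jit_free_unused_memory(), which releases executable memory the JIT allocator keeps around after compiled patterns are freed. The pattern string and the omission of error handling are illustrative only; assumes the 8-bit library (link with -lpcre2-8):

#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>

int main(void)
{
int errcode;
PCRE2_SIZE erroffset;
pcre2_code *re = pcre2_compile((PCRE2_SPTR)"ab+c", PCRE2_ZERO_TERMINATED, 0,
                               &errcode, &erroffset, NULL);
if (re == NULL) return 1;
(void)pcre2_jit_compile(re, PCRE2_JIT_COMPLETE); /* may fail if JIT is unsupported */
pcre2_code_free(re);
pcre2_jit_free_unused_memory(NULL); /* release freed JIT chunks held by the allocator */
return 0;
}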
4
thirdparty/pcre2/src/pcre2_jit_neon_inc.h
vendored
@@ -183,6 +183,8 @@ restart:;
#endif

#if defined(FFCPS)
if (str_ptr >= str_end)
return NULL;
sljit_u8 *p1 = str_ptr - diff;
#endif
sljit_s32 align_offset = ((uint64_t)str_ptr & 0xf);
@@ -327,7 +329,7 @@ match:;
return NULL;

#if defined(FF_UTF)
if (utf_continue(str_ptr + IN_UCHARS(-offs1)))
if (utf_continue((PCRE2_SPTR)str_ptr - offs1))
{
/* Not a match. */
str_ptr += IN_UCHARS(1);
2
thirdparty/pcre2/src/pcre2_jit_simd_inc.h
vendored
@@ -776,7 +776,7 @@ typedef union {
} int_char;

#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH != 32
static SLJIT_INLINE int utf_continue(sljit_u8 *s)
static SLJIT_INLINE int utf_continue(PCRE2_SPTR s)
{
#if PCRE2_CODE_UNIT_WIDTH == 8
return (*s & 0xc0) == 0x80;
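The utf_continue() change above only swaps the pointer type; the test itself is the usual UTF-8 continuation-byte check (10xxxxxx). A tiny self-contained illustration, with names chosen for this sketch rather than taken from PCRE2:

#include <stdio.h>

/* A UTF-8 continuation byte has the bit pattern 10xxxxxx. */
static int is_utf8_continuation(unsigned char c)
{
return (c & 0xc0) == 0x80;
}

int main(void)
{
const unsigned char s[] = "\xc3\xa9"; /* U+00E9, encoded as two code units */
printf("%d %d\n", is_utf8_continuation(s[0]), is_utf8_continuation(s[1])); /* 0 1 */
return 0;
}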
176
thirdparty/pcre2/src/pcre2_match.c
vendored
@@ -204,6 +204,7 @@ Arguments:
P a previous frame of interest
frame_size the frame size
mb points to the match block
match_data points to the match data block
s identification text

Returns: nothing
@@ -211,7 +212,7 @@ Returns: nothing

static void
display_frames(FILE *f, heapframe *F, heapframe *P, PCRE2_SIZE frame_size,
match_block *mb, const char *s, ...)
match_block *mb, pcre2_match_data *match_data, const char *s, ...)
{
uint32_t i;
heapframe *Q;
@@ -223,10 +224,10 @@ vfprintf(f, s, ap);
va_end(ap);

if (P != NULL) fprintf(f, " P=%lu",
((char *)P - (char *)(mb->match_frames))/frame_size);
((char *)P - (char *)(match_data->heapframes))/frame_size);
fprintf(f, "\n");

for (i = 0, Q = mb->match_frames;
for (i = 0, Q = match_data->heapframes;
Q <= F;
i++, Q = (heapframe *)((char *)Q + frame_size))
{
@@ -490,10 +491,16 @@ A version did exist that used individual frames on the heap instead of calling
match() recursively, but this ran substantially slower. The current version is
a refactoring that uses a vector of frames to remember backtracking points.
This runs no slower, and possibly even a bit faster than the original recursive
implementation. An initial vector of size START_FRAMES_SIZE (enough for maybe
50 frames) is allocated on the system stack. If this is not big enough, the
heap is used for a larger vector.
implementation.

At first, an initial vector of size START_FRAMES_SIZE (enough for maybe 50
frames) was allocated on the system stack. If this was not big enough, the heap
was used for a larger vector. However, it turns out that there are environments
where taking as little as 20KiB from the system stack is an embarrassment.
After another refactoring, the heap is used exclusively, but a pointer to the
frames vector and its size are cached in the match_data block, so that there is
no new memory allocation if the same match_data block is used for multiple
matches (unless the frames vector has to be extended).
*******************************************************************************
******************************************************************************/
@@ -566,10 +573,9 @@ made performance worse.
Arguments:
start_eptr starting character in subject
start_ecode starting position in compiled code
ovector pointer to the final output vector
oveccount number of pairs in ovector
top_bracket number of capturing parentheses in the pattern
frame_size size of each backtracking frame
match_data pointer to the match_data block
mb pointer to "static" variables block

Returns: MATCH_MATCH if matched ) these values are >= 0
@@ -580,17 +586,19 @@ Returns: MATCH_MATCH if matched ) these values are >= 0
*/

static int
match(PCRE2_SPTR start_eptr, PCRE2_SPTR start_ecode, PCRE2_SIZE *ovector,
uint16_t oveccount, uint16_t top_bracket, PCRE2_SIZE frame_size,
match_block *mb)
match(PCRE2_SPTR start_eptr, PCRE2_SPTR start_ecode, uint16_t top_bracket,
PCRE2_SIZE frame_size, pcre2_match_data *match_data, match_block *mb)
{
/* Frame-handling variables */

heapframe *F; /* Current frame pointer */
heapframe *N = NULL; /* Temporary frame pointers */
heapframe *P = NULL;

heapframe *frames_top; /* End of frames vector */
heapframe *assert_accept_frame = NULL; /* For passing back a frame with captures */
PCRE2_SIZE frame_copy_size; /* Amount to copy when creating a new frame */
PCRE2_SIZE heapframes_size; /* Usable size of frames vector */
PCRE2_SIZE frame_copy_size; /* Amount to copy when creating a new frame */

/* Local variables that do not need to be preserved over calls to RRMATCH(). */
@@ -627,10 +635,14 @@ copied when a new frame is created. */

frame_copy_size = frame_size - offsetof(heapframe, eptr);

/* Set up the first current frame at the start of the vector, and initialize
fields that are not reset for new frames. */
/* Set up the first frame and the end of the frames vector. We set the local
heapframes_size to the usuable amount of the vector, that is, a whole number of
frames. */

F = match_data->heapframes;
heapframes_size = (match_data->heapframes_size / frame_size) * frame_size;
frames_top = (heapframe *)((char *)F + heapframes_size);

F = mb->match_frames;
Frdepth = 0; /* "Recursion" depth */
Fcapture_last = 0; /* Number of most recent capture */
Fcurrent_recurse = RECURSE_UNSET; /* Not pattern recursing. */
@@ -646,34 +658,35 @@ backtracking point. */

MATCH_RECURSE:

/* Set up a new backtracking frame. If the vector is full, get a new one
on the heap, doubling the size, but constrained by the heap limit. */
/* Set up a new backtracking frame. If the vector is full, get a new one,
doubling the size, but constrained by the heap limit (which is in KiB). */

N = (heapframe *)((char *)F + frame_size);
if (N >= mb->match_frames_top)
if (N >= frames_top)
{
PCRE2_SIZE newsize = mb->frame_vector_size * 2;
heapframe *new;
PCRE2_SIZE newsize = match_data->heapframes_size * 2;

if ((newsize / 1024) > mb->heap_limit)
if (newsize > mb->heap_limit)
{
PCRE2_SIZE maxsize = ((mb->heap_limit * 1024)/frame_size) * frame_size;
if (mb->frame_vector_size >= maxsize) return PCRE2_ERROR_HEAPLIMIT;
PCRE2_SIZE maxsize = (mb->heap_limit/frame_size) * frame_size;
if (match_data->heapframes_size >= maxsize) return PCRE2_ERROR_HEAPLIMIT;
newsize = maxsize;
}

new = mb->memctl.malloc(newsize, mb->memctl.memory_data);
new = match_data->memctl.malloc(newsize, match_data->memctl.memory_data);
if (new == NULL) return PCRE2_ERROR_NOMEMORY;
memcpy(new, mb->match_frames, mb->frame_vector_size);
memcpy(new, match_data->heapframes, heapframes_size);

F = (heapframe *)((char *)new + ((char *)F - (char *)mb->match_frames));
F = (heapframe *)((char *)new + ((char *)F - (char *)match_data->heapframes));
N = (heapframe *)((char *)F + frame_size);

if (mb->match_frames != mb->stack_frames)
mb->memctl.free(mb->match_frames, mb->memctl.memory_data);
mb->match_frames = new;
mb->match_frames_top = (heapframe *)((char *)mb->match_frames + newsize);
mb->frame_vector_size = newsize;
match_data->memctl.free(match_data->heapframes, match_data->memctl.memory_data);
match_data->heapframes = new;
match_data->heapframes_size = newsize;

heapframes_size = (newsize / frame_size) * frame_size;
frames_top = (heapframe *)((char *)new + heapframes_size);
}

#ifdef DEBUG_SHOW_RMATCH
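The reallocation above grows the frames vector by doubling, clamps it to the heap limit (now held in bytes), and rounds down to a whole number of frames. A distilled sketch of that growth rule, assuming plain malloc/free instead of PCRE2's memctl hooks; grow_frames() is a name invented for this illustration:

#include <stdlib.h>
#include <string.h>

/* Grow *vec (old_size bytes in use) by doubling, capped at limit bytes and
   rounded down to a multiple of frame_size. Returns the new usable size
   (a multiple of frame_size), or 0 on failure (limit reached or no memory). */
static size_t grow_frames(char **vec, size_t old_size, size_t frame_size, size_t limit)
{
size_t newsize = old_size * 2;
if (newsize > limit)
  {
  size_t maxsize = (limit / frame_size) * frame_size;
  if (old_size >= maxsize) return 0;   /* corresponds to PCRE2_ERROR_HEAPLIMIT */
  newsize = maxsize;
  }
char *bigger = malloc(newsize);
if (bigger == NULL) return 0;          /* corresponds to PCRE2_ERROR_NOMEMORY */
memcpy(bigger, *vec, old_size);
free(*vec);
*vec = bigger;
return (newsize / frame_size) * frame_size;
}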
@@ -731,7 +744,7 @@ recursion value. */

if (group_frame_type != 0)
{
Flast_group_offset = (char *)F - (char *)mb->match_frames;
Flast_group_offset = (char *)F - (char *)match_data->heapframes;
if (GF_IDMASK(group_frame_type) == GF_RECURSE)
Fcurrent_recurse = GF_DATAMASK(group_frame_type);
group_frame_type = 0;
@@ -773,7 +786,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
for(;;)
{
if (offset == PCRE2_UNSET) return PCRE2_ERROR_INTERNAL;
N = (heapframe *)((char *)mb->match_frames + offset);
N = (heapframe *)((char *)match_data->heapframes + offset);
P = (heapframe *)((char *)N - frame_size);
if (N->group_frame_type == (GF_CAPTURE | number)) break;
offset = P->last_group_offset;
@@ -811,7 +824,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
for(;;)
{
if (offset == PCRE2_UNSET) return PCRE2_ERROR_INTERNAL;
N = (heapframe *)((char *)mb->match_frames + offset);
N = (heapframe *)((char *)match_data->heapframes + offset);
P = (heapframe *)((char *)N - frame_size);
if (GF_IDMASK(N->group_frame_type) == GF_RECURSE) break;
offset = P->last_group_offset;
@@ -864,14 +877,15 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
mb->mark = Fmark; /* and the last success mark */
if (Feptr > mb->last_used_ptr) mb->last_used_ptr = Feptr;

ovector[0] = Fstart_match - mb->start_subject;
ovector[1] = Feptr - mb->start_subject;
match_data->ovector[0] = Fstart_match - mb->start_subject;
match_data->ovector[1] = Feptr - mb->start_subject;

/* Set i to the smaller of the sizes of the external and frame ovectors. */

i = 2 * ((top_bracket + 1 > oveccount)? oveccount : top_bracket + 1);
memcpy(ovector + 2, Fovector, (i - 2) * sizeof(PCRE2_SIZE));
while (--i >= Foffset_top + 2) ovector[i] = PCRE2_UNSET;
i = 2 * ((top_bracket + 1 > match_data->oveccount)?
match_data->oveccount : top_bracket + 1);
memcpy(match_data->ovector + 2, Fovector, (i - 2) * sizeof(PCRE2_SIZE));
while (--i >= Foffset_top + 2) match_data->ovector[i] = PCRE2_UNSET;
return MATCH_MATCH; /* Note: NOT RRETURN */
@@ -5328,7 +5342,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode);
offset = Flast_group_offset;
while (offset != PCRE2_UNSET)
{
N = (heapframe *)((char *)mb->match_frames + offset);
N = (heapframe *)((char *)match_data->heapframes + offset);
P = (heapframe *)((char *)N - frame_size);
if (N->group_frame_type == (GF_RECURSE | number))
{
@@ -5729,7 +5743,7 @@ fprintf(stderr, "++ op=%d\n", *Fecode);

if (*bracode != OP_BRA && *bracode != OP_COND)
{
N = (heapframe *)((char *)mb->match_frames + Flast_group_offset);
N = (heapframe *)((char *)match_data->heapframes + Flast_group_offset);
P = (heapframe *)((char *)N - frame_size);
Flast_group_offset = P->last_group_offset;
@@ -6346,6 +6360,7 @@ BOOL jit_checked_utf = FALSE;
#endif /* SUPPORT_UNICODE */

PCRE2_SIZE frame_size;
PCRE2_SIZE heapframes_size;

/* We need to have mb as a pointer to a match block, because the IS_NEWLINE
macro is used below, and it expects NLBLOCK to be defined as a pointer. */
@@ -6354,15 +6369,6 @@ pcre2_callout_block cb;
match_block actual_match_block;
match_block *mb = &actual_match_block;

/* Allocate an initial vector of backtracking frames on the stack. If this
proves to be too small, it is replaced by a larger one on the heap. To get a
vector of the size required that is aligned for pointers, allocate it as a
vector of pointers. */

PCRE2_SPTR stack_frames_vector[START_FRAMES_SIZE/sizeof(PCRE2_SPTR)]
PCRE2_KEEP_UNINITIALIZED;
mb->stack_frames = (heapframe *)stack_frames_vector;

/* Recognize NULL, length 0 as an empty string. */

if (subject == NULL && length == 0) subject = (PCRE2_SPTR)"";
@@ -6793,15 +6799,11 @@ switch(re->newline_convention)
vector at the end, whose size depends on the number of capturing parentheses in
the pattern. It is not used at all if there are no capturing parentheses.

frame_size is the total size of each frame
mb->frame_vector_size is the total usable size of the vector (rounded down
to a whole number of frames)
frame_size is the total size of each frame
match_data->heapframes is the pointer to the frames vector
match_data->heapframes_size is the total size of the vector

The last of these is changed within the match() function if the frame vector
has to be expanded. We therefore put it into the match block so that it is
correct when calling match() more than once for non-anchored patterns.

We must also pad frame_size for alignment to ensure subsequent frames are as
We must pad the frame_size for alignment to ensure subsequent frames are as
aligned as heapframe. Whilst ovector is word-aligned due to being a PCRE2_SIZE
array, that does not guarantee it is suitably aligned for pointers, as some
architectures have pointers that are larger than a size_t. */
@@ -6813,8 +6815,8 @@ frame_size = (offsetof(heapframe, ovector) +
/* Limits set in the pattern override the match context only if they are
smaller. */

mb->heap_limit = (mcontext->heap_limit < re->limit_heap)?
mcontext->heap_limit : re->limit_heap;
mb->heap_limit = ((mcontext->heap_limit < re->limit_heap)?
mcontext->heap_limit : re->limit_heap) * 1024;

mb->match_limit = (mcontext->match_limit < re->limit_match)?
mcontext->match_limit : re->limit_match;
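The multiplication by 1024 here converts the configured limit to bytes once, while the public API keeps expressing it in kibibytes. A hedged sketch of how an application sets that limit (real PCRE2 calls; the 64 KiB figure is only an example):

#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>

/* Cap backtracking-frame heap usage for matches run with this context at ~64 KiB. */
static pcre2_match_context *make_limited_context(void)
{
pcre2_match_context *mcontext = pcre2_match_context_create(NULL);
if (mcontext == NULL) return NULL;
pcre2_set_heap_limit(mcontext, 64); /* value is given in kibibytes */
return mcontext;
}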
@@ -6823,35 +6825,40 @@ mb->match_limit_depth = (mcontext->depth_limit < re->limit_depth)?
mcontext->depth_limit : re->limit_depth;

/* If a pattern has very many capturing parentheses, the frame size may be very
large. Ensure that there are at least 10 available frames by getting an initial
vector on the heap if necessary, except when the heap limit prevents this. Get
fewer if possible. (The heap limit is in kibibytes.) */
large. Set the initial frame vector size to ensure that there are at least 10
available frames, but enforce a minimum of START_FRAMES_SIZE. If this is
greater than the heap limit, get as large a vector as possible. Always round
the size to a multiple of the frame size. */

if (frame_size <= START_FRAMES_SIZE/10)
heapframes_size = frame_size * 10;
if (heapframes_size < START_FRAMES_SIZE) heapframes_size = START_FRAMES_SIZE;
if (heapframes_size > mb->heap_limit)
{
mb->match_frames = mb->stack_frames; /* Initial frame vector on the stack */
mb->frame_vector_size = ((START_FRAMES_SIZE/frame_size) * frame_size);
if (frame_size > mb->heap_limit ) return PCRE2_ERROR_HEAPLIMIT;
heapframes_size = mb->heap_limit;
}
else

/* If an existing frame vector in the match_data block is large enough, we can
use it. Otherwise, free any pre-existing vector and get a new one. */

if (match_data->heapframes_size < heapframes_size)
{
mb->frame_vector_size = frame_size * 10;
if ((mb->frame_vector_size / 1024) > mb->heap_limit)
match_data->memctl.free(match_data->heapframes,
match_data->memctl.memory_data);
match_data->heapframes = match_data->memctl.malloc(heapframes_size,
match_data->memctl.memory_data);
if (match_data->heapframes == NULL)
{
if (frame_size > mb->heap_limit * 1024) return PCRE2_ERROR_HEAPLIMIT;
mb->frame_vector_size = ((mb->heap_limit * 1024)/frame_size) * frame_size;
match_data->heapframes_size = 0;
return PCRE2_ERROR_NOMEMORY;
}
mb->match_frames = mb->memctl.malloc(mb->frame_vector_size,
mb->memctl.memory_data);
if (mb->match_frames == NULL) return PCRE2_ERROR_NOMEMORY;
match_data->heapframes_size = heapframes_size;
}

mb->match_frames_top =
(heapframe *)((char *)mb->match_frames + mb->frame_vector_size);

/* Write to the ovector within the first frame to mark every capture unset and
to avoid uninitialized memory read errors when it is copied to a new frame. */

memset((char *)(mb->match_frames) + offsetof(heapframe, ovector), 0xff,
memset((char *)(match_data->heapframes) + offsetof(heapframe, ovector), 0xff,
frame_size - offsetof(heapframe, ovector));

/* Pointers to the individual character tables */
@@ -7279,8 +7286,8 @@ for(;;)
mb->end_offset_top = 0;
mb->skip_arg_count = 0;

rc = match(start_match, mb->start_code, match_data->ovector,
match_data->oveccount, re->top_bracket, frame_size, mb);
rc = match(start_match, mb->start_code, re->top_bracket, frame_size,
match_data, mb);

if (mb->hitend && start_partial == NULL)
{
@@ -7463,11 +7470,6 @@ if (utf && end_subject != true_end_subject &&
}
#endif /* SUPPORT_UNICODE */

/* Release an enlarged frame vector that is on the heap. */

if (mb->match_frames != mb->stack_frames)
mb->memctl.free(mb->match_frames, mb->memctl.memory_data);

/* Fill in fields that are always returned in the match data. */

match_data->code = re;
@@ -7533,4 +7535,10 @@ else match_data->rc = PCRE2_ERROR_NOMATCH;
return match_data->rc;
}

/* These #undefs are here to enable unity builds with CMake. */

#undef NLBLOCK /* Block containing newline information */
#undef PSSTART /* Field containing processed string start */
#undef PSEND /* Field containing processed string end */

/* End of pcre2_match.c */
11
thirdparty/pcre2/src/pcre2_match_data.c
vendored
@@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language.

Written by Philip Hazel
Original API code Copyright (c) 1997-2012 University of Cambridge
New API code Copyright (c) 2016-2019 University of Cambridge
New API code Copyright (c) 2016-2022 University of Cambridge

-----------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
@@ -51,19 +51,23 @@ POSSIBILITY OF SUCH DAMAGE.
* Create a match data block given ovector size *
*************************************************/

/* A minimum of 1 is imposed on the number of ovector pairs. */
/* A minimum of 1 is imposed on the number of ovector pairs. A maximum is also
imposed because the oveccount field in a match data block is uint16_t. */

PCRE2_EXP_DEFN pcre2_match_data * PCRE2_CALL_CONVENTION
pcre2_match_data_create(uint32_t oveccount, pcre2_general_context *gcontext)
{
pcre2_match_data *yield;
if (oveccount < 1) oveccount = 1;
if (oveccount > UINT16_MAX) oveccount = UINT16_MAX;
yield = PRIV(memctl_malloc)(
offsetof(pcre2_match_data, ovector) + 2*oveccount*sizeof(PCRE2_SIZE),
(pcre2_memctl *)gcontext);
if (yield == NULL) return NULL;
yield->oveccount = oveccount;
yield->flags = 0;
yield->heapframes = NULL;
yield->heapframes_size = 0;
return yield;
}

@@ -95,6 +99,9 @@ pcre2_match_data_free(pcre2_match_data *match_data)
{
if (match_data != NULL)
{
if (match_data->heapframes != NULL)
match_data->memctl.free(match_data->heapframes,
match_data->memctl.memory_data);
if ((match_data->flags & PCRE2_MD_COPIED_SUBJECT) != 0)
match_data->memctl.free((void *)match_data->subject,
match_data->memctl.memory_data);
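These new heapframes fields are what make it worthwhile to create one match data block and reuse it: the frames vector allocated for the first match is cached and reused by later matches, and pcre2_match_data_free() releases it. A hedged usage sketch with error handling trimmed (assumes the 8-bit library; the pattern and subjects are illustrative):

#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
#include <stdio.h>

int main(void)
{
int errcode;
PCRE2_SIZE erroffset;
pcre2_code *re = pcre2_compile((PCRE2_SPTR)"\\d+", PCRE2_ZERO_TERMINATED, 0,
                               &errcode, &erroffset, NULL);
if (re == NULL) return 1;

/* One match data block, reused: its cached frames vector survives between calls. */
pcre2_match_data *md = pcre2_match_data_create_from_pattern(re, NULL);
const char *subjects[] = { "abc 123", "no digits", "42" };
for (int i = 0; i < 3; i++)
  {
  int rc = pcre2_match(re, (PCRE2_SPTR)subjects[i], PCRE2_ZERO_TERMINATED,
                       0, 0, md, NULL);
  printf("%s -> %d\n", subjects[i], rc);
  }
pcre2_match_data_free(md); /* also releases the cached frames vector */
pcre2_code_free(re);
return 0;
}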
25
thirdparty/pcre2/src/pcre2_substitute.c
vendored
@@ -7,7 +7,7 @@ and semantics are as close as possible to those of the Perl 5 language.

Written by Philip Hazel
Original API code Copyright (c) 1997-2012 University of Cambridge
New API code Copyright (c) 2016-2021 University of Cambridge
New API code Copyright (c) 2016-2022 University of Cambridge

-----------------------------------------------------------------------------
Redistribution and use in source and binary forms, with or without
@@ -259,16 +259,16 @@ PCRE2_UNSET, so as not to imply an offset in the replacement. */

if ((options & (PCRE2_PARTIAL_HARD|PCRE2_PARTIAL_SOFT)) != 0)
return PCRE2_ERROR_BADOPTION;

/* Validate length and find the end of the replacement. A NULL replacement of

/* Validate length and find the end of the replacement. A NULL replacement of
zero length is interpreted as an empty string. */

if (replacement == NULL)
if (replacement == NULL)
{
if (rlength != 0) return PCRE2_ERROR_NULL;
replacement = (PCRE2_SPTR)"";
}

replacement = (PCRE2_SPTR)"";
}

if (rlength == PCRE2_ZERO_TERMINATED) rlength = PRIV(strlen)(replacement);
repend = replacement + rlength;
@@ -282,8 +282,9 @@ replacement_only = ((options & PCRE2_SUBSTITUTE_REPLACEMENT_ONLY) != 0);
match data block. We create an internal match_data block in two cases: (a) an
external one is not supplied (and we are not starting from an existing match);
(b) an existing match is to be used for the first substitution. In the latter
case, we copy the existing match into the internal block. This ensures that no
changes are made to the existing match data block. */
case, we copy the existing match into the internal block, except for any cached
heap frame size and pointer. This ensures that no changes are made to the
external match data block. */

if (match_data == NULL)
{
@@ -309,6 +310,8 @@ else if (use_existing_match)
if (internal_match_data == NULL) return PCRE2_ERROR_NOMEMORY;
memcpy(internal_match_data, match_data, offsetof(pcre2_match_data, ovector)
+ 2*pairs*sizeof(PCRE2_SIZE));
internal_match_data->heapframes = NULL;
internal_match_data->heapframes_size = 0;
match_data = internal_match_data;
}

@@ -328,9 +331,9 @@ scb.ovector = ovector;

if (subject == NULL)
{
if (length != 0) return PCRE2_ERROR_NULL;
if (length != 0) return PCRE2_ERROR_NULL;
subject = (PCRE2_SPTR)"";
}
}

/* Find length of zero-terminated subject */
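As a companion to the replacement-validation change above: a NULL replacement is only accepted with zero length, which is what the reordered check enforces. A hedged sketch of a plain pcre2_substitute() call (8-bit library; the pattern, subject, and buffer size are illustrative):

#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
#include <stdio.h>

int main(void)
{
int errcode;
PCRE2_SIZE erroffset;
pcre2_code *re = pcre2_compile((PCRE2_SPTR)"cat", PCRE2_ZERO_TERMINATED, 0,
                               &errcode, &erroffset, NULL);
if (re == NULL) return 1;

PCRE2_UCHAR out[64];
PCRE2_SIZE outlen = sizeof(out);           /* buffer size in code units on entry */
int rc = pcre2_substitute(re, (PCRE2_SPTR)"cat sat", PCRE2_ZERO_TERMINATED, 0,
                          PCRE2_SUBSTITUTE_GLOBAL, NULL, NULL,
                          (PCRE2_SPTR)"dog", PCRE2_ZERO_TERMINATED,
                          out, &outlen);
if (rc >= 0) printf("%s\n", (char *)out);  /* prints "dog sat" */
pcre2_code_free(re);
return 0;
}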
14
thirdparty/pcre2/src/sljit/sljitConfig.h
vendored
@@ -53,7 +53,8 @@ extern "C" {
/* #define SLJIT_CONFIG_PPC_64 1 */
/* #define SLJIT_CONFIG_MIPS_32 1 */
/* #define SLJIT_CONFIG_MIPS_64 1 */
/* #define SLJIT_CONFIG_SPARC_32 1 */
/* #define SLJIT_CONFIG_RISCV_32 1 */
/* #define SLJIT_CONFIG_RISCV_64 1 */
/* #define SLJIT_CONFIG_S390X 1 */

/* #define SLJIT_CONFIG_AUTO 1 */
@@ -127,17 +128,6 @@ extern "C" {

#endif /* !SLJIT_EXECUTABLE_ALLOCATOR */

/* Force cdecl calling convention even if a better calling
convention (e.g. fastcall) is supported by the C compiler.
If this option is disabled (this is the default), functions
called from JIT should be defined with SLJIT_FUNC attribute.
Standard C functions can still be called by using the
SLJIT_CALL_CDECL jump type. */
#ifndef SLJIT_USE_CDECL_CALLING_CONVENTION
/* Disabled by default */
#define SLJIT_USE_CDECL_CALLING_CONVENTION 0
#endif

/* Return with error when an invalid argument is passed. */
#ifndef SLJIT_ARGUMENT_CHECKS
/* Disabled by default */
134
thirdparty/pcre2/src/sljit/sljitConfigInternal.h
vendored
@ -59,7 +59,8 @@ extern "C" {
|
|||
SLJIT_64BIT_ARCHITECTURE : 64 bit architecture
|
||||
SLJIT_LITTLE_ENDIAN : little endian architecture
|
||||
SLJIT_BIG_ENDIAN : big endian architecture
|
||||
SLJIT_UNALIGNED : allows unaligned memory accesses for non-fpu operations (only!)
|
||||
SLJIT_UNALIGNED : unaligned memory accesses for non-fpu operations are supported
|
||||
SLJIT_FPU_UNALIGNED : unaligned memory accesses for fpu operations are supported
|
||||
SLJIT_INDIRECT_CALL : see SLJIT_FUNC_ADDR() for more information
|
||||
|
||||
Constants:
|
||||
|
@ -98,7 +99,8 @@ extern "C" {
|
|||
+ (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \
|
||||
+ (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \
|
||||
+ (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \
|
||||
+ (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \
|
||||
+ (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) \
|
||||
+ (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) \
|
||||
+ (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \
|
||||
+ (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) \
|
||||
+ (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) >= 2
|
||||
|
@ -115,7 +117,8 @@ extern "C" {
|
|||
&& !(defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \
|
||||
&& !(defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \
|
||||
&& !(defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \
|
||||
&& !(defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \
|
||||
&& !(defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) \
|
||||
&& !(defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) \
|
||||
&& !(defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X) \
|
||||
&& !(defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) \
|
||||
&& !(defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO)
|
||||
|
@ -156,8 +159,10 @@ extern "C" {
|
|||
#define SLJIT_CONFIG_MIPS_32 1
|
||||
#elif defined(__mips64)
|
||||
#define SLJIT_CONFIG_MIPS_64 1
|
||||
#elif (defined(__sparc__) || defined(__sparc)) && !defined(_LP64)
|
||||
#define SLJIT_CONFIG_SPARC_32 1
|
||||
#elif defined (__riscv_xlen) && (__riscv_xlen == 32)
|
||||
#define SLJIT_CONFIG_RISCV_32 1
|
||||
#elif defined (__riscv_xlen) && (__riscv_xlen == 64)
|
||||
#define SLJIT_CONFIG_RISCV_64 1
|
||||
#elif defined(__s390x__)
|
||||
#define SLJIT_CONFIG_S390X 1
|
||||
#else
|
||||
|
@ -205,8 +210,8 @@ extern "C" {
|
|||
#define SLJIT_CONFIG_PPC 1
|
||||
#elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) || (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
|
||||
#define SLJIT_CONFIG_MIPS 1
|
||||
#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) || (defined SLJIT_CONFIG_SPARC_64 && SLJIT_CONFIG_SPARC_64)
|
||||
#define SLJIT_CONFIG_SPARC 1
|
||||
#elif (defined SLJIT_CONFIG_RISCV_32 && SLJIT_CONFIG_RISCV_32) || (defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64)
|
||||
#define SLJIT_CONFIG_RISCV 1
|
||||
#endif
|
||||
|
||||
/***********************************************************/
|
||||
|
@ -330,8 +335,14 @@ extern "C" {
|
|||
* older versions are known to abort in some targets
|
||||
* https://github.com/PhilipHazel/pcre2/issues/92
|
||||
*
|
||||
* beware APPLE is known to have removed the code in iOS so
|
||||
* it will need to be excempted or result in broken builds
|
||||
* beware some vendors (ex: Microsoft, Apple) are known to have
|
||||
* removed the code to support this builtin even if the call for
|
||||
* __has_builtin reports it is available.
|
||||
*
|
||||
* make sure linking doesn't fail because __clear_cache() is
|
||||
* missing before changing it or add an exception so that the
|
||||
* system provided method that should be defined below is used
|
||||
* instead.
|
||||
*/
|
||||
#if (!defined SLJIT_CACHE_FLUSH && defined __has_builtin)
|
||||
#if __has_builtin(__builtin___clear_cache) && !defined(__clang__)
|
||||
|
@ -339,9 +350,9 @@ extern "C" {
|
|||
/*
|
||||
* https://gcc.gnu.org/bugzilla//show_bug.cgi?id=91248
|
||||
* https://gcc.gnu.org/bugzilla//show_bug.cgi?id=93811
|
||||
* gcc's clear_cache builtin for power and sparc are broken
|
||||
* gcc's clear_cache builtin for power is broken
|
||||
*/
|
||||
#if !defined(SLJIT_CONFIG_PPC) && !defined(SLJIT_CONFIG_SPARC_32)
|
||||
#if !defined(SLJIT_CONFIG_PPC)
|
||||
#define SLJIT_CACHE_FLUSH(from, to) \
|
||||
__builtin___clear_cache((char*)(from), (char*)(to))
|
||||
#endif
|
||||
|
@ -373,12 +384,10 @@ extern "C" {
|
|||
ppc_cache_flush((from), (to))
|
||||
#define SLJIT_CACHE_FLUSH_OWN_IMPL 1
|
||||
|
||||
#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
|
||||
#elif defined(_WIN32)
|
||||
|
||||
/* The __clear_cache() implementation of GCC is a dummy function on Sparc. */
|
||||
#define SLJIT_CACHE_FLUSH(from, to) \
|
||||
sparc_cache_flush((from), (to))
|
||||
#define SLJIT_CACHE_FLUSH_OWN_IMPL 1
|
||||
FlushInstructionCache(GetCurrentProcess(), (void*)(from), (char*)(to) - (char*)(from))
|
||||
|
||||
#elif (defined(__GNUC__) && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) || defined(__clang__)
|
||||
|
||||
|
@ -392,11 +401,6 @@ extern "C" {
|
|||
#define SLJIT_CACHE_FLUSH(from, to) \
|
||||
cacheflush((long)(from), (long)(to), 0)
|
||||
|
||||
#elif defined _WIN32
|
||||
|
||||
#define SLJIT_CACHE_FLUSH(from, to) \
|
||||
FlushInstructionCache(GetCurrentProcess(), (void*)(from), (char*)(to) - (char*)(from))
|
||||
|
||||
#else
|
||||
|
||||
/* Call __ARM_NR_cacheflush on ARM-Linux or the corresponding MIPS syscall. */
|
||||
|
@ -435,6 +439,7 @@ typedef long int sljit_sw;
|
|||
&& !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
|
||||
&& !(defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \
|
||||
&& !(defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \
|
||||
&& !(defined SLJIT_CONFIG_RISCV_64 && SLJIT_CONFIG_RISCV_64) \
|
||||
&& !(defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
|
||||
#define SLJIT_32BIT_ARCHITECTURE 1
|
||||
#define SLJIT_WORD_SHIFT 2
|
||||
|
@ -495,8 +500,7 @@ typedef double sljit_f64;
|
|||
#if !defined(SLJIT_BIG_ENDIAN) && !defined(SLJIT_LITTLE_ENDIAN)
|
||||
|
||||
/* These macros are mostly useful for the applications. */
|
||||
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \
|
||||
|| (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
|
||||
#if (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC)
|
||||
|
||||
#ifdef __LITTLE_ENDIAN__
|
||||
#define SLJIT_LITTLE_ENDIAN 1
|
||||
|
@ -504,8 +508,7 @@ typedef double sljit_f64;
|
|||
#define SLJIT_BIG_ENDIAN 1
|
||||
#endif
|
||||
|
||||
#elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \
|
||||
|| (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
|
||||
#elif (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS)
|
||||
|
||||
#ifdef __MIPSEL__
|
||||
#define SLJIT_LITTLE_ENDIAN 1
|
||||
|
@ -532,8 +535,7 @@ typedef double sljit_f64;
|
|||
|
||||
#endif /* !SLJIT_MIPS_REV */
|
||||
|
||||
#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \
|
||||
|| (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
|
||||
#elif (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
|
||||
|
||||
#define SLJIT_BIG_ENDIAN 1
|
||||
|
||||
|
@ -554,19 +556,30 @@ typedef double sljit_f64;
|
|||
|
||||
#ifndef SLJIT_UNALIGNED
|
||||
|
||||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \
|
||||
|| (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \
|
||||
#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \
|
||||
|| (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \
|
||||
|| (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \
|
||||
|| (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
|
||||
|| (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \
|
||||
|| (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \
|
||||
|| (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) \
|
||||
|| (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) \
|
||||
|| (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
|
||||
#define SLJIT_UNALIGNED 1
|
||||
#endif
|
||||
|
||||
#endif /* !SLJIT_UNALIGNED */
|
||||
|
||||
#ifndef SLJIT_FPU_UNALIGNED
|
||||
|
||||
#if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) \
|
||||
|| (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \
|
||||
|| (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) \
|
||||
|| (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) \
|
||||
|| (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
|
||||
#define SLJIT_FPU_UNALIGNED 1
|
||||
#endif
|
||||
|
||||
#endif /* !SLJIT_FPU_UNALIGNED */
|
||||
|
||||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
/* Auto detect SSE2 support using CPUID.
|
||||
On 64 bit x86 cpus, sse2 must be present. */
|
||||
|
@ -578,38 +591,7 @@ typedef double sljit_f64;
|
|||
/*****************************************************************************************/
|
||||
|
||||
#ifndef SLJIT_FUNC
|
||||
|
||||
#if (defined SLJIT_USE_CDECL_CALLING_CONVENTION && SLJIT_USE_CDECL_CALLING_CONVENTION) \
|
||||
|| !(defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
|
||||
#define SLJIT_FUNC
|
||||
|
||||
#elif defined(__GNUC__) && !defined(__APPLE__)
|
||||
|
||||
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
|
||||
#define SLJIT_FUNC __attribute__ ((fastcall))
|
||||
#define SLJIT_X86_32_FASTCALL 1
|
||||
#else
|
||||
#define SLJIT_FUNC
|
||||
#endif /* gcc >= 3.4 */
|
||||
|
||||
#elif defined(_MSC_VER)
|
||||
|
||||
#define SLJIT_FUNC __fastcall
|
||||
#define SLJIT_X86_32_FASTCALL 1
|
||||
|
||||
#elif defined(__BORLANDC__)
|
||||
|
||||
#define SLJIT_FUNC __msfastcall
|
||||
#define SLJIT_X86_32_FASTCALL 1
|
||||
|
||||
#else /* Unknown compiler. */
|
||||
|
||||
/* The cdecl calling convention is usually the x86 default. */
|
||||
#define SLJIT_FUNC
|
||||
|
||||
#endif /* SLJIT_USE_CDECL_CALLING_CONVENTION */
|
||||
|
||||
#endif /* !SLJIT_FUNC */
|
||||
|
||||
#ifndef SLJIT_INDIRECT_CALL
|
||||
|
@ -621,14 +603,10 @@ typedef double sljit_f64;
|
|||
#endif
|
||||
#endif /* SLJIT_INDIRECT_CALL */
|
||||
|
||||
/* The offset which needs to be substracted from the return address to
|
||||
/* The offset which needs to be subtracted from the return address to
|
||||
determine the next executed instruction after return. */
|
||||
#ifndef SLJIT_RETURN_ADDRESS_OFFSET
|
||||
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
|
||||
#define SLJIT_RETURN_ADDRESS_OFFSET 8
|
||||
#else
|
||||
#define SLJIT_RETURN_ADDRESS_OFFSET 0
|
||||
#endif
|
||||
#endif /* SLJIT_RETURN_ADDRESS_OFFSET */
|
||||
|
||||
/***************************************************/
|
||||
|
@ -666,10 +644,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr);
|
|||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
|
||||
#define SLJIT_NUMBER_OF_REGISTERS 12
|
||||
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 9
|
||||
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 7
|
||||
#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 7
|
||||
#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 0
|
||||
#define SLJIT_LOCALS_OFFSET_BASE (compiler->locals_offset)
|
||||
#define SLJIT_LOCALS_OFFSET_BASE (8 * SSIZE_OF(sw))
|
||||
#define SLJIT_PREF_SHIFT_REG SLJIT_R2
|
||||
|
||||
#elif (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
|
||||
|
@ -683,7 +661,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr);
|
|||
#else /* _WIN64 */
|
||||
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 8
|
||||
#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 10
|
||||
#define SLJIT_LOCALS_OFFSET_BASE (4 * (sljit_s32)sizeof(sljit_sw))
|
||||
#define SLJIT_LOCALS_OFFSET_BASE (4 * SSIZE_OF(sw))
|
||||
#endif /* !_WIN64 */
|
||||
#define SLJIT_PREF_SHIFT_REG SLJIT_R3
|
||||
|
||||
|
@ -740,17 +718,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr);
|
|||
#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 8
|
||||
#endif
|
||||
|
||||
#elif (defined SLJIT_CONFIG_SPARC && SLJIT_CONFIG_SPARC)
|
||||
#elif (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV)
|
||||
|
||||
#define SLJIT_NUMBER_OF_REGISTERS 18
|
||||
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 14
|
||||
#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 14
|
||||
#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 0
|
||||
#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
|
||||
/* saved registers (16), return struct pointer (1), space for 6 argument words (1),
|
||||
4th double arg (2), double alignment (1). */
|
||||
#define SLJIT_LOCALS_OFFSET_BASE ((16 + 1 + 6 + 2 + 1) * (sljit_s32)sizeof(sljit_sw))
|
||||
#endif
|
||||
#define SLJIT_NUMBER_OF_REGISTERS 23
|
||||
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 12
|
||||
#define SLJIT_LOCALS_OFFSET_BASE 0
|
||||
#define SLJIT_NUMBER_OF_FLOAT_REGISTERS 30
|
||||
#define SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS 12
|
||||
|
||||
#elif (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
|
||||
|
||||
|
@ -806,7 +780,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr);
|
|||
#if (defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM) \
|
||||
|| (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) \
|
||||
|| (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) \
|
||||
|| (defined SLJIT_CONFIG_SPARC && SLJIT_CONFIG_SPARC) \
|
||||
|| (defined SLJIT_CONFIG_RISCV && SLJIT_CONFIG_RISCV) \
|
||||
|| (defined SLJIT_CONFIG_S390X && SLJIT_CONFIG_S390X)
|
||||
#define SLJIT_HAS_STATUS_FLAGS_STATE 1
|
||||
#endif
|
||||
|
|
|
@@ -152,9 +152,6 @@ static SLJIT_INLINE void apple_update_wx_flags(sljit_s32 enable_exec)
{
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 110000
pthread_jit_write_protect_np(enable_exec);
#elif defined(__clang__)
if (__builtin_available(macOS 11.0, *))
pthread_jit_write_protect_np(enable_exec);
#else
#error "Must target Big Sur or newer"
#endif /* BigSur */
815
thirdparty/pcre2/src/sljit/sljitLir.c
vendored
File diff suppressed because it is too large
800
thirdparty/pcre2/src/sljit/sljitLir.h
vendored
File diff suppressed because it is too large
1004
thirdparty/pcre2/src/sljit/sljitNativeARM_32.c
vendored
File diff suppressed because it is too large
399
thirdparty/pcre2/src/sljit/sljitNativeARM_64.c
vendored
@ -86,6 +86,7 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
|
|||
#define CSINC 0x9a800400
|
||||
#define EOR 0xca000000
|
||||
#define EORI 0xd2000000
|
||||
#define EXTR 0x93c00000
|
||||
#define FABS 0x1e60c000
|
||||
#define FADD 0x1e602800
|
||||
#define FCMP 0x1e602000
|
||||
|
@ -98,6 +99,7 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
|
|||
#define FSUB 0x1e603800
|
||||
#define LDRI 0xf9400000
|
||||
#define LDRI_F64 0xfd400000
|
||||
#define LDRI_POST 0xf8400400
|
||||
#define LDP 0xa9400000
|
||||
#define LDP_F64 0x6d400000
|
||||
#define LDP_POST 0xa8c00000
|
||||
|
@ -112,7 +114,9 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
|
|||
#define ORN 0xaa200000
|
||||
#define ORR 0xaa000000
|
||||
#define ORRI 0xb2000000
|
||||
#define RBIT 0xdac00000
|
||||
#define RET 0xd65f0000
|
||||
#define RORV 0x9ac02c00
|
||||
#define SBC 0xda000000
|
||||
#define SBFM 0x93000000
|
||||
#define SCVTF 0x9e620000
|
||||
|
@ -137,8 +141,6 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
|
|||
#define UDIV 0x9ac00800
|
||||
#define UMULH 0x9bc03c00
|
||||
|
||||
/* dest_reg is the absolute name of the register
|
||||
Useful for reordering instructions in the delay slot. */
|
||||
static sljit_s32 push_inst(struct sljit_compiler *compiler, sljit_ins ins)
|
||||
{
|
||||
sljit_ins *ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins));
|
||||
|
@ -296,8 +298,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
|
|||
}
|
||||
next_addr = compute_next_addr(label, jump, const_, put_label);
|
||||
}
|
||||
code_ptr ++;
|
||||
word_count ++;
|
||||
code_ptr++;
|
||||
word_count++;
|
||||
} while (buf_ptr < buf_end);
|
||||
|
||||
buf = buf->next;
|
||||
|
@ -391,6 +393,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
|
|||
#endif
|
||||
|
||||
case SLJIT_HAS_CLZ:
|
||||
case SLJIT_HAS_CTZ:
|
||||
case SLJIT_HAS_ROT:
|
||||
case SLJIT_HAS_CMOV:
|
||||
case SLJIT_HAS_PREFETCH:
|
||||
return 1;
|
||||
|
@ -631,6 +635,7 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
|
|||
switch (op) {
|
||||
case SLJIT_MUL:
|
||||
case SLJIT_CLZ:
|
||||
case SLJIT_CTZ:
|
||||
case SLJIT_ADDC:
|
||||
case SLJIT_SUBC:
|
||||
/* No form with immediate operand (except imm 0, which
|
||||
|
@ -701,36 +706,50 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
|
|||
FAIL_IF(push_inst(compiler, (inst_bits ^ inv_bits) | RD(dst) | RN(reg)));
|
||||
goto set_flags;
|
||||
case SLJIT_SHL:
|
||||
case SLJIT_MSHL:
|
||||
if (flags & ARG1_IMM)
|
||||
break;
|
||||
|
||||
if (flags & INT_OP) {
|
||||
imm &= 0x1f;
|
||||
FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1)
|
||||
| (((sljit_ins)-imm & 0x1f) << 16) | ((31 - (sljit_ins)imm) << 10)));
|
||||
}
|
||||
else {
|
||||
inst_bits = (((sljit_ins)-imm & 0x1f) << 16) | ((31 - (sljit_ins)imm) << 10);
|
||||
} else {
|
||||
imm &= 0x3f;
|
||||
FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | (1 << 22)
|
||||
| (((sljit_ins)-imm & 0x3f) << 16) | ((63 - (sljit_ins)imm) << 10)));
|
||||
inst_bits = ((sljit_ins)1 << 22) | (((sljit_ins)-imm & 0x3f) << 16) | ((63 - (sljit_ins)imm) << 10);
|
||||
}
|
||||
|
||||
FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | inst_bits));
|
||||
goto set_flags;
|
||||
case SLJIT_LSHR:
|
||||
case SLJIT_MLSHR:
|
||||
case SLJIT_ASHR:
|
||||
case SLJIT_MASHR:
|
||||
if (flags & ARG1_IMM)
|
||||
break;
|
||||
if (op == SLJIT_ASHR)
|
||||
|
||||
if (op >= SLJIT_ASHR)
|
||||
inv_bits |= 1 << 30;
|
||||
|
||||
if (flags & INT_OP) {
|
||||
imm &= 0x1f;
|
||||
FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1)
|
||||
| ((sljit_ins)imm << 16) | (31 << 10)));
|
||||
}
|
||||
else {
|
||||
inst_bits = ((sljit_ins)imm << 16) | (31 << 10);
|
||||
} else {
|
||||
imm &= 0x3f;
|
||||
FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1)
|
||||
| (1 << 22) | ((sljit_ins)imm << 16) | (63 << 10)));
|
||||
inst_bits = ((sljit_ins)1 << 22) | ((sljit_ins)imm << 16) | (63 << 10);
|
||||
}
|
||||
|
||||
FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(dst) | RN(arg1) | inst_bits));
|
||||
goto set_flags;
|
||||
case SLJIT_ROTL:
|
||||
case SLJIT_ROTR:
|
||||
if (flags & ARG1_IMM)
|
||||
break;
|
||||
|
||||
if (op == SLJIT_ROTL)
|
||||
imm = -imm;
|
||||
|
||||
imm &= (flags & INT_OP) ? 0x1f : 0x3f;
|
||||
return push_inst(compiler, (EXTR ^ (inv_bits | (inv_bits >> 9))) | RD(dst) | RN(arg1) | RM(arg1) | ((sljit_ins)imm << 10));
|
||||
default:
|
||||
SLJIT_UNREACHABLE();
|
||||
break;
|
||||
|
@ -796,6 +815,10 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
|
|||
case SLJIT_CLZ:
|
||||
SLJIT_ASSERT(arg1 == TMP_REG1);
|
||||
return push_inst(compiler, (CLZ ^ inv_bits) | RD(dst) | RN(arg2));
|
||||
case SLJIT_CTZ:
|
||||
SLJIT_ASSERT(arg1 == TMP_REG1);
|
||||
FAIL_IF(push_inst(compiler, (RBIT ^ inv_bits) | RD(dst) | RN(arg2)));
|
||||
return push_inst(compiler, (CLZ ^ inv_bits) | RD(dst) | RN(dst));
|
||||
case SLJIT_ADD:
|
||||
compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
|
||||
CHECK_FLAGS(1 << 29);
|
||||
|
@ -834,14 +857,23 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
|
|||
FAIL_IF(push_inst(compiler, (EOR ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
|
||||
break; /* Set flags. */
|
||||
case SLJIT_SHL:
|
||||
case SLJIT_MSHL:
|
||||
FAIL_IF(push_inst(compiler, (LSLV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
|
||||
break; /* Set flags. */
|
||||
case SLJIT_LSHR:
|
||||
case SLJIT_MLSHR:
|
||||
FAIL_IF(push_inst(compiler, (LSRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
|
||||
break; /* Set flags. */
|
||||
case SLJIT_ASHR:
|
||||
case SLJIT_MASHR:
|
||||
FAIL_IF(push_inst(compiler, (ASRV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2)));
|
||||
break; /* Set flags. */
|
||||
case SLJIT_ROTL:
|
||||
FAIL_IF(push_inst(compiler, (SUB ^ inv_bits) | RD(TMP_REG2) | RN(TMP_ZERO) | RM(arg2)));
|
||||
arg2 = TMP_REG2;
|
||||
/* fallthrough */
|
||||
case SLJIT_ROTR:
|
||||
return push_inst(compiler, (RORV ^ inv_bits) | RD(dst) | RN(arg1) | RM(arg2));
|
||||
default:
|
||||
SLJIT_UNREACHABLE();
|
||||
return SLJIT_SUCCESS;
|
||||
|
@ -895,21 +927,37 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 flags, s
|
|||
return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | ((sljit_ins)argw << 10));
|
||||
}
|
||||
|
||||
if (argw >= 0 && (argw & ((1 << shift) - 1)) == 0) {
|
||||
if ((argw >> shift) <= 0xfff)
|
||||
return push_inst(compiler, STRBI | type | RT(reg) | RN(arg) | ((sljit_ins)argw << (10 - shift)));
|
||||
if ((argw & ((1 << shift) - 1)) == 0) {
|
||||
if (argw >= 0) {
|
||||
if ((argw >> shift) <= 0xfff)
|
||||
return push_inst(compiler, STRBI | type | RT(reg) | RN(arg) | ((sljit_ins)argw << (10 - shift)));
|
||||
|
||||
if (argw <= 0xffffff) {
|
||||
FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | (((sljit_ins)argw >> 12) << 10)));
|
||||
if (argw <= 0xffffff) {
|
||||
FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | (((sljit_ins)argw >> 12) << 10)));
|
||||
|
||||
argw = ((argw & 0xfff) >> shift);
|
||||
argw = ((argw & 0xfff) >> shift);
|
||||
return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | ((sljit_ins)argw << 10));
|
||||
}
|
||||
} else if (argw < -256 && argw >= -0xfff000) {
|
||||
FAIL_IF(push_inst(compiler, SUBI | (1 << 22) | RD(tmp_reg) | RN(arg) | (((sljit_ins)(-argw + 0xfff) >> 12) << 10)));
|
||||
argw = ((0x1000 + argw) & 0xfff) >> shift;
|
||||
return push_inst(compiler, STRBI | type | RT(reg) | RN(tmp_reg) | ((sljit_ins)argw << 10));
|
||||
}
|
||||
}
|
||||
|
||||
if (argw <= 255 && argw >= -256)
|
||||
if (argw <= 0xff && argw >= -0x100)
|
||||
return push_inst(compiler, STURBI | type | RT(reg) | RN(arg) | (((sljit_ins)argw & 0x1ff) << 12));
|
||||
|
||||
if (argw >= 0) {
|
||||
if (argw <= 0xfff0ff && ((argw + 0x100) & 0xfff) <= 0x1ff) {
|
||||
FAIL_IF(push_inst(compiler, ADDI | (1 << 22) | RD(tmp_reg) | RN(arg) | (((sljit_ins)argw >> 12) << 10)));
|
||||
return push_inst(compiler, STURBI | type | RT(reg) | RN(tmp_reg) | (((sljit_ins)argw & 0x1ff) << 12));
|
||||
}
|
||||
} else if (argw >= -0xfff100 && ((-argw + 0xff) & 0xfff) <= 0x1ff) {
|
||||
FAIL_IF(push_inst(compiler, SUBI | (1 << 22) | RD(tmp_reg) | RN(arg) | (((sljit_ins)-argw >> 12) << 10)));
|
||||
return push_inst(compiler, STURBI | type | RT(reg) | RN(tmp_reg) | (((sljit_ins)argw & 0x1ff) << 12));
|
||||
}
|
||||
|
||||
FAIL_IF(load_immediate(compiler, tmp_reg, argw));
|
||||
|
||||
return push_inst(compiler, STRB | type | RT(reg) | RN(arg) | RM(tmp_reg));
|
||||
|
@ -924,14 +972,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
|
|||
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
|
||||
{
|
||||
sljit_s32 prev, fprev, saved_regs_size, i, tmp;
|
||||
sljit_s32 word_arg_count = 0;
|
||||
sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options);
|
||||
sljit_ins offs;
|
||||
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
|
||||
set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
|
||||
|
||||
saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2);
|
||||
saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 2);
|
||||
saved_regs_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, SSIZE_OF(f64));
|
||||
|
||||
local_size = (local_size + saved_regs_size + 0xf) & ~0xf;
|
||||
|
@ -954,7 +1002,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
|
|||
prev = -1;
|
||||
|
||||
tmp = SLJIT_S0 - saveds;
|
||||
for (i = SLJIT_S0; i > tmp; i--) {
|
||||
for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) {
|
||||
if (prev == -1) {
|
||||
prev = i;
|
||||
continue;
|
||||
|
@ -1003,23 +1051,27 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
|
|||
if (prev != -1)
|
||||
FAIL_IF(push_inst(compiler, STRI | RT(prev) | RN(SLJIT_SP) | (offs >> 5) | ((fprev == -1) ? (1 << 10) : 0)));
|
||||
|
||||
arg_types >>= SLJIT_ARG_SHIFT;
|
||||
|
||||
#ifdef _WIN32
|
||||
if (local_size > 4096)
|
||||
FAIL_IF(push_inst(compiler, SUBI | RD(SLJIT_SP) | RN(SLJIT_SP) | (1 << 10) | (1 << 22)));
|
||||
#endif /* _WIN32 */
|
||||
|
||||
tmp = 0;
|
||||
while (arg_types > 0) {
|
||||
if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) {
|
||||
if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
|
||||
FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S0 - tmp) | RN(TMP_ZERO) | RM(SLJIT_R0 + word_arg_count)));
|
||||
if (!(options & SLJIT_ENTER_REG_ARG)) {
|
||||
arg_types >>= SLJIT_ARG_SHIFT;
|
||||
saved_arg_count = 0;
|
||||
tmp = SLJIT_R0;
|
||||
|
||||
while (arg_types) {
|
||||
if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) {
|
||||
if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
|
||||
FAIL_IF(push_inst(compiler, ORR | RD(SLJIT_S0 - saved_arg_count) | RN(TMP_ZERO) | RM(tmp)));
|
||||
saved_arg_count++;
|
||||
}
|
||||
tmp++;
|
||||
}
|
||||
word_arg_count++;
|
||||
arg_types >>= SLJIT_ARG_SHIFT;
|
||||
}
|
||||
arg_types >>= SLJIT_ARG_SHIFT;
|
||||
}
|
||||
|
||||
#ifdef _WIN32
|
||||
|
@ -1100,26 +1152,34 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
|
|||
CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
|
||||
set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
|
||||
|
||||
saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 2);
|
||||
saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 2);
|
||||
saved_regs_size += GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, SSIZE_OF(f64));
|
||||
|
||||
compiler->local_size = (local_size + saved_regs_size + 0xf) & ~0xf;
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
||||
static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 is_return_to)
|
||||
{
|
||||
sljit_s32 local_size, prev, fprev, i, tmp;
|
||||
sljit_ins offs;
|
||||
|
||||
local_size = compiler->local_size;
|
||||
|
||||
if (local_size > 512 && local_size <= 512 + 496) {
|
||||
FAIL_IF(push_inst(compiler, LDP_POST | RT(TMP_FP) | RT2(TMP_LR)
|
||||
| RN(SLJIT_SP) | ((sljit_ins)(local_size - 512) << (15 - 3))));
|
||||
local_size = 512;
|
||||
} else
|
||||
FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
|
||||
if (!is_return_to) {
|
||||
if (local_size > 512 && local_size <= 512 + 496) {
|
||||
FAIL_IF(push_inst(compiler, LDP_POST | RT(TMP_FP) | RT2(TMP_LR)
|
||||
| RN(SLJIT_SP) | ((sljit_ins)(local_size - 512) << (15 - 3))));
|
||||
local_size = 512;
|
||||
} else
|
||||
FAIL_IF(push_inst(compiler, LDP | RT(TMP_FP) | RT2(TMP_LR) | RN(SLJIT_SP)));
|
||||
} else {
|
||||
if (local_size > 512 && local_size <= 512 + 248) {
|
||||
FAIL_IF(push_inst(compiler, LDRI_POST | RT(TMP_FP) | RN(SLJIT_SP) | ((sljit_ins)(local_size - 512) << 12)));
|
||||
local_size = 512;
|
||||
} else
|
||||
FAIL_IF(push_inst(compiler, LDRI | RT(TMP_FP) | RN(SLJIT_SP) | 0));
|
||||
}
|
||||
|
||||
if (local_size > 512) {
|
||||
local_size -= 512;
|
||||
|
@ -1137,7 +1197,7 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
|||
prev = -1;
|
||||
|
||||
tmp = SLJIT_S0 - compiler->saveds;
|
||||
for (i = SLJIT_S0; i > tmp; i--) {
|
||||
for (i = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options); i > tmp; i--) {
|
||||
if (prev == -1) {
|
||||
prev = i;
|
||||
continue;
|
||||
|
@ -1195,11 +1255,34 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler
|
|||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_return_void(compiler));
|
||||
|
||||
FAIL_IF(emit_stack_frame_release(compiler));
|
||||
FAIL_IF(emit_stack_frame_release(compiler, 0));
|
||||
|
||||
return push_inst(compiler, RET | RN(TMP_LR));
|
||||
}
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
|
||||
sljit_s32 src, sljit_sw srcw)
|
||||
{
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_return_to(compiler, src, srcw));
|
||||
|
||||
if (src & SLJIT_MEM) {
|
||||
ADJUST_LOCAL_OFFSET(src, srcw);
|
||||
FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
|
||||
src = TMP_REG1;
|
||||
srcw = 0;
|
||||
} else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
|
||||
FAIL_IF(push_inst(compiler, ORR | RD(TMP_REG1) | RN(TMP_ZERO) | RM(src)));
|
||||
src = TMP_REG1;
|
||||
srcw = 0;
|
||||
}
|
||||
|
||||
FAIL_IF(emit_stack_frame_release(compiler, 1));
|
||||
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
|
||||
}
|
||||
|
||||
/* --------------------------------------------------------------------- */
|
||||
/* Operators */
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
@@ -1392,13 +1475,84 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil
CHECK_ERROR();
CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
compiler->skip_checks = 1;
#endif
SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_op2(compiler, op, TMP_REG1, 0, src1, src1w, src2, src2w);
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src_dst,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
sljit_ins inv_bits, imm;
sljit_s32 is_left;
sljit_sw mask;

CHECK_ERROR();
CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w));

is_left = (GET_OPCODE(op) == SLJIT_SHL || GET_OPCODE(op) == SLJIT_MSHL);

if (src_dst == src1) {
SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_op2(compiler, (is_left ? SLJIT_ROTL : SLJIT_ROTR) | (op & SLJIT_32), src_dst, 0, src_dst, 0, src2, src2w);
}

ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);

inv_bits = (op & SLJIT_32) ? W_OP : 0;
mask = inv_bits ? 0x1f : 0x3f;

if (src2 & SLJIT_IMM) {
src2w &= mask;

if (src2w == 0)
return SLJIT_SUCCESS;
} else if (src2 & SLJIT_MEM) {
FAIL_IF(emit_op_mem(compiler, inv_bits ? INT_SIZE : WORD_SIZE, TMP_REG2, src2, src2w, TMP_REG2));
src2 = TMP_REG2;
}

if (src1 & SLJIT_MEM) {
FAIL_IF(emit_op_mem(compiler, inv_bits ? INT_SIZE : WORD_SIZE, TMP_REG1, src1, src1w, TMP_REG1));
src1 = TMP_REG1;
} else if (src1 & SLJIT_IMM) {
FAIL_IF(load_immediate(compiler, TMP_REG1, src1w));
src1 = TMP_REG1;
}

if (src2 & SLJIT_IMM) {
if (is_left)
src2w = (src2w ^ mask) + 1;

return push_inst(compiler, (EXTR ^ (inv_bits | (inv_bits >> 9))) | RD(src_dst)
| RN(is_left ? src_dst : src1) | RM(is_left ? src1 : src_dst) | ((sljit_ins)src2w << 10));
}

FAIL_IF(push_inst(compiler, ((is_left ? LSLV : LSRV) ^ inv_bits) | RD(src_dst) | RN(src_dst) | RM(src2)));

if (!(op & SLJIT_SHIFT_INTO_NON_ZERO)) {
/* Shift left/right by 1. */
if (is_left)
imm = (sljit_ins)(inv_bits ? ((1 << 16) | (31 << 10)) : ((1 << 16) | (63 << 10) | (1 << 22)));
else
imm = (sljit_ins)(inv_bits ? ((31 << 16) | (30 << 10)) : ((63 << 16) | (62 << 10) | (1 << 22)));

FAIL_IF(push_inst(compiler, (UBFM ^ inv_bits) | RD(TMP_REG1) | RN(src1) | imm));

/* Set imm to mask. */
imm = (sljit_ins)(inv_bits ? (4 << 10) : ((5 << 10) | (1 << 22)));
FAIL_IF(push_inst(compiler, (EORI ^ inv_bits) | RD(TMP_REG2) | RN(src2) | imm));

src1 = TMP_REG1;
} else
FAIL_IF(push_inst(compiler, (SUB ^ inv_bits) | RD(TMP_REG2) | RN(TMP_ZERO) | RM(src2)));

FAIL_IF(push_inst(compiler, ((is_left ? LSRV : LSLV) ^ inv_bits) | RD(TMP_REG1) | RN(src1) | RM(TMP_REG2)));
return push_inst(compiler, (ORR ^ inv_bits) | RD(src_dst) | RN(src_dst) | RM(TMP_REG1));
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src, sljit_sw srcw)
{
@@ -1550,10 +1704,9 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? INT_SIZE : WORD_SIZE), TMP_REG1, src, srcw, TMP_REG1);
src = TMP_REG1;
} else if (src & SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
srcw = (sljit_s32)srcw;
#endif

FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
src = TMP_REG1;
}
@@ -1699,11 +1852,15 @@ static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type)
{
switch (type) {
case SLJIT_EQUAL:
case SLJIT_EQUAL_F64:
case SLJIT_F_EQUAL:
case SLJIT_ORDERED_EQUAL:
case SLJIT_UNORDERED_OR_EQUAL: /* Not supported. */
return 0x1;

case SLJIT_NOT_EQUAL:
case SLJIT_NOT_EQUAL_F64:
case SLJIT_F_NOT_EQUAL:
case SLJIT_UNORDERED_OR_NOT_EQUAL:
case SLJIT_ORDERED_NOT_EQUAL: /* Not supported. */
return 0x0;

case SLJIT_CARRY:
@@ -1712,7 +1869,6 @@ static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type)
/* fallthrough */

case SLJIT_LESS:
case SLJIT_LESS_F64:
return 0x2;

case SLJIT_NOT_CARRY:
@@ -1721,27 +1877,33 @@ static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type)
/* fallthrough */

case SLJIT_GREATER_EQUAL:
case SLJIT_GREATER_EQUAL_F64:
return 0x3;

case SLJIT_GREATER:
case SLJIT_GREATER_F64:
case SLJIT_UNORDERED_OR_GREATER:
return 0x9;

case SLJIT_LESS_EQUAL:
case SLJIT_LESS_EQUAL_F64:
case SLJIT_F_LESS_EQUAL:
case SLJIT_ORDERED_LESS_EQUAL:
return 0x8;

case SLJIT_SIG_LESS:
case SLJIT_UNORDERED_OR_LESS:
return 0xa;

case SLJIT_SIG_GREATER_EQUAL:
case SLJIT_F_GREATER_EQUAL:
case SLJIT_ORDERED_GREATER_EQUAL:
return 0xb;

case SLJIT_SIG_GREATER:
case SLJIT_F_GREATER:
case SLJIT_ORDERED_GREATER:
return 0xd;

case SLJIT_SIG_LESS_EQUAL:
case SLJIT_UNORDERED_OR_LESS_EQUAL:
return 0xc;

case SLJIT_OVERFLOW:
@@ -1749,7 +1911,7 @@ static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type)
return 0x0;
/* fallthrough */

case SLJIT_UNORDERED_F64:
case SLJIT_UNORDERED:
return 0x7;

case SLJIT_NOT_OVERFLOW:
@@ -1757,9 +1919,16 @@ static sljit_ins get_cc(struct sljit_compiler *compiler, sljit_s32 type)
return 0x1;
/* fallthrough */

case SLJIT_ORDERED_F64:
case SLJIT_ORDERED:
return 0x6;

case SLJIT_F_LESS:
case SLJIT_ORDERED_LESS:
return 0x5;

case SLJIT_UNORDERED_OR_GREATER_EQUAL:
return 0x4;

default:
SLJIT_UNREACHABLE();
return 0xe;
@@ -1816,15 +1985,11 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));

if (type & SLJIT_CALL_RETURN) {
PTR_FAIL_IF(emit_stack_frame_release(compiler));
PTR_FAIL_IF(emit_stack_frame_release(compiler, 0));
type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
}

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
compiler->skip_checks = 1;
#endif

SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_jump(compiler, type);
}

@@ -1869,10 +2034,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi

CHECK_ERROR();
CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
ADJUST_LOCAL_OFFSET(src, srcw);

if (!(src & SLJIT_IMM)) {
if (src & SLJIT_MEM) {
ADJUST_LOCAL_OFFSET(src, srcw);
FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
src = TMP_REG1;
}
@@ -1897,28 +2062,24 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
SLJIT_UNUSED_ARG(arg_types);
CHECK_ERROR();
CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
ADJUST_LOCAL_OFFSET(src, srcw);

if (src & SLJIT_MEM) {
ADJUST_LOCAL_OFFSET(src, srcw);
FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
src = TMP_REG1;
}

if (type & SLJIT_CALL_RETURN) {
if (src >= SLJIT_FIRST_SAVED_REG && src <= SLJIT_S0) {
if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
FAIL_IF(push_inst(compiler, ORR | RD(TMP_REG1) | RN(TMP_ZERO) | RM(src)));
src = TMP_REG1;
}

FAIL_IF(emit_stack_frame_release(compiler));
FAIL_IF(emit_stack_frame_release(compiler, 0));
type = SLJIT_JUMP;
}

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
compiler->skip_checks = 1;
#endif

SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_ijump(compiler, type, src, srcw);
}

@@ -1933,7 +2094,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
ADJUST_LOCAL_OFFSET(dst, dstw);

cc = get_cc(compiler, type & 0xff);
cc = get_cc(compiler, type);
dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;

if (GET_OPCODE(op) < SLJIT_ADD) {
@@ -1974,22 +2135,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
sljit_s32 dst_reg,
sljit_s32 src, sljit_sw srcw)
{
sljit_ins inv_bits = (dst_reg & SLJIT_32) ? W_OP : 0;
sljit_ins inv_bits = (type & SLJIT_32) ? W_OP : 0;
sljit_ins cc;

CHECK_ERROR();
CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));

if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
if (dst_reg & SLJIT_32)
if (type & SLJIT_32)
srcw = (sljit_s32)srcw;
FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
src = TMP_REG1;
srcw = 0;
}

cc = get_cc(compiler, type & 0xff);
dst_reg &= ~SLJIT_32;
cc = get_cc(compiler, type & ~SLJIT_32);

return push_inst(compiler, (CSEL ^ inv_bits) | (cc << 12) | RD(dst_reg) | RN(dst_reg) | RM(src));
}
@@ -1998,11 +2158,82 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
sljit_s32 reg,
sljit_s32 mem, sljit_sw memw)
{
sljit_u32 sign = 0, inst;
sljit_u32 inst;

CHECK_ERROR();
CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));

if (!(reg & REG_PAIR_MASK))
return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);

ADJUST_LOCAL_OFFSET(mem, memw);

if (!(mem & REG_MASK)) {
FAIL_IF(load_immediate(compiler, TMP_REG1, memw & ~0x1f8));

mem = SLJIT_MEM1(TMP_REG1);
memw &= 0x1f8;
} else if (mem & OFFS_REG_MASK) {
FAIL_IF(push_inst(compiler, ADD | RD(TMP_REG1) | RN(mem & REG_MASK) | RM(OFFS_REG(mem)) | ((sljit_ins)(memw & 0x3) << 10)));

mem = SLJIT_MEM1(TMP_REG1);
memw = 0;
} else if ((memw & 0x7) != 0 || memw > 0x1f8 || memw < -0x200) {
inst = ADDI;

if (memw < 0) {
/* Remains negative for integer min. */
memw = -memw;
inst = SUBI;
} else if ((memw & 0x7) == 0 && memw <= 0x7ff0) {
if (!(type & SLJIT_MEM_STORE) && (mem & REG_MASK) == REG_PAIR_FIRST(reg)) {
FAIL_IF(push_inst(compiler, LDRI | RD(REG_PAIR_SECOND(reg)) | RN(mem & REG_MASK) | ((sljit_ins)memw << 7)));
return push_inst(compiler, LDRI | RD(REG_PAIR_FIRST(reg)) | RN(mem & REG_MASK) | ((sljit_ins)(memw + 0x8) << 7));
}

inst = (type & SLJIT_MEM_STORE) ? STRI : LDRI;

FAIL_IF(push_inst(compiler, inst | RD(REG_PAIR_FIRST(reg)) | RN(mem & REG_MASK) | ((sljit_ins)memw << 7)));
return push_inst(compiler, inst | RD(REG_PAIR_SECOND(reg)) | RN(mem & REG_MASK) | ((sljit_ins)(memw + 0x8) << 7));
}

if ((sljit_uw)memw <= 0xfff) {
FAIL_IF(push_inst(compiler, inst | RD(TMP_REG1) | RN(mem & REG_MASK) | ((sljit_ins)memw << 10)));
memw = 0;
} else if ((sljit_uw)memw <= 0xffffff) {
FAIL_IF(push_inst(compiler, inst | (1 << 22) | RD(TMP_REG1) | RN(mem & REG_MASK) | (((sljit_ins)memw >> 12) << 10)));

if ((memw & 0xe07) != 0) {
FAIL_IF(push_inst(compiler, inst | RD(TMP_REG1) | RN(TMP_REG1) | (((sljit_ins)memw & 0xfff) << 10)));
memw = 0;
} else {
memw &= 0xfff;
}
} else {
FAIL_IF(load_immediate(compiler, TMP_REG1, memw));
FAIL_IF(push_inst(compiler, (inst == ADDI ? ADD : SUB) | RD(TMP_REG1) | RN(mem & REG_MASK) | RM(TMP_REG1)));
memw = 0;
}

mem = SLJIT_MEM1(TMP_REG1);

if (inst == SUBI)
memw = -memw;
}

SLJIT_ASSERT((memw & 0x7) == 0 && memw <= 0x1f8 && memw >= -0x200);
return push_inst(compiler, ((type & SLJIT_MEM_STORE) ? STP : LDP) | RT(REG_PAIR_FIRST(reg)) | RT2(REG_PAIR_SECOND(reg)) | RN(mem & REG_MASK) | (sljit_ins)((memw & 0x3f8) << 12));
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 reg,
sljit_s32 mem, sljit_sw memw)
{
sljit_u32 sign = 0, inst;

CHECK_ERROR();
CHECK(check_sljit_emit_mem_update(compiler, type, reg, mem, memw));

if ((mem & OFFS_REG_MASK) || (memw > 255 || memw < -256))
return SLJIT_ERR_UNSUPPORTED;

@@ -2042,20 +2273,20 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
if (!(type & SLJIT_MEM_STORE))
inst |= sign ? 0x00800000 : 0x00400000;

if (type & SLJIT_MEM_PRE)
if (!(type & SLJIT_MEM_POST))
inst |= 0x800;

return push_inst(compiler, inst | RT(reg) | RN(mem & REG_MASK) | (sljit_ins)((memw & 0x1ff) << 12));
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem_update(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 freg,
sljit_s32 mem, sljit_sw memw)
{
sljit_u32 inst;

CHECK_ERROR();
CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
CHECK(check_sljit_emit_fmem_update(compiler, type, freg, mem, memw));

if ((mem & OFFS_REG_MASK) || (memw > 255 || memw < -256))
return SLJIT_ERR_UNSUPPORTED;
@@ -2071,7 +2302,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compil
if (!(type & SLJIT_MEM_STORE))
inst |= 0x00400000;

if (type & SLJIT_MEM_PRE)
if (!(type & SLJIT_MEM_POST))
inst |= 0x800;

return push_inst(compiler, inst | VT(freg) | RN(mem & REG_MASK) | (sljit_ins)((memw & 0x1ff) << 12));
860
thirdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c
vendored
File diff suppressed because it is too large
430
thirdparty/pcre2/src/sljit/sljitNativeMIPS_32.c
vendored
@ -38,383 +38,6 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_a
|
|||
return (imm & 0xffff) ? push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar) : SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
#define EMIT_LOGICAL(op_imm, op_norm) \
|
||||
if (flags & SRC2_IMM) { \
|
||||
if (op & SLJIT_SET_Z) \
|
||||
FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \
|
||||
if (!(flags & UNUSED_DEST)) \
|
||||
FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \
|
||||
} \
|
||||
else { \
|
||||
if (op & SLJIT_SET_Z) \
|
||||
FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \
|
||||
if (!(flags & UNUSED_DEST)) \
|
||||
FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | D(dst), DR(dst))); \
|
||||
}
|
||||
|
||||
#define EMIT_SHIFT(op_imm, op_v) \
|
||||
if (flags & SRC2_IMM) { \
|
||||
if (op & SLJIT_SET_Z) \
|
||||
FAIL_IF(push_inst(compiler, op_imm | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); \
|
||||
if (!(flags & UNUSED_DEST)) \
|
||||
FAIL_IF(push_inst(compiler, op_imm | T(src1) | D(dst) | SH_IMM(src2), DR(dst))); \
|
||||
} \
|
||||
else { \
|
||||
if (op & SLJIT_SET_Z) \
|
||||
FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); \
|
||||
if (!(flags & UNUSED_DEST)) \
|
||||
FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | D(dst), DR(dst))); \
|
||||
}
|
||||
|
||||
static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
|
||||
sljit_s32 dst, sljit_s32 src1, sljit_sw src2)
|
||||
{
|
||||
sljit_s32 is_overflow, is_carry, is_handled;
|
||||
|
||||
switch (GET_OPCODE(op)) {
|
||||
case SLJIT_MOV:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if (dst != src2)
|
||||
return push_inst(compiler, ADDU | S(src2) | TA(0) | D(dst), DR(dst));
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MOV_U8:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE))
|
||||
return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xff), DR(dst));
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MOV_S8:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
|
||||
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
|
||||
return push_inst(compiler, SEB | T(src2) | D(dst), DR(dst));
|
||||
#else /* SLJIT_MIPS_REV < 1 */
|
||||
FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(24), DR(dst)));
|
||||
return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(24), DR(dst));
|
||||
#endif /* SLJIT_MIPS_REV >= 1 */
|
||||
}
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MOV_U16:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE))
|
||||
return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xffff), DR(dst));
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MOV_S16:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
|
||||
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
|
||||
return push_inst(compiler, SEH | T(src2) | D(dst), DR(dst));
|
||||
#else /* SLJIT_MIPS_REV < 1 */
|
||||
FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(16), DR(dst)));
|
||||
return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(16), DR(dst));
|
||||
#endif /* SLJIT_MIPS_REV >= 1 */
|
||||
}
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_NOT:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
if (!(flags & UNUSED_DEST))
|
||||
FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | D(dst), DR(dst)));
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_CLZ:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
|
||||
if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, CLZ | S(src2) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
if (!(flags & UNUSED_DEST))
|
||||
FAIL_IF(push_inst(compiler, CLZ | S(src2) | T(dst) | D(dst), DR(dst)));
|
||||
#else /* SLJIT_MIPS_REV < 1 */
|
||||
if (SLJIT_UNLIKELY(flags & UNUSED_DEST)) {
|
||||
FAIL_IF(push_inst(compiler, SRL | T(src2) | DA(EQUAL_FLAG) | SH_IMM(31), EQUAL_FLAG));
|
||||
return push_inst(compiler, XORI | SA(EQUAL_FLAG) | TA(EQUAL_FLAG) | IMM(1), EQUAL_FLAG);
|
||||
}
|
||||
/* Nearly all instructions are unmovable in the following sequence. */
|
||||
FAIL_IF(push_inst(compiler, ADDU | S(src2) | TA(0) | D(TMP_REG1), DR(TMP_REG1)));
|
||||
/* Check zero. */
|
||||
FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG1) | TA(0) | IMM(5), UNMOVABLE_INS));
|
||||
FAIL_IF(push_inst(compiler, ORI | SA(0) | T(dst) | IMM(32), UNMOVABLE_INS));
|
||||
FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(dst) | IMM(-1), DR(dst)));
|
||||
/* Loop for searching the highest bit. */
|
||||
FAIL_IF(push_inst(compiler, ADDIU | S(dst) | T(dst) | IMM(1), DR(dst)));
|
||||
FAIL_IF(push_inst(compiler, BGEZ | S(TMP_REG1) | IMM(-2), UNMOVABLE_INS));
|
||||
FAIL_IF(push_inst(compiler, SLL | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(1), UNMOVABLE_INS));
|
||||
#endif /* SLJIT_MIPS_REV >= 1 */
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_ADD:
|
||||
is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW;
|
||||
is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
|
||||
|
||||
if (flags & SRC2_IMM) {
|
||||
if (is_overflow) {
|
||||
if (src2 >= 0)
|
||||
FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
else
|
||||
FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
}
|
||||
else if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
|
||||
|
||||
if (is_overflow || is_carry) {
|
||||
if (src2 >= 0)
|
||||
FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
|
||||
else {
|
||||
FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
|
||||
FAIL_IF(push_inst(compiler, OR | S(src1) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
}
|
||||
}
|
||||
/* dst may be the same as src1 or src2. */
|
||||
if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
|
||||
FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(src2), DR(dst)));
|
||||
}
|
||||
else {
|
||||
if (is_overflow)
|
||||
FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
else if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
|
||||
if (is_overflow || is_carry)
|
||||
FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
/* dst may be the same as src1 or src2. */
|
||||
if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
|
||||
FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | D(dst), DR(dst)));
|
||||
}
|
||||
|
||||
/* a + b >= a | b (otherwise, the carry should be set to 1). */
|
||||
if (is_overflow || is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
if (!is_overflow)
|
||||
return SLJIT_SUCCESS;
|
||||
FAIL_IF(push_inst(compiler, SLL | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1)));
|
||||
FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
return push_inst(compiler, SRL | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG);
|
||||
|
||||
case SLJIT_ADDC:
|
||||
is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
|
||||
|
||||
if (flags & SRC2_IMM) {
|
||||
if (is_carry) {
|
||||
if (src2 >= 0)
|
||||
FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
|
||||
else {
|
||||
FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
|
||||
FAIL_IF(push_inst(compiler, OR | S(src1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
}
|
||||
}
|
||||
FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(src2), DR(dst)));
|
||||
} else {
|
||||
if (is_carry)
|
||||
FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
/* dst may be the same as src1 or src2. */
|
||||
FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | D(dst), DR(dst)));
|
||||
}
|
||||
if (is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
|
||||
FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)));
|
||||
if (!is_carry)
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
/* Set ULESS_FLAG (dst == 0) && (OTHER_FLAG == 1). */
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
/* Set carry flag. */
|
||||
return push_inst(compiler, OR | SA(OTHER_FLAG) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG);
|
||||
|
||||
case SLJIT_SUB:
|
||||
if ((flags & SRC2_IMM) && src2 == SIMM_MIN) {
|
||||
FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
|
||||
src2 = TMP_REG2;
|
||||
flags &= ~SRC2_IMM;
|
||||
}
|
||||
|
||||
is_handled = 0;
|
||||
|
||||
if (flags & SRC2_IMM) {
|
||||
if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) {
|
||||
FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
|
||||
is_handled = 1;
|
||||
}
|
||||
else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) {
|
||||
FAIL_IF(push_inst(compiler, SLTI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
|
||||
is_handled = 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (!is_handled && GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) {
|
||||
is_handled = 1;
|
||||
|
||||
if (flags & SRC2_IMM) {
|
||||
FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
|
||||
src2 = TMP_REG2;
|
||||
flags &= ~SRC2_IMM;
|
||||
}
|
||||
|
||||
if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) {
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
}
|
||||
else if (GET_FLAG_TYPE(op) == SLJIT_GREATER || GET_FLAG_TYPE(op) == SLJIT_LESS_EQUAL)
|
||||
{
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
}
|
||||
else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) {
|
||||
FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
}
|
||||
else if (GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER || GET_FLAG_TYPE(op) == SLJIT_SIG_LESS_EQUAL)
|
||||
{
|
||||
FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
}
|
||||
}
|
||||
|
||||
if (is_handled) {
|
||||
if (flags & SRC2_IMM) {
|
||||
if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG));
|
||||
if (!(flags & UNUSED_DEST))
|
||||
return push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst));
|
||||
}
|
||||
else {
|
||||
if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
if (!(flags & UNUSED_DEST))
|
||||
return push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst));
|
||||
}
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW;
|
||||
is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
|
||||
|
||||
if (flags & SRC2_IMM) {
|
||||
if (is_overflow) {
|
||||
if (src2 >= 0)
|
||||
FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
else
|
||||
FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
}
|
||||
else if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG));
|
||||
|
||||
if (is_overflow || is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
|
||||
/* dst may be the same as src1 or src2. */
|
||||
if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
|
||||
FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst)));
|
||||
}
|
||||
else {
|
||||
if (is_overflow)
|
||||
FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
else if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
|
||||
if (is_overflow || is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
/* dst may be the same as src1 or src2. */
|
||||
if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
|
||||
FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst)));
|
||||
}
|
||||
|
||||
if (!is_overflow)
|
||||
return SLJIT_SUCCESS;
|
||||
FAIL_IF(push_inst(compiler, SLL | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1)));
|
||||
FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
return push_inst(compiler, SRL | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG);
|
||||
|
||||
case SLJIT_SUBC:
|
||||
if ((flags & SRC2_IMM) && src2 == SIMM_MIN) {
|
||||
FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
|
||||
src2 = TMP_REG2;
|
||||
flags &= ~SRC2_IMM;
|
||||
}
|
||||
|
||||
is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
|
||||
|
||||
if (flags & SRC2_IMM) {
|
||||
if (is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
|
||||
/* dst may be the same as src1 or src2. */
|
||||
FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst)));
|
||||
}
|
||||
else {
|
||||
if (is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
/* dst may be the same as src1 or src2. */
|
||||
FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst)));
|
||||
}
|
||||
|
||||
if (is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | D(TMP_REG1), DR(TMP_REG1)));
|
||||
|
||||
FAIL_IF(push_inst(compiler, SUBU | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)));
|
||||
return (is_carry) ? push_inst(compiler, OR | SA(EQUAL_FLAG) | T(TMP_REG1) | DA(OTHER_FLAG), OTHER_FLAG) : SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MUL:
|
||||
SLJIT_ASSERT(!(flags & SRC2_IMM));
|
||||
|
||||
if (GET_FLAG_TYPE(op) != SLJIT_OVERFLOW) {
|
||||
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
|
||||
return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst));
|
||||
#else /* SLJIT_MIPS_REV < 1 */
|
||||
FAIL_IF(push_inst(compiler, MULT | S(src1) | T(src2), MOVABLE_INS));
|
||||
return push_inst(compiler, MFLO | D(dst), DR(dst));
|
||||
#endif /* SLJIT_MIPS_REV >= 1 */
|
||||
}
|
||||
|
||||
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
|
||||
FAIL_IF(push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst)));
|
||||
FAIL_IF(push_inst(compiler, MUH | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
#else /* SLJIT_MIPS_REV < 6 */
|
||||
FAIL_IF(push_inst(compiler, MULT | S(src1) | T(src2), MOVABLE_INS));
|
||||
FAIL_IF(push_inst(compiler, MFHI | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
FAIL_IF(push_inst(compiler, MFLO | D(dst), DR(dst)));
|
||||
#endif /* SLJIT_MIPS_REV >= 6 */
|
||||
FAIL_IF(push_inst(compiler, SRA | T(dst) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG));
|
||||
return push_inst(compiler, SUBU | SA(EQUAL_FLAG) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG);
|
||||
|
||||
case SLJIT_AND:
|
||||
EMIT_LOGICAL(ANDI, AND);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_OR:
|
||||
EMIT_LOGICAL(ORI, OR);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_XOR:
|
||||
EMIT_LOGICAL(XORI, XOR);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_SHL:
|
||||
EMIT_SHIFT(SLL, SLLV);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_LSHR:
|
||||
EMIT_SHIFT(SRL, SRLV);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_ASHR:
|
||||
EMIT_SHIFT(SRA, SRAV);
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
SLJIT_UNREACHABLE();
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value)
|
||||
{
|
||||
FAIL_IF(push_inst(compiler, LUI | T(dst) | IMM(init_value >> 16), DR(dst)));
|
||||
|
@@ -573,8 +196,8 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
sljit_s32 arg_types)
{
struct sljit_jump *jump;
sljit_u32 extra_space = (sljit_u32)type;
sljit_ins ins;
sljit_u32 extra_space = 0;
sljit_ins ins = NOP;

CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
@@ -583,14 +206,23 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
PTR_FAIL_IF(!jump);
set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);

PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins, &extra_space));
if ((type & 0xff) != SLJIT_CALL_REG_ARG) {
extra_space = (sljit_u32)type;
PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins, &extra_space));
} else if (type & SLJIT_CALL_RETURN)
PTR_FAIL_IF(emit_stack_frame_release(compiler, 0, &ins));

SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);

PTR_FAIL_IF(emit_const(compiler, PIC_ADDR_REG, 0));
if (ins == NOP && compiler->delay_slot != UNMOVABLE_INS)
jump->flags |= IS_MOVABLE;

if (!(type & SLJIT_CALL_RETURN) || extra_space > 0) {
jump->flags |= IS_JAL | IS_CALL;
jump->flags |= IS_JAL;

if ((type & 0xff) != SLJIT_CALL_REG_ARG)
jump->flags |= IS_CALL;

PTR_FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
} else
PTR_FAIL_IF(push_inst(compiler, JR | S(PIC_ADDR_REG), UNMOVABLE_INS));
@@ -598,6 +230,9 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
jump->addr = compiler->size;
PTR_FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS));

/* Maximum number of instructions required for generating a constant. */
compiler->size += 2;

if (extra_space == 0)
return jump;

@@ -623,16 +258,37 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
CHECK_ERROR();
CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));

if (src & SLJIT_MEM) {
ADJUST_LOCAL_OFFSET(src, srcw);
FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw));
src = PIC_ADDR_REG;
srcw = 0;
}

if ((type & 0xff) == SLJIT_CALL_REG_ARG) {
if (type & SLJIT_CALL_RETURN) {
if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
FAIL_IF(push_inst(compiler, ADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG)));
src = PIC_ADDR_REG;
srcw = 0;
}

FAIL_IF(emit_stack_frame_release(compiler, 0, &ins));

if (ins != NOP)
FAIL_IF(push_inst(compiler, ins, MOVABLE_INS));
}

SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_ijump(compiler, type, src, srcw);
}

SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);

if (src & SLJIT_IMM)
FAIL_IF(load_immediate(compiler, DR(PIC_ADDR_REG), srcw));
else if (FAST_IS_REG(src))
else if (src != PIC_ADDR_REG)
FAIL_IF(push_inst(compiler, ADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG)));
else if (src & SLJIT_MEM) {
ADJUST_LOCAL_OFFSET(src, srcw);
FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw));
}

FAIL_IF(call_with_args(compiler, arg_types, &ins, &extra_space));
460
thirdparty/pcre2/src/sljit/sljitNativeMIPS_64.c
vendored
@ -118,421 +118,6 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_a
|
|||
return !(imm & 0xffff) ? SLJIT_SUCCESS : push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar);
|
||||
}
|
||||
|
||||
#define SELECT_OP(a, b) \
|
||||
(!(op & SLJIT_32) ? a : b)
|
||||
|
||||
#define EMIT_LOGICAL(op_imm, op_norm) \
|
||||
if (flags & SRC2_IMM) { \
|
||||
if (op & SLJIT_SET_Z) \
|
||||
FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \
|
||||
if (!(flags & UNUSED_DEST)) \
|
||||
FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \
|
||||
} \
|
||||
else { \
|
||||
if (op & SLJIT_SET_Z) \
|
||||
FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \
|
||||
if (!(flags & UNUSED_DEST)) \
|
||||
FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | D(dst), DR(dst))); \
|
||||
}
|
||||
|
||||
#define EMIT_SHIFT(op_dimm, op_dimm32, op_imm, op_dv, op_v) \
|
||||
if (flags & SRC2_IMM) { \
|
||||
if (src2 >= 32) { \
|
||||
SLJIT_ASSERT(!(op & SLJIT_32)); \
|
||||
ins = op_dimm32; \
|
||||
src2 -= 32; \
|
||||
} \
|
||||
else \
|
||||
ins = (op & SLJIT_32) ? op_imm : op_dimm; \
|
||||
if (op & SLJIT_SET_Z) \
|
||||
FAIL_IF(push_inst(compiler, ins | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); \
|
||||
if (!(flags & UNUSED_DEST)) \
|
||||
FAIL_IF(push_inst(compiler, ins | T(src1) | D(dst) | SH_IMM(src2), DR(dst))); \
|
||||
} \
|
||||
else { \
|
||||
ins = (op & SLJIT_32) ? op_v : op_dv; \
|
||||
if (op & SLJIT_SET_Z) \
|
||||
FAIL_IF(push_inst(compiler, ins | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); \
|
||||
if (!(flags & UNUSED_DEST)) \
|
||||
FAIL_IF(push_inst(compiler, ins | S(src2) | T(src1) | D(dst), DR(dst))); \
|
||||
}
|
||||
|
||||
static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
|
||||
sljit_s32 dst, sljit_s32 src1, sljit_sw src2)
|
||||
{
|
||||
sljit_ins ins;
|
||||
sljit_s32 is_overflow, is_carry, is_handled;
|
||||
|
||||
switch (GET_OPCODE(op)) {
|
||||
case SLJIT_MOV:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if (dst != src2)
|
||||
return push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src2) | TA(0) | D(dst), DR(dst));
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MOV_U8:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE))
|
||||
return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xff), DR(dst));
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MOV_S8:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
|
||||
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
|
||||
if (op & SLJIT_32)
|
||||
return push_inst(compiler, SEB | T(src2) | D(dst), DR(dst));
|
||||
#endif /* SLJIT_MIPS_REV >= 1 */
|
||||
FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(24), DR(dst)));
|
||||
return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(24), DR(dst));
|
||||
}
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MOV_U16:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE))
|
||||
return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xffff), DR(dst));
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MOV_S16:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
|
||||
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
|
||||
if (op & SLJIT_32)
|
||||
return push_inst(compiler, SEH | T(src2) | D(dst), DR(dst));
|
||||
#endif /* SLJIT_MIPS_REV >= 1 */
|
||||
FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(16), DR(dst)));
|
||||
return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(16), DR(dst));
|
||||
}
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MOV_U32:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM) && !(op & SLJIT_32));
|
||||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
|
||||
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 2)
|
||||
if (dst == src2)
|
||||
return push_inst(compiler, DINSU | T(src2) | SA(0) | (31 << 11) | (0 << 11), DR(dst));
|
||||
#endif /* SLJIT_MIPS_REV >= 2 */
|
||||
FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(0), DR(dst)));
|
||||
return push_inst(compiler, DSRL32 | T(dst) | D(dst) | SH_IMM(0), DR(dst));
|
||||
}
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MOV_S32:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM) && !(op & SLJIT_32));
|
||||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
|
||||
return push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(0), DR(dst));
|
||||
}
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_NOT:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
if (!(flags & UNUSED_DEST))
|
||||
FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | D(dst), DR(dst)));
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_CLZ:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
|
||||
if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
if (!(flags & UNUSED_DEST))
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | T(dst) | D(dst), DR(dst)));
|
||||
#else /* SLJIT_MIPS_REV < 1 */
|
||||
if (SLJIT_UNLIKELY(flags & UNUSED_DEST)) {
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DSRL32, SRL) | T(src2) | DA(EQUAL_FLAG) | SH_IMM(31), EQUAL_FLAG));
|
||||
return push_inst(compiler, XORI | SA(EQUAL_FLAG) | TA(EQUAL_FLAG) | IMM(1), EQUAL_FLAG);
|
||||
}
|
||||
/* Nearly all instructions are unmovable in the following sequence. */
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src2) | TA(0) | D(TMP_REG1), DR(TMP_REG1)));
|
||||
/* Check zero. */
|
||||
FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG1) | TA(0) | IMM(5), UNMOVABLE_INS));
|
||||
FAIL_IF(push_inst(compiler, ORI | SA(0) | T(dst) | IMM((op & SLJIT_32) ? 32 : 64), UNMOVABLE_INS));
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | T(dst) | IMM(-1), DR(dst)));
|
||||
/* Loop for searching the highest bit. */
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(dst) | T(dst) | IMM(1), DR(dst)));
|
||||
FAIL_IF(push_inst(compiler, BGEZ | S(TMP_REG1) | IMM(-2), UNMOVABLE_INS));
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DSLL, SLL) | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(1), UNMOVABLE_INS));
|
||||
#endif /* SLJIT_MIPS_REV >= 1 */
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_ADD:
|
||||
is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW;
|
||||
is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
|
||||
|
||||
if (flags & SRC2_IMM) {
|
||||
if (is_overflow) {
|
||||
if (src2 >= 0)
|
||||
FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
else
|
||||
FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
}
|
||||
else if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
|
||||
|
||||
if (is_overflow || is_carry) {
|
||||
if (src2 >= 0)
|
||||
FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
|
||||
else {
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
|
||||
FAIL_IF(push_inst(compiler, OR | S(src1) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
}
|
||||
}
|
||||
/* dst may be the same as src1 or src2. */
|
||||
if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst)));
|
||||
}
|
||||
else {
|
||||
if (is_overflow)
|
||||
FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
else if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
|
||||
if (is_overflow || is_carry)
|
||||
FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
/* dst may be the same as src1 or src2. */
|
||||
if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst)));
|
||||
}
|
||||
|
||||
/* a + b >= a | b (otherwise, the carry should be set to 1). */
|
||||
if (is_overflow || is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
if (!is_overflow)
|
||||
return SLJIT_SUCCESS;
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DSLL32, SLL) | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1)));
|
||||
FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
return push_inst(compiler, SELECT_OP(DSRL32, SRL) | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG);
|
||||
|
||||
case SLJIT_ADDC:
|
||||
is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
|
||||
|
||||
if (flags & SRC2_IMM) {
|
||||
if (is_carry) {
|
||||
if (src2 >= 0)
|
||||
FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
|
||||
else {
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
|
||||
FAIL_IF(push_inst(compiler, OR | S(src1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
}
|
||||
}
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst)));
|
||||
} else {
|
||||
if (is_carry)
|
||||
FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
/* dst may be the same as src1 or src2. */
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst)));
|
||||
}
|
||||
if (is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)));
|
||||
if (!is_carry)
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
/* Set ULESS_FLAG (dst == 0) && (OTHER_FLAG == 1). */
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
/* Set carry flag. */
|
||||
return push_inst(compiler, OR | SA(OTHER_FLAG) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG);
|
||||
|
||||
case SLJIT_SUB:
|
||||
if ((flags & SRC2_IMM) && src2 == SIMM_MIN) {
|
||||
FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
|
||||
src2 = TMP_REG2;
|
||||
flags &= ~SRC2_IMM;
|
||||
}
|
||||
|
||||
is_handled = 0;
|
||||
|
||||
if (flags & SRC2_IMM) {
|
||||
if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) {
|
||||
FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
|
||||
is_handled = 1;
|
||||
}
|
||||
else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) {
|
||||
FAIL_IF(push_inst(compiler, SLTI | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
|
||||
is_handled = 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (!is_handled && GET_FLAG_TYPE(op) >= SLJIT_LESS && GET_FLAG_TYPE(op) <= SLJIT_SIG_LESS_EQUAL) {
|
||||
is_handled = 1;
|
||||
|
||||
if (flags & SRC2_IMM) {
|
||||
FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
|
||||
src2 = TMP_REG2;
|
||||
flags &= ~SRC2_IMM;
|
||||
}
|
||||
|
||||
if (GET_FLAG_TYPE(op) == SLJIT_LESS || GET_FLAG_TYPE(op) == SLJIT_GREATER_EQUAL) {
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
}
|
||||
else if (GET_FLAG_TYPE(op) == SLJIT_GREATER || GET_FLAG_TYPE(op) == SLJIT_LESS_EQUAL)
|
||||
{
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
}
|
||||
else if (GET_FLAG_TYPE(op) == SLJIT_SIG_LESS || GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER_EQUAL) {
|
||||
FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
}
|
||||
else if (GET_FLAG_TYPE(op) == SLJIT_SIG_GREATER || GET_FLAG_TYPE(op) == SLJIT_SIG_LESS_EQUAL)
|
||||
{
|
||||
FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
}
|
||||
}
|
||||
|
||||
if (is_handled) {
|
||||
if (flags & SRC2_IMM) {
|
||||
if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG));
|
||||
if (!(flags & UNUSED_DEST))
|
||||
return push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst));
|
||||
}
|
||||
else {
|
||||
if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
if (!(flags & UNUSED_DEST))
|
||||
return push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst));
|
||||
}
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
is_overflow = GET_FLAG_TYPE(op) == SLJIT_OVERFLOW;
|
||||
is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
|
||||
|
||||
if (flags & SRC2_IMM) {
|
||||
if (is_overflow) {
|
||||
if (src2 >= 0)
|
||||
FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
else
|
||||
FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
}
|
||||
else if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG));
|
||||
|
||||
if (is_overflow || is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OTHER_FLAG) | IMM(src2), OTHER_FLAG));
|
||||
/* dst may be the same as src1 or src2. */
|
||||
if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst)));
|
||||
}
|
||||
else {
|
||||
if (is_overflow)
|
||||
FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
else if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
|
||||
if (is_overflow || is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
/* dst may be the same as src1 or src2. */
|
||||
if (!(flags & UNUSED_DEST) || (op & VARIABLE_FLAG_MASK))
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst)));
|
||||
}
|
||||
|
||||
if (!is_overflow)
|
||||
return SLJIT_SUCCESS;
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DSLL32, SLL) | TA(OTHER_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1)));
|
||||
FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(EQUAL_FLAG) | DA(OTHER_FLAG), OTHER_FLAG));
|
||||
if (op & SLJIT_SET_Z)
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
return push_inst(compiler, SELECT_OP(DSRL32, SRL) | TA(OTHER_FLAG) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG);
|
||||
|
||||
case SLJIT_SUBC:
|
||||
if ((flags & SRC2_IMM) && src2 == SIMM_MIN) {
|
||||
FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2)));
|
||||
src2 = TMP_REG2;
|
||||
flags &= ~SRC2_IMM;
|
||||
}
|
||||
|
||||
is_carry = GET_FLAG_TYPE(op) == GET_FLAG_TYPE(SLJIT_SET_CARRY);
|
||||
|
||||
if (flags & SRC2_IMM) {
|
||||
if (is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG));
|
||||
/* dst may be the same as src1 or src2. */
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst)));
|
||||
}
|
||||
else {
|
||||
if (is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
/* dst may be the same as src1 or src2. */
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst)));
|
||||
}
|
||||
|
||||
if (is_carry)
|
||||
FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OTHER_FLAG) | D(TMP_REG1), DR(TMP_REG1)));
|
||||
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(dst) | TA(OTHER_FLAG) | D(dst), DR(dst)));
|
||||
return (is_carry) ? push_inst(compiler, OR | SA(EQUAL_FLAG) | T(TMP_REG1) | DA(OTHER_FLAG), OTHER_FLAG) : SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MUL:
|
||||
SLJIT_ASSERT(!(flags & SRC2_IMM));
|
||||
|
||||
if (GET_FLAG_TYPE(op) != SLJIT_OVERFLOW) {
|
||||
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
|
||||
return push_inst(compiler, SELECT_OP(DMUL, MUL) | S(src1) | T(src2) | D(dst), DR(dst));
|
||||
#elif (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 1)
|
||||
if (op & SLJIT_32)
|
||||
return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst));
|
||||
FAIL_IF(push_inst(compiler, DMULT | S(src1) | T(src2), MOVABLE_INS));
|
||||
return push_inst(compiler, MFLO | D(dst), DR(dst));
|
||||
#else /* SLJIT_MIPS_REV < 1 */
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) | T(src2), MOVABLE_INS));
|
||||
return push_inst(compiler, MFLO | D(dst), DR(dst));
|
||||
#endif /* SLJIT_MIPS_REV >= 6 */
|
||||
}
|
||||
|
||||
#if (defined SLJIT_MIPS_REV && SLJIT_MIPS_REV >= 6)
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DMUL, MUL) | S(src1) | T(src2) | D(dst), DR(dst)));
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DMUH, MUH) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
#else /* SLJIT_MIPS_REV < 6 */
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) | T(src2), MOVABLE_INS));
|
||||
FAIL_IF(push_inst(compiler, MFHI | DA(EQUAL_FLAG), EQUAL_FLAG));
|
||||
FAIL_IF(push_inst(compiler, MFLO | D(dst), DR(dst)));
|
||||
#endif /* SLJIT_MIPS_REV >= 6 */
|
||||
FAIL_IF(push_inst(compiler, SELECT_OP(DSRA32, SRA) | T(dst) | DA(OTHER_FLAG) | SH_IMM(31), OTHER_FLAG));
|
||||
return push_inst(compiler, SELECT_OP(DSUBU, SUBU) | SA(EQUAL_FLAG) | TA(OTHER_FLAG) | DA(OTHER_FLAG), OTHER_FLAG);
|
||||
|
||||
case SLJIT_AND:
|
||||
EMIT_LOGICAL(ANDI, AND);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_OR:
|
||||
EMIT_LOGICAL(ORI, OR);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_XOR:
|
||||
EMIT_LOGICAL(XORI, XOR);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_SHL:
|
||||
EMIT_SHIFT(DSLL, DSLL32, SLL, DSLLV, SLLV);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_LSHR:
|
||||
EMIT_SHIFT(DSRL, DSRL32, SRL, DSRLV, SRLV);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_ASHR:
|
||||
EMIT_SHIFT(DSRA, DSRA32, SRA, DSRAV, SRAV);
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
SLJIT_UNREACHABLE();
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value)
|
||||
{
|
||||
FAIL_IF(push_inst(compiler, LUI | T(dst) | IMM(init_value >> 48), DR(dst)));
|
||||
|
@@ -653,14 +238,20 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
if (type & SLJIT_CALL_RETURN)
PTR_FAIL_IF(emit_stack_frame_release(compiler, 0, &ins));

PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins));
if ((type & 0xff) != SLJIT_CALL_REG_ARG)
PTR_FAIL_IF(call_with_args(compiler, arg_types, &ins));

SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);

PTR_FAIL_IF(emit_const(compiler, PIC_ADDR_REG, 0));
if (ins == NOP && compiler->delay_slot != UNMOVABLE_INS)
jump->flags |= IS_MOVABLE;

if (!(type & SLJIT_CALL_RETURN)) {
jump->flags |= IS_JAL | IS_CALL;
jump->flags |= IS_JAL;

if ((type & 0xff) != SLJIT_CALL_REG_ARG)
jump->flags |= IS_CALL;

PTR_FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS));
} else
PTR_FAIL_IF(push_inst(compiler, JR | S(PIC_ADDR_REG), UNMOVABLE_INS));
@@ -668,6 +259,8 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
jump->addr = compiler->size;
PTR_FAIL_IF(push_inst(compiler, ins, UNMOVABLE_INS));

/* Maximum number of instructions required for generating a constant. */
compiler->size += 6;
return jump;
}

@@ -680,16 +273,37 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
CHECK_ERROR();
CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));

if (src & SLJIT_MEM) {
ADJUST_LOCAL_OFFSET(src, srcw);
FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw));
src = PIC_ADDR_REG;
srcw = 0;
}

if ((type & 0xff) == SLJIT_CALL_REG_ARG) {
if (type & SLJIT_CALL_RETURN) {
if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
FAIL_IF(push_inst(compiler, DADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG)));
src = PIC_ADDR_REG;
srcw = 0;
}

FAIL_IF(emit_stack_frame_release(compiler, 0, &ins));

if (ins != NOP)
FAIL_IF(push_inst(compiler, ins, MOVABLE_INS));
}

SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_ijump(compiler, type, src, srcw);
}

SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2);

if (src & SLJIT_IMM)
FAIL_IF(load_immediate(compiler, DR(PIC_ADDR_REG), srcw));
else if (FAST_IS_REG(src))
else if (src != PIC_ADDR_REG)
FAIL_IF(push_inst(compiler, DADDU | S(src) | TA(0) | D(PIC_ADDR_REG), DR(PIC_ADDR_REG)));
else if (src & SLJIT_MEM) {
ADJUST_LOCAL_OFFSET(src, srcw);
FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, DR(PIC_ADDR_REG), src, srcw));
}

if (type & SLJIT_CALL_RETURN)
FAIL_IF(emit_stack_frame_release(compiler, 0, &ins));
1724
thirdparty/pcre2/src/sljit/sljitNativeMIPS_common.c
vendored
File diff suppressed because it is too large
92
thirdparty/pcre2/src/sljit/sljitNativePPC_32.c
vendored
|
@ -38,12 +38,15 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
|
|||
return (imm & 0xffff) ? push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)) : SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
/* Simplified mnemonics: clrlwi. */
|
||||
#define INS_CLEAR_LEFT(dst, src, from) \
|
||||
(RLWINM | S(src) | A(dst) | ((from) << 6) | (31 << 1))
|
||||
(RLWINM | S(src) | A(dst) | RLWI_MBE(from, 31))
|
||||
|
||||
static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
|
||||
sljit_s32 dst, sljit_s32 src1, sljit_s32 src2)
|
||||
{
|
||||
sljit_u32 imm;
|
||||
|
||||
switch (op) {
|
||||
case SLJIT_MOV:
|
||||
case SLJIT_MOV_U32:
|
||||
|
@ -90,6 +93,16 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
|
|||
SLJIT_ASSERT(src1 == TMP_REG1);
|
||||
return push_inst(compiler, CNTLZW | S(src2) | A(dst));
|
||||
|
||||
case SLJIT_CTZ:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1);
|
||||
FAIL_IF(push_inst(compiler, NEG | D(TMP_REG1) | A(src2)));
|
||||
FAIL_IF(push_inst(compiler, AND | S(src2) | A(dst) | B(TMP_REG1)));
|
||||
FAIL_IF(push_inst(compiler, CNTLZW | S(dst) | A(dst)));
|
||||
FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG1) | A(dst) | IMM(-32)));
|
||||
/* The highest bits are set, if dst < 32, zero otherwise. */
|
||||
FAIL_IF(push_inst(compiler, SRWI(27) | S(TMP_REG1) | A(TMP_REG1)));
|
||||
return push_inst(compiler, XOR | S(dst) | A(dst) | B(TMP_REG1));
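Editor's note, not part of the patch: this SLJIT_CTZ fallback builds count-trailing-zeros out of cntlzw alone. x & -x isolates the lowest set bit, cntlzw counts from the top, and the ADDI/SRWI/XOR tail maps that to 31 - clz for nonzero inputs while leaving 32 for a zero input. A portable C model of the same bit trick, with hypothetical helper names:

#include <assert.h>
#include <stdint.h>

/* Reference count-leading-zeros; stands in for cntlzw. */
static uint32_t clz32(uint32_t x)
{
    uint32_t n = 0;
    if (x == 0)
        return 32;
    while (!(x & 0x80000000u)) {
        x <<= 1;
        n++;
    }
    return n;
}

static uint32_t ctz32(uint32_t x)
{
    uint32_t dst = clz32(x & (0u - x));   /* NEG + AND + CNTLZW */
    uint32_t tmp = (dst - 32) >> 27;      /* ADDI -32 + SRWI(27): 31 if x != 0, 0 if x == 0 */
    return dst ^ tmp;                     /* XOR: 31 - dst for x != 0, 32 for x == 0 */
}

int main(void)
{
    assert(ctz32(0) == 32);
    assert(ctz32(1) == 0);
    assert(ctz32(0x80000000u) == 31);
    assert(ctz32(40) == 3);
    return 0;
}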
|
||||
|
||||
case SLJIT_ADD:
|
||||
if (flags & ALT_FORM1) {
|
||||
/* Setting XER SO is not enough, CR SO is also needed. */
|
||||
|
@ -103,12 +116,14 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
|
|||
if (flags & ALT_FORM3)
|
||||
return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm);
|
||||
|
||||
imm = compiler->imm;
|
||||
|
||||
if (flags & ALT_FORM4) {
|
||||
FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1))));
|
||||
FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((imm >> 16) & 0xffff) + ((imm >> 15) & 0x1))));
|
||||
src1 = dst;
|
||||
}
|
||||
|
||||
return push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff));
|
||||
return push_inst(compiler, ADDI | D(dst) | A(src1) | (imm & 0xffff));
|
||||
}
|
||||
if (flags & ALT_FORM3) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
|
@ -208,8 +223,10 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
|
|||
}
|
||||
if (flags & ALT_FORM3) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(compiler->imm)));
|
||||
return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16));
|
||||
imm = compiler->imm;
|
||||
|
||||
FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(imm)));
|
||||
return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(imm >> 16));
|
||||
}
|
||||
return push_inst(compiler, OR | RC(flags) | S(src1) | A(dst) | B(src2));
|
||||
|
||||
|
@ -224,34 +241,78 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
|
|||
}
|
||||
if (flags & ALT_FORM3) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(compiler->imm)));
|
||||
return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16));
|
||||
imm = compiler->imm;
|
||||
|
||||
FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(imm)));
|
||||
return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(imm >> 16));
|
||||
}
|
||||
return push_inst(compiler, XOR | RC(flags) | S(src1) | A(dst) | B(src2));
|
||||
|
||||
case SLJIT_SHL:
|
||||
case SLJIT_MSHL:
|
||||
if (flags & ALT_FORM1) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
compiler->imm &= 0x1f;
|
||||
return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11) | ((31 - compiler->imm) << 1));
|
||||
imm = compiler->imm & 0x1f;
|
||||
return push_inst(compiler, SLWI(imm) | RC(flags) | S(src1) | A(dst));
|
||||
}
|
||||
|
||||
if (op == SLJIT_MSHL) {
|
||||
FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | 0x1f));
|
||||
src2 = TMP_REG2;
|
||||
}
|
||||
|
||||
return push_inst(compiler, SLW | RC(flags) | S(src1) | A(dst) | B(src2));
|
||||
|
||||
case SLJIT_LSHR:
|
||||
case SLJIT_MLSHR:
|
||||
if (flags & ALT_FORM1) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
compiler->imm &= 0x1f;
|
||||
return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (((32 - compiler->imm) & 0x1f) << 11) | (compiler->imm << 6) | (31 << 1));
|
||||
imm = compiler->imm & 0x1f;
|
||||
/* Since imm can be 0, SRWI() cannot be used. */
|
||||
return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | RLWI_SH((32 - imm) & 0x1f) | RLWI_MBE(imm, 31));
|
||||
}
|
||||
|
||||
if (op == SLJIT_MLSHR) {
|
||||
FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | 0x1f));
|
||||
src2 = TMP_REG2;
|
||||
}
|
||||
|
||||
return push_inst(compiler, SRW | RC(flags) | S(src1) | A(dst) | B(src2));
|
||||
|
||||
case SLJIT_ASHR:
|
||||
case SLJIT_MASHR:
|
||||
if (flags & ALT_FORM1) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
compiler->imm &= 0x1f;
|
||||
return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11));
|
||||
imm = compiler->imm & 0x1f;
|
||||
return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (imm << 11));
|
||||
}
|
||||
|
||||
if (op == SLJIT_MASHR) {
|
||||
FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | 0x1f));
|
||||
src2 = TMP_REG2;
|
||||
}
|
||||
|
||||
return push_inst(compiler, SRAW | RC(flags) | S(src1) | A(dst) | B(src2));
|
||||
|
||||
case SLJIT_ROTL:
|
||||
case SLJIT_ROTR:
|
||||
if (flags & ALT_FORM1) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
imm = compiler->imm;
|
||||
|
||||
if (op == SLJIT_ROTR)
|
||||
imm = (sljit_u32)(-(sljit_s32)imm);
|
||||
|
||||
imm &= 0x1f;
|
||||
return push_inst(compiler, RLWINM | S(src1) | A(dst) | RLWI_SH(imm) | RLWI_MBE(0, 31));
|
||||
}
|
||||
|
||||
if (op == SLJIT_ROTR) {
|
||||
FAIL_IF(push_inst(compiler, SUBFIC | D(TMP_REG2) | A(src2) | 0));
|
||||
src2 = TMP_REG2;
|
||||
}
|
||||
|
||||
return push_inst(compiler, RLWNM | S(src1) | A(dst) | B(src2) | RLWI_MBE(0, 31));
|
||||
}
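Editor's note, not part of the patch: rlwinm and rlwnm only rotate left, so both forms of SLJIT_ROTR above are rewritten as a left rotate by the negated amount, (0 - n) & 31, which is exactly what the SUBFIC with a zero immediate produces. A small C model of that equivalence (helper names ours):

#include <assert.h>
#include <stdint.h>

static uint32_t rotl32(uint32_t x, uint32_t n)
{
    n &= 31;
    return (x << n) | (n ? (x >> (32 - n)) : 0);
}

/* Rotate right emitted as rotate left by the negated amount,
 * mirroring SUBFIC D(TMP_REG2), A(src2), 0 followed by rlwnm. */
static uint32_t rotr32(uint32_t x, uint32_t n)
{
    return rotl32(x, (0u - n) & 31);
}

int main(void)
{
    assert(rotr32(0x80000001u, 1) == 0xC0000000u);
    assert(rotr32(0x12345678u, 0) == 0x12345678u);
    assert(rotr32(0xABCDEF01u, 8) == 0x01ABCDEFu);
    return 0;
}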
|
||||
|
||||
SLJIT_UNREACHABLE();
|
||||
|
@ -277,8 +338,3 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta
|
|||
inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
|
||||
SLJIT_CACHE_FLUSH(inst, inst + 2);
|
||||
}
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
|
||||
{
|
||||
sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset);
|
||||
}
|
||||
|
|
166
thirdparty/pcre2/src/sljit/sljitNativePPC_64.c
vendored
|
@ -35,8 +35,9 @@
|
|||
#error "Must implement count leading zeroes"
|
||||
#endif
|
||||
|
||||
#define PUSH_RLDICR(reg, shift) \
|
||||
push_inst(compiler, RLDI(reg, reg, 63 - shift, shift, 1))
|
||||
/* Computes SLDI(63 - shift). */
|
||||
#define PUSH_SLDI_NEG(reg, shift) \
|
||||
push_inst(compiler, RLDICR | S(reg) | A(reg) | RLDI_SH(63 - shift) | RLDI_ME(shift))
|
||||
|
||||
static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm)
|
||||
{
|
||||
|
@ -66,14 +67,14 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
|
|||
if ((tmp & ~0xffff000000000000ul) == 0) {
|
||||
FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | (sljit_ins)(tmp >> 48)));
|
||||
shift += 15;
|
||||
return PUSH_RLDICR(reg, shift);
|
||||
return PUSH_SLDI_NEG(reg, shift);
|
||||
}
|
||||
|
||||
if ((tmp & ~0xffffffff00000000ul) == 0) {
|
||||
FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | (sljit_ins)(tmp >> 48)));
|
||||
FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(tmp >> 32)));
|
||||
shift += 31;
|
||||
return PUSH_RLDICR(reg, shift);
|
||||
return PUSH_SLDI_NEG(reg, shift);
|
||||
}
|
||||
|
||||
/* Cut out the 16 bit from immediate. */
|
||||
|
@ -82,13 +83,13 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
|
|||
|
||||
if (tmp2 <= 0xffff) {
|
||||
FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | (sljit_ins)(tmp >> 48)));
|
||||
FAIL_IF(PUSH_RLDICR(reg, shift));
|
||||
FAIL_IF(PUSH_SLDI_NEG(reg, shift));
|
||||
return push_inst(compiler, ORI | S(reg) | A(reg) | (sljit_ins)tmp2);
|
||||
}
|
||||
|
||||
if (tmp2 <= 0xffffffff) {
|
||||
FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48)));
|
||||
FAIL_IF(PUSH_RLDICR(reg, shift));
|
||||
FAIL_IF(PUSH_SLDI_NEG(reg, shift));
|
||||
FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | (sljit_ins)(tmp2 >> 16)));
|
||||
return (imm & 0xffff) ? push_inst(compiler, ORI | S(reg) | A(reg) | IMM(tmp2)) : SLJIT_SUCCESS;
|
||||
}
|
||||
|
@ -100,22 +101,23 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
|
|||
FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | (sljit_ins)(tmp >> 48)));
|
||||
shift2 += 15;
|
||||
shift += (63 - shift2);
|
||||
FAIL_IF(PUSH_RLDICR(reg, shift));
|
||||
FAIL_IF(PUSH_SLDI_NEG(reg, shift));
|
||||
FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | (sljit_ins)(tmp2 >> 48)));
|
||||
return PUSH_RLDICR(reg, shift2);
|
||||
return PUSH_SLDI_NEG(reg, shift2);
|
||||
}
|
||||
|
||||
/* The general version. */
|
||||
FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | (sljit_ins)((sljit_uw)imm >> 48)));
|
||||
FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm >> 32)));
|
||||
FAIL_IF(PUSH_RLDICR(reg, 31));
|
||||
FAIL_IF(PUSH_SLDI_NEG(reg, 31));
|
||||
FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | IMM(imm >> 16)));
|
||||
return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm));
|
||||
}
|
||||
|
||||
/* Simplified mnemonics: clrldi. */
|
||||
#define INS_CLEAR_LEFT(dst, src, from) \
|
||||
(RLDICL | S(src) | A(dst) | ((from) << 6) | (1 << 5))
|
||||
#undef PUSH_SLDI_NEG
|
||||
|
||||
#define CLRLDI(dst, src, n) \
|
||||
(RLDICL | S(src) | A(dst) | RLDI_SH(0) | RLDI_MB(n))
|
||||
|
||||
/* Sign extension for integer operations. */
|
||||
#define UN_EXTS() \
|
||||
|
@ -145,6 +147,8 @@ static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 reg,
|
|||
static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 flags,
|
||||
sljit_s32 dst, sljit_s32 src1, sljit_s32 src2)
|
||||
{
|
||||
sljit_u32 imm;
|
||||
|
||||
switch (op) {
|
||||
case SLJIT_MOV:
|
||||
case SLJIT_MOV_P:
|
||||
|
@ -159,7 +163,7 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
|
|||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
|
||||
if (op == SLJIT_MOV_S32)
|
||||
return push_inst(compiler, EXTSW | S(src2) | A(dst));
|
||||
return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 0));
|
||||
return push_inst(compiler, CLRLDI(dst, src2, 32));
|
||||
}
|
||||
else {
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
|
@ -172,7 +176,7 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
|
|||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
|
||||
if (op == SLJIT_MOV_S8)
|
||||
return push_inst(compiler, EXTSB | S(src2) | A(dst));
|
||||
return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 24));
|
||||
return push_inst(compiler, CLRLDI(dst, src2, 56));
|
||||
}
|
||||
else if ((flags & REG_DEST) && op == SLJIT_MOV_S8)
|
||||
return push_inst(compiler, EXTSB | S(src2) | A(dst));
|
||||
|
@ -187,7 +191,7 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
|
|||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
|
||||
if (op == SLJIT_MOV_S16)
|
||||
return push_inst(compiler, EXTSH | S(src2) | A(dst));
|
||||
return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 16));
|
||||
return push_inst(compiler, CLRLDI(dst, src2, 48));
|
||||
}
|
||||
else {
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
|
@ -201,22 +205,30 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
|
|||
|
||||
case SLJIT_CLZ:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1);
|
||||
if (flags & ALT_FORM1)
|
||||
return push_inst(compiler, CNTLZW | S(src2) | A(dst));
|
||||
return push_inst(compiler, CNTLZD | S(src2) | A(dst));
|
||||
return push_inst(compiler, ((flags & ALT_FORM1) ? CNTLZW : CNTLZD) | S(src2) | A(dst));
|
||||
|
||||
case SLJIT_CTZ:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1);
|
||||
FAIL_IF(push_inst(compiler, NEG | D(TMP_REG1) | A(src2)));
|
||||
FAIL_IF(push_inst(compiler, AND | S(src2) | A(dst) | B(TMP_REG1)));
|
||||
FAIL_IF(push_inst(compiler, ((flags & ALT_FORM1) ? CNTLZW : CNTLZD) | S(dst) | A(dst)));
|
||||
FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG1) | A(dst) | IMM((flags & ALT_FORM1) ? -32 : -64)));
|
||||
/* The highest bits are set, if dst < bit width, zero otherwise. */
|
||||
FAIL_IF(push_inst(compiler, ((flags & ALT_FORM1) ? SRWI(27) : SRDI(58)) | S(TMP_REG1) | A(TMP_REG1)));
|
||||
return push_inst(compiler, XOR | S(dst) | A(dst) | B(TMP_REG1));
|
||||
|
||||
case SLJIT_ADD:
|
||||
if (flags & ALT_FORM1) {
|
||||
if (flags & ALT_SIGN_EXT) {
|
||||
FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, src1, 32, 31, 1)));
|
||||
FAIL_IF(push_inst(compiler, SLDI(32) | S(src1) | A(TMP_REG1)));
|
||||
src1 = TMP_REG1;
|
||||
FAIL_IF(push_inst(compiler, RLDI(TMP_REG2, src2, 32, 31, 1)));
|
||||
FAIL_IF(push_inst(compiler, SLDI(32) | S(src2) | A(TMP_REG2)));
|
||||
src2 = TMP_REG2;
|
||||
}
|
||||
/* Setting XER SO is not enough, CR SO is also needed. */
|
||||
FAIL_IF(push_inst(compiler, ADD | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2)));
|
||||
if (flags & ALT_SIGN_EXT)
|
||||
return push_inst(compiler, RLDI(dst, dst, 32, 32, 0));
|
||||
return push_inst(compiler, SRDI(32) | S(dst) | A(dst));
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -227,12 +239,14 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
|
|||
if (flags & ALT_FORM3)
|
||||
return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm);
|
||||
|
||||
imm = compiler->imm;
|
||||
|
||||
if (flags & ALT_FORM4) {
|
||||
FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1))));
|
||||
FAIL_IF(push_inst(compiler, ADDIS | D(dst) | A(src1) | (((imm >> 16) & 0xffff) + ((imm >> 15) & 0x1))));
|
||||
src1 = dst;
|
||||
}
|
||||
|
||||
return push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff));
|
||||
return push_inst(compiler, ADDI | D(dst) | A(src1) | (imm & 0xffff));
|
||||
}
|
||||
if (flags & ALT_FORM3) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
|
@ -287,11 +301,11 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
|
|||
if (flags & ALT_FORM3) {
|
||||
if (flags & ALT_SIGN_EXT) {
|
||||
if (src1 != TMP_ZERO) {
|
||||
FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, src1, 32, 31, 1)));
|
||||
FAIL_IF(push_inst(compiler, SLDI(32) | S(src1) | A(TMP_REG1)));
|
||||
src1 = TMP_REG1;
|
||||
}
|
||||
if (src2 != TMP_ZERO) {
|
||||
FAIL_IF(push_inst(compiler, RLDI(TMP_REG2, src2, 32, 31, 1)));
|
||||
FAIL_IF(push_inst(compiler, SLDI(32) | S(src2) | A(TMP_REG2)));
|
||||
src2 = TMP_REG2;
|
||||
}
|
||||
}
|
||||
|
@ -303,7 +317,7 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
|
|||
FAIL_IF(push_inst(compiler, NEG | OE(ALT_SET_FLAGS) | RC(ALT_SET_FLAGS) | D(dst) | A(src2)));
|
||||
|
||||
if (flags & ALT_SIGN_EXT)
|
||||
return push_inst(compiler, RLDI(dst, dst, 32, 32, 0));
|
||||
return push_inst(compiler, SRDI(32) | S(dst) | A(dst));
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -362,8 +376,10 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
|
|||
}
|
||||
if (flags & ALT_FORM3) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(compiler->imm)));
|
||||
return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16));
|
||||
imm = compiler->imm;
|
||||
|
||||
FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(imm)));
|
||||
return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(imm >> 16));
|
||||
}
|
||||
return push_inst(compiler, OR | RC(flags) | S(src1) | A(dst) | B(src2));
|
||||
|
||||
|
@ -378,46 +394,105 @@ static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sl
|
|||
}
|
||||
if (flags & ALT_FORM3) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(compiler->imm)));
|
||||
return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16));
|
||||
imm = compiler->imm;
|
||||
|
||||
FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(imm)));
|
||||
return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(imm >> 16));
|
||||
}
|
||||
return push_inst(compiler, XOR | RC(flags) | S(src1) | A(dst) | B(src2));
|
||||
|
||||
case SLJIT_SHL:
|
||||
case SLJIT_MSHL:
|
||||
if (flags & ALT_FORM1) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
imm = compiler->imm;
|
||||
|
||||
if (flags & ALT_FORM2) {
|
||||
compiler->imm &= 0x1f;
|
||||
return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11) | ((31 - compiler->imm) << 1));
|
||||
imm &= 0x1f;
|
||||
return push_inst(compiler, SLWI(imm) | RC(flags) | S(src1) | A(dst));
|
||||
}
|
||||
compiler->imm &= 0x3f;
|
||||
return push_inst(compiler, RLDI(dst, src1, compiler->imm, 63 - compiler->imm, 1) | RC(flags));
|
||||
|
||||
imm &= 0x3f;
|
||||
return push_inst(compiler, SLDI(imm) | RC(flags) | S(src1) | A(dst));
|
||||
}
|
||||
|
||||
if (op == SLJIT_MSHL) {
|
||||
FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | ((flags & ALT_FORM2) ? 0x1f : 0x3f)));
|
||||
src2 = TMP_REG2;
|
||||
}
|
||||
|
||||
return push_inst(compiler, ((flags & ALT_FORM2) ? SLW : SLD) | RC(flags) | S(src1) | A(dst) | B(src2));
|
||||
|
||||
case SLJIT_LSHR:
|
||||
case SLJIT_MLSHR:
|
||||
if (flags & ALT_FORM1) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
imm = compiler->imm;
|
||||
|
||||
if (flags & ALT_FORM2) {
|
||||
compiler->imm &= 0x1f;
|
||||
return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (((32 - compiler->imm) & 0x1f) << 11) | (compiler->imm << 6) | (31 << 1));
|
||||
imm &= 0x1f;
|
||||
/* Since imm can be 0, SRWI() cannot be used. */
|
||||
return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | RLWI_SH((32 - imm) & 0x1f) | RLWI_MBE(imm, 31));
|
||||
}
|
||||
compiler->imm &= 0x3f;
|
||||
return push_inst(compiler, RLDI(dst, src1, 64 - compiler->imm, compiler->imm, 0) | RC(flags));
|
||||
|
||||
imm &= 0x3f;
|
||||
/* Since imm can be 0, SRDI() cannot be used. */
|
||||
return push_inst(compiler, RLDICL | RC(flags) | S(src1) | A(dst) | RLDI_SH((64 - imm) & 0x3f) | RLDI_MB(imm));
|
||||
}
|
||||
|
||||
if (op == SLJIT_MLSHR) {
|
||||
FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | ((flags & ALT_FORM2) ? 0x1f : 0x3f)));
|
||||
src2 = TMP_REG2;
|
||||
}
|
||||
|
||||
return push_inst(compiler, ((flags & ALT_FORM2) ? SRW : SRD) | RC(flags) | S(src1) | A(dst) | B(src2));
|
||||
|
||||
case SLJIT_ASHR:
|
||||
case SLJIT_MASHR:
|
||||
if (flags & ALT_FORM1) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
imm = compiler->imm;
|
||||
|
||||
if (flags & ALT_FORM2) {
|
||||
compiler->imm &= 0x1f;
|
||||
return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11));
|
||||
imm &= 0x1f;
|
||||
return push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (imm << 11));
|
||||
}
|
||||
compiler->imm &= 0x3f;
|
||||
return push_inst(compiler, SRADI | RC(flags) | S(src1) | A(dst) | ((compiler->imm & 0x1f) << 11) | ((compiler->imm & 0x20) >> 4));
|
||||
|
||||
imm &= 0x3f;
|
||||
return push_inst(compiler, SRADI | RC(flags) | S(src1) | A(dst) | RLDI_SH(imm));
|
||||
}
|
||||
|
||||
if (op == SLJIT_MASHR) {
|
||||
FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | ((flags & ALT_FORM2) ? 0x1f : 0x3f)));
|
||||
src2 = TMP_REG2;
|
||||
}
|
||||
|
||||
return push_inst(compiler, ((flags & ALT_FORM2) ? SRAW : SRAD) | RC(flags) | S(src1) | A(dst) | B(src2));
|
||||
|
||||
case SLJIT_ROTL:
|
||||
case SLJIT_ROTR:
|
||||
if (flags & ALT_FORM1) {
|
||||
SLJIT_ASSERT(src2 == TMP_REG2);
|
||||
imm = compiler->imm;
|
||||
|
||||
if (op == SLJIT_ROTR)
|
||||
imm = (sljit_u32)(-(sljit_s32)imm);
|
||||
|
||||
if (flags & ALT_FORM2) {
|
||||
imm &= 0x1f;
|
||||
return push_inst(compiler, RLWINM | S(src1) | A(dst) | RLWI_SH(imm) | RLWI_MBE(0, 31));
|
||||
}
|
||||
|
||||
imm &= 0x3f;
|
||||
return push_inst(compiler, RLDICL | S(src1) | A(dst) | RLDI_SH(imm));
|
||||
}
|
||||
|
||||
if (op == SLJIT_ROTR) {
|
||||
FAIL_IF(push_inst(compiler, SUBFIC | D(TMP_REG2) | A(src2) | 0));
|
||||
src2 = TMP_REG2;
|
||||
}
|
||||
|
||||
return push_inst(compiler, ((flags & ALT_FORM2) ? (RLWNM | RLWI_MBE(0, 31)) : (RLDCL | RLDI_MB(0))) | S(src1) | A(dst) | B(src2));
|
||||
}
|
||||
|
||||
SLJIT_UNREACHABLE();
|
||||
|
@ -483,7 +558,7 @@ static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_
|
|||
{
|
||||
FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(init_value >> 48)));
|
||||
FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value >> 32)));
|
||||
FAIL_IF(PUSH_RLDICR(reg, 31));
|
||||
FAIL_IF(push_inst(compiler, SLDI(32) | S(reg) | A(reg)));
|
||||
FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | IMM(init_value >> 16)));
|
||||
return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value));
|
||||
}
|
||||
|
@ -502,8 +577,3 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta
|
|||
inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
|
||||
SLJIT_CACHE_FLUSH(inst, inst + 5);
|
||||
}
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
|
||||
{
|
||||
sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset);
|
||||
}
|
||||
|
|
506
thirdparty/pcre2/src/sljit/sljitNativePPC_common.c
vendored
|
@ -203,8 +203,13 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
|
|||
#define OR (HI(31) | LO(444))
|
||||
#define ORI (HI(24))
|
||||
#define ORIS (HI(25))
|
||||
#define RLDICL (HI(30))
|
||||
#define RLDCL (HI(30) | LO(8))
|
||||
#define RLDICL (HI(30) | LO(0 << 1))
|
||||
#define RLDICR (HI(30) | LO(1 << 1))
|
||||
#define RLDIMI (HI(30) | LO(3 << 1))
|
||||
#define RLWIMI (HI(20))
|
||||
#define RLWINM (HI(21))
|
||||
#define RLWNM (HI(23))
|
||||
#define SLD (HI(31) | LO(27))
|
||||
#define SLW (HI(31) | LO(24))
|
||||
#define SRAD (HI(31) | LO(794))
|
||||
|
@ -233,9 +238,24 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
|
|||
#define SIMM_MIN (-0x8000)
|
||||
#define UIMM_MAX (0xffff)
|
||||
|
||||
#define RLDI(dst, src, sh, mb, type) \
|
||||
(HI(30) | S(src) | A(dst) | ((sljit_ins)(type) << 2) | (((sljit_ins)(sh) & 0x1f) << 11) \
|
||||
| (((sljit_ins)(sh) & 0x20) >> 4) | (((sljit_ins)(mb) & 0x1f) << 6) | ((sljit_ins)(mb) & 0x20))
|
||||
/* Shift helpers. */
|
||||
#define RLWI_SH(sh) ((sljit_ins)(sh) << 11)
|
||||
#define RLWI_MBE(mb, me) (((sljit_ins)(mb) << 6) | ((sljit_ins)(me) << 1))
|
||||
#define RLDI_SH(sh) ((((sljit_ins)(sh) & 0x1f) << 11) | (((sljit_ins)(sh) & 0x20) >> 4))
|
||||
#define RLDI_MB(mb) ((((sljit_ins)(mb) & 0x1f) << 6) | ((sljit_ins)(mb) & 0x20))
|
||||
#define RLDI_ME(me) RLDI_MB(me)
|
||||
|
||||
#define SLWI(shift) (RLWINM | RLWI_SH(shift) | RLWI_MBE(0, 31 - (shift)))
|
||||
#define SLDI(shift) (RLDICR | RLDI_SH(shift) | RLDI_ME(63 - (shift)))
|
||||
/* shift > 0 */
|
||||
#define SRWI(shift) (RLWINM | RLWI_SH(32 - (shift)) | RLWI_MBE((shift), 31))
|
||||
#define SRDI(shift) (RLDICL | RLDI_SH(64 - (shift)) | RLDI_MB(shift))
|
||||
|
||||
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
|
||||
#define SLWI_W(shift) SLWI(shift)
|
||||
#else /* !SLJIT_CONFIG_PPC_32 */
|
||||
#define SLWI_W(shift) SLDI(shift)
|
||||
#endif /* SLJIT_CONFIG_PPC_32 */
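Editor's note, not part of the patch: the RLWI_*/RLDI_* helpers above only pack the SH/MB/ME fields of PowerPC's rotate-and-mask instructions, so the simplified mnemonics slwi/srwi/sldi/srdi become one-line macros. A standalone C sketch of the 32-bit variants, reusing the field layout from the patch; HI() is assumed here to place the primary opcode in the top six bits, and the demo is illustrative only:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t sljit_ins;

/* Primary opcode and field packing, as in the patch (HI() assumed). */
#define HI(opcode)       ((sljit_ins)(opcode) << 26)
#define RLWINM           (HI(21))
#define RLWI_SH(sh)      ((sljit_ins)(sh) << 11)
#define RLWI_MBE(mb, me) (((sljit_ins)(mb) << 6) | ((sljit_ins)(me) << 1))

/* slwi rA,rS,n == rlwinm rA,rS,n,0,31-n ; srwi rA,rS,n == rlwinm rA,rS,32-n,n,31 */
#define SLWI(shift) (RLWINM | RLWI_SH(shift) | RLWI_MBE(0, 31 - (shift)))
#define SRWI(shift) (RLWINM | RLWI_SH(32 - (shift)) | RLWI_MBE((shift), 31))

int main(void)
{
    /* Register fields (S and A) are left zero; the generator ORs them in. */
    printf("slwi template, shift 3:  0x%08x\n", (unsigned)SLWI(3));
    printf("srwi template, shift 27: 0x%08x\n", (unsigned)SRWI(27));
    return 0;
}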
|
||||
|
||||
#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL)
|
||||
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_function_context(void** func_ptr, struct sljit_function_context* context, sljit_uw addr, void* func)
|
||||
|
@ -368,10 +388,10 @@ static SLJIT_INLINE void put_label_set(struct sljit_put_label *put_label)
|
|||
else {
|
||||
inst[0] = ORIS | S(TMP_ZERO) | A(reg) | IMM(addr >> 48);
|
||||
inst[1] = ORI | S(reg) | A(reg) | IMM((addr >> 32) & 0xffff);
|
||||
inst ++;
|
||||
inst++;
|
||||
}
|
||||
|
||||
inst[1] = RLDI(reg, reg, 32, 31, 1);
|
||||
inst[1] = SLDI(32) | S(reg) | A(reg);
|
||||
inst[2] = ORIS | S(reg) | A(reg) | IMM((addr >> 16) & 0xffff);
|
||||
inst += 2;
|
||||
}
|
||||
|
@ -379,7 +399,7 @@ static SLJIT_INLINE void put_label_set(struct sljit_put_label *put_label)
|
|||
inst[1] = ORI | S(reg) | A(reg) | IMM(addr & 0xffff);
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif /* SLJIT_CONFIG_PPC_64 */
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
|
||||
{
|
||||
|
@ -497,8 +517,8 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
|
|||
}
|
||||
next_addr = compute_next_addr(label, jump, const_, put_label);
|
||||
}
|
||||
code_ptr ++;
|
||||
word_count ++;
|
||||
code_ptr++;
|
||||
word_count++;
|
||||
} while (buf_ptr < buf_end);
|
||||
|
||||
buf = buf->next;
|
||||
|
@ -641,14 +661,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
|
|||
/* A saved register is set to a zero value. */
|
||||
case SLJIT_HAS_ZERO_REGISTER:
|
||||
case SLJIT_HAS_CLZ:
|
||||
case SLJIT_HAS_ROT:
|
||||
case SLJIT_HAS_PREFETCH:
|
||||
return 1;
|
||||
|
||||
case SLJIT_HAS_CTZ:
|
||||
return 2;
|
||||
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type)
|
||||
{
|
||||
return (type >= SLJIT_UNORDERED && type <= SLJIT_ORDERED_LESS_EQUAL);
|
||||
}
|
||||
|
||||
/* --------------------------------------------------------------------- */
|
||||
/* Entry, exit */
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
@ -715,13 +744,16 @@ ALT_FORM5 0x010000 */
|
|||
|
||||
#define STACK_MAX_DISTANCE (0x8000 - SSIZE_OF(sw) - LR_SAVE_OFFSET)
|
||||
|
||||
static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flags, sljit_s32 reg,
|
||||
sljit_s32 arg, sljit_sw argw, sljit_s32 tmp_reg);
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
|
||||
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
|
||||
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
|
||||
{
|
||||
sljit_s32 i, tmp, base, offset;
|
||||
sljit_s32 word_arg_count = 0;
|
||||
sljit_s32 saved_arg_count = 0;
|
||||
sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options);
|
||||
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
|
||||
sljit_s32 arg_count = 0;
|
||||
#endif
|
||||
|
@ -730,8 +762,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
|
|||
CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
|
||||
set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
|
||||
|
||||
local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1)
|
||||
local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 0)
|
||||
+ GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, sizeof(sljit_f64));
|
||||
|
||||
if (!(options & SLJIT_ENTER_REG_ARG))
|
||||
local_size += SSIZE_OF(sw);
|
||||
|
||||
local_size = (local_size + SLJIT_LOCALS_OFFSET + 15) & ~0xf;
|
||||
compiler->local_size = local_size;
|
||||
|
||||
|
@ -770,11 +806,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
|
|||
FAIL_IF(push_inst(compiler, STFD | FS(i) | A(base) | IMM(offset)));
|
||||
}
|
||||
|
||||
offset -= SSIZE_OF(sw);
|
||||
FAIL_IF(push_inst(compiler, STACK_STORE | S(TMP_ZERO) | A(base) | IMM(offset)));
|
||||
if (!(options & SLJIT_ENTER_REG_ARG)) {
|
||||
offset -= SSIZE_OF(sw);
|
||||
FAIL_IF(push_inst(compiler, STACK_STORE | S(TMP_ZERO) | A(base) | IMM(offset)));
|
||||
}
|
||||
|
||||
tmp = SLJIT_S0 - saveds;
|
||||
for (i = SLJIT_S0; i > tmp; i--) {
|
||||
for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) {
|
||||
offset -= SSIZE_OF(sw);
|
||||
FAIL_IF(push_inst(compiler, STACK_STORE | S(i) | A(base) | IMM(offset)));
|
||||
}
|
||||
|
@ -785,9 +823,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
|
|||
}
|
||||
|
||||
FAIL_IF(push_inst(compiler, STACK_STORE | S(0) | A(base) | IMM(local_size + LR_SAVE_OFFSET)));
|
||||
|
||||
if (options & SLJIT_ENTER_REG_ARG)
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
FAIL_IF(push_inst(compiler, ADDI | D(TMP_ZERO) | A(0) | 0));
|
||||
|
||||
arg_types >>= SLJIT_ARG_SHIFT;
|
||||
saved_arg_count = 0;
|
||||
|
||||
while (arg_types > 0) {
|
||||
if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) {
|
||||
|
@ -829,14 +872,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
|
|||
CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
|
||||
set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
|
||||
|
||||
local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1)
|
||||
local_size += GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 0)
|
||||
+ GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, sizeof(sljit_f64));
|
||||
|
||||
if (!(options & SLJIT_ENTER_REG_ARG))
|
||||
local_size += SSIZE_OF(sw);
|
||||
|
||||
compiler->local_size = (local_size + SLJIT_LOCALS_OFFSET + 15) & ~0xf;
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
||||
static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 is_return_to)
|
||||
{
|
||||
sljit_s32 i, tmp, base, offset;
|
||||
sljit_s32 local_size = compiler->local_size;
|
||||
|
@ -854,7 +900,8 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
|||
}
|
||||
|
||||
offset = local_size;
|
||||
FAIL_IF(push_inst(compiler, STACK_LOAD | S(0) | A(base) | IMM(offset + LR_SAVE_OFFSET)));
|
||||
if (!is_return_to)
|
||||
FAIL_IF(push_inst(compiler, STACK_LOAD | S(0) | A(base) | IMM(offset + LR_SAVE_OFFSET)));
|
||||
|
||||
tmp = SLJIT_FS0 - compiler->fsaveds;
|
||||
for (i = SLJIT_FS0; i > tmp; i--) {
|
||||
|
@ -867,11 +914,13 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
|||
FAIL_IF(push_inst(compiler, LFD | FS(i) | A(base) | IMM(offset)));
|
||||
}
|
||||
|
||||
offset -= SSIZE_OF(sw);
|
||||
FAIL_IF(push_inst(compiler, STACK_LOAD | S(TMP_ZERO) | A(base) | IMM(offset)));
|
||||
if (!(compiler->options & SLJIT_ENTER_REG_ARG)) {
|
||||
offset -= SSIZE_OF(sw);
|
||||
FAIL_IF(push_inst(compiler, STACK_LOAD | S(TMP_ZERO) | A(base) | IMM(offset)));
|
||||
}
|
||||
|
||||
tmp = SLJIT_S0 - compiler->saveds;
|
||||
for (i = SLJIT_S0; i > tmp; i--) {
|
||||
for (i = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options); i > tmp; i--) {
|
||||
offset -= SSIZE_OF(sw);
|
||||
FAIL_IF(push_inst(compiler, STACK_LOAD | S(i) | A(base) | IMM(offset)));
|
||||
}
|
||||
|
@ -881,7 +930,8 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
|||
FAIL_IF(push_inst(compiler, STACK_LOAD | S(i) | A(base) | IMM(offset)));
|
||||
}
|
||||
|
||||
push_inst(compiler, MTLR | S(0));
|
||||
if (!is_return_to)
|
||||
push_inst(compiler, MTLR | S(0));
|
||||
|
||||
if (local_size > 0)
|
||||
return push_inst(compiler, ADDI | D(SLJIT_SP) | A(base) | IMM(local_size));
|
||||
|
@ -890,17 +940,40 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
|||
return push_inst(compiler, OR | S(base) | A(SLJIT_SP) | B(base));
|
||||
}
|
||||
|
||||
#undef STACK_STORE
|
||||
#undef STACK_LOAD
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
|
||||
{
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_return_void(compiler));
|
||||
|
||||
FAIL_IF(emit_stack_frame_release(compiler));
|
||||
FAIL_IF(emit_stack_frame_release(compiler, 0));
|
||||
return push_inst(compiler, BLR);
|
||||
}
|
||||
|
||||
#undef STACK_STORE
|
||||
#undef STACK_LOAD
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
|
||||
sljit_s32 src, sljit_sw srcw)
|
||||
{
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_return_to(compiler, src, srcw));
|
||||
|
||||
if (src & SLJIT_MEM) {
|
||||
ADJUST_LOCAL_OFFSET(src, srcw);
|
||||
FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_CALL_REG, src, srcw, TMP_CALL_REG));
|
||||
src = TMP_CALL_REG;
|
||||
srcw = 0;
|
||||
} else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
|
||||
FAIL_IF(push_inst(compiler, OR | S(src) | A(TMP_CALL_REG) | B(src)));
|
||||
src = TMP_CALL_REG;
|
||||
srcw = 0;
|
||||
}
|
||||
|
||||
FAIL_IF(emit_stack_frame_release(compiler, 1));
|
||||
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
|
||||
}
|
||||
|
||||
/* --------------------------------------------------------------------- */
|
||||
/* Operators */
|
||||
|
@ -1066,7 +1139,6 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flag
|
|||
{
|
||||
sljit_ins inst;
|
||||
sljit_s32 offs_reg;
|
||||
sljit_sw high_short;
|
||||
|
||||
/* Should work when (arg & REG_MASK) == 0. */
|
||||
SLJIT_ASSERT(A(0) == 0);
|
||||
|
@ -1077,11 +1149,7 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flag
|
|||
offs_reg = OFFS_REG(arg);
|
||||
|
||||
if (argw != 0) {
|
||||
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
|
||||
FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(arg)) | A(tmp_reg) | ((sljit_ins)argw << 11) | ((31 - (sljit_ins)argw) << 1)));
|
||||
#else
|
||||
FAIL_IF(push_inst(compiler, RLDI(tmp_reg, OFFS_REG(arg), argw, 63 - argw, 1)));
|
||||
#endif
|
||||
FAIL_IF(push_inst(compiler, SLWI_W(argw) | S(OFFS_REG(arg)) | A(tmp_reg)));
|
||||
offs_reg = tmp_reg;
|
||||
}
|
||||
|
||||
|
@ -1089,7 +1157,7 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flag
|
|||
|
||||
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
|
||||
SLJIT_ASSERT(!(inst & INT_ALIGNED));
|
||||
#endif
|
||||
#endif /* SLJIT_CONFIG_PPC_64 */
|
||||
|
||||
return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & REG_MASK) | B(offs_reg));
|
||||
}
|
||||
|
@ -1104,36 +1172,24 @@ static sljit_s32 emit_op_mem(struct sljit_compiler *compiler, sljit_s32 inp_flag
|
|||
inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK];
|
||||
return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | B(tmp_reg));
|
||||
}
|
||||
#endif
|
||||
#endif /* SLJIT_CONFIG_PPC_64 */
|
||||
|
||||
if (argw <= SIMM_MAX && argw >= SIMM_MIN)
|
||||
return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | IMM(argw));
|
||||
|
||||
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
|
||||
if (argw <= 0x7fff7fffl && argw >= -0x80000000l) {
|
||||
#endif
|
||||
|
||||
high_short = (sljit_s32)(argw + ((argw & 0x8000) << 1)) & ~0xffff;
|
||||
|
||||
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
|
||||
SLJIT_ASSERT(high_short && high_short <= 0x7fffffffl && high_short >= -0x80000000l);
|
||||
#else
|
||||
SLJIT_ASSERT(high_short);
|
||||
#endif
|
||||
|
||||
FAIL_IF(push_inst(compiler, ADDIS | D(tmp_reg) | A(arg) | IMM(high_short >> 16)));
|
||||
#endif /* SLJIT_CONFIG_PPC_64 */
|
||||
FAIL_IF(push_inst(compiler, ADDIS | D(tmp_reg) | A(arg) | IMM((argw + 0x8000) >> 16)));
|
||||
return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(tmp_reg) | IMM(argw));
|
||||
|
||||
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
|
||||
}
|
||||
|
||||
/* The rest is PPC-64 only. */
|
||||
|
||||
FAIL_IF(load_immediate(compiler, tmp_reg, argw));
|
||||
|
||||
inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK];
|
||||
return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg) | B(tmp_reg));
|
||||
#endif
|
||||
#endif /* SLJIT_CONFIG_PPC_64 */
|
||||
}
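Editor's note, not part of the patch: the ADDIS form used above splits a 32-bit displacement into a high half plus a sign-extended 16-bit low half; the + 0x8000 bias compensates for that sign extension so the two parts add back to the original offset. A quick standalone C check of the arithmetic (not the emitter's code):

#include <assert.h>
#include <stdint.h>

/* Split an offset into the ADDIS immediate (high) and the
 * sign-extended 16-bit displacement used by the load/store. */
static void split_offset(int32_t argw, int32_t *high, int32_t *low)
{
    int32_t low16 = argw & 0xffff;

    *high = (argw + 0x8000) >> 16;                        /* IMM((argw + 0x8000) >> 16) */
    *low  = (low16 >= 0x8000) ? low16 - 0x10000 : low16;  /* IMM(argw), sign-extended */
}

int main(void)
{
    int32_t offsets[] = { 0x12345678, -0x1234, 0x7fff, 0x8000, -0x8000 };
    for (unsigned i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
        int32_t hi, lo;
        split_offset(offsets[i], &hi, &lo);
        assert((hi << 16) + lo == offsets[i]);  /* ADDIS adds hi << 16, the insn adds lo */
    }
    return 0;
}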
|
||||
|
||||
static sljit_s32 emit_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 input_flags,
|
||||
|
@ -1273,11 +1329,7 @@ static sljit_s32 emit_prefetch(struct sljit_compiler *compiler,
|
|||
if (srcw == 0)
|
||||
return push_inst(compiler, DCBT | A(src & REG_MASK) | B(OFFS_REG(src)));
|
||||
|
||||
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
|
||||
FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(src)) | A(TMP_REG1) | ((sljit_ins)srcw << 11) | ((31 - (sljit_ins)srcw) << 1)));
|
||||
#else
|
||||
FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, OFFS_REG(src), srcw, 63 - srcw, 1)));
|
||||
#endif
|
||||
FAIL_IF(push_inst(compiler, SLWI_W(srcw) | S(OFFS_REG(src)) | A(TMP_REG1)));
|
||||
return push_inst(compiler, DCBT | A(src & REG_MASK) | B(TMP_REG1));
|
||||
}
|
||||
|
||||
|
@ -1362,10 +1414,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
|
|||
return emit_op(compiler, SLJIT_NOT, flags, dst, dstw, TMP_REG1, 0, src, srcw);
|
||||
|
||||
case SLJIT_CLZ:
|
||||
case SLJIT_CTZ:
|
||||
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
|
||||
return emit_op(compiler, SLJIT_CLZ, flags | (!(op_flags & SLJIT_32) ? 0 : ALT_FORM1), dst, dstw, TMP_REG1, 0, src, srcw);
|
||||
return emit_op(compiler, op, flags | (!(op_flags & SLJIT_32) ? 0 : ALT_FORM1), dst, dstw, TMP_REG1, 0, src, srcw);
|
||||
#else
|
||||
return emit_op(compiler, SLJIT_CLZ, flags, dst, dstw, TMP_REG1, 0, src, srcw);
|
||||
return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -1626,7 +1679,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
|
|||
return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM2, dst, dstw, src2, src2w, TMP_REG2, 0);
|
||||
}
|
||||
}
|
||||
if (GET_OPCODE(op) != SLJIT_AND) {
|
||||
if (!HAS_FLAGS(op) && GET_OPCODE(op) != SLJIT_AND) {
|
||||
/* Unlike or and xor, the and resets unwanted bits as well. */
|
||||
if (TEST_UI_IMM(src2, src2w)) {
|
||||
compiler->imm = (sljit_ins)src2w;
|
||||
|
@ -1640,8 +1693,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
|
|||
return emit_op(compiler, GET_OPCODE(op), flags, dst, dstw, src1, src1w, src2, src2w);
|
||||
|
||||
case SLJIT_SHL:
|
||||
case SLJIT_MSHL:
|
||||
case SLJIT_LSHR:
|
||||
case SLJIT_MLSHR:
|
||||
case SLJIT_ASHR:
|
||||
case SLJIT_MASHR:
|
||||
case SLJIT_ROTL:
|
||||
case SLJIT_ROTR:
|
||||
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
|
||||
if (op & SLJIT_32)
|
||||
flags |= ALT_FORM2;
|
||||
|
@ -1663,10 +1721,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil
|
|||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
|
||||
|
||||
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|
||||
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
|
||||
compiler->skip_checks = 1;
|
||||
#endif
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_op2(compiler, op, TMP_REG2, 0, src1, src1w, src2, src2w);
|
||||
}
|
||||
|
||||
|
@ -1674,6 +1729,102 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil
|
|||
#undef TEST_SUB_FORM2
|
||||
#undef TEST_SUB_FORM3
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
|
||||
sljit_s32 src_dst,
|
||||
sljit_s32 src1, sljit_sw src1w,
|
||||
sljit_s32 src2, sljit_sw src2w)
|
||||
{
|
||||
sljit_s32 is_right;
|
||||
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
|
||||
sljit_s32 inp_flags = ((op & SLJIT_32) ? INT_DATA : WORD_DATA) | LOAD_DATA;
|
||||
sljit_sw bit_length = (op & SLJIT_32) ? 32 : 64;
|
||||
#else /* !SLJIT_CONFIG_PPC_64 */
|
||||
sljit_s32 inp_flags = WORD_DATA | LOAD_DATA;
|
||||
sljit_sw bit_length = 32;
|
||||
#endif /* SLJIT_CONFIG_PPC_64 */
|
||||
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w));
|
||||
|
||||
is_right = (GET_OPCODE(op) == SLJIT_LSHR || GET_OPCODE(op) == SLJIT_MLSHR);
|
||||
|
||||
if (src_dst == src1) {
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_op2(compiler, (is_right ? SLJIT_ROTR : SLJIT_ROTL) | (op & SLJIT_32), src_dst, 0, src_dst, 0, src2, src2w);
|
||||
}
|
||||
|
||||
ADJUST_LOCAL_OFFSET(src1, src1w);
|
||||
ADJUST_LOCAL_OFFSET(src2, src2w);
|
||||
|
||||
if (src2 & SLJIT_IMM) {
|
||||
src2w &= bit_length - 1;
|
||||
|
||||
if (src2w == 0)
|
||||
return SLJIT_SUCCESS;
|
||||
} else if (src2 & SLJIT_MEM) {
|
||||
FAIL_IF(emit_op_mem(compiler, inp_flags, TMP_REG2, src2, src2w, TMP_REG2));
|
||||
src2 = TMP_REG2;
|
||||
}
|
||||
|
||||
if (src1 & SLJIT_MEM) {
|
||||
FAIL_IF(emit_op_mem(compiler, inp_flags, TMP_REG1, src1, src1w, TMP_REG1));
|
||||
src1 = TMP_REG1;
|
||||
} else if (src1 & SLJIT_IMM) {
|
||||
FAIL_IF(load_immediate(compiler, TMP_REG1, src1w));
|
||||
src1 = TMP_REG1;
|
||||
}
|
||||
|
||||
if (src2 & SLJIT_IMM) {
|
||||
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
|
||||
if (!(op & SLJIT_32)) {
|
||||
if (is_right) {
|
||||
FAIL_IF(push_inst(compiler, SRDI(src2w) | S(src_dst) | A(src_dst)));
|
||||
return push_inst(compiler, RLDIMI | S(src1) | A(src_dst) | RLDI_SH(64 - src2w) | RLDI_MB(0));
|
||||
}
|
||||
|
||||
FAIL_IF(push_inst(compiler, SLDI(src2w) | S(src_dst) | A(src_dst)));
|
||||
/* Computes SRDI(64 - src2w). */
|
||||
FAIL_IF(push_inst(compiler, RLDICL | S(src1) | A(TMP_REG1) | RLDI_SH(src2w) | RLDI_MB(64 - src2w)));
|
||||
return push_inst(compiler, OR | S(src_dst) | A(src_dst) | B(TMP_REG1));
|
||||
}
|
||||
#endif /* SLJIT_CONFIG_PPC_64 */
|
||||
|
||||
if (is_right) {
|
||||
FAIL_IF(push_inst(compiler, SRWI(src2w) | S(src_dst) | A(src_dst)));
|
||||
return push_inst(compiler, RLWIMI | S(src1) | A(src_dst) | RLWI_SH(32 - src2w) | RLWI_MBE(0, src2w - 1));
|
||||
}
|
||||
|
||||
FAIL_IF(push_inst(compiler, SLWI(src2w) | S(src_dst) | A(src_dst)));
|
||||
return push_inst(compiler, RLWIMI | S(src1) | A(src_dst) | RLWI_SH(src2w) | RLWI_MBE(32 - src2w, 31));
|
||||
}
|
||||
|
||||
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
|
||||
if (!(op & SLJIT_32)) {
|
||||
if (GET_OPCODE(op) == SLJIT_MSHL || GET_OPCODE(op) == SLJIT_MLSHR) {
|
||||
FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | 0x3f));
|
||||
src2 = TMP_REG2;
|
||||
}
|
||||
|
||||
FAIL_IF(push_inst(compiler, (is_right ? SRD : SLD) | S(src_dst) | A(src_dst) | B(src2)));
|
||||
FAIL_IF(push_inst(compiler, (is_right ? SLDI(1) : SRDI(1)) | S(src1) | A(TMP_REG1)));
|
||||
FAIL_IF(push_inst(compiler, XORI | S(src2) | A(TMP_REG2) | 0x3f));
|
||||
FAIL_IF(push_inst(compiler, (is_right ? SLD : SRD) | S(TMP_REG1) | A(TMP_REG1) | B(TMP_REG2)));
|
||||
return push_inst(compiler, OR | S(src_dst) | A(src_dst) | B(TMP_REG1));
|
||||
}
|
||||
#endif /* SLJIT_CONFIG_PPC_64 */
|
||||
|
||||
if (GET_OPCODE(op) == SLJIT_MSHL || GET_OPCODE(op) == SLJIT_MLSHR) {
|
||||
FAIL_IF(push_inst(compiler, ANDI | S(src2) | A(TMP_REG2) | 0x1f));
|
||||
src2 = TMP_REG2;
|
||||
}
|
||||
|
||||
FAIL_IF(push_inst(compiler, (is_right ? SRW : SLW) | S(src_dst) | A(src_dst) | B(src2)));
|
||||
FAIL_IF(push_inst(compiler, (is_right ? SLWI(1) : SRWI(1)) | S(src1) | A(TMP_REG1)));
|
||||
FAIL_IF(push_inst(compiler, XORI | S(src2) | A(TMP_REG2) | 0x1f));
|
||||
FAIL_IF(push_inst(compiler, (is_right ? SLW : SRW) | S(TMP_REG1) | A(TMP_REG1) | B(TMP_REG2)));
|
||||
return push_inst(compiler, OR | S(src_dst) | A(src_dst) | B(TMP_REG1));
|
||||
}
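Editor's note, not part of the patch: the variable-amount path above never shifts by the full register width. src1 is pre-shifted by one and the remaining amount is src2 ^ 31 (or ^ 63), so the combined shift is bits - n even when n is 0, where a single shift would be undefined. A C model of the 32-bit funnel shift built the same way (function name and test values are ours):

#include <assert.h>
#include <stdint.h>

/* Shift 'lo' right by n and fill the vacated high bits from 'hi',
 * using the pre-shift-by-one trick so n == 0 is safe. */
static uint32_t funnel_shr32(uint32_t lo, uint32_t hi, uint32_t n)
{
    n &= 31;                                     /* ANDI ... 0x1f for the M* variants */
    uint32_t low_part  = lo >> n;                /* SRW */
    uint32_t high_part = (hi << 1) << (n ^ 31);  /* SLWI(1), then SLW by 31 - n */
    return low_part | high_part;                 /* OR */
}

int main(void)
{
    assert(funnel_shr32(0x80000000u, 0x00000001u, 4) == 0x18000000u);
    assert(funnel_shr32(0x12345678u, 0xABCDEF01u, 0) == 0x12345678u);
    return 0;
}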
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
|
||||
sljit_s32 src, sljit_sw srcw)
|
||||
{
|
||||
|
@ -1686,7 +1837,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp
|
|||
if (FAST_IS_REG(src))
|
||||
FAIL_IF(push_inst(compiler, MTLR | S(src)));
|
||||
else {
|
||||
FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw));
|
||||
FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG2, src, srcw, TMP_REG2));
|
||||
FAIL_IF(push_inst(compiler, MTLR | S(TMP_REG2)));
|
||||
}
|
||||
|
||||
|
@ -1782,11 +1933,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
|
|||
if (dst & OFFS_REG_MASK) {
|
||||
dstw &= 0x3;
|
||||
if (dstw) {
|
||||
#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
|
||||
FAIL_IF(push_inst(compiler, RLWINM | S(OFFS_REG(dst)) | A(TMP_REG1) | ((sljit_ins)dstw << 11) | ((31 - (sljit_ins)dstw) << 1)));
|
||||
#else
|
||||
FAIL_IF(push_inst(compiler, RLDI(TMP_REG1, OFFS_REG(dst), dstw, 63 - dstw, 1)));
|
||||
#endif
|
||||
FAIL_IF(push_inst(compiler, SLWI_W(dstw) | S(OFFS_REG(dst)) | A(TMP_REG1)));
|
||||
dstw = TMP_REG1;
|
||||
}
|
||||
else
|
||||
|
@ -1818,6 +1965,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
|
|||
if (src & SLJIT_IMM) {
|
||||
if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
|
||||
srcw = (sljit_s32)srcw;
|
||||
|
||||
FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
|
||||
src = TMP_REG1;
|
||||
}
|
||||
|
@ -1863,7 +2011,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
|
|||
The double precision format has exactly 53 bit precision, so the lower 32 bit represents
|
||||
the lower 32 bit of such value. The result of xor 2^31 is the same as adding 0x80000000
|
||||
to the input, which shifts it into the 0 - 0xffffffff range. To get the converted floating
|
||||
point value, we need to substract 2^53 + 2^31 from the constructed value. */
|
||||
point value, we need to subtract 2^53 + 2^31 from the constructed value. */
|
||||
FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG2) | A(0) | 0x4330));
|
||||
if (invert_sign)
|
||||
FAIL_IF(push_inst(compiler, XORIS | S(src) | A(TMP_REG1) | 0x8000));
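Editor's note, not part of the patch: the comment above describes the classic bit-packing conversion, where the integer (xor 0x80000000) becomes the low word of a double whose high word is 0x43300000, and subtracting the matching bias constant yields the converted value. A host-side C sketch of that idea, independent of the emitter's register choreography (helper name ours; assumes the usual IEEE-754 binary64 layout):

#include <assert.h>
#include <stdint.h>
#include <string.h>

static double s32_to_double(int32_t x)
{
    /* High word 0x43300000 (matching the ADDIS 0x4330 above),
     * low word is the input with its sign bit flipped (XORIS 0x8000). */
    uint64_t packed    = 0x4330000000000000ull | (uint64_t)((uint32_t)x ^ 0x80000000u);
    uint64_t bias_bits = 0x4330000080000000ull;
    double value, bias;

    memcpy(&value, &packed, sizeof(value));
    memcpy(&bias, &bias_bits, sizeof(bias));
    return value - bias;   /* subtract the constructed bias constant */
}

int main(void)
{
    assert(s32_to_double(0) == 0.0);
    assert(s32_to_double(123456789) == 123456789.0);
    assert(s32_to_double(-1) == -1.0);
    assert(s32_to_double(INT32_MIN) == -2147483648.0);
    return 0;
}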
|
||||
|
@ -1899,7 +2047,21 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
|
|||
src2 = TMP_FREG2;
|
||||
}
|
||||
|
||||
return push_inst(compiler, FCMPU | CRD(4) | FA(src1) | FB(src2));
|
||||
FAIL_IF(push_inst(compiler, FCMPU | CRD(4) | FA(src1) | FB(src2)));
|
||||
|
||||
switch (GET_FLAG_TYPE(op)) {
|
||||
case SLJIT_UNORDERED_OR_EQUAL:
|
||||
case SLJIT_ORDERED_NOT_EQUAL:
|
||||
return push_inst(compiler, CROR | ((4 + 2) << 21) | ((4 + 2) << 16) | ((4 + 3) << 11));
|
||||
case SLJIT_UNORDERED_OR_LESS:
|
||||
case SLJIT_ORDERED_GREATER_EQUAL:
|
||||
return push_inst(compiler, CROR | ((4 + 0) << 21) | ((4 + 0) << 16) | ((4 + 3) << 11));
|
||||
case SLJIT_UNORDERED_OR_GREATER:
|
||||
case SLJIT_ORDERED_LESS_EQUAL:
|
||||
return push_inst(compiler, CROR | ((4 + 1) << 21) | ((4 + 1) << 16) | ((4 + 3) << 11));
|
||||
}
|
||||
|
||||
return SLJIT_SUCCESS;
|
||||
}
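Editor's note, not part of the patch: the new CROR lines fold the unordered bit of CR field 4 into the lt/gt/eq bit, so composite conditions such as SLJIT_UNORDERED_OR_EQUAL can later be tested (or inverted) as a single CR bit. A small C model of that folding for the eq case; the struct and helper below are ours, not sljit API:

#include <assert.h>
#include <math.h>
#include <stdbool.h>

/* Model of CR field 4 after fcmpu: lt, gt, eq, un bits. */
struct fcmp_bits { bool lt, gt, eq, un; };

static struct fcmp_bits fcmpu(double a, double b)
{
    struct fcmp_bits cr = { false, false, false, false };
    if (isnan(a) || isnan(b))
        cr.un = true;           /* unordered */
    else if (a < b)
        cr.lt = true;
    else if (a > b)
        cr.gt = true;
    else
        cr.eq = true;
    return cr;
}

int main(void)
{
    struct fcmp_bits cr;

    /* 1.0 vs NaN: only the unordered bit is set by fcmpu. */
    cr = fcmpu(1.0, NAN);
    cr.eq = cr.eq || cr.un;     /* CROR 4*4+2, 4*4+2, 4*4+3 */
    assert(cr.eq);              /* SLJIT_UNORDERED_OR_EQUAL taken */

    /* 1.0 vs 2.0: ordered and not equal, eq stays clear. */
    cr = fcmpu(1.0, 2.0);
    cr.eq = cr.eq || cr.un;
    assert(!cr.eq);             /* SLJIT_ORDERED_NOT_EQUAL (inverted test) taken */
    return 0;
}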
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
|
||||
|
@ -2076,38 +2238,50 @@ static sljit_ins get_bo_bi_flags(struct sljit_compiler *compiler, sljit_s32 type
|
|||
case SLJIT_SIG_LESS_EQUAL:
|
||||
return (4 << 21) | (1 << 16);
|
||||
|
||||
case SLJIT_LESS_F64:
|
||||
return (12 << 21) | ((4 + 0) << 16);
|
||||
|
||||
case SLJIT_GREATER_EQUAL_F64:
|
||||
return (4 << 21) | ((4 + 0) << 16);
|
||||
|
||||
case SLJIT_GREATER_F64:
|
||||
return (12 << 21) | ((4 + 1) << 16);
|
||||
|
||||
case SLJIT_LESS_EQUAL_F64:
|
||||
return (4 << 21) | ((4 + 1) << 16);
|
||||
|
||||
case SLJIT_OVERFLOW:
|
||||
return (12 << 21) | (3 << 16);
|
||||
|
||||
case SLJIT_NOT_OVERFLOW:
|
||||
return (4 << 21) | (3 << 16);
|
||||
|
||||
case SLJIT_EQUAL_F64:
|
||||
case SLJIT_F_LESS:
|
||||
case SLJIT_ORDERED_LESS:
|
||||
case SLJIT_UNORDERED_OR_LESS:
|
||||
return (12 << 21) | ((4 + 0) << 16);
|
||||
|
||||
case SLJIT_F_GREATER_EQUAL:
|
||||
case SLJIT_ORDERED_GREATER_EQUAL:
|
||||
case SLJIT_UNORDERED_OR_GREATER_EQUAL:
|
||||
return (4 << 21) | ((4 + 0) << 16);
|
||||
|
||||
case SLJIT_F_GREATER:
|
||||
case SLJIT_ORDERED_GREATER:
|
||||
case SLJIT_UNORDERED_OR_GREATER:
|
||||
return (12 << 21) | ((4 + 1) << 16);
|
||||
|
||||
case SLJIT_F_LESS_EQUAL:
|
||||
case SLJIT_ORDERED_LESS_EQUAL:
|
||||
case SLJIT_UNORDERED_OR_LESS_EQUAL:
|
||||
return (4 << 21) | ((4 + 1) << 16);
|
||||
|
||||
case SLJIT_F_EQUAL:
|
||||
case SLJIT_ORDERED_EQUAL:
|
||||
case SLJIT_UNORDERED_OR_EQUAL:
|
||||
return (12 << 21) | ((4 + 2) << 16);
|
||||
|
||||
case SLJIT_NOT_EQUAL_F64:
|
||||
case SLJIT_F_NOT_EQUAL:
|
||||
case SLJIT_ORDERED_NOT_EQUAL:
|
||||
case SLJIT_UNORDERED_OR_NOT_EQUAL:
|
||||
return (4 << 21) | ((4 + 2) << 16);
|
||||
|
||||
case SLJIT_UNORDERED_F64:
|
||||
case SLJIT_UNORDERED:
|
||||
return (12 << 21) | ((4 + 3) << 16);
|
||||
|
||||
case SLJIT_ORDERED_F64:
|
||||
case SLJIT_ORDERED:
|
||||
return (4 << 21) | ((4 + 3) << 16);
|
||||
|
||||
default:
|
||||
SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_CDECL);
|
||||
SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL_REG_ARG);
|
||||
return (20 << 21);
|
||||
}
|
||||
}
|
||||
|
@ -2154,19 +2328,16 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
|
|||
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
|
||||
|
||||
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
|
||||
PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));
|
||||
if ((type & 0xff) != SLJIT_CALL_REG_ARG)
|
||||
PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));
|
||||
#endif
|
||||
|
||||
if (type & SLJIT_CALL_RETURN) {
|
||||
PTR_FAIL_IF(emit_stack_frame_release(compiler));
|
||||
PTR_FAIL_IF(emit_stack_frame_release(compiler, 0));
|
||||
type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
|
||||
}
|
||||
|
||||
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|
||||
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
|
||||
compiler->skip_checks = 1;
|
||||
#endif
|
||||
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_jump(compiler, type);
|
||||
}
|
||||
|
||||
|
@ -2177,7 +2348,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
|
|||
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_ijump(compiler, type, src, srcw));
|
||||
ADJUST_LOCAL_OFFSET(src, srcw);
|
||||
|
||||
if (FAST_IS_REG(src)) {
|
||||
#if (defined SLJIT_PASS_ENTRY_ADDR_TO_CALL && SLJIT_PASS_ENTRY_ADDR_TO_CALL)
|
||||
|
@ -2204,9 +2374,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
|
|||
|
||||
FAIL_IF(emit_const(compiler, TMP_CALL_REG, 0));
|
||||
src_r = TMP_CALL_REG;
|
||||
}
|
||||
else {
|
||||
FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_CALL_REG, 0, TMP_REG1, 0, src, srcw));
|
||||
} else {
|
||||
ADJUST_LOCAL_OFFSET(src, srcw);
|
||||
FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_CALL_REG, src, srcw, TMP_CALL_REG));
|
||||
src_r = TMP_CALL_REG;
|
||||
}
|
||||
|
||||
|
@ -2225,29 +2395,26 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
|
|||
|
||||
if (src & SLJIT_MEM) {
|
||||
ADJUST_LOCAL_OFFSET(src, srcw);
|
||||
FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_CALL_REG, 0, TMP_REG1, 0, src, srcw));
|
||||
FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_CALL_REG, src, srcw, TMP_CALL_REG));
|
||||
src = TMP_CALL_REG;
|
||||
}
|
||||
|
||||
if (type & SLJIT_CALL_RETURN) {
|
||||
if (src >= SLJIT_FIRST_SAVED_REG && src <= SLJIT_S0) {
|
||||
if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
|
||||
FAIL_IF(push_inst(compiler, OR | S(src) | A(TMP_CALL_REG) | B(src)));
|
||||
src = TMP_CALL_REG;
|
||||
}
|
||||
|
||||
FAIL_IF(emit_stack_frame_release(compiler));
|
||||
FAIL_IF(emit_stack_frame_release(compiler, 0));
|
||||
type = SLJIT_JUMP;
|
||||
}
|
||||
|
||||
#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
|
||||
FAIL_IF(call_with_args(compiler, arg_types, &src));
|
||||
#endif
|
||||
|
||||
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|
||||
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
|
||||
compiler->skip_checks = 1;
|
||||
if ((type & 0xff) != SLJIT_CALL_REG_ARG)
|
||||
FAIL_IF(call_with_args(compiler, arg_types, &src));
|
||||
#endif
|
||||
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_ijump(compiler, type, src, srcw);
|
||||
}
|
||||
|
||||
|
@ -2279,7 +2446,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
|
|||
bit = 0;
|
||||
from_xer = 0;
|
||||
|
||||
switch (type & 0xff) {
|
||||
switch (type) {
|
||||
case SLJIT_LESS:
|
||||
case SLJIT_SIG_LESS:
|
||||
break;
|
||||
|
@@ -2332,38 +2499,50 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
invert = (compiler->status_flags_state & SLJIT_CURRENT_FLAGS_ADD) != 0;
break;

case SLJIT_LESS_F64:
case SLJIT_F_LESS:
case SLJIT_ORDERED_LESS:
case SLJIT_UNORDERED_OR_LESS:
bit = 4 + 0;
break;

case SLJIT_GREATER_EQUAL_F64:
case SLJIT_F_GREATER_EQUAL:
case SLJIT_ORDERED_GREATER_EQUAL:
case SLJIT_UNORDERED_OR_GREATER_EQUAL:
bit = 4 + 0;
invert = 1;
break;

case SLJIT_GREATER_F64:
case SLJIT_F_GREATER:
case SLJIT_ORDERED_GREATER:
case SLJIT_UNORDERED_OR_GREATER:
bit = 4 + 1;
break;

case SLJIT_LESS_EQUAL_F64:
case SLJIT_F_LESS_EQUAL:
case SLJIT_ORDERED_LESS_EQUAL:
case SLJIT_UNORDERED_OR_LESS_EQUAL:
bit = 4 + 1;
invert = 1;
break;

case SLJIT_EQUAL_F64:
case SLJIT_F_EQUAL:
case SLJIT_ORDERED_EQUAL:
case SLJIT_UNORDERED_OR_EQUAL:
bit = 4 + 2;
break;

case SLJIT_NOT_EQUAL_F64:
case SLJIT_F_NOT_EQUAL:
case SLJIT_ORDERED_NOT_EQUAL:
case SLJIT_UNORDERED_OR_NOT_EQUAL:
bit = 4 + 2;
invert = 1;
break;

case SLJIT_UNORDERED_F64:
case SLJIT_UNORDERED:
bit = 4 + 3;
break;

case SLJIT_ORDERED_F64:
case SLJIT_ORDERED:
bit = 4 + 3;
invert = 1;
break;
@@ -2374,7 +2553,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
}

FAIL_IF(push_inst(compiler, (from_xer ? MFXER : MFCR) | D(reg)));
FAIL_IF(push_inst(compiler, RLWINM | S(reg) | A(reg) | ((1 + bit) << 11) | (31 << 6) | (31 << 1)));
/* Simplified mnemonics: extrwi. */
FAIL_IF(push_inst(compiler, RLWINM | S(reg) | A(reg) | RLWI_SH(1 + bit) | RLWI_MBE(31, 31)));

if (invert)
FAIL_IF(push_inst(compiler, XORI | S(reg) | A(reg) | 0x1));
@@ -2385,10 +2565,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
return emit_op_mem(compiler, input_flags, reg, dst, dstw, TMP_REG1);
}

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
compiler->skip_checks = 1;
#endif
SLJIT_SKIP_CHECKS(compiler);

if (dst & SLJIT_MEM)
return sljit_emit_op2(compiler, saved_op, dst, saved_dstw, TMP_REG1, 0, TMP_REG2, 0);
return sljit_emit_op2(compiler, saved_op, dst, 0, dst, 0, TMP_REG2, 0);
@@ -2404,15 +2582,94 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);;
}

#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)

#define EMIT_MEM_LOAD_IMM(inst, mem, memw) \
((sljit_s16)(memw) > SIMM_MAX - SSIZE_OF(sw))

#else /* !SLJIT_CONFIG_PPC_32 */

#define EMIT_MEM_LOAD_IMM(inst, mem, memw) \
((((inst) & INT_ALIGNED) && ((memw) & 0x3) != 0) \
|| ((sljit_s16)(memw) > SIMM_MAX - SSIZE_OF(sw)) \
|| ((memw) > 0x7fff7fffl || (memw) < -0x80000000l)) \

#endif /* SLJIT_CONFIG_PPC_32 */

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 reg,
sljit_s32 mem, sljit_sw memw)
{
sljit_ins inst;

CHECK_ERROR();
CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));

if (!(reg & REG_PAIR_MASK))
return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);

ADJUST_LOCAL_OFFSET(mem, memw);

inst = data_transfer_insts[WORD_DATA | ((type & SLJIT_MEM_STORE) ? 0 : LOAD_DATA)];

if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
memw &= 0x3;

if (memw != 0) {
FAIL_IF(push_inst(compiler, SLWI_W(memw) | S(OFFS_REG(mem)) | A(TMP_REG1)));
FAIL_IF(push_inst(compiler, ADD | D(TMP_REG1) | A(TMP_REG1) | B(mem & REG_MASK)));
} else
FAIL_IF(push_inst(compiler, ADD | D(TMP_REG1) | A(mem & REG_MASK) | B(OFFS_REG(mem))));

mem = TMP_REG1;
memw = 0;
} else {
if (EMIT_MEM_LOAD_IMM(inst, mem, memw)) {
if ((mem & REG_MASK) != 0) {
SLJIT_SKIP_CHECKS(compiler);
FAIL_IF(sljit_emit_op2(compiler, SLJIT_ADD, TMP_REG1, 0, mem & REG_MASK, 0, SLJIT_IMM, memw));
} else
FAIL_IF(load_immediate(compiler, TMP_REG1, memw));

memw = 0;
mem = TMP_REG1;
} else if (memw > SIMM_MAX || memw < SIMM_MIN) {
FAIL_IF(push_inst(compiler, ADDIS | D(TMP_REG1) | A(mem & REG_MASK) | IMM((memw + 0x8000) >> 16)));

memw &= 0xffff;
mem = TMP_REG1;
} else {
memw &= 0xffff;
mem &= REG_MASK;
}
}

SLJIT_ASSERT((memw >= 0 && memw <= SIMM_MAX - SSIZE_OF(sw)) || (memw >= 0x8000 && memw <= 0xffff));

#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
inst &= (sljit_ins)~INT_ALIGNED;
#endif /* SLJIT_CONFIG_PPC_64 */

if (!(type & SLJIT_MEM_STORE) && mem == REG_PAIR_FIRST(reg)) {
FAIL_IF(push_inst(compiler, inst | D(REG_PAIR_SECOND(reg)) | A(mem) | IMM(memw + SSIZE_OF(sw))));
return push_inst(compiler, inst | D(REG_PAIR_FIRST(reg)) | A(mem) | IMM(memw));
}

FAIL_IF(push_inst(compiler, inst | D(REG_PAIR_FIRST(reg)) | A(mem) | IMM(memw)));
return push_inst(compiler, inst | D(REG_PAIR_SECOND(reg)) | A(mem) | IMM(memw + SSIZE_OF(sw)));
}

#undef EMIT_MEM_LOAD_IMM

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem_update(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 reg,
sljit_s32 mem, sljit_sw memw)
{
sljit_s32 mem_flags;
sljit_ins inst;

CHECK_ERROR();
CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
CHECK(check_sljit_emit_mem_update(compiler, type, reg, mem, memw));

if (type & SLJIT_MEM_POST)
return SLJIT_ERR_UNSUPPORTED;
@@ -2500,7 +2757,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compile
return SLJIT_SUCCESS;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compiler, sljit_s32 type,
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem_update(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 freg,
sljit_s32 mem, sljit_sw memw)
{
@@ -2508,7 +2765,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compil
sljit_ins inst;

CHECK_ERROR();
CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
CHECK(check_sljit_emit_fmem_update(compiler, type, freg, mem, memw));

if (type & SLJIT_MEM_POST)
return SLJIT_ERR_UNSUPPORTED;
@@ -2587,3 +2844,8 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct slj

return put_label;
}

SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
{
sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset);
}
73 thirdparty/pcre2/src/sljit/sljitNativeRISCV_32.c vendored Normal file
@@ -0,0 +1,73 @@
/*
 * Stack-less Just-In-Time compiler
 *
 * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 * conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 * of conditions and the following disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_r, sljit_sw imm, sljit_s32 tmp_r)
{
SLJIT_UNUSED_ARG(tmp_r);
SLJIT_ASSERT(dst_r != tmp_r);

if (imm <= SIMM_MAX && imm >= SIMM_MIN)
return push_inst(compiler, ADDI | RD(dst_r) | RS1(TMP_ZERO) | IMM_I(imm));

if (imm & 0x800)
imm += 0x1000;

FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)(imm & ~0xfff)));

if ((imm & 0xfff) == 0)
return SLJIT_SUCCESS;

return push_inst(compiler, ADDI | RD(dst_r) | RS1(dst_r) | IMM_I(imm));
}

static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value, sljit_ins last_ins)
{
if ((init_value & 0x800) != 0)
init_value += 0x1000;

FAIL_IF(push_inst(compiler, LUI | RD(dst) | (sljit_ins)(init_value & ~0xfff)));
return push_inst(compiler, last_ins | RS1(dst) | IMM_I(init_value));
}

SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
{
sljit_ins *inst = (sljit_ins*)addr;
SLJIT_UNUSED_ARG(executable_offset);

if ((new_target & 0x800) != 0)
new_target += 0x1000;

SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 0);

SLJIT_ASSERT((inst[0] & 0x7f) == LUI);
inst[0] = (inst[0] & 0xfff) | (sljit_ins)((sljit_sw)new_target & ~0xfff);
SLJIT_ASSERT((inst[1] & 0x707f) == ADDI || (inst[1] & 0x707f) == JALR);
inst[1] = (inst[1] & 0xfffff) | IMM_I(new_target);

SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 1);
inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 5);
}
183 thirdparty/pcre2/src/sljit/sljitNativeRISCV_64.c vendored Normal file
@@ -0,0 +1,183 @@
/*
 * Stack-less Just-In-Time compiler
 *
 * Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 * conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 * of conditions and the following disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst_r, sljit_sw imm, sljit_s32 tmp_r)
{
sljit_sw high;

SLJIT_ASSERT(dst_r != tmp_r);

if (imm <= SIMM_MAX && imm >= SIMM_MIN)
return push_inst(compiler, ADDI | RD(dst_r) | RS1(TMP_ZERO) | IMM_I(imm));

if (imm <= 0x7fffffffl && imm >= S32_MIN) {
if (imm > S32_MAX) {
SLJIT_ASSERT((imm & 0x800) != 0);
FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)0x80000000u));
return push_inst(compiler, XORI | RD(dst_r) | RS1(dst_r) | IMM_I(imm));
}

if ((imm & 0x800) != 0)
imm += 0x1000;

FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)(imm & ~0xfff)));

if ((imm & 0xfff) == 0)
return SLJIT_SUCCESS;

return push_inst(compiler, ADDI | RD(dst_r) | RS1(dst_r) | IMM_I(imm));
}

/* Trailing zeroes could be used to produce shifted immediates. */

if (imm <= 0x7ffffffffffl && imm >= -0x80000000000l) {
high = imm >> 12;

if (imm & 0x800)
high = ~high;

if (high > S32_MAX) {
SLJIT_ASSERT((high & 0x800) != 0);
FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)0x80000000u));
FAIL_IF(push_inst(compiler, XORI | RD(dst_r) | RS1(dst_r) | IMM_I(high)));
} else {
if ((high & 0x800) != 0)
high += 0x1000;

FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)(high & ~0xfff)));

if ((high & 0xfff) != 0)
FAIL_IF(push_inst(compiler, ADDI | RD(dst_r) | RS1(dst_r) | IMM_I(high)));
}

FAIL_IF(push_inst(compiler, SLLI | RD(dst_r) | RS1(dst_r) | IMM_I(12)));

if ((imm & 0xfff) != 0)
return push_inst(compiler, XORI | RD(dst_r) | RS1(dst_r) | IMM_I(imm));

return SLJIT_SUCCESS;
}

high = imm >> 32;
imm = (sljit_s32)imm;

if ((imm & 0x80000000l) != 0)
high = ~high;

if (high <= 0x7ffff && high >= -0x80000) {
FAIL_IF(push_inst(compiler, LUI | RD(tmp_r) | (sljit_ins)(high << 12)));
high = 0x1000;
} else {
if ((high & 0x800) != 0)
high += 0x1000;

FAIL_IF(push_inst(compiler, LUI | RD(tmp_r) | (sljit_ins)(high & ~0xfff)));
high &= 0xfff;
}

if (imm <= SIMM_MAX && imm >= SIMM_MIN) {
FAIL_IF(push_inst(compiler, ADDI | RD(dst_r) | RS1(TMP_ZERO) | IMM_I(imm)));
imm = 0;
} else if (imm > S32_MAX) {
SLJIT_ASSERT((imm & 0x800) != 0);

FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)0x80000000u));
imm = 0x1000 | (imm & 0xfff);
} else {
if ((imm & 0x800) != 0)
imm += 0x1000;

FAIL_IF(push_inst(compiler, LUI | RD(dst_r) | (sljit_ins)(imm & ~0xfff)));
imm &= 0xfff;
}

if ((high & 0xfff) != 0)
FAIL_IF(push_inst(compiler, ADDI | RD(tmp_r) | RS1(tmp_r) | IMM_I(high)));

if (imm & 0x1000)
FAIL_IF(push_inst(compiler, XORI | RD(dst_r) | RS1(dst_r) | IMM_I(imm)));
else if (imm != 0)
FAIL_IF(push_inst(compiler, ADDI | RD(dst_r) | RS1(dst_r) | IMM_I(imm)));

FAIL_IF(push_inst(compiler, SLLI | RD(tmp_r) | RS1(tmp_r) | IMM_I((high & 0x1000) ? 20 : 32)));
return push_inst(compiler, XOR | RD(dst_r) | RS1(dst_r) | RS2(tmp_r));
}

static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value, sljit_ins last_ins)
{
sljit_sw high;

if ((init_value & 0x800) != 0)
init_value += 0x1000;

high = init_value >> 32;

if ((init_value & 0x80000000l) != 0)
high = ~high;

if ((high & 0x800) != 0)
high += 0x1000;

FAIL_IF(push_inst(compiler, LUI | RD(TMP_REG3) | (sljit_ins)(high & ~0xfff)));
FAIL_IF(push_inst(compiler, ADDI | RD(TMP_REG3) | RS1(TMP_REG3) | IMM_I(high)));
FAIL_IF(push_inst(compiler, LUI | RD(dst) | (sljit_ins)(init_value & ~0xfff)));
FAIL_IF(push_inst(compiler, SLLI | RD(TMP_REG3) | RS1(TMP_REG3) | IMM_I(32)));
FAIL_IF(push_inst(compiler, XOR | RD(dst) | RS1(dst) | RS2(TMP_REG3)));
return push_inst(compiler, last_ins | RS1(dst) | IMM_I(init_value));
}

SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
{
sljit_ins *inst = (sljit_ins*)addr;
sljit_sw high;
SLJIT_UNUSED_ARG(executable_offset);

if ((new_target & 0x800) != 0)
new_target += 0x1000;

high = (sljit_sw)new_target >> 32;

if ((new_target & 0x80000000l) != 0)
high = ~high;

if ((high & 0x800) != 0)
high += 0x1000;

SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 0);

SLJIT_ASSERT((inst[0] & 0x7f) == LUI);
inst[0] = (inst[0] & 0xfff) | (sljit_ins)(high & ~0xfff);
SLJIT_ASSERT((inst[1] & 0x707f) == ADDI);
inst[1] = (inst[1] & 0xfffff) | IMM_I(high);
SLJIT_ASSERT((inst[2] & 0x7f) == LUI);
inst[2] = (inst[2] & 0xfff) | (sljit_ins)((sljit_sw)new_target & ~0xfff);
SLJIT_ASSERT((inst[5] & 0x707f) == ADDI || (inst[5] & 0x707f) == JALR);
inst[5] = (inst[5] & 0xfffff) | IMM_I(new_target);
SLJIT_UPDATE_WX_FLAGS(inst, inst + 5, 1);

inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
SLJIT_CACHE_FLUSH(inst, inst + 5);
}
2762 thirdparty/pcre2/src/sljit/sljitNativeRISCV_common.c vendored Normal file
File diff suppressed because it is too large
648 thirdparty/pcre2/src/sljit/sljitNativeS390X.c vendored
@ -103,11 +103,8 @@ static const sljit_gpr r15 = 15; /* reg_map[SLJIT_NUMBER_OF_REGISTERS + 1]: stac
|
|||
/* When reg cannot be unused. */
|
||||
#define IS_GPR_REG(reg) ((reg > 0) && (reg) <= SLJIT_SP)
|
||||
|
||||
/* Link registers. The normal link register is r14, but since
|
||||
we use that for flags we need to use r0 instead to do fast
|
||||
calls so that flags are preserved. */
|
||||
/* Link register. */
|
||||
static const sljit_gpr link_r = 14; /* r14 */
|
||||
static const sljit_gpr fast_link_r = 0; /* r0 */
|
||||
|
||||
#define TMP_FREG1 (0)
|
||||
|
||||
|
@ -220,7 +217,8 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t
|
|||
}
|
||||
/* fallthrough */
|
||||
|
||||
case SLJIT_EQUAL_F64:
|
||||
case SLJIT_F_EQUAL:
|
||||
case SLJIT_ORDERED_EQUAL:
|
||||
return cc0;
|
||||
|
||||
case SLJIT_NOT_EQUAL:
|
||||
|
@ -234,13 +232,14 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t
|
|||
}
|
||||
/* fallthrough */
|
||||
|
||||
case SLJIT_NOT_EQUAL_F64:
|
||||
case SLJIT_UNORDERED_OR_NOT_EQUAL:
|
||||
return (cc1 | cc2 | cc3);
|
||||
|
||||
case SLJIT_LESS:
|
||||
return cc1;
|
||||
|
||||
case SLJIT_GREATER_EQUAL:
|
||||
case SLJIT_UNORDERED_OR_GREATER_EQUAL:
|
||||
return (cc0 | cc2 | cc3);
|
||||
|
||||
case SLJIT_GREATER:
|
||||
|
@ -254,7 +253,8 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t
|
|||
return (cc0 | cc1 | cc2);
|
||||
|
||||
case SLJIT_SIG_LESS:
|
||||
case SLJIT_LESS_F64:
|
||||
case SLJIT_F_LESS:
|
||||
case SLJIT_ORDERED_LESS:
|
||||
return cc1;
|
||||
|
||||
case SLJIT_NOT_CARRY:
|
||||
|
@ -263,7 +263,8 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t
|
|||
/* fallthrough */
|
||||
|
||||
case SLJIT_SIG_LESS_EQUAL:
|
||||
case SLJIT_LESS_EQUAL_F64:
|
||||
case SLJIT_F_LESS_EQUAL:
|
||||
case SLJIT_ORDERED_LESS_EQUAL:
|
||||
return (cc0 | cc1);
|
||||
|
||||
case SLJIT_CARRY:
|
||||
|
@ -272,6 +273,7 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t
|
|||
/* fallthrough */
|
||||
|
||||
case SLJIT_SIG_GREATER:
|
||||
case SLJIT_UNORDERED_OR_GREATER:
|
||||
/* Overflow is considered greater, see SLJIT_SUB. */
|
||||
return cc2 | cc3;
|
||||
|
||||
|
@ -283,7 +285,7 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t
|
|||
return (cc2 | cc3);
|
||||
/* fallthrough */
|
||||
|
||||
case SLJIT_UNORDERED_F64:
|
||||
case SLJIT_UNORDERED:
|
||||
return cc3;
|
||||
|
||||
case SLJIT_NOT_OVERFLOW:
|
||||
|
@ -291,14 +293,29 @@ static SLJIT_INLINE sljit_u8 get_cc(struct sljit_compiler *compiler, sljit_s32 t
|
|||
return (cc0 | cc1);
|
||||
/* fallthrough */
|
||||
|
||||
case SLJIT_ORDERED_F64:
|
||||
case SLJIT_ORDERED:
|
||||
return (cc0 | cc1 | cc2);
|
||||
|
||||
case SLJIT_GREATER_F64:
|
||||
case SLJIT_F_NOT_EQUAL:
|
||||
case SLJIT_ORDERED_NOT_EQUAL:
|
||||
return (cc1 | cc2);
|
||||
|
||||
case SLJIT_F_GREATER:
|
||||
case SLJIT_ORDERED_GREATER:
|
||||
return cc2;
|
||||
|
||||
case SLJIT_GREATER_EQUAL_F64:
|
||||
case SLJIT_F_GREATER_EQUAL:
|
||||
case SLJIT_ORDERED_GREATER_EQUAL:
|
||||
return (cc0 | cc2);
|
||||
|
||||
case SLJIT_UNORDERED_OR_LESS_EQUAL:
|
||||
return (cc0 | cc1 | cc3);
|
||||
|
||||
case SLJIT_UNORDERED_OR_EQUAL:
|
||||
return (cc0 | cc3);
|
||||
|
||||
case SLJIT_UNORDERED_OR_LESS:
|
||||
return (cc1 | cc3);
|
||||
}
|
||||
|
||||
SLJIT_UNREACHABLE();
|
||||
|
@ -978,7 +995,7 @@ static sljit_s32 make_addr_bx(struct sljit_compiler *compiler,
|
|||
(cond) ? EVAL(i1, r, addr) : EVAL(i2, r, addr)
|
||||
|
||||
/* May clobber tmp1. */
|
||||
static sljit_s32 load_word(struct sljit_compiler *compiler, sljit_gpr dst,
|
||||
static sljit_s32 load_word(struct sljit_compiler *compiler, sljit_gpr dst_r,
|
||||
sljit_s32 src, sljit_sw srcw,
|
||||
sljit_s32 is_32bit)
|
||||
{
|
||||
|
@ -986,21 +1003,36 @@ static sljit_s32 load_word(struct sljit_compiler *compiler, sljit_gpr dst,
|
|||
sljit_ins ins;
|
||||
|
||||
SLJIT_ASSERT(src & SLJIT_MEM);
|
||||
if (have_ldisp() || !is_32bit)
|
||||
FAIL_IF(make_addr_bxy(compiler, &addr, src, srcw, tmp1));
|
||||
else
|
||||
|
||||
if (is_32bit && ((src & OFFS_REG_MASK) || is_u12(srcw) || !is_s20(srcw))) {
|
||||
FAIL_IF(make_addr_bx(compiler, &addr, src, srcw, tmp1));
|
||||
return push_inst(compiler, 0x58000000 /* l */ | R20A(dst_r) | R16A(addr.index) | R12A(addr.base) | (sljit_ins)addr.offset);
|
||||
}
|
||||
|
||||
if (is_32bit)
|
||||
ins = WHEN(is_u12(addr.offset), dst, l, ly, addr);
|
||||
else
|
||||
ins = lg(dst, addr.offset, addr.index, addr.base);
|
||||
FAIL_IF(make_addr_bxy(compiler, &addr, src, srcw, tmp1));
|
||||
|
||||
return push_inst(compiler, ins);
|
||||
ins = is_32bit ? 0xe30000000058 /* ly */ : 0xe30000000004 /* lg */;
|
||||
return push_inst(compiler, ins | R36A(dst_r) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset));
|
||||
}
|
||||
|
||||
/* May clobber tmp1. */
|
||||
static sljit_s32 store_word(struct sljit_compiler *compiler, sljit_gpr src,
|
||||
static sljit_s32 load_unsigned_word(struct sljit_compiler *compiler, sljit_gpr dst_r,
|
||||
sljit_s32 src, sljit_sw srcw,
|
||||
sljit_s32 is_32bit)
|
||||
{
|
||||
struct addr addr;
|
||||
sljit_ins ins;
|
||||
|
||||
SLJIT_ASSERT(src & SLJIT_MEM);
|
||||
|
||||
FAIL_IF(make_addr_bxy(compiler, &addr, src, srcw, tmp1));
|
||||
|
||||
ins = is_32bit ? 0xe30000000016 /* llgf */ : 0xe30000000004 /* lg */;
|
||||
return push_inst(compiler, ins | R36A(dst_r) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset));
|
||||
}
|
||||
|
||||
/* May clobber tmp1. */
|
||||
static sljit_s32 store_word(struct sljit_compiler *compiler, sljit_gpr src_r,
|
||||
sljit_s32 dst, sljit_sw dstw,
|
||||
sljit_s32 is_32bit)
|
||||
{
|
||||
|
@ -1008,17 +1040,16 @@ static sljit_s32 store_word(struct sljit_compiler *compiler, sljit_gpr src,
|
|||
sljit_ins ins;
|
||||
|
||||
SLJIT_ASSERT(dst & SLJIT_MEM);
|
||||
if (have_ldisp() || !is_32bit)
|
||||
FAIL_IF(make_addr_bxy(compiler, &addr, dst, dstw, tmp1));
|
||||
else
|
||||
|
||||
if (is_32bit && ((dst & OFFS_REG_MASK) || is_u12(dstw) || !is_s20(dstw))) {
|
||||
FAIL_IF(make_addr_bx(compiler, &addr, dst, dstw, tmp1));
|
||||
return push_inst(compiler, 0x50000000 /* st */ | R20A(src_r) | R16A(addr.index) | R12A(addr.base) | (sljit_ins)addr.offset);
|
||||
}
|
||||
|
||||
if (is_32bit)
|
||||
ins = WHEN(is_u12(addr.offset), src, st, sty, addr);
|
||||
else
|
||||
ins = stg(src, addr.offset, addr.index, addr.base);
|
||||
FAIL_IF(make_addr_bxy(compiler, &addr, dst, dstw, tmp1));
|
||||
|
||||
return push_inst(compiler, ins);
|
||||
ins = is_32bit ? 0xe30000000050 /* sty */ : 0xe30000000024 /* stg */;
|
||||
return push_inst(compiler, ins | R36A(src_r) | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset));
|
||||
}
|
||||
|
||||
#undef WHEN
|
||||
|
@ -1618,16 +1649,24 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
|
|||
{
|
||||
/* TODO(mundaym): implement all */
|
||||
switch (feature_type) {
|
||||
case SLJIT_HAS_FPU:
|
||||
case SLJIT_HAS_CLZ:
|
||||
return have_eimm() ? 1 : 0; /* FLOGR instruction */
|
||||
case SLJIT_HAS_ROT:
|
||||
case SLJIT_HAS_PREFETCH:
|
||||
return 1;
|
||||
case SLJIT_HAS_CTZ:
|
||||
return 2;
|
||||
case SLJIT_HAS_CMOV:
|
||||
return have_lscond1() ? 1 : 0;
|
||||
case SLJIT_HAS_FPU:
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type)
|
||||
{
|
||||
return (type >= SLJIT_UNORDERED && type <= SLJIT_ORDERED_LESS_EQUAL);
|
||||
}
|
||||
|
||||
/* --------------------------------------------------------------------- */
|
||||
/* Entry, exit */
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
@ -1636,7 +1675,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
|
|||
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
|
||||
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
|
||||
{
|
||||
sljit_s32 word_arg_count = 0;
|
||||
sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options);
|
||||
sljit_s32 offset, i, tmp;
|
||||
|
||||
CHECK_ERROR();
|
||||
|
@ -1648,8 +1687,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
|
|||
|
||||
offset = 2 * SSIZE_OF(sw);
|
||||
if (saveds + scratches >= SLJIT_NUMBER_OF_REGISTERS) {
|
||||
FAIL_IF(push_inst(compiler, stmg(r6, r14, offset, r15))); /* save registers TODO(MGM): optimize */
|
||||
offset += 9 * SSIZE_OF(sw);
|
||||
if (saved_arg_count == 0) {
|
||||
FAIL_IF(push_inst(compiler, stmg(r6, r14, offset, r15)));
|
||||
offset += 9 * SSIZE_OF(sw);
|
||||
} else {
|
||||
FAIL_IF(push_inst(compiler, stmg(r6, r13 - (sljit_gpr)saved_arg_count, offset, r15)));
|
||||
offset += (8 - saved_arg_count) * SSIZE_OF(sw);
|
||||
}
|
||||
} else {
|
||||
if (scratches == SLJIT_FIRST_SAVED_REG) {
|
||||
FAIL_IF(push_inst(compiler, stg(r6, offset, 0, r15)));
|
||||
|
@ -1659,15 +1703,30 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
|
|||
offset += (scratches - (SLJIT_FIRST_SAVED_REG - 1)) * SSIZE_OF(sw);
|
||||
}
|
||||
|
||||
if (saveds == 0) {
|
||||
FAIL_IF(push_inst(compiler, stg(r14, offset, 0, r15)));
|
||||
offset += SSIZE_OF(sw);
|
||||
} else {
|
||||
FAIL_IF(push_inst(compiler, stmg(r14 - (sljit_gpr)saveds, r14, offset, r15)));
|
||||
offset += (saveds + 1) * SSIZE_OF(sw);
|
||||
if (saved_arg_count == 0) {
|
||||
if (saveds == 0) {
|
||||
FAIL_IF(push_inst(compiler, stg(r14, offset, 0, r15)));
|
||||
offset += SSIZE_OF(sw);
|
||||
} else {
|
||||
FAIL_IF(push_inst(compiler, stmg(r14 - (sljit_gpr)saveds, r14, offset, r15)));
|
||||
offset += (saveds + 1) * SSIZE_OF(sw);
|
||||
}
|
||||
} else if (saveds > saved_arg_count) {
|
||||
if (saveds == saved_arg_count + 1) {
|
||||
FAIL_IF(push_inst(compiler, stg(r14 - (sljit_gpr)saveds, offset, 0, r15)));
|
||||
offset += SSIZE_OF(sw);
|
||||
} else {
|
||||
FAIL_IF(push_inst(compiler, stmg(r14 - (sljit_gpr)saveds, r13 - (sljit_gpr)saved_arg_count, offset, r15)));
|
||||
offset += (saveds - saved_arg_count) * SSIZE_OF(sw);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (saved_arg_count > 0) {
|
||||
FAIL_IF(push_inst(compiler, stg(r14, offset, 0, r15)));
|
||||
offset += SSIZE_OF(sw);
|
||||
}
|
||||
|
||||
tmp = SLJIT_FS0 - fsaveds;
|
||||
for (i = SLJIT_FS0; i > tmp; i--) {
|
||||
FAIL_IF(push_inst(compiler, 0x60000000 /* std */ | F20(i) | R12A(r15) | (sljit_ins)offset));
|
||||
|
@ -1684,15 +1743,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
|
|||
|
||||
FAIL_IF(push_inst(compiler, 0xe30000000071 /* lay */ | R36A(r15) | R28A(r15) | disp_s20(-local_size)));
|
||||
|
||||
if (options & SLJIT_ENTER_REG_ARG)
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
arg_types >>= SLJIT_ARG_SHIFT;
|
||||
saved_arg_count = 0;
|
||||
tmp = 0;
|
||||
while (arg_types > 0) {
|
||||
if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) {
|
||||
if (!(arg_types & SLJIT_ARG_TYPE_SCRATCH_REG)) {
|
||||
FAIL_IF(push_inst(compiler, lgr(gpr(SLJIT_S0 - tmp), gpr(SLJIT_R0 + word_arg_count))));
|
||||
tmp++;
|
||||
FAIL_IF(push_inst(compiler, lgr(gpr(SLJIT_S0 - saved_arg_count), gpr(SLJIT_R0 + tmp))));
|
||||
saved_arg_count++;
|
||||
}
|
||||
word_arg_count++;
|
||||
tmp++;
|
||||
}
|
||||
|
||||
arg_types >>= SLJIT_ARG_SHIFT;
|
||||
|
@ -1713,12 +1776,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
|
|||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
||||
static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_gpr last_reg)
|
||||
{
|
||||
sljit_s32 offset, i, tmp;
|
||||
sljit_s32 local_size = compiler->local_size;
|
||||
sljit_s32 saveds = compiler->saveds;
|
||||
sljit_s32 scratches = compiler->scratches;
|
||||
sljit_s32 kept_saveds_count = SLJIT_KEPT_SAVEDS_COUNT(compiler->options);
|
||||
|
||||
if (is_u12(local_size))
|
||||
FAIL_IF(push_inst(compiler, 0x41000000 /* ly */ | R20A(r15) | R12A(r15) | (sljit_ins)local_size));
|
||||
|
@ -1727,8 +1791,13 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
|||
|
||||
offset = 2 * SSIZE_OF(sw);
|
||||
if (saveds + scratches >= SLJIT_NUMBER_OF_REGISTERS) {
|
||||
FAIL_IF(push_inst(compiler, lmg(r6, r14, offset, r15))); /* save registers TODO(MGM): optimize */
|
||||
offset += 9 * SSIZE_OF(sw);
|
||||
if (kept_saveds_count == 0) {
|
||||
FAIL_IF(push_inst(compiler, lmg(r6, last_reg, offset, r15)));
|
||||
offset += 9 * SSIZE_OF(sw);
|
||||
} else {
|
||||
FAIL_IF(push_inst(compiler, lmg(r6, r13 - (sljit_gpr)kept_saveds_count, offset, r15)));
|
||||
offset += (8 - kept_saveds_count) * SSIZE_OF(sw);
|
||||
}
|
||||
} else {
|
||||
if (scratches == SLJIT_FIRST_SAVED_REG) {
|
||||
FAIL_IF(push_inst(compiler, lg(r6, offset, 0, r15)));
|
||||
|
@ -1738,15 +1807,35 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
|||
offset += (scratches - (SLJIT_FIRST_SAVED_REG - 1)) * SSIZE_OF(sw);
|
||||
}
|
||||
|
||||
if (saveds == 0) {
|
||||
FAIL_IF(push_inst(compiler, lg(r14, offset, 0, r15)));
|
||||
offset += SSIZE_OF(sw);
|
||||
} else {
|
||||
FAIL_IF(push_inst(compiler, lmg(r14 - (sljit_gpr)saveds, r14, offset, r15)));
|
||||
offset += (saveds + 1) * SSIZE_OF(sw);
|
||||
if (kept_saveds_count == 0) {
|
||||
if (saveds == 0) {
|
||||
if (last_reg == r14)
|
||||
FAIL_IF(push_inst(compiler, lg(r14, offset, 0, r15)));
|
||||
offset += SSIZE_OF(sw);
|
||||
} else if (saveds == 1 && last_reg == r13) {
|
||||
FAIL_IF(push_inst(compiler, lg(r13, offset, 0, r15)));
|
||||
offset += 2 * SSIZE_OF(sw);
|
||||
} else {
|
||||
FAIL_IF(push_inst(compiler, lmg(r14 - (sljit_gpr)saveds, last_reg, offset, r15)));
|
||||
offset += (saveds + 1) * SSIZE_OF(sw);
|
||||
}
|
||||
} else if (saveds > kept_saveds_count) {
|
||||
if (saveds == kept_saveds_count + 1) {
|
||||
FAIL_IF(push_inst(compiler, lg(r14 - (sljit_gpr)saveds, offset, 0, r15)));
|
||||
offset += SSIZE_OF(sw);
|
||||
} else {
|
||||
FAIL_IF(push_inst(compiler, lmg(r14 - (sljit_gpr)saveds, r13 - (sljit_gpr)kept_saveds_count, offset, r15)));
|
||||
offset += (saveds - kept_saveds_count) * SSIZE_OF(sw);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (kept_saveds_count > 0) {
|
||||
if (last_reg == r14)
|
||||
FAIL_IF(push_inst(compiler, lg(r14, offset, 0, r15)));
|
||||
offset += SSIZE_OF(sw);
|
||||
}
|
||||
|
||||
tmp = SLJIT_FS0 - compiler->fsaveds;
|
||||
for (i = SLJIT_FS0; i > tmp; i--) {
|
||||
FAIL_IF(push_inst(compiler, 0x68000000 /* ld */ | F20(i) | R12A(r15) | (sljit_ins)offset));
|
||||
|
@ -1766,10 +1855,33 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler
|
|||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_return_void(compiler));
|
||||
|
||||
FAIL_IF(emit_stack_frame_release(compiler));
|
||||
FAIL_IF(emit_stack_frame_release(compiler, r14));
|
||||
return push_inst(compiler, br(r14)); /* return */
|
||||
}
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
|
||||
sljit_s32 src, sljit_sw srcw)
|
||||
{
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_return_to(compiler, src, srcw));
|
||||
|
||||
if (src & SLJIT_MEM) {
|
||||
ADJUST_LOCAL_OFFSET(src, srcw);
|
||||
FAIL_IF(load_word(compiler, tmp1, src, srcw, 0 /* 64-bit */));
|
||||
src = TMP_REG2;
|
||||
srcw = 0;
|
||||
} else if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
|
||||
FAIL_IF(push_inst(compiler, lgr(tmp1, gpr(src))));
|
||||
src = TMP_REG2;
|
||||
srcw = 0;
|
||||
}
|
||||
|
||||
FAIL_IF(emit_stack_frame_release(compiler, r13));
|
||||
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
|
||||
}
|
||||
|
||||
/* --------------------------------------------------------------------- */
|
||||
/* Operators */
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
@ -1858,6 +1970,47 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
|
|||
return push_inst(compiler, lgr(arg1, tmp0));
|
||||
}
|
||||
|
||||
static sljit_s32 sljit_emit_clz_ctz(struct sljit_compiler *compiler, sljit_s32 op, sljit_gpr dst_r, sljit_gpr src_r)
|
||||
{
|
||||
sljit_s32 is_ctz = (GET_OPCODE(op) == SLJIT_CTZ);
|
||||
|
||||
if ((op & SLJIT_32) && src_r != tmp0) {
|
||||
FAIL_IF(push_inst(compiler, 0xb9160000 /* llgfr */ | R4A(tmp0) | R0A(src_r)));
|
||||
src_r = tmp0;
|
||||
}
|
||||
|
||||
if (is_ctz) {
|
||||
FAIL_IF(push_inst(compiler, ((op & SLJIT_32) ? 0x1300 /* lcr */ : 0xb9030000 /* lcgr */) | R4A(tmp1) | R0A(src_r)));
|
||||
|
||||
if (src_r == tmp0)
|
||||
FAIL_IF(push_inst(compiler, ((op & SLJIT_32) ? 0x1400 /* nr */ : 0xb9800000 /* ngr */) | R4A(tmp0) | R0A(tmp1)));
|
||||
else
|
||||
FAIL_IF(push_inst(compiler, 0xb9e40000 /* ngrk */ | R12A(tmp1) | R4A(tmp0) | R0A(src_r)));
|
||||
|
||||
src_r = tmp0;
|
||||
}
|
||||
|
||||
FAIL_IF(push_inst(compiler, 0xb9830000 /* flogr */ | R4A(tmp0) | R0A(src_r)));
|
||||
|
||||
if (is_ctz)
|
||||
FAIL_IF(push_inst(compiler, 0xec00000000d9 /* aghik */ | R36A(tmp1) | R32A(tmp0) | ((sljit_ins)(-64 & 0xffff) << 16)));
|
||||
|
||||
if (op & SLJIT_32) {
|
||||
if (!is_ctz && dst_r != tmp0)
|
||||
return push_inst(compiler, 0xec00000000d9 /* aghik */ | R36A(dst_r) | R32A(tmp0) | ((sljit_ins)(-32 & 0xffff) << 16));
|
||||
|
||||
FAIL_IF(push_inst(compiler, 0xc20800000000 /* agfi */ | R36A(tmp0) | (sljit_u32)-32));
|
||||
}
|
||||
|
||||
if (is_ctz)
|
||||
FAIL_IF(push_inst(compiler, 0xec0000000057 /* rxsbg */ | R36A(tmp0) | R32A(tmp1) | ((sljit_ins)((op & SLJIT_32) ? 59 : 58) << 24) | (63 << 16) | ((sljit_ins)((op & SLJIT_32) ? 5 : 6) << 8)));
|
||||
|
||||
if (dst_r == tmp0)
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
return push_inst(compiler, ((op & SLJIT_32) ? 0x1800 /* lr */ : 0xb9040000 /* lgr */) | R4A(dst_r) | R0A(tmp0));
|
||||
}
|
||||
|
||||
/* LEVAL will be defined later with different parameters as needed */
|
||||
#define WHEN2(cond, i1, i2) (cond) ? LEVAL(i1) : LEVAL(i2)
|
||||
|
||||
|
@ -2091,23 +2244,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
|
|||
|
||||
dst_r = FAST_IS_REG(dst) ? gpr(REG_MASK & dst) : tmp0;
|
||||
src_r = FAST_IS_REG(src) ? gpr(REG_MASK & src) : tmp0;
|
||||
if (src & SLJIT_MEM)
|
||||
FAIL_IF(load_word(compiler, src_r, src, srcw, src & SLJIT_32));
|
||||
|
||||
compiler->status_flags_state = op & (VARIABLE_FLAG_MASK | SLJIT_SET_Z);
|
||||
|
||||
/* TODO(mundaym): optimize loads and stores */
|
||||
switch (opcode | (op & SLJIT_32)) {
|
||||
switch (opcode) {
|
||||
case SLJIT_NOT:
|
||||
/* emulate ~x with x^-1 */
|
||||
FAIL_IF(push_load_imm_inst(compiler, tmp1, -1));
|
||||
if (src_r != dst_r)
|
||||
FAIL_IF(push_inst(compiler, lgr(dst_r, src_r)));
|
||||
if (src & SLJIT_MEM)
|
||||
FAIL_IF(load_word(compiler, src_r, src, srcw, op & SLJIT_32));
|
||||
|
||||
FAIL_IF(push_inst(compiler, xgr(dst_r, tmp1)));
|
||||
break;
|
||||
case SLJIT_NOT32:
|
||||
/* emulate ~x with x^-1 */
|
||||
if (!(op & SLJIT_32)) {
|
||||
FAIL_IF(push_load_imm_inst(compiler, tmp1, -1));
|
||||
if (src_r != dst_r)
|
||||
FAIL_IF(push_inst(compiler, lgr(dst_r, src_r)));
|
||||
|
||||
FAIL_IF(push_inst(compiler, xgr(dst_r, tmp1)));
|
||||
break;
|
||||
}
|
||||
|
||||
if (have_eimm())
|
||||
FAIL_IF(push_inst(compiler, xilf(dst_r, 0xffffffff)));
|
||||
else {
|
||||
|
@ -2119,24 +2274,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
|
|||
}
|
||||
break;
|
||||
case SLJIT_CLZ:
|
||||
if (have_eimm()) {
|
||||
FAIL_IF(push_inst(compiler, flogr(tmp0, src_r))); /* clobbers tmp1 */
|
||||
if (dst_r != tmp0)
|
||||
FAIL_IF(push_inst(compiler, lgr(dst_r, tmp0)));
|
||||
} else {
|
||||
abort(); /* TODO(mundaym): no eimm (?) */
|
||||
}
|
||||
break;
|
||||
case SLJIT_CLZ32:
|
||||
if (have_eimm()) {
|
||||
FAIL_IF(push_inst(compiler, sllg(tmp1, src_r, 32, 0)));
|
||||
FAIL_IF(push_inst(compiler, iilf(tmp1, 0xffffffff)));
|
||||
FAIL_IF(push_inst(compiler, flogr(tmp0, tmp1))); /* clobbers tmp1 */
|
||||
if (dst_r != tmp0)
|
||||
FAIL_IF(push_inst(compiler, lr(dst_r, tmp0)));
|
||||
} else {
|
||||
abort(); /* TODO(mundaym): no eimm (?) */
|
||||
}
|
||||
case SLJIT_CTZ:
|
||||
if (src & SLJIT_MEM)
|
||||
FAIL_IF(load_unsigned_word(compiler, src_r, src, srcw, op & SLJIT_32));
|
||||
|
||||
FAIL_IF(sljit_emit_clz_ctz(compiler, op, dst_r, src_r));
|
||||
break;
|
||||
default:
|
||||
SLJIT_UNREACHABLE();
|
||||
|
@ -2145,9 +2287,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
|
|||
if ((op & (SLJIT_SET_Z | VARIABLE_FLAG_MASK)) == (SLJIT_SET_Z | SLJIT_SET_OVERFLOW))
|
||||
FAIL_IF(update_zero_overflow(compiler, op, dst_r));
|
||||
|
||||
/* TODO(carenas): doesn't need FAIL_IF */
|
||||
if (dst & SLJIT_MEM)
|
||||
FAIL_IF(store_word(compiler, dst_r, dst, dstw, op & SLJIT_32));
|
||||
return store_word(compiler, dst_r, dst, dstw, op & SLJIT_32);
|
||||
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
@ -2166,11 +2307,6 @@ static SLJIT_INLINE int is_commutative(sljit_s32 op)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static SLJIT_INLINE int is_shift(sljit_s32 op) {
|
||||
sljit_s32 v = GET_OPCODE(op);
|
||||
return (v == SLJIT_SHL || v == SLJIT_ASHR || v == SLJIT_LSHR) ? 1 : 0;
|
||||
}
|
||||
|
||||
static const struct ins_forms add_forms = {
|
||||
0x1a00, /* ar */
|
||||
0xb9080000, /* agr */
|
||||
|
@ -2604,33 +2740,41 @@ static sljit_s32 sljit_emit_shift(struct sljit_compiler *compiler, sljit_s32 op,
|
|||
sljit_ins ins;
|
||||
|
||||
if (FAST_IS_REG(src1))
|
||||
src_r = gpr(src1 & REG_MASK);
|
||||
src_r = gpr(src1);
|
||||
else
|
||||
FAIL_IF(emit_move(compiler, tmp0, src1, src1w));
|
||||
|
||||
if (src2 & SLJIT_IMM)
|
||||
if (!(src2 & SLJIT_IMM)) {
|
||||
if (FAST_IS_REG(src2))
|
||||
base_r = gpr(src2);
|
||||
else {
|
||||
FAIL_IF(emit_move(compiler, tmp1, src2, src2w));
|
||||
base_r = tmp1;
|
||||
}
|
||||
|
||||
if ((op & SLJIT_32) && (type == SLJIT_MSHL || type == SLJIT_MLSHR || type == SLJIT_MASHR)) {
|
||||
if (base_r != tmp1) {
|
||||
FAIL_IF(push_inst(compiler, 0xec0000000055 /* risbg */ | R36A(tmp1) | R32A(base_r) | (59 << 24) | (1 << 23) | (63 << 16)));
|
||||
base_r = tmp1;
|
||||
} else
|
||||
FAIL_IF(push_inst(compiler, 0xa5070000 /* nill */ | R20A(tmp1) | 0x1f));
|
||||
}
|
||||
} else
|
||||
imm = (sljit_ins)(src2w & ((op & SLJIT_32) ? 0x1f : 0x3f));
|
||||
else if (FAST_IS_REG(src2))
|
||||
base_r = gpr(src2 & REG_MASK);
|
||||
else {
|
||||
FAIL_IF(emit_move(compiler, tmp1, src2, src2w));
|
||||
base_r = tmp1;
|
||||
}
|
||||
|
||||
if ((op & SLJIT_32) && dst_r == src_r) {
|
||||
if (type == SLJIT_SHL)
|
||||
if (type == SLJIT_SHL || type == SLJIT_MSHL)
|
||||
ins = 0x89000000 /* sll */;
|
||||
else if (type == SLJIT_LSHR)
|
||||
else if (type == SLJIT_LSHR || type == SLJIT_MLSHR)
|
||||
ins = 0x88000000 /* srl */;
|
||||
else
|
||||
ins = 0x8a000000 /* sra */;
|
||||
|
||||
FAIL_IF(push_inst(compiler, ins | R20A(dst_r) | R12A(base_r) | imm));
|
||||
}
|
||||
else {
|
||||
if (type == SLJIT_SHL)
|
||||
} else {
|
||||
if (type == SLJIT_SHL || type == SLJIT_MSHL)
|
||||
ins = (op & SLJIT_32) ? 0xeb00000000df /* sllk */ : 0xeb000000000d /* sllg */;
|
||||
else if (type == SLJIT_LSHR)
|
||||
else if (type == SLJIT_LSHR || type == SLJIT_MLSHR)
|
||||
ins = (op & SLJIT_32) ? 0xeb00000000de /* srlk */ : 0xeb000000000c /* srlg */;
|
||||
else
|
||||
ins = (op & SLJIT_32) ? 0xeb00000000dc /* srak */ : 0xeb000000000a /* srag */;
|
||||
|
@ -2644,6 +2788,47 @@ static sljit_s32 sljit_emit_shift(struct sljit_compiler *compiler, sljit_s32 op,
|
|||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
static sljit_s32 sljit_emit_rotate(struct sljit_compiler *compiler, sljit_s32 op,
|
||||
sljit_s32 dst,
|
||||
sljit_s32 src1, sljit_sw src1w,
|
||||
sljit_s32 src2, sljit_sw src2w)
|
||||
{
|
||||
sljit_gpr dst_r = FAST_IS_REG(dst) ? gpr(dst & REG_MASK) : tmp0;
|
||||
sljit_gpr src_r = tmp0;
|
||||
sljit_gpr base_r = tmp0;
|
||||
sljit_ins imm = 0;
|
||||
sljit_ins ins;
|
||||
|
||||
if (FAST_IS_REG(src1))
|
||||
src_r = gpr(src1);
|
||||
else
|
||||
FAIL_IF(emit_move(compiler, tmp0, src1, src1w));
|
||||
|
||||
if (!(src2 & SLJIT_IMM)) {
|
||||
if (FAST_IS_REG(src2))
|
||||
base_r = gpr(src2);
|
||||
else {
|
||||
FAIL_IF(emit_move(compiler, tmp1, src2, src2w));
|
||||
base_r = tmp1;
|
||||
}
|
||||
}
|
||||
|
||||
if (GET_OPCODE(op) == SLJIT_ROTR) {
|
||||
if (!(src2 & SLJIT_IMM)) {
|
||||
ins = (op & SLJIT_32) ? 0x1300 /* lcr */ : 0xb9030000 /* lcgr */;
|
||||
FAIL_IF(push_inst(compiler, ins | R4A(tmp1) | R0A(base_r)));
|
||||
base_r = tmp1;
|
||||
} else
|
||||
src2w = -src2w;
|
||||
}
|
||||
|
||||
if (src2 & SLJIT_IMM)
|
||||
imm = (sljit_ins)(src2w & ((op & SLJIT_32) ? 0x1f : 0x3f));
|
||||
|
||||
ins = (op & SLJIT_32) ? 0xeb000000001d /* rll */ : 0xeb000000001c /* rllg */;
|
||||
return push_inst(compiler, ins | R36A(dst_r) | R32A(src_r) | R28A(base_r) | (imm << 16));
|
||||
}
|
||||
|
||||
static const struct ins_forms addc_forms = {
|
||||
0xb9980000, /* alcr */
|
||||
0xb9880000, /* alcgr */
|
||||
|
@ -2716,10 +2901,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
|
|||
FAIL_IF(sljit_emit_bitwise(compiler, op, dst, src1, src1w, src2, src2w));
|
||||
break;
|
||||
case SLJIT_SHL:
|
||||
case SLJIT_MSHL:
|
||||
case SLJIT_LSHR:
|
||||
case SLJIT_MLSHR:
|
||||
case SLJIT_ASHR:
|
||||
case SLJIT_MASHR:
|
||||
FAIL_IF(sljit_emit_shift(compiler, op, dst, src1, src1w, src2, src2w));
|
||||
break;
|
||||
case SLJIT_ROTL:
|
||||
case SLJIT_ROTR:
|
||||
FAIL_IF(sljit_emit_rotate(compiler, op, dst, src1, src1w, src2, src2w));
|
||||
break;
|
||||
}
|
||||
|
||||
if (dst & SLJIT_MEM)
|
||||
|
@ -2734,18 +2926,130 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil
|
|||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
|
||||
|
||||
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|
||||
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
|
||||
compiler->skip_checks = 1;
|
||||
#endif
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_op2(compiler, op, (sljit_s32)tmp0, 0, src1, src1w, src2, src2w);
|
||||
}
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
|
||||
sljit_s32 src_dst,
|
||||
sljit_s32 src1, sljit_sw src1w,
|
||||
sljit_s32 src2, sljit_sw src2w)
|
||||
{
|
||||
sljit_s32 is_right;
|
||||
sljit_sw bit_length = (op & SLJIT_32) ? 32 : 64;
|
||||
sljit_gpr src_dst_r = gpr(src_dst);
|
||||
sljit_gpr src1_r = tmp0;
|
||||
sljit_gpr src2_r = tmp1;
|
||||
sljit_ins ins;
|
||||
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w));
|
||||
|
||||
is_right = (GET_OPCODE(op) == SLJIT_LSHR || GET_OPCODE(op) == SLJIT_MLSHR);
|
||||
|
||||
if (src_dst == src1) {
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_op2(compiler, (is_right ? SLJIT_ROTR : SLJIT_ROTL) | (op & SLJIT_32), src_dst, 0, src_dst, 0, src2, src2w);
|
||||
}
|
||||
|
||||
ADJUST_LOCAL_OFFSET(src1, src1w);
|
||||
ADJUST_LOCAL_OFFSET(src2, src2w);
|
||||
|
||||
if (src1 & SLJIT_MEM)
|
||||
FAIL_IF(load_word(compiler, tmp0, src1, src1w, op & SLJIT_32));
|
||||
else if (src1 & SLJIT_IMM)
|
||||
FAIL_IF(push_load_imm_inst(compiler, tmp0, src1w));
|
||||
else
|
||||
src1_r = gpr(src1);
|
||||
|
||||
if (src2 & SLJIT_IMM) {
|
||||
src2w &= bit_length - 1;
|
||||
|
||||
if (src2w == 0)
|
||||
return SLJIT_SUCCESS;
|
||||
} else if (!(src2 & SLJIT_MEM))
|
||||
src2_r = gpr(src2);
|
||||
else
|
||||
FAIL_IF(load_word(compiler, tmp1, src2, src2w, op & SLJIT_32));
|
||||
|
||||
if (src2 & SLJIT_IMM) {
|
||||
if (op & SLJIT_32) {
|
||||
ins = is_right ? 0x88000000 /* srl */ : 0x89000000 /* sll */;
|
||||
FAIL_IF(push_inst(compiler, ins | R20A(src_dst_r) | (sljit_ins)src2w));
|
||||
} else {
|
||||
ins = is_right ? 0xeb000000000c /* srlg */ : 0xeb000000000d /* sllg */;
|
||||
FAIL_IF(push_inst(compiler, ins | R36A(src_dst_r) | R32A(src_dst_r) | ((sljit_ins)src2w << 16)));
|
||||
}
|
||||
|
||||
ins = 0xec0000000055 /* risbg */;
|
||||
|
||||
if (is_right) {
|
||||
src2w = bit_length - src2w;
|
||||
ins |= ((sljit_ins)(64 - bit_length) << 24) | ((sljit_ins)(63 - src2w) << 16) | ((sljit_ins)src2w << 8);
|
||||
} else
|
||||
ins |= ((sljit_ins)(64 - src2w) << 24) | ((sljit_ins)63 << 16) | ((sljit_ins)src2w << 8);
|
||||
|
||||
return push_inst(compiler, ins | R36A(src_dst_r) | R32A(src1_r));
|
||||
}
|
||||
|
||||
if (op & SLJIT_32) {
|
||||
if (GET_OPCODE(op) == SLJIT_MSHL || GET_OPCODE(op) == SLJIT_MLSHR) {
|
||||
if (src2_r != tmp1) {
|
||||
FAIL_IF(push_inst(compiler, 0xec0000000055 /* risbg */ | R36A(tmp1) | R32A(src2_r) | (59 << 24) | (1 << 23) | (63 << 16)));
|
||||
src2_r = tmp1;
|
||||
} else
|
||||
FAIL_IF(push_inst(compiler, 0xa5070000 /* nill */ | R20A(tmp1) | 0x1f));
|
||||
}
|
||||
|
||||
ins = is_right ? 0x88000000 /* srl */ : 0x89000000 /* sll */;
|
||||
FAIL_IF(push_inst(compiler, ins | R20A(src_dst_r) | R12A(src2_r)));
|
||||
|
||||
if (src2_r != tmp1) {
|
||||
FAIL_IF(push_inst(compiler, 0xa50f0000 /* llill */ | R20A(tmp1) | 0x1f));
|
||||
FAIL_IF(push_inst(compiler, 0x1700 /* xr */ | R4A(tmp1) | R0A(src2_r)));
|
||||
} else
|
||||
FAIL_IF(push_inst(compiler, 0xc00700000000 /* xilf */ | R36A(tmp1) | 0x1f));
|
||||
|
||||
if (src1_r == tmp0) {
|
||||
ins = is_right ? 0x89000000 /* sll */ : 0x88000000 /* srl */;
|
||||
FAIL_IF(push_inst(compiler, ins | R20A(tmp0) | R12A(tmp1) | 0x1));
|
||||
} else {
|
||||
ins = is_right ? 0xeb00000000df /* sllk */ : 0xeb00000000de /* srlk */;
|
||||
FAIL_IF(push_inst(compiler, ins | R36A(tmp0) | R32A(src1_r) | R28A(tmp1) | (0x1 << 16)));
|
||||
}
|
||||
|
||||
return push_inst(compiler, 0x1600 /* or */ | R4A(src_dst_r) | R0A(tmp0));
|
||||
}
|
||||
|
||||
ins = is_right ? 0xeb000000000c /* srlg */ : 0xeb000000000d /* sllg */;
|
||||
FAIL_IF(push_inst(compiler, ins | R36A(src_dst_r) | R32A(src_dst_r) | R28A(src2_r)));
|
||||
|
||||
ins = is_right ? 0xeb000000000d /* sllg */ : 0xeb000000000c /* srlg */;
|
||||
|
||||
if (!(op & SLJIT_SHIFT_INTO_NON_ZERO)) {
|
||||
if (src2_r != tmp1)
|
||||
FAIL_IF(push_inst(compiler, 0xa50f0000 /* llill */ | R20A(tmp1) | 0x3f));
|
||||
|
||||
FAIL_IF(push_inst(compiler, ins | R36A(tmp0) | R32A(src1_r) | (0x1 << 16)));
|
||||
src1_r = tmp0;
|
||||
|
||||
if (src2_r != tmp1)
|
||||
FAIL_IF(push_inst(compiler, 0xb9820000 /* xgr */ | R4A(tmp1) | R0A(src2_r)));
|
||||
else
|
||||
FAIL_IF(push_inst(compiler, 0xc00700000000 /* xilf */ | R36A(tmp1) | 0x3f));
|
||||
} else
|
||||
FAIL_IF(push_inst(compiler, 0xb9030000 /* lcgr */ | R4A(tmp1) | R0A(src2_r)));
|
||||
|
||||
FAIL_IF(push_inst(compiler, ins | R36A(tmp0) | R32A(src1_r) | R28A(tmp1)));
|
||||
return push_inst(compiler, 0xb9810000 /* ogr */ | R4A(src_dst_r) | R0A(tmp0));
|
||||
}
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(
|
||||
struct sljit_compiler *compiler,
|
||||
sljit_s32 op, sljit_s32 src, sljit_sw srcw)
|
||||
{
|
||||
sljit_gpr src_r;
|
||||
struct addr addr;
|
||||
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_op_src(compiler, op, src, srcw));
|
||||
|
@ -2759,16 +3063,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(
|
|||
|
||||
return push_inst(compiler, br(src_r));
|
||||
case SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN:
|
||||
/* TODO(carenas): implement? */
|
||||
return SLJIT_SUCCESS;
|
||||
case SLJIT_PREFETCH_L1:
|
||||
case SLJIT_PREFETCH_L2:
|
||||
case SLJIT_PREFETCH_L3:
|
||||
case SLJIT_PREFETCH_ONCE:
|
||||
/* TODO(carenas): implement */
|
||||
return SLJIT_SUCCESS;
|
||||
FAIL_IF(make_addr_bxy(compiler, &addr, src, srcw, tmp1));
|
||||
return push_inst(compiler, 0xe31000000036 /* pfd */ | R32A(addr.index) | R28A(addr.base) | disp_s20(addr.offset));
|
||||
default:
|
||||
/* TODO(carenas): probably should not success by default */
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -3064,10 +3366,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *
|
|||
ADJUST_LOCAL_OFFSET(dst, dstw);
|
||||
|
||||
if (FAST_IS_REG(dst))
|
||||
return push_inst(compiler, lgr(gpr(dst), fast_link_r));
|
||||
return push_inst(compiler, lgr(gpr(dst), link_r));
|
||||
|
||||
/* memory */
|
||||
return store_word(compiler, fast_link_r, dst, dstw, 0);
|
||||
return store_word(compiler, link_r, dst, dstw, 0);
|
||||
}
|
||||
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
@ -3107,7 +3409,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
|
|||
/* emit jump instruction */
|
||||
type &= 0xff;
|
||||
if (type >= SLJIT_FAST_CALL)
|
||||
PTR_FAIL_IF(push_inst(compiler, brasl(type == SLJIT_FAST_CALL ? fast_link_r : link_r, 0)));
|
||||
PTR_FAIL_IF(push_inst(compiler, brasl(link_r, 0)));
|
||||
else
|
||||
PTR_FAIL_IF(push_inst(compiler, brcl(mask, 0)));
|
||||
|
||||
|
@ -3117,19 +3419,16 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
|
|||
SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compiler *compiler, sljit_s32 type,
|
||||
sljit_s32 arg_types)
|
||||
{
|
||||
SLJIT_UNUSED_ARG(arg_types);
|
||||
CHECK_ERROR_PTR();
|
||||
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
|
||||
|
||||
if (type & SLJIT_CALL_RETURN) {
|
||||
PTR_FAIL_IF(emit_stack_frame_release(compiler));
|
||||
PTR_FAIL_IF(emit_stack_frame_release(compiler, r14));
|
||||
type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
|
||||
}
|
||||
|
||||
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|
||||
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
|
||||
compiler->skip_checks = 1;
|
||||
#endif
|
||||
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_jump(compiler, type);
|
||||
}
|
||||
|
||||
|
@ -3151,7 +3450,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
|
|||
|
||||
/* emit jump instruction */
|
||||
if (type >= SLJIT_FAST_CALL)
|
||||
return push_inst(compiler, basr(type == SLJIT_FAST_CALL ? fast_link_r : link_r, src_r));
|
||||
return push_inst(compiler, basr(link_r, src_r));
|
||||
|
||||
return push_inst(compiler, br(src_r));
|
||||
}
|
||||
|
@ -3169,23 +3468,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
|
|||
ADJUST_LOCAL_OFFSET(src, srcw);
|
||||
FAIL_IF(load_word(compiler, tmp1, src, srcw, 0 /* 64-bit */));
|
||||
src = TMP_REG2;
|
||||
srcw = 0;
|
||||
}
|
||||
|
||||
if (type & SLJIT_CALL_RETURN) {
|
||||
if (src >= SLJIT_FIRST_SAVED_REG && src <= SLJIT_S0) {
|
||||
if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
|
||||
FAIL_IF(push_inst(compiler, lgr(tmp1, gpr(src))));
|
||||
src = TMP_REG2;
|
||||
srcw = 0;
|
||||
}
|
||||
|
||||
FAIL_IF(emit_stack_frame_release(compiler));
|
||||
FAIL_IF(emit_stack_frame_release(compiler, r14));
|
||||
type = SLJIT_JUMP;
|
||||
}
|
||||
|
||||
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|
||||
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
|
||||
compiler->skip_checks = 1;
|
||||
#endif
|
||||
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_ijump(compiler, type, src, srcw);
|
||||
}
|
||||
|
||||
|
@ -3193,7 +3490,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
|
|||
sljit_s32 dst, sljit_sw dstw,
|
||||
sljit_s32 type)
|
||||
{
|
||||
sljit_u8 mask = get_cc(compiler, type & 0xff);
|
||||
sljit_u8 mask = get_cc(compiler, type);
|
||||
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
|
||||
|
@ -3263,27 +3560,92 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
|
|||
sljit_s32 dst_reg,
|
||||
sljit_s32 src, sljit_sw srcw)
|
||||
{
|
||||
sljit_u8 mask = get_cc(compiler, type & 0xff);
|
||||
sljit_gpr dst_r = gpr(dst_reg & ~SLJIT_32);
|
||||
sljit_gpr src_r = FAST_IS_REG(src) ? gpr(src) : tmp0;
|
||||
sljit_ins mask = get_cc(compiler, type & ~SLJIT_32);
|
||||
sljit_gpr src_r;
|
||||
sljit_ins ins;
|
||||
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
|
||||
|
||||
if (src & SLJIT_IMM) {
|
||||
/* TODO(mundaym): fast path with lscond2 */
|
||||
FAIL_IF(push_load_imm_inst(compiler, src_r, srcw));
|
||||
if (type & SLJIT_32)
|
||||
srcw = (sljit_s32)srcw;
|
||||
|
||||
if (have_lscond2() && (src & SLJIT_IMM) && is_s16(srcw)) {
|
||||
ins = (type & SLJIT_32) ? 0xec0000000042 /* lochi */ : 0xec0000000046 /* locghi */;
|
||||
return push_inst(compiler, ins | R36A(gpr(dst_reg)) | (mask << 32) | (sljit_ins)(srcw & 0xffff) << 16);
|
||||
}
|
||||
|
||||
#define LEVAL(i) i(dst_r, src_r, mask)
|
||||
if (have_lscond1())
|
||||
return push_inst(compiler,
|
||||
WHEN2(dst_reg & SLJIT_32, locr, locgr));
|
||||
if (src & SLJIT_IMM) {
|
||||
FAIL_IF(push_load_imm_inst(compiler, tmp0, srcw));
|
||||
src_r = tmp0;
|
||||
} else
|
||||
src_r = gpr(src);
|
||||
|
||||
#undef LEVAL
|
||||
if (have_lscond1()) {
|
||||
ins = (type & SLJIT_32) ? 0xb9f20000 /* locr */ : 0xb9e20000 /* locgr */;
|
||||
return push_inst(compiler, ins | (mask << 12) | R4A(gpr(dst_reg)) | R0A(src_r));
|
||||
}
|
||||
|
||||
/* TODO(mundaym): implement */
|
||||
return SLJIT_ERR_UNSUPPORTED;
|
||||
return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
|
||||
}
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
|
||||
sljit_s32 reg,
|
||||
sljit_s32 mem, sljit_sw memw)
|
||||
{
|
||||
sljit_ins ins, reg1, reg2, base, offs = 0;
|
||||
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
|
||||
|
||||
if (!(reg & REG_PAIR_MASK))
|
||||
return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);
|
||||
|
||||
ADJUST_LOCAL_OFFSET(mem, memw);
|
||||
|
||||
base = gpr(mem & REG_MASK);
|
||||
reg1 = gpr(REG_PAIR_FIRST(reg));
|
||||
reg2 = gpr(REG_PAIR_SECOND(reg));
|
||||
|
||||
if (mem & OFFS_REG_MASK) {
|
||||
memw &= 0x3;
|
||||
offs = gpr(OFFS_REG(mem));
|
||||
|
||||
if (memw != 0) {
|
||||
FAIL_IF(push_inst(compiler, 0xeb000000000d /* sllg */ | R36A(tmp1) | R32A(offs) | ((sljit_ins)memw << 16)));
|
||||
offs = tmp1;
|
||||
} else if (!(type & SLJIT_MEM_STORE) && (base == reg1 || base == reg2) && (offs == reg1 || offs == reg2)) {
|
||||
FAIL_IF(push_inst(compiler, 0xb9f80000 | R12A(tmp1) | R4A(base) | R0A(offs)));
|
||||
base = tmp1;
|
||||
offs = 0;
|
||||
}
|
||||
|
||||
memw = 0;
|
||||
} else if (memw < -0x80000 || memw > 0x7ffff - ((reg2 == reg1 + 1) ? 0 : SSIZE_OF(sw))) {
|
||||
FAIL_IF(push_load_imm_inst(compiler, tmp1, memw));
|
||||
|
||||
if (base == 0)
|
||||
base = tmp1;
|
||||
else
|
||||
offs = tmp1;
|
||||
|
||||
memw = 0;
|
||||
}
|
||||
|
||||
if (offs == 0 && reg2 == (reg1 + 1)) {
|
||||
ins = (type & SLJIT_MEM_STORE) ? 0xeb0000000024 /* stmg */ : 0xeb0000000004 /* lmg */;
|
||||
return push_inst(compiler, ins | R36A(reg1) | R32A(reg2) | R28A(base) | disp_s20((sljit_s32)memw));
|
||||
}
|
||||
|
||||
ins = ((type & SLJIT_MEM_STORE) ? 0xe30000000024 /* stg */ : 0xe30000000004 /* lg */) | R32A(offs) | R28A(base);
|
||||
|
||||
if (!(type & SLJIT_MEM_STORE) && base == reg1) {
|
||||
FAIL_IF(push_inst(compiler, ins | R36A(reg2) | disp_s20((sljit_s32)memw + SSIZE_OF(sw))));
|
||||
return push_inst(compiler, ins | R36A(reg1) | disp_s20((sljit_s32)memw));
|
||||
}
|
||||
|
||||
FAIL_IF(push_inst(compiler, ins | R36A(reg1) | disp_s20((sljit_s32)memw)));
|
||||
return push_inst(compiler, ins | R36A(reg2) | disp_s20((sljit_s32)memw + SSIZE_OF(sw)));
|
||||
}
|
||||
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
|
283
thirdparty/pcre2/src/sljit/sljitNativeSPARC_32.c
vendored
283
thirdparty/pcre2/src/sljit/sljitNativeSPARC_32.c
vendored
|
@ -1,283 +0,0 @@
|
|||
/*
|
||||
* Stack-less Just-In-Time compiler
|
||||
*
|
||||
* Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification, are
|
||||
* permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this list of
|
||||
* conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
|
||||
* of conditions and the following disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
|
||||
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
||||
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
||||
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
static sljit_s32 load_immediate(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw imm)
|
||||
{
|
||||
if (imm <= SIMM_MAX && imm >= SIMM_MIN)
|
||||
return push_inst(compiler, OR | D(dst) | S1(0) | IMM(imm), DR(dst));
|
||||
|
||||
FAIL_IF(push_inst(compiler, SETHI | D(dst) | ((imm >> 10) & 0x3fffff), DR(dst)));
|
||||
return (imm & 0x3ff) ? push_inst(compiler, OR | D(dst) | S1(dst) | IMM_ARG | (imm & 0x3ff), DR(dst)) : SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
#define ARG2(flags, src2) ((flags & SRC2_IMM) ? IMM(src2) : S2(src2))
|
||||
|
||||
static SLJIT_INLINE sljit_s32 emit_single_op(struct sljit_compiler *compiler, sljit_s32 op, sljit_u32 flags,
|
||||
sljit_s32 dst, sljit_s32 src1, sljit_sw src2)
|
||||
{
|
||||
SLJIT_COMPILE_ASSERT(ICC_IS_SET == SET_FLAGS, icc_is_set_and_set_flags_must_be_the_same);
|
||||
|
||||
switch (op) {
|
||||
case SLJIT_MOV:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if (dst != src2)
|
||||
return push_inst(compiler, OR | D(dst) | S1(0) | S2(src2), DR(dst));
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MOV_U8:
|
||||
case SLJIT_MOV_S8:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
|
||||
if (op == SLJIT_MOV_U8)
|
||||
return push_inst(compiler, AND | D(dst) | S1(src2) | IMM(0xff), DR(dst));
|
||||
FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src2) | IMM(24), DR(dst)));
|
||||
return push_inst(compiler, SRA | D(dst) | S1(dst) | IMM(24), DR(dst));
|
||||
}
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_MOV_U16:
|
||||
case SLJIT_MOV_S16:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) {
|
||||
FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src2) | IMM(16), DR(dst)));
|
||||
return push_inst(compiler, (op == SLJIT_MOV_S16 ? SRA : SRL) | D(dst) | S1(dst) | IMM(16), DR(dst));
|
||||
}
|
||||
SLJIT_ASSERT(dst == src2);
|
||||
return SLJIT_SUCCESS;
|
||||
|
||||
case SLJIT_NOT:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
return push_inst(compiler, XNOR | (flags & SET_FLAGS) | D(dst) | S1(0) | S2(src2), DRF(dst, flags));
|
||||
|
||||
case SLJIT_CLZ:
|
||||
SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM));
|
||||
FAIL_IF(push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(src2) | S2(0), SET_FLAGS));
|
||||
FAIL_IF(push_inst(compiler, OR | D(TMP_REG1) | S1(0) | S2(src2), DR(TMP_REG1)));
|
||||
FAIL_IF(push_inst(compiler, BICC | DA(0x1) | (7 & DISP_MASK), UNMOVABLE_INS));
|
||||
FAIL_IF(push_inst(compiler, OR | D(dst) | S1(0) | IMM(32), UNMOVABLE_INS));
|
||||
FAIL_IF(push_inst(compiler, OR | D(dst) | S1(0) | IMM(-1), DR(dst)));
|
||||
|
||||
/* Loop. */
|
||||
FAIL_IF(push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(TMP_REG1) | S2(0), SET_FLAGS));
|
||||
FAIL_IF(push_inst(compiler, SLL | D(TMP_REG1) | S1(TMP_REG1) | IMM(1), DR(TMP_REG1)));
|
||||
FAIL_IF(push_inst(compiler, BICC | DA(0xe) | ((sljit_ins)-2 & DISP_MASK), UNMOVABLE_INS));
|
||||
return push_inst(compiler, ADD | D(dst) | S1(dst) | IMM(1), UNMOVABLE_INS);
|
||||
|
||||
case SLJIT_ADD:
|
||||
compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
|
||||
return push_inst(compiler, ADD | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
|
||||
|
||||
case SLJIT_ADDC:
|
||||
compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
|
||||
return push_inst(compiler, ADDC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
|
||||
|
||||
case SLJIT_SUB:
|
||||
compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
|
||||
return push_inst(compiler, SUB | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
|
||||
|
||||
case SLJIT_SUBC:
|
||||
compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
|
||||
return push_inst(compiler, SUBC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
|
||||
|
||||
case SLJIT_MUL:
|
||||
compiler->status_flags_state = 0;
|
||||
FAIL_IF(push_inst(compiler, SMUL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst)));
|
||||
if (!(flags & SET_FLAGS))
|
||||
return SLJIT_SUCCESS;
|
||||
FAIL_IF(push_inst(compiler, SRA | D(TMP_REG1) | S1(dst) | IMM(31), DR(TMP_REG1)));
|
||||
FAIL_IF(push_inst(compiler, RDY | D(TMP_LINK), DR(TMP_LINK)));
|
||||
return push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(TMP_REG1) | S2(TMP_LINK), MOVABLE_INS | SET_FLAGS);
|
||||
|
||||
case SLJIT_AND:
|
||||
return push_inst(compiler, AND | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
|
||||
|
||||
case SLJIT_OR:
|
||||
return push_inst(compiler, OR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
|
||||
|
||||
case SLJIT_XOR:
|
||||
return push_inst(compiler, XOR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DRF(dst, flags));
|
||||
|
||||
case SLJIT_SHL:
|
||||
FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst)));
|
||||
return !(flags & SET_FLAGS) ? SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS);
|
||||
|
||||
case SLJIT_LSHR:
|
||||
FAIL_IF(push_inst(compiler, SRL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst)));
|
||||
return !(flags & SET_FLAGS) ? SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS);
|
||||
|
||||
case SLJIT_ASHR:
|
||||
FAIL_IF(push_inst(compiler, SRA | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst)));
|
||||
return !(flags & SET_FLAGS) ? SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS);
|
||||
}
|
||||
|
||||
SLJIT_UNREACHABLE();
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src)
|
||||
{
|
||||
sljit_s32 reg_index = 8;
|
||||
sljit_s32 word_reg_index = 8;
|
||||
sljit_s32 float_arg_index = 1;
|
||||
sljit_s32 double_arg_count = 0;
|
||||
sljit_u32 float_offset = (16 + 6) * sizeof(sljit_sw);
|
||||
sljit_s32 types = 0;
|
||||
sljit_s32 reg = 0;
|
||||
sljit_s32 move_to_tmp2 = 0;
|
||||
|
||||
if (src)
|
||||
reg = reg_map[*src & REG_MASK];
|
||||
|
||||
arg_types >>= SLJIT_ARG_SHIFT;
|
||||
|
||||
while (arg_types) {
|
||||
types = (types << SLJIT_ARG_SHIFT) | (arg_types & SLJIT_ARG_MASK);
|
||||
|
||||
switch (arg_types & SLJIT_ARG_MASK) {
|
||||
case SLJIT_ARG_TYPE_F64:
|
||||
float_arg_index++;
|
||||
double_arg_count++;
|
||||
if (reg_index == reg || reg_index + 1 == reg)
|
||||
move_to_tmp2 = 1;
|
||||
reg_index += 2;
|
||||
break;
|
||||
case SLJIT_ARG_TYPE_F32:
|
||||
float_arg_index++;
|
||||
if (reg_index == reg)
|
||||
move_to_tmp2 = 1;
|
||||
reg_index++;
|
||||
break;
|
||||
default:
|
||||
if (reg_index != word_reg_index && reg_index == reg)
|
||||
move_to_tmp2 = 1;
|
||||
reg_index++;
|
||||
word_reg_index++;
|
||||
break;
|
||||
}
|
||||
|
||||
arg_types >>= SLJIT_ARG_SHIFT;
|
||||
}
|
||||
|
||||
if (move_to_tmp2) {
|
||||
if (reg < 14)
|
||||
FAIL_IF(push_inst(compiler, OR | D(TMP_REG1) | S1(0) | S2A(reg), DR(TMP_REG1)));
|
||||
*src = TMP_REG1;
|
||||
}
|
||||
|
||||
arg_types = types;
|
||||
|
||||
while (arg_types) {
|
||||
switch (arg_types & SLJIT_ARG_MASK) {
|
||||
case SLJIT_ARG_TYPE_F64:
|
||||
float_arg_index--;
|
||||
if (float_arg_index == 4 && double_arg_count == 4) {
|
||||
/* The address is not doubleword aligned, so two instructions are required to store the double. */
|
||||
FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | S1(SLJIT_SP) | IMM((16 + 7) * sizeof(sljit_sw)), MOVABLE_INS));
|
||||
FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | (1 << 25) | S1(SLJIT_SP) | IMM((16 + 8) * sizeof(sljit_sw)), MOVABLE_INS));
|
||||
}
|
||||
else
|
||||
FAIL_IF(push_inst(compiler, STDF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
|
||||
float_offset -= sizeof(sljit_f64);
|
||||
break;
|
||||
case SLJIT_ARG_TYPE_F32:
|
||||
float_arg_index--;
|
||||
FAIL_IF(push_inst(compiler, STF | FD(float_arg_index) | S1(SLJIT_SP) | IMM(float_offset), MOVABLE_INS));
|
||||
float_offset -= sizeof(sljit_f64);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
arg_types >>= SLJIT_ARG_SHIFT;
|
||||
}
|
||||
|
||||
float_offset = (16 + 6) * sizeof(sljit_sw);
|
||||
|
||||
while (types) {
|
||||
switch (types & SLJIT_ARG_MASK) {
|
||||
case SLJIT_ARG_TYPE_F64:
|
||||
reg_index -= 2;
|
||||
if (reg_index < 14) {
|
||||
if ((reg_index & 0x1) != 0) {
|
||||
FAIL_IF(push_inst(compiler, LDUW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index));
|
||||
if (reg_index < 8 + 6 - 1)
|
||||
FAIL_IF(push_inst(compiler, LDUW | DA(reg_index + 1) | S1(SLJIT_SP) | IMM(float_offset + sizeof(sljit_sw)), reg_index + 1));
|
||||
}
|
||||
else
|
||||
FAIL_IF(push_inst(compiler, LDD | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index));
|
||||
}
|
||||
float_offset -= sizeof(sljit_f64);
|
||||
break;
|
||||
case SLJIT_ARG_TYPE_F32:
|
||||
reg_index--;
|
||||
if (reg_index < 8 + 6)
|
||||
FAIL_IF(push_inst(compiler, LDUW | DA(reg_index) | S1(SLJIT_SP) | IMM(float_offset), reg_index));
|
||||
float_offset -= sizeof(sljit_f64);
|
||||
break;
|
||||
default:
|
||||
reg_index--;
|
||||
word_reg_index--;
|
||||
|
||||
if (reg_index != word_reg_index) {
|
||||
if (reg_index < 14)
|
||||
FAIL_IF(push_inst(compiler, OR | DA(reg_index) | S1(0) | S2A(word_reg_index), reg_index));
|
||||
else
|
||||
FAIL_IF(push_inst(compiler, STW | DA(word_reg_index) | S1(SLJIT_SP) | IMM(92), word_reg_index));
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
types >>= SLJIT_ARG_SHIFT;
|
||||
}
|
||||
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
static SLJIT_INLINE sljit_s32 emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw init_value)
|
||||
{
|
||||
FAIL_IF(push_inst(compiler, SETHI | D(dst) | ((init_value >> 10) & 0x3fffff), DR(dst)));
|
||||
return push_inst(compiler, OR | D(dst) | S1(dst) | IMM_ARG | (init_value & 0x3ff), DR(dst));
|
||||
}
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
|
||||
{
|
||||
sljit_ins *inst = (sljit_ins *)addr;
|
||||
SLJIT_UNUSED_ARG(executable_offset);
|
||||
|
||||
SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 0);
|
||||
SLJIT_ASSERT(((inst[0] & 0xc1c00000) == 0x01000000) && ((inst[1] & 0xc1f82000) == 0x80102000));
|
||||
inst[0] = (inst[0] & 0xffc00000) | ((new_target >> 10) & 0x3fffff);
|
||||
inst[1] = (inst[1] & 0xfffffc00) | (new_target & 0x3ff);
|
||||
SLJIT_UPDATE_WX_FLAGS(inst, inst + 2, 1);
|
||||
inst = (sljit_ins *)SLJIT_ADD_EXEC_OFFSET(inst, executable_offset);
|
||||
SLJIT_CACHE_FLUSH(inst, inst + 2);
|
||||
}
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
|
||||
{
|
||||
sljit_set_jump_addr(addr, (sljit_uw)new_constant, executable_offset);
|
||||
}
|
1673
thirdparty/pcre2/src/sljit/sljitNativeSPARC_common.c
vendored
1673
thirdparty/pcre2/src/sljit/sljitNativeSPARC_common.c
vendored
File diff suppressed because it is too large
Load diff
1029
thirdparty/pcre2/src/sljit/sljitNativeX86_32.c
vendored
1029
thirdparty/pcre2/src/sljit/sljitNativeX86_32.c
vendored
File diff suppressed because it is too large
Load diff
234
thirdparty/pcre2/src/sljit/sljitNativeX86_64.c
vendored
234
thirdparty/pcre2/src/sljit/sljitNativeX86_64.c
vendored
|
@ -101,34 +101,38 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw
|
|||
/* Calculate size of b. */
|
||||
inst_size += 1; /* mod r/m byte. */
|
||||
if (b & SLJIT_MEM) {
|
||||
if (!(b & OFFS_REG_MASK)) {
|
||||
if (NOT_HALFWORD(immb)) {
|
||||
PTR_FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immb));
|
||||
immb = 0;
|
||||
if (b & REG_MASK)
|
||||
b |= TO_OFFS_REG(TMP_REG2);
|
||||
else
|
||||
b |= TMP_REG2;
|
||||
}
|
||||
else if (reg_lmap[b & REG_MASK] == 4)
|
||||
b |= TO_OFFS_REG(SLJIT_SP);
|
||||
if (!(b & OFFS_REG_MASK) && NOT_HALFWORD(immb)) {
|
||||
PTR_FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immb));
|
||||
immb = 0;
|
||||
if (b & REG_MASK)
|
||||
b |= TO_OFFS_REG(TMP_REG2);
|
||||
else
|
||||
b |= TMP_REG2;
|
||||
}
|
||||
|
||||
if (!(b & REG_MASK))
|
||||
inst_size += 1 + sizeof(sljit_s32); /* SIB byte required to avoid RIP based addressing. */
|
||||
else {
|
||||
if (reg_map[b & REG_MASK] >= 8)
|
||||
rex |= REX_B;
|
||||
|
||||
if (immb != 0 && (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP))) {
|
||||
if (immb != 0 && !(b & OFFS_REG_MASK)) {
|
||||
/* Immediate operand. */
|
||||
if (immb <= 127 && immb >= -128)
|
||||
inst_size += sizeof(sljit_s8);
|
||||
else
|
||||
inst_size += sizeof(sljit_s32);
|
||||
}
|
||||
else if (reg_lmap[b & REG_MASK] == 5)
|
||||
inst_size += sizeof(sljit_s8);
|
||||
else if (reg_lmap[b & REG_MASK] == 5) {
|
||||
/* Swap registers if possible. */
|
||||
if ((b & OFFS_REG_MASK) && (immb & 0x3) == 0 && reg_lmap[OFFS_REG(b)] != 5)
|
||||
b = SLJIT_MEM | OFFS_REG(b) | TO_OFFS_REG(b & REG_MASK);
|
||||
else
|
||||
inst_size += sizeof(sljit_s8);
|
||||
}
|
||||
|
||||
if (reg_map[b & REG_MASK] >= 8)
|
||||
rex |= REX_B;
|
||||
|
||||
if (reg_lmap[b & REG_MASK] == 4 && !(b & OFFS_REG_MASK))
|
||||
b |= TO_OFFS_REG(SLJIT_SP);
|
||||
|
||||
if (b & OFFS_REG_MASK) {
|
||||
inst_size += 1; /* SIB byte. */
|
||||
|
@ -153,9 +157,9 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw
|
|||
inst_size += 4;
|
||||
}
|
||||
else if (flags & EX86_SHIFT_INS) {
|
||||
imma &= compiler->mode32 ? 0x1f : 0x3f;
|
||||
SLJIT_ASSERT(imma <= (compiler->mode32 ? 0x1f : 0x3f));
|
||||
if (imma != 1) {
|
||||
inst_size ++;
|
||||
inst_size++;
|
||||
flags |= EX86_BYTE_ARG;
|
||||
}
|
||||
} else if (flags & EX86_BYTE_ARG)
|
||||
|
@ -223,7 +227,7 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw
|
|||
} else if (b & REG_MASK) {
|
||||
reg_lmap_b = reg_lmap[b & REG_MASK];
|
||||
|
||||
if (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP) || reg_lmap_b == 5) {
|
||||
if (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) {
|
||||
if (immb != 0 || reg_lmap_b == 5) {
|
||||
if (immb <= 127 && immb >= -128)
|
||||
*buf_ptr |= 0x40;
|
||||
|
@ -248,8 +252,14 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw
|
|||
}
|
||||
}
|
||||
else {
|
||||
if (reg_lmap_b == 5)
|
||||
*buf_ptr |= 0x40;
|
||||
|
||||
*buf_ptr++ |= 0x04;
|
||||
*buf_ptr++ = U8(reg_lmap_b | (reg_lmap[OFFS_REG(b)] << 3) | (immb << 6));
|
||||
|
||||
if (reg_lmap_b == 5)
|
||||
*buf_ptr++ = 0;
|
||||
}
|
||||
}
|
||||
else {
|
||||
|
@ -366,7 +376,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
|
|||
{
|
||||
sljit_uw size;
|
||||
sljit_s32 word_arg_count = 0;
|
||||
sljit_s32 saved_arg_count = 0;
|
||||
sljit_s32 saved_arg_count = SLJIT_KEPT_SAVEDS_COUNT(options);
|
||||
sljit_s32 saved_regs_size, tmp, i;
|
||||
#ifdef _WIN64
|
||||
sljit_s32 saved_float_regs_size;
|
||||
|
@ -379,16 +389,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
|
|||
CHECK(check_sljit_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
|
||||
set_emit_enter(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
|
||||
|
||||
if (options & SLJIT_ENTER_REG_ARG)
|
||||
arg_types = 0;
|
||||
|
||||
/* Emit ENDBR64 at function entry if needed. */
|
||||
FAIL_IF(emit_endbranch(compiler));
|
||||
|
||||
compiler->mode32 = 0;
|
||||
|
||||
/* Including the return address saved by the call instruction. */
|
||||
saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
|
||||
saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - saved_arg_count, 1);
|
||||
|
||||
tmp = SLJIT_S0 - saveds;
|
||||
for (i = SLJIT_S0; i > tmp; i--) {
|
||||
for (i = SLJIT_S0 - saved_arg_count; i > tmp; i--) {
|
||||
size = reg_map[i] >= 8 ? 2 : 1;
|
||||
inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
|
||||
FAIL_IF(!inst);
|
||||
|
@ -561,15 +574,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
|
|||
#endif /* _WIN64 */
|
||||
|
||||
/* Including the return address saved by the call instruction. */
|
||||
saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
|
||||
saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 1);
|
||||
compiler->local_size = ((local_size + saved_regs_size + 0xf) & ~0xf) - saved_regs_size;
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
||||
static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit_s32 is_return_to)
|
||||
{
|
||||
sljit_uw size;
|
||||
sljit_s32 i, tmp;
|
||||
sljit_s32 local_size, i, tmp;
|
||||
sljit_u8 *inst;
|
||||
#ifdef _WIN64
|
||||
sljit_s32 saved_float_regs_offset;
|
||||
|
@ -598,30 +611,21 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
|||
*inst = MOVAPS_x_xm;
|
||||
saved_float_regs_offset += 16;
|
||||
}
|
||||
|
||||
compiler->mode32 = 0;
|
||||
}
|
||||
#endif /* _WIN64 */
|
||||
|
||||
if (compiler->local_size > 0) {
|
||||
if (compiler->local_size <= 127) {
|
||||
inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
|
||||
FAIL_IF(!inst);
|
||||
INC_SIZE(4);
|
||||
*inst++ = REX_W;
|
||||
*inst++ = GROUP_BINARY_83;
|
||||
*inst++ = MOD_REG | ADD | 4;
|
||||
*inst = U8(compiler->local_size);
|
||||
}
|
||||
else {
|
||||
inst = (sljit_u8*)ensure_buf(compiler, 1 + 7);
|
||||
FAIL_IF(!inst);
|
||||
INC_SIZE(7);
|
||||
*inst++ = REX_W;
|
||||
*inst++ = GROUP_BINARY_81;
|
||||
*inst++ = MOD_REG | ADD | 4;
|
||||
sljit_unaligned_store_s32(inst, compiler->local_size);
|
||||
}
|
||||
local_size = compiler->local_size;
|
||||
|
||||
if (is_return_to && compiler->scratches < SLJIT_FIRST_SAVED_REG && (compiler->saveds == SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
|
||||
local_size += SSIZE_OF(sw);
|
||||
is_return_to = 0;
|
||||
}
|
||||
|
||||
if (local_size > 0)
|
||||
BINARY_IMM32(ADD, local_size, SLJIT_SP, 0);
|
||||
|
||||
tmp = compiler->scratches;
|
||||
for (i = SLJIT_FIRST_SAVED_REG; i <= tmp; i++) {
|
||||
size = reg_map[i] >= 8 ? 2 : 1;
|
||||
|
@ -633,8 +637,8 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
|||
POP_REG(reg_lmap[i]);
|
||||
}
|
||||
|
||||
tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
|
||||
for (i = tmp; i <= SLJIT_S0; i++) {
|
||||
tmp = SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options);
|
||||
for (i = SLJIT_S0 + 1 - compiler->saveds; i <= tmp; i++) {
|
||||
size = reg_map[i] >= 8 ? 2 : 1;
|
||||
inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
|
||||
FAIL_IF(!inst);
|
||||
|
@ -644,6 +648,9 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
|
|||
POP_REG(reg_lmap[i]);
|
||||
}
|
||||
|
||||
if (is_return_to)
|
||||
BINARY_IMM32(ADD, sizeof(sljit_sw), SLJIT_SP, 0);
|
||||
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -654,7 +661,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler
|
|||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_return_void(compiler));
|
||||
|
||||
FAIL_IF(emit_stack_frame_release(compiler));
|
||||
compiler->mode32 = 0;
|
||||
|
||||
FAIL_IF(emit_stack_frame_release(compiler, 0));
|
||||
|
||||
inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
|
||||
FAIL_IF(!inst);
|
||||
|
@ -663,6 +672,28 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler
|
|||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_to(struct sljit_compiler *compiler,
|
||||
sljit_s32 src, sljit_sw srcw)
|
||||
{
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_return_to(compiler, src, srcw));
|
||||
|
||||
compiler->mode32 = 0;
|
||||
|
||||
if ((src & SLJIT_MEM) || (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options)))) {
|
||||
ADJUST_LOCAL_OFFSET(src, srcw);
|
||||
|
||||
EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
|
||||
src = TMP_REG2;
|
||||
srcw = 0;
|
||||
}
|
||||
|
||||
FAIL_IF(emit_stack_frame_release(compiler, 1));
|
||||
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_ijump(compiler, SLJIT_JUMP, src, srcw);
|
||||
}
|
||||
|
||||
/* --------------------------------------------------------------------- */
|
||||
/* Call / return instructions */
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
@ -786,17 +817,15 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
|
|||
|
||||
compiler->mode32 = 0;
|
||||
|
||||
PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));
|
||||
if ((type & 0xff) != SLJIT_CALL_REG_ARG)
|
||||
PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));
|
||||
|
||||
if (type & SLJIT_CALL_RETURN) {
|
||||
PTR_FAIL_IF(emit_stack_frame_release(compiler));
|
||||
PTR_FAIL_IF(emit_stack_frame_release(compiler, 0));
|
||||
type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
|
||||
}
|
||||
|
||||
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|
||||
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
|
||||
compiler->skip_checks = 1;
|
||||
#endif
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_jump(compiler, type);
|
||||
}
|
||||
|
||||
|
@ -816,22 +845,21 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
|
|||
}
|
||||
|
||||
if (type & SLJIT_CALL_RETURN) {
|
||||
if (src >= SLJIT_FIRST_SAVED_REG && src <= SLJIT_S0) {
|
||||
if (src >= SLJIT_FIRST_SAVED_REG && src <= (SLJIT_S0 - SLJIT_KEPT_SAVEDS_COUNT(compiler->options))) {
|
||||
EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
|
||||
src = TMP_REG2;
|
||||
}
|
||||
|
||||
FAIL_IF(emit_stack_frame_release(compiler));
|
||||
type = SLJIT_JUMP;
|
||||
FAIL_IF(emit_stack_frame_release(compiler, 0));
|
||||
}
|
||||
|
||||
FAIL_IF(call_with_args(compiler, arg_types, &src));
|
||||
if ((type & 0xff) != SLJIT_CALL_REG_ARG)
|
||||
FAIL_IF(call_with_args(compiler, arg_types, &src));
|
||||
|
||||
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|
||||
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
|
||||
compiler->skip_checks = 1;
|
||||
#endif
|
||||
if (type & SLJIT_CALL_RETURN)
|
||||
type = SLJIT_JUMP;
|
||||
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_ijump(compiler, type, src, srcw);
|
||||
}
|
||||
|
||||
|
@ -907,9 +935,89 @@ static sljit_s32 emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src
|
|||
}
|
||||
|
||||
/* --------------------------------------------------------------------- */
|
||||
/* Extend input */
|
||||
/* Other operations */
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
|
||||
sljit_s32 reg,
|
||||
sljit_s32 mem, sljit_sw memw)
|
||||
{
|
||||
sljit_u8* inst;
|
||||
sljit_s32 i, next, reg_idx;
|
||||
sljit_u8 regs[2];
|
||||
|
||||
CHECK_ERROR();
|
||||
CHECK(check_sljit_emit_mem(compiler, type, reg, mem, memw));
|
||||
|
||||
if (!(reg & REG_PAIR_MASK))
|
||||
return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);
|
||||
|
||||
ADJUST_LOCAL_OFFSET(mem, memw);
|
||||
|
||||
compiler->mode32 = 0;
|
||||
|
||||
if ((mem & REG_MASK) == 0) {
|
||||
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, memw);
|
||||
|
||||
mem = SLJIT_MEM1(TMP_REG1);
|
||||
memw = 0;
|
||||
} else if (!(mem & OFFS_REG_MASK) && ((memw < HALFWORD_MIN) || (memw > HALFWORD_MAX - SSIZE_OF(sw)))) {
|
||||
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, memw);
|
||||
|
||||
mem = SLJIT_MEM2(mem & REG_MASK, TMP_REG1);
|
||||
memw = 0;
|
||||
}
|
||||
|
||||
regs[0] = U8(REG_PAIR_FIRST(reg));
|
||||
regs[1] = U8(REG_PAIR_SECOND(reg));
|
||||
|
||||
next = SSIZE_OF(sw);
|
||||
|
||||
if (!(type & SLJIT_MEM_STORE) && (regs[0] == (mem & REG_MASK) || regs[0] == OFFS_REG(mem))) {
|
||||
if (regs[1] == (mem & REG_MASK) || regs[1] == OFFS_REG(mem)) {
|
||||
/* Base and offset cannot be TMP_REG1. */
|
||||
EMIT_MOV(compiler, TMP_REG1, 0, OFFS_REG(mem), 0);
|
||||
|
||||
if (regs[1] == OFFS_REG(mem))
|
||||
next = -SSIZE_OF(sw);
|
||||
|
||||
mem = (mem & ~OFFS_REG_MASK) | TO_OFFS_REG(TMP_REG1);
|
||||
} else {
|
||||
next = -SSIZE_OF(sw);
|
||||
|
||||
if (!(mem & OFFS_REG_MASK))
|
||||
memw += SSIZE_OF(sw);
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
reg_idx = next > 0 ? i : (i ^ 0x1);
|
||||
reg = regs[reg_idx];
|
||||
|
||||
if ((mem & OFFS_REG_MASK) && (reg_idx == 1)) {
|
||||
inst = (sljit_u8*)ensure_buf(compiler, (sljit_uw)(1 + 5));
|
||||
FAIL_IF(!inst);
|
||||
|
||||
INC_SIZE(5);
|
||||
|
||||
inst[0] = U8(REX_W | ((reg_map[reg] >= 8) ? REX_R : 0) | ((reg_map[mem & REG_MASK] >= 8) ? REX_B : 0) | ((reg_map[OFFS_REG(mem)] >= 8) ? REX_X : 0));
|
||||
inst[1] = (type & SLJIT_MEM_STORE) ? MOV_rm_r : MOV_r_rm;
|
||||
inst[2] = 0x44 | U8(reg_lmap[reg] << 3);
|
||||
inst[3] = U8(memw << 6) | U8(reg_lmap[OFFS_REG(mem)] << 3) | reg_lmap[mem & REG_MASK];
|
||||
inst[4] = sizeof(sljit_sw);
|
||||
} else if (type & SLJIT_MEM_STORE) {
|
||||
EMIT_MOV(compiler, mem, memw, reg, 0);
|
||||
} else {
|
||||
EMIT_MOV(compiler, reg, 0, mem, memw);
|
||||
}
|
||||
|
||||
if (!(mem & OFFS_REG_MASK))
|
||||
memw += next;
|
||||
}
|
||||
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
static sljit_s32 emit_mov_int(struct sljit_compiler *compiler, sljit_s32 sign,
|
||||
sljit_s32 dst, sljit_sw dstw,
|
||||
sljit_s32 src, sljit_sw srcw)
|
||||
|
|
543
thirdparty/pcre2/src/sljit/sljitNativeX86_common.c
vendored
543
thirdparty/pcre2/src/sljit/sljitNativeX86_common.c
vendored
|
@ -26,11 +26,7 @@
|
|||
|
||||
SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
|
||||
{
|
||||
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
|
||||
return "x86" SLJIT_CPUINFO " ABI:fastcall";
|
||||
#else
|
||||
return "x86" SLJIT_CPUINFO;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -78,10 +74,7 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 3] = {
|
|||
|
||||
#define CHECK_EXTRA_REGS(p, w, do) \
|
||||
if (p >= SLJIT_R3 && p <= SLJIT_S3) { \
|
||||
if (p <= compiler->scratches) \
|
||||
w = compiler->scratches_offset + ((p) - SLJIT_R3) * SSIZE_OF(sw); \
|
||||
else \
|
||||
w = compiler->locals_offset + ((p) - SLJIT_S2) * SSIZE_OF(sw); \
|
||||
w = (2 * SSIZE_OF(sw)) + ((p) - SLJIT_R3) * SSIZE_OF(sw); \
|
||||
p = SLJIT_MEM1(SLJIT_SP); \
|
||||
do; \
|
||||
}
|
||||
|
@ -181,6 +174,7 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
|
|||
#define AND_rm_r 0x21
|
||||
#define ANDPD_x_xm 0x54
|
||||
#define BSR_r_rm (/* GROUP_0F */ 0xbd)
|
||||
#define BSF_r_rm (/* GROUP_0F */ 0xbc)
|
||||
#define CALL_i32 0xe8
|
||||
#define CALL_rm (/* GROUP_FF */ 2 << 3)
|
||||
#define CDQ 0x99
|
||||
|
@ -194,6 +188,8 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
|
|||
#define CVTTSD2SI_r_xm 0x2c
|
||||
#define DIV (/* GROUP_F7 */ 6 << 3)
|
||||
#define DIVSD_x_xm 0x5e
|
||||
#define FLDS 0xd9
|
||||
#define FLDL 0xdd
|
||||
#define FSTPS 0xd9
|
||||
#define FSTPD 0xdd
|
||||
#define INT3 0xcc
|
||||
|
@ -209,6 +205,7 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
|
|||
#define JMP_rm (/* GROUP_FF */ 4 << 3)
|
||||
#define LEA_r_m 0x8d
|
||||
#define LOOP_i8 0xe2
|
||||
#define LZCNT_r_rm (/* GROUP_F3 */ /* GROUP_0F */ 0xbd)
|
||||
#define MOV_r_rm 0x8b
|
||||
#define MOV_r_i32 0xb8
|
||||
#define MOV_rm_r 0x89
|
||||
|
@ -242,6 +239,8 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
|
|||
#define PUSH_r 0x50
|
||||
#define PUSH_rm (/* GROUP_FF */ 6 << 3)
|
||||
#define PUSHF 0x9c
|
||||
#define ROL (/* SHIFT */ 0 << 3)
|
||||
#define ROR (/* SHIFT */ 1 << 3)
|
||||
#define RET_near 0xc3
|
||||
#define RET_i16 0xc2
|
||||
#define SBB (/* BINARY */ 3 << 3)
|
||||
|
@ -250,6 +249,8 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
|
|||
#define SBB_rm_r 0x19
|
||||
#define SAR (/* SHIFT */ 7 << 3)
|
||||
#define SHL (/* SHIFT */ 4 << 3)
|
||||
#define SHLD (/* GROUP_0F */ 0xa5)
|
||||
#define SHRD (/* GROUP_0F */ 0xad)
|
||||
#define SHR (/* SHIFT */ 5 << 3)
|
||||
#define SUB (/* BINARY */ 5 << 3)
|
||||
#define SUB_EAX_i32 0x2d
|
||||
|
@ -258,6 +259,7 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
|
|||
#define SUBSD_x_xm 0x5c
|
||||
#define TEST_EAX_i32 0xa9
|
||||
#define TEST_rm_r 0x85
|
||||
#define TZCNT_r_rm (/* GROUP_F3 */ /* GROUP_0F */ 0xbc)
|
||||
#define UCOMISD_x_xm 0x2e
|
||||
#define UNPCKLPD_x_xm 0x14
|
||||
#define XCHG_EAX_r 0x90
|
||||
|
@ -269,6 +271,7 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
|
|||
#define XORPD_x_xm 0x57
|
||||
|
||||
#define GROUP_0F 0x0f
|
||||
#define GROUP_F3 0xf3
|
||||
#define GROUP_F7 0xf7
|
||||
#define GROUP_FF 0xff
|
||||
#define GROUP_BINARY_81 0x81
|
||||
|
@ -290,10 +293,15 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
|
|||
/* Multithreading does not affect these static variables, since they store
|
||||
built-in CPU features. Therefore they can be overwritten by different threads
|
||||
if they detect the CPU features in the same time. */
|
||||
#define CPU_FEATURE_DETECTED 0x001
|
||||
#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
|
||||
static sljit_s32 cpu_has_sse2 = -1;
|
||||
#define CPU_FEATURE_SSE2 0x002
|
||||
#endif
|
||||
static sljit_s32 cpu_has_cmov = -1;
|
||||
#define CPU_FEATURE_LZCNT 0x004
|
||||
#define CPU_FEATURE_TZCNT 0x008
|
||||
#define CPU_FEATURE_CMOV 0x010
|
||||
|
||||
static sljit_u32 cpu_feature_list = 0;
|
||||
|
||||
#ifdef _WIN32_WCE
|
||||
#include <cmnintrin.h>
|
||||
|
@ -326,17 +334,64 @@ static SLJIT_INLINE void sljit_unaligned_store_sw(void *addr, sljit_sw value)
|
|||
|
||||
static void get_cpu_features(void)
|
||||
{
|
||||
sljit_u32 features;
|
||||
sljit_u32 feature_list = CPU_FEATURE_DETECTED;
|
||||
sljit_u32 value;
|
||||
|
||||
#if defined(_MSC_VER) && _MSC_VER >= 1400
|
||||
|
||||
int CPUInfo[4];
|
||||
|
||||
__cpuid(CPUInfo, 0);
|
||||
if (CPUInfo[0] >= 7) {
|
||||
__cpuidex(CPUInfo, 7, 0);
|
||||
if (CPUInfo[1] & 0x8)
|
||||
feature_list |= CPU_FEATURE_TZCNT;
|
||||
}
|
||||
|
||||
__cpuid(CPUInfo, (int)0x80000001);
|
||||
if (CPUInfo[2] & 0x20)
|
||||
feature_list |= CPU_FEATURE_LZCNT;
|
||||
|
||||
__cpuid(CPUInfo, 1);
|
||||
features = (sljit_u32)CPUInfo[3];
|
||||
value = (sljit_u32)CPUInfo[3];
|
||||
|
||||
#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C)
|
||||
|
||||
/* AT&T syntax. */
|
||||
__asm__ (
|
||||
"movl $0x0, %%eax\n"
|
||||
"lzcnt %%eax, %%eax\n"
|
||||
"setnz %%al\n"
|
||||
"movl %%eax, %0\n"
|
||||
: "=g" (value)
|
||||
:
|
||||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
: "eax"
|
||||
#else
|
||||
: "rax"
|
||||
#endif
|
||||
);
|
||||
|
||||
if (value & 0x1)
|
||||
feature_list |= CPU_FEATURE_LZCNT;
|
||||
|
||||
__asm__ (
|
||||
"movl $0x0, %%eax\n"
|
||||
"tzcnt %%eax, %%eax\n"
|
||||
"setnz %%al\n"
|
||||
"movl %%eax, %0\n"
|
||||
: "=g" (value)
|
||||
:
|
||||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
: "eax"
|
||||
#else
|
||||
: "rax"
|
||||
#endif
|
||||
);
|
||||
|
||||
if (value & 0x1)
|
||||
feature_list |= CPU_FEATURE_TZCNT;
|
||||
|
||||
__asm__ (
|
||||
"movl $0x1, %%eax\n"
|
||||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
|
@ -349,7 +404,7 @@ static void get_cpu_features(void)
|
|||
"pop %%ebx\n"
|
||||
#endif
|
||||
"movl %%edx, %0\n"
|
||||
: "=g" (features)
|
||||
: "=g" (value)
|
||||
:
|
||||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
: "%eax", "%ecx", "%edx"
|
||||
|
@ -361,47 +416,83 @@ static void get_cpu_features(void)
|
|||
#else /* _MSC_VER && _MSC_VER >= 1400 */
|
||||
|
||||
/* Intel syntax. */
|
||||
__asm {
|
||||
mov eax, 0
|
||||
lzcnt eax, eax
|
||||
setnz al
|
||||
mov value, eax
|
||||
}
|
||||
|
||||
if (value & 0x1)
|
||||
feature_list |= CPU_FEATURE_LZCNT;
|
||||
|
||||
__asm {
|
||||
mov eax, 0
|
||||
tzcnt eax, eax
|
||||
setnz al
|
||||
mov value, eax
|
||||
}
|
||||
|
||||
if (value & 0x1)
|
||||
feature_list |= CPU_FEATURE_TZCNT;
|
||||
|
||||
__asm {
|
||||
mov eax, 1
|
||||
cpuid
|
||||
mov features, edx
|
||||
mov value, edx
|
||||
}
|
||||
|
||||
#endif /* _MSC_VER && _MSC_VER >= 1400 */
|
||||
|
||||
#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
|
||||
cpu_has_sse2 = (features >> 26) & 0x1;
|
||||
if (value & 0x4000000)
|
||||
feature_list |= CPU_FEATURE_SSE2;
|
||||
#endif
|
||||
cpu_has_cmov = (features >> 15) & 0x1;
|
||||
if (value & 0x8000)
|
||||
feature_list |= CPU_FEATURE_CMOV;
|
||||
|
||||
cpu_feature_list = feature_list;
|
||||
}
|
||||
|
||||
static sljit_u8 get_jump_code(sljit_uw type)
|
||||
{
|
||||
switch (type) {
|
||||
case SLJIT_EQUAL:
|
||||
case SLJIT_EQUAL_F64:
|
||||
case SLJIT_F_EQUAL:
|
||||
case SLJIT_UNORDERED_OR_EQUAL:
|
||||
case SLJIT_ORDERED_EQUAL: /* Not supported. */
|
||||
return 0x84 /* je */;
|
||||
|
||||
case SLJIT_NOT_EQUAL:
|
||||
case SLJIT_NOT_EQUAL_F64:
|
||||
case SLJIT_F_NOT_EQUAL:
|
||||
case SLJIT_ORDERED_NOT_EQUAL:
|
||||
case SLJIT_UNORDERED_OR_NOT_EQUAL: /* Not supported. */
|
||||
return 0x85 /* jne */;
|
||||
|
||||
case SLJIT_LESS:
|
||||
case SLJIT_CARRY:
|
||||
case SLJIT_LESS_F64:
|
||||
case SLJIT_F_LESS:
|
||||
case SLJIT_UNORDERED_OR_LESS:
|
||||
case SLJIT_UNORDERED_OR_GREATER:
|
||||
return 0x82 /* jc */;
|
||||
|
||||
case SLJIT_GREATER_EQUAL:
|
||||
case SLJIT_NOT_CARRY:
|
||||
case SLJIT_GREATER_EQUAL_F64:
|
||||
case SLJIT_F_GREATER_EQUAL:
|
||||
case SLJIT_ORDERED_GREATER_EQUAL:
|
||||
case SLJIT_ORDERED_LESS_EQUAL:
|
||||
return 0x83 /* jae */;
|
||||
|
||||
case SLJIT_GREATER:
|
||||
case SLJIT_GREATER_F64:
|
||||
case SLJIT_F_GREATER:
|
||||
case SLJIT_ORDERED_LESS:
|
||||
case SLJIT_ORDERED_GREATER:
|
||||
return 0x87 /* jnbe */;
|
||||
|
||||
case SLJIT_LESS_EQUAL:
|
||||
case SLJIT_LESS_EQUAL_F64:
|
||||
case SLJIT_F_LESS_EQUAL:
|
||||
case SLJIT_UNORDERED_OR_GREATER_EQUAL:
|
||||
case SLJIT_UNORDERED_OR_LESS_EQUAL:
|
||||
return 0x86 /* jbe */;
|
||||
|
||||
case SLJIT_SIG_LESS:
|
||||
|
@ -422,10 +513,10 @@ static sljit_u8 get_jump_code(sljit_uw type)
|
|||
case SLJIT_NOT_OVERFLOW:
|
||||
return 0x81 /* jno */;
|
||||
|
||||
case SLJIT_UNORDERED_F64:
|
||||
case SLJIT_UNORDERED:
|
||||
return 0x8a /* jp */;
|
||||
|
||||
case SLJIT_ORDERED_F64:
|
||||
case SLJIT_ORDERED:
|
||||
return 0x8b /* jpo */;
|
||||
}
|
||||
return 0;
|
||||
|
@ -449,13 +540,13 @@ static sljit_u8* generate_near_jump_code(struct sljit_jump *jump, sljit_u8 *code
|
|||
else
|
||||
label_addr = jump->u.target - (sljit_uw)executable_offset;
|
||||
|
||||
short_jump = (sljit_sw)(label_addr - (jump->addr + 2)) >= -128 && (sljit_sw)(label_addr - (jump->addr + 2)) <= 127;
|
||||
|
||||
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
|
||||
if ((sljit_sw)(label_addr - (jump->addr + 1)) > HALFWORD_MAX || (sljit_sw)(label_addr - (jump->addr + 1)) < HALFWORD_MIN)
|
||||
return generate_far_jump_code(jump, code_ptr);
|
||||
#endif
|
||||
|
||||
short_jump = (sljit_sw)(label_addr - (jump->addr + 2)) >= -128 && (sljit_sw)(label_addr - (jump->addr + 2)) <= 127;
|
||||
|
||||
if (type == SLJIT_JUMP) {
|
||||
if (short_jump)
|
||||
*code_ptr++ = JMP_i8;
|
||||
|
@ -581,32 +672,33 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
|
|||
|
||||
jump = compiler->jumps;
|
||||
while (jump) {
|
||||
jump_addr = jump->addr + (sljit_uw)executable_offset;
|
||||
if (jump->flags & (PATCH_MB | PATCH_MW)) {
|
||||
if (jump->flags & JUMP_LABEL)
|
||||
jump_addr = jump->u.label->addr;
|
||||
else
|
||||
jump_addr = jump->u.target;
|
||||
|
||||
if (jump->flags & PATCH_MB) {
|
||||
SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))) >= -128 && (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))) <= 127);
|
||||
*(sljit_u8*)jump->addr = U8(jump->u.label->addr - (jump_addr + sizeof(sljit_s8)));
|
||||
} else if (jump->flags & PATCH_MW) {
|
||||
if (jump->flags & JUMP_LABEL) {
|
||||
jump_addr -= jump->addr + (sljit_uw)executable_offset;
|
||||
|
||||
if (jump->flags & PATCH_MB) {
|
||||
jump_addr -= sizeof(sljit_s8);
|
||||
SLJIT_ASSERT((sljit_sw)jump_addr >= -128 && (sljit_sw)jump_addr <= 127);
|
||||
*(sljit_u8*)jump->addr = U8(jump_addr);
|
||||
} else {
|
||||
jump_addr -= sizeof(sljit_s32);
|
||||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_sw))));
|
||||
sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)jump_addr);
|
||||
#else
|
||||
SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))) >= HALFWORD_MIN && (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))) <= HALFWORD_MAX);
|
||||
sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))));
|
||||
#endif
|
||||
}
|
||||
else {
|
||||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_sw))));
|
||||
#else
|
||||
SLJIT_ASSERT((sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_s32))) >= HALFWORD_MIN && (sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_s32))) <= HALFWORD_MAX);
|
||||
sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)(jump->u.target - (jump_addr + sizeof(sljit_s32))));
|
||||
SLJIT_ASSERT((sljit_sw)jump_addr >= HALFWORD_MIN && (sljit_sw)jump_addr <= HALFWORD_MAX);
|
||||
sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)jump_addr);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
|
||||
else if (jump->flags & PATCH_MD)
|
||||
sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)jump->u.label->addr);
|
||||
else if (jump->flags & PATCH_MD) {
|
||||
SLJIT_ASSERT(jump->flags & JUMP_LABEL);
|
||||
sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)jump->u.label->addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
jump = jump->next;
|
||||
|
@ -647,9 +739,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
|
|||
#ifdef SLJIT_IS_FPU_AVAILABLE
|
||||
return SLJIT_IS_FPU_AVAILABLE;
|
||||
#elif (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
|
||||
if (cpu_has_sse2 == -1)
|
||||
if (cpu_feature_list == 0)
|
||||
get_cpu_features();
|
||||
return cpu_has_sse2;
|
||||
return (cpu_feature_list & CPU_FEATURE_SSE2) != 0;
|
||||
#else /* SLJIT_DETECT_SSE2 */
|
||||
return 1;
|
||||
#endif /* SLJIT_DETECT_SSE2 */
|
||||
|
@ -657,31 +749,57 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
|
|||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
case SLJIT_HAS_VIRTUAL_REGISTERS:
|
||||
return 1;
|
||||
#endif
|
||||
#endif /* SLJIT_CONFIG_X86_32 */
|
||||
|
||||
case SLJIT_HAS_CLZ:
|
||||
case SLJIT_HAS_CMOV:
|
||||
if (cpu_has_cmov == -1)
|
||||
if (cpu_feature_list == 0)
|
||||
get_cpu_features();
|
||||
return cpu_has_cmov;
|
||||
|
||||
return (cpu_feature_list & CPU_FEATURE_LZCNT) ? 1 : 2;
|
||||
|
||||
case SLJIT_HAS_CTZ:
|
||||
if (cpu_feature_list == 0)
|
||||
get_cpu_features();
|
||||
|
||||
return (cpu_feature_list & CPU_FEATURE_TZCNT) ? 1 : 2;
|
||||
|
||||
case SLJIT_HAS_CMOV:
|
||||
if (cpu_feature_list == 0)
|
||||
get_cpu_features();
|
||||
return (cpu_feature_list & CPU_FEATURE_CMOV) != 0;
|
||||
|
||||
case SLJIT_HAS_ROT:
|
||||
case SLJIT_HAS_PREFETCH:
|
||||
return 1;
|
||||
|
||||
case SLJIT_HAS_SSE2:
|
||||
#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
|
||||
if (cpu_has_sse2 == -1)
|
||||
if (cpu_feature_list == 0)
|
||||
get_cpu_features();
|
||||
return cpu_has_sse2;
|
||||
#else
|
||||
return (cpu_feature_list & CPU_FEATURE_SSE2) != 0;
|
||||
#else /* !SLJIT_DETECT_SSE2 */
|
||||
return 1;
|
||||
#endif
|
||||
#endif /* SLJIT_DETECT_SSE2 */
|
||||
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type)
|
||||
{
|
||||
if (type < SLJIT_UNORDERED || type > SLJIT_ORDERED_LESS_EQUAL)
|
||||
return 0;
|
||||
|
||||
switch (type) {
|
||||
case SLJIT_ORDERED_EQUAL:
|
||||
case SLJIT_UNORDERED_OR_NOT_EQUAL:
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* --------------------------------------------------------------------- */
|
||||
/* Operators */
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
@ -1385,47 +1503,75 @@ static sljit_s32 emit_not_with_flags(struct sljit_compiler *compiler,
|
|||
|
||||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
static const sljit_sw emit_clz_arg = 32 + 31;
|
||||
static const sljit_sw emit_ctz_arg = 32;
|
||||
#endif
|
||||
|
||||
static sljit_s32 emit_clz(struct sljit_compiler *compiler, sljit_s32 op_flags,
|
||||
static sljit_s32 emit_clz_ctz(struct sljit_compiler *compiler, sljit_s32 is_clz,
|
||||
sljit_s32 dst, sljit_sw dstw,
|
||||
sljit_s32 src, sljit_sw srcw)
|
||||
{
|
||||
sljit_u8* inst;
|
||||
sljit_s32 dst_r;
|
||||
sljit_sw max;
|
||||
|
||||
SLJIT_UNUSED_ARG(op_flags);
|
||||
|
||||
if (cpu_has_cmov == -1)
|
||||
if (cpu_feature_list == 0)
|
||||
get_cpu_features();
|
||||
|
||||
dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
|
||||
|
||||
if (is_clz ? (cpu_feature_list & CPU_FEATURE_LZCNT) : (cpu_feature_list & CPU_FEATURE_TZCNT)) {
|
||||
/* Group prefix added separately. */
|
||||
inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
|
||||
FAIL_IF(!inst);
|
||||
INC_SIZE(1);
|
||||
*inst++ = GROUP_F3;
|
||||
|
||||
inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
|
||||
FAIL_IF(!inst);
|
||||
*inst++ = GROUP_0F;
|
||||
*inst = is_clz ? LZCNT_r_rm : TZCNT_r_rm;
|
||||
|
||||
if (dst & SLJIT_MEM)
|
||||
EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
|
||||
FAIL_IF(!inst);
|
||||
*inst++ = GROUP_0F;
|
||||
*inst = BSR_r_rm;
|
||||
*inst = is_clz ? BSR_r_rm : BSF_r_rm;
|
||||
|
||||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
if (cpu_has_cmov) {
|
||||
max = is_clz ? (32 + 31) : 32;
|
||||
|
||||
if (cpu_feature_list & CPU_FEATURE_CMOV) {
|
||||
if (dst_r != TMP_REG1) {
|
||||
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 32 + 31);
|
||||
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, max);
|
||||
inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG1, 0);
|
||||
}
|
||||
else
|
||||
inst = emit_x86_instruction(compiler, 2, dst_r, 0, SLJIT_MEM0(), (sljit_sw)&emit_clz_arg);
|
||||
inst = emit_x86_instruction(compiler, 2, dst_r, 0, SLJIT_MEM0(), is_clz ? (sljit_sw)&emit_clz_arg : (sljit_sw)&emit_ctz_arg);
|
||||
|
||||
FAIL_IF(!inst);
|
||||
*inst++ = GROUP_0F;
|
||||
*inst = CMOVE_r_rm;
|
||||
}
|
||||
else
|
||||
FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, 32 + 31));
|
||||
FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, max));
|
||||
|
||||
inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0);
|
||||
if (is_clz) {
|
||||
inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0);
|
||||
FAIL_IF(!inst);
|
||||
*(inst + 1) |= XOR;
|
||||
}
|
||||
#else
|
||||
if (cpu_has_cmov) {
|
||||
EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, !(op_flags & SLJIT_32) ? (64 + 63) : (32 + 31));
|
||||
if (is_clz)
|
||||
max = compiler->mode32 ? (32 + 31) : (64 + 63);
|
||||
else
|
||||
max = compiler->mode32 ? 32 : 64;
|
||||
|
||||
if (cpu_feature_list & CPU_FEATURE_CMOV) {
|
||||
EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, max);
|
||||
|
||||
inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0);
|
||||
FAIL_IF(!inst);
|
||||
|
@ -1433,14 +1579,15 @@ static sljit_s32 emit_clz(struct sljit_compiler *compiler, sljit_s32 op_flags,
|
|||
*inst = CMOVE_r_rm;
|
||||
}
|
||||
else
|
||||
FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, !(op_flags & SLJIT_32) ? (64 + 63) : (32 + 31)));
|
||||
FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, max));
|
||||
|
||||
inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, !(op_flags & SLJIT_32) ? 63 : 31, dst_r, 0);
|
||||
if (is_clz) {
|
||||
inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, max >> 1, dst_r, 0);
|
||||
FAIL_IF(!inst);
|
||||
*(inst + 1) |= XOR;
|
||||
}
|
||||
#endif
|
||||
|
||||
FAIL_IF(!inst);
|
||||
*(inst + 1) |= XOR;
|
||||
|
||||
if (dst & SLJIT_MEM)
|
||||
EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
|
||||
return SLJIT_SUCCESS;
|
||||
|
@ -1578,7 +1725,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
|
|||
return emit_unary(compiler, NOT_rm, dst, dstw, src, srcw);
|
||||
|
||||
case SLJIT_CLZ:
|
||||
return emit_clz(compiler, op_flags, dst, dstw, src, srcw);
|
||||
case SLJIT_CTZ:
|
||||
return emit_clz_ctz(compiler, (op == SLJIT_CLZ), dst, dstw, src, srcw);
|
||||
}
|
||||
|
||||
return SLJIT_SUCCESS;
|
||||
|
@ -2116,6 +2264,9 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
|
|||
sljit_s32 src1, sljit_sw src1w,
|
||||
sljit_s32 src2, sljit_sw src2w)
|
||||
{
|
||||
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
|
||||
sljit_s32 mode32;
|
||||
#endif
|
||||
sljit_u8* inst;
|
||||
|
||||
if ((src2 & SLJIT_IMM) || (src2 == SLJIT_PREF_SHIFT_REG)) {
|
||||
|
@ -2155,41 +2306,62 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
|
|||
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
|
||||
FAIL_IF(!inst);
|
||||
*inst |= mode;
|
||||
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
|
||||
return emit_mov(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
|
||||
}
|
||||
else if (FAST_IS_REG(dst) && dst != src2 && dst != TMP_REG1 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
|
||||
|
||||
if (FAST_IS_REG(dst) && dst != src2 && dst != TMP_REG1 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
|
||||
if (src1 != dst)
|
||||
EMIT_MOV(compiler, dst, 0, src1, src1w);
|
||||
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
|
||||
mode32 = compiler->mode32;
|
||||
compiler->mode32 = 0;
|
||||
#endif
|
||||
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
|
||||
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
|
||||
compiler->mode32 = mode32;
|
||||
#endif
|
||||
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
|
||||
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, dst, 0);
|
||||
FAIL_IF(!inst);
|
||||
*inst |= mode;
|
||||
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
|
||||
}
|
||||
else {
|
||||
/* This case is complex since ecx itself may be used for
|
||||
addressing, and this case must be supported as well. */
|
||||
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
|
||||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
|
||||
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
|
||||
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
|
||||
FAIL_IF(!inst);
|
||||
*inst |= mode;
|
||||
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0);
|
||||
#else
|
||||
EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
|
||||
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
|
||||
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
|
||||
FAIL_IF(!inst);
|
||||
*inst |= mode;
|
||||
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0);
|
||||
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
|
||||
compiler->mode32 = 0;
|
||||
#endif
|
||||
if (dst != TMP_REG1)
|
||||
return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
|
||||
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
|
||||
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
|
||||
compiler->mode32 = mode32;
|
||||
#endif
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
/* This case is complex since ecx itself may be used for
|
||||
addressing, and this case must be supported as well. */
|
||||
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
|
||||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
|
||||
#else /* !SLJIT_CONFIG_X86_32 */
|
||||
mode32 = compiler->mode32;
|
||||
compiler->mode32 = 0;
|
||||
EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
|
||||
compiler->mode32 = mode32;
|
||||
#endif /* SLJIT_CONFIG_X86_32 */
|
||||
|
||||
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
|
||||
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
|
||||
FAIL_IF(!inst);
|
||||
*inst |= mode;
|
||||
|
||||
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
|
||||
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0);
|
||||
#else
|
||||
compiler->mode32 = 0;
|
||||
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0);
|
||||
compiler->mode32 = mode32;
|
||||
#endif /* SLJIT_CONFIG_X86_32 */
|
||||
|
||||
if (dst != TMP_REG1)
|
||||
return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
|
||||
|
||||
return SLJIT_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -2202,12 +2374,13 @@ static sljit_s32 emit_shift_with_flags(struct sljit_compiler *compiler,
|
|||
/* The CPU does not set flags if the shift count is 0. */
|
||||
if (src2 & SLJIT_IMM) {
|
||||
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
|
||||
if ((src2w & 0x3f) != 0 || (compiler->mode32 && (src2w & 0x1f) != 0))
|
||||
src2w &= compiler->mode32 ? 0x1f : 0x3f;
|
||||
#else /* !SLJIT_CONFIG_X86_64 */
|
||||
src2w &= 0x1f;
|
||||
#endif /* SLJIT_CONFIG_X86_64 */
|
||||
if (src2w != 0)
|
||||
return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w);
|
||||
#else
|
||||
if ((src2w & 0x1f) != 0)
|
||||
return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w);
|
||||
#endif
|
||||
|
||||
if (!set_flags)
|
||||
return emit_mov(compiler, dst, dstw, src1, src1w);
|
||||
/* OR dst, src, 0 */
|
||||
|
@ -2289,14 +2462,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
|
|||
return emit_cum_binary(compiler, BINARY_OPCODE(XOR),
|
||||
dst, dstw, src1, src1w, src2, src2w);
|
||||
case SLJIT_SHL:
|
||||
case SLJIT_MSHL:
|
||||
return emit_shift_with_flags(compiler, SHL, HAS_FLAGS(op),
|
||||
dst, dstw, src1, src1w, src2, src2w);
|
||||
case SLJIT_LSHR:
|
||||
case SLJIT_MLSHR:
|
||||
return emit_shift_with_flags(compiler, SHR, HAS_FLAGS(op),
|
||||
dst, dstw, src1, src1w, src2, src2w);
|
||||
case SLJIT_ASHR:
|
||||
case SLJIT_MASHR:
|
||||
return emit_shift_with_flags(compiler, SAR, HAS_FLAGS(op),
|
||||
dst, dstw, src1, src1w, src2, src2w);
|
||||
case SLJIT_ROTL:
|
||||
return emit_shift_with_flags(compiler, ROL, 0,
|
||||
dst, dstw, src1, src1w, src2, src2w);
|
||||
case SLJIT_ROTR:
|
||||
return emit_shift_with_flags(compiler, ROR, 0,
|
||||
dst, dstw, src1, src1w, src2, src2w);
|
||||
}
|
||||
|
||||
return SLJIT_SUCCESS;
|
||||
|
@ -2312,10 +2494,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil
|
|||
CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
|
||||
|
||||
if (opcode != SLJIT_SUB && opcode != SLJIT_AND) {
|
||||
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|
||||
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
|
||||
compiler->skip_checks = 1;
|
||||
#endif
|
||||
SLJIT_SKIP_CHECKS(compiler);
|
||||
return sljit_emit_op2(compiler, op, TMP_REG1, 0, src1, src1w, src2, src2w);
|
||||
}
|
||||
|
||||
|
@@ -2334,6 +2513,122 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil
return emit_test_binary(compiler, src1, src1w, src2, src2w);
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src_dst,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
sljit_s32 restore_ecx = 0;
sljit_s32 is_rotate, is_left;
sljit_u8* inst;
sljit_sw dstw = 0;
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
sljit_s32 tmp2 = SLJIT_MEM1(SLJIT_SP);
#else /* !SLJIT_CONFIG_X86_32 */
sljit_s32 tmp2 = TMP_REG2;
#endif /* SLJIT_CONFIG_X86_32 */

CHECK_ERROR();
CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w));
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);

CHECK_EXTRA_REGS(src1, src1w, (void)0);
CHECK_EXTRA_REGS(src2, src2w, (void)0);

#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
compiler->mode32 = op & SLJIT_32;
#endif

if (src2 & SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
src2w &= 0x1f;
#else /* !SLJIT_CONFIG_X86_32 */
src2w &= (op & SLJIT_32) ? 0x1f : 0x3f;
#endif /* SLJIT_CONFIG_X86_32 */

if (src2w == 0)
return SLJIT_SUCCESS;
}

is_left = (GET_OPCODE(op) == SLJIT_SHL || GET_OPCODE(op) == SLJIT_MSHL);

is_rotate = (src_dst == src1);
CHECK_EXTRA_REGS(src_dst, dstw, (void)0);

if (is_rotate)
return emit_shift(compiler, is_left ? ROL : ROR, src_dst, dstw, src1, src1w, src2, src2w);

if ((src2 & SLJIT_IMM) || src2 == SLJIT_PREF_SHIFT_REG) {
if (!FAST_IS_REG(src1)) {
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
src1 = TMP_REG1;
}
} else if (FAST_IS_REG(src1)) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
compiler->mode32 = 0;
#endif
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
compiler->mode32 = op & SLJIT_32;
#endif
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);

if (src1 == SLJIT_PREF_SHIFT_REG)
src1 = TMP_REG1;

if (src_dst == SLJIT_PREF_SHIFT_REG)
src_dst = TMP_REG1;

restore_ecx = 1;
} else {
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
compiler->mode32 = 0;
#endif
EMIT_MOV(compiler, tmp2, 0, SLJIT_PREF_SHIFT_REG, 0);
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
compiler->mode32 = op & SLJIT_32;
#endif
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);

src1 = TMP_REG1;

if (src_dst == SLJIT_PREF_SHIFT_REG) {
src_dst = tmp2;
SLJIT_ASSERT(dstw == 0);
}

restore_ecx = 2;
}

inst = emit_x86_instruction(compiler, 2, src1, 0, src_dst, dstw);
FAIL_IF(!inst);
inst[0] = GROUP_0F;

if (src2 & SLJIT_IMM) {
inst[1] = U8((is_left ? SHLD : SHRD) - 1);

/* Immedate argument is added separately. */
inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
FAIL_IF(!inst);
INC_SIZE(1);
*inst = U8(src2w);
} else
inst[1] = U8(is_left ? SHLD : SHRD);

#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
compiler->mode32 = 0;
#endif

if (restore_ecx == 1)
return emit_mov(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
if (restore_ecx == 2)
return emit_mov(compiler, SLJIT_PREF_SHIFT_REG, 0, tmp2, 0);

return SLJIT_SUCCESS;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src, sljit_sw srcw)
{

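The sljit_emit_shift_into() emitter above lowers a funnel shift to the x86 SHLD/SHRD instructions. A minimal caller-side sketch follows; the register and immediate choices are illustrative and not part of this patch, and it assumes the public sljit API declared in sljitLir.h.

#include "sljitLir.h"

/* Sketch only: shift SLJIT_R0 left by 8 and fill the vacated low bits from
 * the top of SLJIT_R1, which is what the SHLD encoding emitted above does.
 * Argument order matches the declaration in this file:
 * (compiler, op, src_dst, src1, src1w, src2, src2w). */
static sljit_s32 emit_funnel_shift_example(struct sljit_compiler *compiler)
{
	return sljit_emit_shift_into(compiler, SLJIT_SHL,
		SLJIT_R0,        /* src_dst: receives the result */
		SLJIT_R1, 0,     /* src1: supplies the incoming bits */
		SLJIT_IMM, 8);   /* src2: shift amount */
}
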
@@ -2516,6 +2811,19 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
switch (GET_FLAG_TYPE(op)) {
case SLJIT_ORDERED_LESS:
case SLJIT_UNORDERED_OR_GREATER_EQUAL:
case SLJIT_UNORDERED_OR_GREATER:
case SLJIT_ORDERED_LESS_EQUAL:
if (!FAST_IS_REG(src2)) {
FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src2, src2w));
src2 = TMP_FREG;
}

return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_32), src2, src1, src1w);
}

if (!FAST_IS_REG(src1)) {
FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src1, src1w));
src1 = TMP_FREG;

@@ -2769,7 +3077,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
ADJUST_LOCAL_OFFSET(dst, dstw);
CHECK_EXTRA_REGS(dst, dstw, (void)0);

type &= 0xff;
/* setcc = jcc + 0x10. */
cond_set = U8(get_jump_code((sljit_uw)type) + 0x10);

@@ -2813,10 +3120,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
}

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
compiler->skip_checks = 1;
#endif
SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0);

#else

@@ -2839,10 +3143,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
}

/* Low byte is not accessible. */
if (cpu_has_cmov == -1)
if (cpu_feature_list == 0)
get_cpu_features();

if (cpu_has_cmov) {
if (cpu_feature_list & CPU_FEATURE_CMOV) {
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 1);
/* a xor reg, reg operation would overwrite the flags. */
EMIT_MOV(compiler, dst, 0, SLJIT_IMM, 0);

@@ -2927,10 +3231,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
if (GET_OPCODE(op) < SLJIT_ADD)
return emit_mov(compiler, dst, dstw, TMP_REG1, 0);

#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
compiler->skip_checks = 1;
#endif
SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0);
#endif /* SLJIT_CONFIG_X86_64 */
}

@@ -2945,7 +3246,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));

#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
dst_reg &= ~SLJIT_32;
type &= ~SLJIT_32;

if (!sljit_has_cpu_feature(SLJIT_HAS_CMOV) || (dst_reg >= SLJIT_R3 && dst_reg <= SLJIT_S3))
return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);

@@ -2958,8 +3259,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
CHECK_EXTRA_REGS(src, srcw, (void)0);

#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
compiler->mode32 = dst_reg & SLJIT_32;
dst_reg &= ~SLJIT_32;
compiler->mode32 = type & SLJIT_32;
type &= ~SLJIT_32;
#endif

if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {

@@ -2971,7 +3272,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
inst = emit_x86_instruction(compiler, 2, dst_reg, 0, src, srcw);
FAIL_IF(!inst);
*inst++ = GROUP_0F;
*inst = U8(get_jump_code(type & 0xff) - 0x40);
*inst = U8(get_jump_code((sljit_uw)type) - 0x40);
return SLJIT_SUCCESS;
}

@@ -59,38 +59,15 @@
#include <sys/mman.h>

#ifdef __NetBSD__
#if defined(PROT_MPROTECT)
#define check_se_protected(ptr, size) (0)
#define SLJIT_PROT_WX PROT_MPROTECT(PROT_EXEC)
#else /* !PROT_MPROTECT */
#ifdef _NETBSD_SOURCE
#include <sys/param.h>
#else /* !_NETBSD_SOURCE */
typedef unsigned int u_int;
#define devmajor_t sljit_s32
#endif /* _NETBSD_SOURCE */
#include <sys/sysctl.h>
#include <unistd.h>

#define check_se_protected(ptr, size) netbsd_se_protected()

static SLJIT_INLINE int netbsd_se_protected(void)
{
int mib[3];
int paxflags;
size_t len = sizeof(paxflags);

mib[0] = CTL_PROC;
mib[1] = getpid();
mib[2] = PROC_PID_PAXFLAGS;

if (SLJIT_UNLIKELY(sysctl(mib, 3, &paxflags, &len, NULL, 0) < 0))
return -1;

return (paxflags & CTL_PROC_PAXFLAGS_MPROTECT) ? -1 : 0;
}
#endif /* PROT_MPROTECT */
#define check_se_protected(ptr, size) (0)
#else /* POSIX */
#if !(defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)
#include <pthread.h>
#define SLJIT_SE_LOCK() pthread_mutex_lock(&se_lock)
#define SLJIT_SE_UNLOCK() pthread_mutex_unlock(&se_lock)
#endif /* !SLJIT_SINGLE_THREADED */

#define check_se_protected(ptr, size) generic_se_protected(ptr, size)

static SLJIT_INLINE int generic_se_protected(void *ptr, sljit_uw size)

@@ -102,22 +79,20 @@ static SLJIT_INLINE int generic_se_protected(void *ptr, sljit_uw size)
}
#endif /* NetBSD */

#if defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED
#ifndef SLJIT_SE_LOCK
#define SLJIT_SE_LOCK()
#endif
#ifndef SLJIT_SE_UNLOCK
#define SLJIT_SE_UNLOCK()
#else /* !SLJIT_SINGLE_THREADED */
#include <pthread.h>
#define SLJIT_SE_LOCK() pthread_mutex_lock(&se_lock)
#define SLJIT_SE_UNLOCK() pthread_mutex_unlock(&se_lock)
#endif /* SLJIT_SINGLE_THREADED */

#endif
#ifndef SLJIT_PROT_WX
#define SLJIT_PROT_WX 0
#endif /* !SLJIT_PROT_WX */
#endif

SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
#if !(defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)
#if !(defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED) \
&& !defined(__NetBSD__)
static pthread_mutex_t se_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
static int se_protected = !SLJIT_PROT_WX;

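For callers, the PaX/W^X detection above stays internal to the allocator; user code only sees the executable-allocation entry points. A minimal sketch, assuming the sljit_malloc_exec()/sljit_free_exec() pair that this file implements is reachable through sljitLir.h:

#include "sljitLir.h"

/* Sketch only: request an executable block and release it again.
 * Under a strict PaX MPROTECT policy without PROT_MPROTECT the
 * allocation may fail, so the NULL check is required. */
static int try_exec_alloc(void)
{
	void *exec_block = sljit_malloc_exec(4096);
	if (exec_block == NULL)
		return -1;
	sljit_free_exec(exec_block);
	return 0;
}
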
951 thirdparty/recastnavigation/Recast/Include/Recast.h (vendored): file diff suppressed because it is too large.
@@ -19,11 +19,11 @@
#ifndef RECASTALLOC_H
#define RECASTALLOC_H

#include <stddef.h>
#include <stdint.h>

#include "RecastAssert.h"

#include <stdlib.h>
#include <stdint.h>

/// Provides hint values to the memory allocator on how long the
/// memory is expected to be used.
enum rcAllocHint

@@ -47,18 +47,27 @@ typedef void (rcFreeFunc)(void* ptr);
/// Sets the base custom allocation functions to be used by Recast.
/// @param[in] allocFunc The memory allocation function to be used by #rcAlloc
/// @param[in] freeFunc The memory de-allocation function to be used by #rcFree
///
/// @see rcAlloc, rcFree
void rcAllocSetCustom(rcAllocFunc *allocFunc, rcFreeFunc *freeFunc);

/// Allocates a memory block.
/// @param[in] size The size, in bytes of memory, to allocate.
/// @param[in] hint A hint to the allocator on how long the memory is expected to be in use.
/// @return A pointer to the beginning of the allocated memory block, or null if the allocation failed.
/// @see rcFree
///
/// @param[in] size The size, in bytes of memory, to allocate.
/// @param[in] hint A hint to the allocator on how long the memory is expected to be in use.
/// @return A pointer to the beginning of the allocated memory block, or null if the allocation failed.
///
/// @see rcFree, rcAllocSetCustom
void* rcAlloc(size_t size, rcAllocHint hint);

/// Deallocates a memory block.
/// @param[in] ptr A pointer to a memory block previously allocated using #rcAlloc.
/// @see rcAlloc
/// Deallocates a memory block. If @p ptr is NULL, this does nothing.
///
/// @warning This function leaves the value of @p ptr unchanged. So it still
/// points to the same (now invalid) location, and not to null.
///
/// @param[in] ptr A pointer to a memory block previously allocated using #rcAlloc.
///
/// @see rcAlloc, rcAllocSetCustom
void rcFree(void* ptr);

/// An implementation of operator new usable for placement new. The default one is part of STL (which we don't use).

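The reworked rcAlloc()/rcFree() documentation above spells out the null-on-failure and dangling-pointer caveats. A small usage sketch consistent with those doc comments (buffer size and contents are illustrative):

#include "RecastAlloc.h"

// Sketch: allocate a permanent buffer through Recast's allocator,
// check for failure as the docs require, and release it with rcFree().
static bool allocateVertexScratch()
{
	float* verts = (float*)rcAlloc(sizeof(float) * 3 * 128, RC_ALLOC_PERM);
	if (verts == NULL)
		return false;    // rcAlloc() returns null on failure
	// ... fill and use verts ...
	rcFree(verts);       // rcFree() leaves the pointer value unchanged,
	verts = NULL;        // so clear it explicitly as the @warning advises
	return true;
}
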
@@ -19,13 +19,10 @@
#ifndef RECASTASSERT_H
#define RECASTASSERT_H

// Note: This header file's only purpose is to include define assert.
// Feel free to change the file and include your own implementation instead.

#ifdef NDEBUG

// From http://cnicholson.net/2009/02/stupid-c-tricks-adventures-in-assert/
#	define rcAssert(x) do { (void)sizeof(x); } while((void)(__LINE__==-1),false)
// From https://web.archive.org/web/20210117002833/http://cnicholson.net/2009/02/stupid-c-tricks-adventures-in-assert/
#	define rcAssert(x) do { (void)sizeof(x); } while ((void)(__LINE__==-1), false)

#else

@@ -38,7 +35,7 @@ typedef void (rcAssertFailFunc)(const char* expression, const char* file, int li
/// Sets the base custom assertion failure function to be used by Recast.
/// @param[in] assertFailFunc The function to be used in case of failure of #dtAssert
void rcAssertFailSetCustom(rcAssertFailFunc *assertFailFunc);
void rcAssertFailSetCustom(rcAssertFailFunc* assertFailFunc);

/// Gets the base custom assertion failure function to be used by Recast.
rcAssertFailFunc* rcAssertFailGetCustom();

@@ -47,8 +44,8 @@ rcAssertFailFunc* rcAssertFailGetCustom();
#	define rcAssert(expression) \
{ \
rcAssertFailFunc* failFunc = rcAssertFailGetCustom(); \
if(failFunc == NULL) { assert(expression); } \
else if(!(expression)) { (*failFunc)(#expression, __FILE__, __LINE__); } \
if (failFunc == NULL) { assert(expression); } \
else if (!(expression)) { (*failFunc)(#expression, __FILE__, __LINE__); } \
}

#endif

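The rcAssert() macro above dispatches to a user-supplied failure handler when one is registered. A minimal handler sketch matching the rcAssertFailFunc typedef shown in this header (the logging sink is illustrative):

#include <cstdio>
#include <cstdlib>
#include "RecastAssert.h"

// Sketch: route failed rcAssert() checks into our own log before aborting.
static void myAssertFail(const char* expression, const char* file, int line)
{
	std::fprintf(stderr, "Recast assertion failed: %s (%s:%d)\n", expression, file, line);
	std::abort();
}

// During startup (debug builds only, since rcAssert compiles out under NDEBUG):
//     rcAssertFailSetCustom(&myAssertFail);
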
632 thirdparty/recastnavigation/Recast/Source/Recast.cpp (vendored)
@@ -16,81 +16,65 @@
// 3. This notice may not be removed or altered from any source distribution.
//

#include <float.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include "Recast.h"
#include "RecastAlloc.h"
#include "RecastAssert.h"

#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdarg.h>

namespace
{
/// Allocates and constructs an object of the given type, returning a pointer.
/// TODO: Support constructor args.
/// @param[in] hint Hint to the allocator.
template <typename T>
T* rcNew(rcAllocHint hint) {
T* ptr = (T*)rcAlloc(sizeof(T), hint);
/// @param[in] allocLifetime Allocation lifetime hint
template<typename T>
T* rcNew(const rcAllocHint allocLifetime)
{
T* ptr = (T*)rcAlloc(sizeof(T), allocLifetime);
::new(rcNewTag(), (void*)ptr) T();
return ptr;
}

/// Destroys and frees an object allocated with rcNew.
/// @param[in] ptr The object pointer to delete.
template <typename T>
void rcDelete(T* ptr) {
if (ptr) {
template<typename T>
void rcDelete(T* ptr)
{
if (ptr)
{
ptr->~T();
rcFree((void*)ptr);
}
}
} // namespace

} // anonymous namespace

float rcSqrt(float x)
{
return sqrtf(x);
}

/// @class rcContext
/// @par
///
/// This class does not provide logging or timer functionality on its
/// own. Both must be provided by a concrete implementation
/// by overriding the protected member functions. Also, this class does not
/// provide an interface for extracting log messages. (Only adding them.)
/// So concrete implementations must provide one.
///
/// If no logging or timers are required, just pass an instance of this
/// class through the Recast build process.
///

/// @par
///
/// Example:
/// @code
/// // Where ctx is an instance of rcContext and filepath is a char array.
/// ctx->log(RC_LOG_ERROR, "buildTiledNavigation: Could not load '%s'", filepath);
/// @endcode
void rcContext::log(const rcLogCategory category, const char* format, ...)
{
if (!m_logEnabled)
{
return;
}
static const int MSG_SIZE = 512;
char msg[MSG_SIZE];
va_list ap;
va_start(ap, format);
int len = vsnprintf(msg, MSG_SIZE, format, ap);
va_list argList;
va_start(argList, format);
int len = vsnprintf(msg, MSG_SIZE, format, argList);
if (len >= MSG_SIZE)
{
len = MSG_SIZE-1;
msg[MSG_SIZE-1] = '\0';
len = MSG_SIZE - 1;
msg[MSG_SIZE - 1] = '\0';

const char* errorMessage = "Log message was truncated";
doLog(RC_LOG_ERROR, errorMessage, (int)strlen(errorMessage));
}
va_end(ap);
va_end(argList);
doLog(category, msg, len);
}

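rcContext::log() above formats into a fixed 512-byte buffer, reports truncation, and forwards to the protected doLog() hook. A hedged sketch of a concrete context that prints those messages, assuming the doLog(category, message, length) override point that the call sites above dispatch to:

#include <cstdio>
#include "Recast.h"

// Sketch: a minimal rcContext that writes log output to stdout.
// Timer hooks are left at their no-op defaults.
class StdoutContext : public rcContext
{
protected:
	virtual void doLog(const rcLogCategory category, const char* msg, const int len)
	{
		(void)len; // msg is already null-terminated by rcContext::log()
		std::printf("[recast:%d] %s\n", (int)category, msg);
	}
};
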
@@ -103,16 +87,22 @@ rcHeightfield* rcAllocHeightfield()
{
return rcNew<rcHeightfield>(RC_ALLOC_PERM);
}

void rcFreeHeightField(rcHeightfield* heightfield)
{
rcDelete(heightfield);
}

rcHeightfield::rcHeightfield()
: width()
, height()
, bmin()
, bmax()
, cs()
, ch()
, spans()
, pools()
, freelist()
: width()
, height()
, bmin()
, bmax()
, cs()
, ch()
, spans()
, pools()
, freelist()
{
}

@@ -129,40 +119,36 @@ rcHeightfield::~rcHeightfield()
}
}

void rcFreeHeightField(rcHeightfield* hf)
{
rcDelete(hf);
}

rcCompactHeightfield* rcAllocCompactHeightfield()
{
return rcNew<rcCompactHeightfield>(RC_ALLOC_PERM);
}

void rcFreeCompactHeightfield(rcCompactHeightfield* chf)
void rcFreeCompactHeightfield(rcCompactHeightfield* compactHeightfield)
{
rcDelete(chf);
rcDelete(compactHeightfield);
}

rcCompactHeightfield::rcCompactHeightfield()
: width(),
height(),
spanCount(),
walkableHeight(),
walkableClimb(),
borderSize(),
maxDistance(),
maxRegions(),
bmin(),
bmax(),
cs(),
ch(),
cells(),
spans(),
dist(),
areas()
: width()
, height()
, spanCount()
, walkableHeight()
, walkableClimb()
, borderSize()
, maxDistance()
, maxRegions()
, bmin()
, bmax()
, cs()
, ch()
, cells()
, spans()
, dist()
, areas()
{
}

rcCompactHeightfield::~rcCompactHeightfield()
{
rcFree(cells);

@@ -175,13 +161,18 @@ rcHeightfieldLayerSet* rcAllocHeightfieldLayerSet()
{
return rcNew<rcHeightfieldLayerSet>(RC_ALLOC_PERM);
}
void rcFreeHeightfieldLayerSet(rcHeightfieldLayerSet* lset)

void rcFreeHeightfieldLayerSet(rcHeightfieldLayerSet* layerSet)
{
rcDelete(lset);
rcDelete(layerSet);
}

rcHeightfieldLayerSet::rcHeightfieldLayerSet()
: layers(), nlayers() {}
: layers()
, nlayers()
{
}

rcHeightfieldLayerSet::~rcHeightfieldLayerSet()
{
for (int i = 0; i < nlayers; ++i)

@@ -198,22 +189,26 @@ rcContourSet* rcAllocContourSet()
{
return rcNew<rcContourSet>(RC_ALLOC_PERM);
}
void rcFreeContourSet(rcContourSet* cset)

void rcFreeContourSet(rcContourSet* contourSet)
{
rcDelete(cset);
rcDelete(contourSet);
}

rcContourSet::rcContourSet()
: conts(),
nconts(),
bmin(),
bmax(),
cs(),
ch(),
width(),
height(),
borderSize(),
maxError() {}
: conts()
, nconts()
, bmin()
, bmax()
, cs()
, ch()
, width()
, height()
, borderSize()
, maxError()
{
}

rcContourSet::~rcContourSet()
{
for (int i = 0; i < nconts; ++i)

@@ -224,32 +219,34 @@ rcContourSet::~rcContourSet()
rcFree(conts);
}


rcPolyMesh* rcAllocPolyMesh()
{
return rcNew<rcPolyMesh>(RC_ALLOC_PERM);
}
void rcFreePolyMesh(rcPolyMesh* pmesh)

void rcFreePolyMesh(rcPolyMesh* polyMesh)
{
rcDelete(pmesh);
rcDelete(polyMesh);
}

rcPolyMesh::rcPolyMesh()
: verts(),
polys(),
regs(),
flags(),
areas(),
nverts(),
npolys(),
maxpolys(),
nvp(),
bmin(),
bmax(),
cs(),
ch(),
borderSize(),
maxEdgeError() {}
: verts()
, polys()
, regs()
, flags()
, areas()
, nverts()
, npolys()
, maxpolys()
, nvp()
, bmin()
, bmax()
, cs()
, ch()
, borderSize()
, maxEdgeError()
{
}

rcPolyMesh::~rcPolyMesh()
{

@@ -262,319 +259,284 @@ rcPolyMesh::~rcPolyMesh()

rcPolyMeshDetail* rcAllocPolyMeshDetail()
{
rcPolyMeshDetail* dmesh = (rcPolyMeshDetail*)rcAlloc(sizeof(rcPolyMeshDetail), RC_ALLOC_PERM);
memset(dmesh, 0, sizeof(rcPolyMeshDetail));
return dmesh;
return rcNew<rcPolyMeshDetail>(RC_ALLOC_PERM);
}

void rcFreePolyMeshDetail(rcPolyMeshDetail* dmesh)
void rcFreePolyMeshDetail(rcPolyMeshDetail* detailMesh)
{
if (!dmesh) return;
rcFree(dmesh->meshes);
rcFree(dmesh->verts);
rcFree(dmesh->tris);
rcFree(dmesh);
if (detailMesh == NULL)
{
return;
}
rcFree(detailMesh->meshes);
rcFree(detailMesh->verts);
rcFree(detailMesh->tris);
rcFree(detailMesh);
}

void rcCalcBounds(const float* verts, int nv, float* bmin, float* bmax)
rcPolyMeshDetail::rcPolyMeshDetail()
: meshes()
, verts()
, tris()
, nmeshes()
, nverts()
, ntris()
{
}

void rcCalcBounds(const float* verts, int numVerts, float* minBounds, float* maxBounds)
{
// Calculate bounding box.
rcVcopy(bmin, verts);
rcVcopy(bmax, verts);
for (int i = 1; i < nv; ++i)
rcVcopy(minBounds, verts);
rcVcopy(maxBounds, verts);
for (int i = 1; i < numVerts; ++i)
{
const float* v = &verts[i*3];
rcVmin(bmin, v);
rcVmax(bmax, v);
const float* v = &verts[i * 3];
rcVmin(minBounds, v);
rcVmax(maxBounds, v);
}
}

void rcCalcGridSize(const float* bmin, const float* bmax, float cs, int* w, int* h)
void rcCalcGridSize(const float* minBounds, const float* maxBounds, const float cellSize, int* sizeX, int* sizeZ)
{
*w = (int)((bmax[0] - bmin[0])/cs+0.5f);
*h = (int)((bmax[2] - bmin[2])/cs+0.5f);
*sizeX = (int)((maxBounds[0] - minBounds[0]) / cellSize + 0.5f);
*sizeZ = (int)((maxBounds[2] - minBounds[2]) / cellSize + 0.5f);
}

/// @par
///
/// See the #rcConfig documentation for more information on the configuration parameters.
///
/// @see rcAllocHeightfield, rcHeightfield
bool rcCreateHeightfield(rcContext* ctx, rcHeightfield& hf, int width, int height,
const float* bmin, const float* bmax,
float cs, float ch)
bool rcCreateHeightfield(rcContext* context, rcHeightfield& heightfield, int sizeX, int sizeZ,
const float* minBounds, const float* maxBounds,
float cellSize, float cellHeight)
{
rcIgnoreUnused(ctx);

hf.width = width;
hf.height = height;
rcVcopy(hf.bmin, bmin);
rcVcopy(hf.bmax, bmax);
hf.cs = cs;
hf.ch = ch;
hf.spans = (rcSpan**)rcAlloc(sizeof(rcSpan*)*hf.width*hf.height, RC_ALLOC_PERM);
if (!hf.spans)
rcIgnoreUnused(context);

heightfield.width = sizeX;
heightfield.height = sizeZ;
rcVcopy(heightfield.bmin, minBounds);
rcVcopy(heightfield.bmax, maxBounds);
heightfield.cs = cellSize;
heightfield.ch = cellHeight;
heightfield.spans = (rcSpan**)rcAlloc(sizeof(rcSpan*) * heightfield.width * heightfield.height, RC_ALLOC_PERM);
if (!heightfield.spans)
{
return false;
memset(hf.spans, 0, sizeof(rcSpan*)*hf.width*hf.height);
}
memset(heightfield.spans, 0, sizeof(rcSpan*) * heightfield.width * heightfield.height);
return true;
}

static void calcTriNormal(const float* v0, const float* v1, const float* v2, float* norm)
static void calcTriNormal(const float* v0, const float* v1, const float* v2, float* faceNormal)
{
float e0[3], e1[3];
rcVsub(e0, v1, v0);
rcVsub(e1, v2, v0);
rcVcross(norm, e0, e1);
rcVnormalize(norm);
rcVcross(faceNormal, e0, e1);
rcVnormalize(faceNormal);
}

/// @par
///
/// Only sets the area id's for the walkable triangles. Does not alter the
/// area id's for unwalkable triangles.
///
/// See the #rcConfig documentation for more information on the configuration parameters.
///
/// @see rcHeightfield, rcClearUnwalkableTriangles, rcRasterizeTriangles
void rcMarkWalkableTriangles(rcContext* ctx, const float walkableSlopeAngle,
const float* verts, int nv,
const int* tris, int nt,
unsigned char* areas)
void rcMarkWalkableTriangles(rcContext* context, const float walkableSlopeAngle,
const float* verts, const int numVerts,
const int* tris, const int numTris,
unsigned char* triAreaIDs)
{
rcIgnoreUnused(ctx);
rcIgnoreUnused(nv);

const float walkableThr = cosf(walkableSlopeAngle/180.0f*RC_PI);
rcIgnoreUnused(context);
rcIgnoreUnused(numVerts);

const float walkableThr = cosf(walkableSlopeAngle / 180.0f * RC_PI);

float norm[3];

for (int i = 0; i < nt; ++i)

for (int i = 0; i < numTris; ++i)
{
const int* tri = &tris[i*3];
calcTriNormal(&verts[tri[0]*3], &verts[tri[1]*3], &verts[tri[2]*3], norm);
const int* tri = &tris[i * 3];
calcTriNormal(&verts[tri[0] * 3], &verts[tri[1] * 3], &verts[tri[2] * 3], norm);
// Check if the face is walkable.
if (norm[1] > walkableThr)
areas[i] = RC_WALKABLE_AREA;
}
}

/// @par
///
/// Only sets the area id's for the unwalkable triangles. Does not alter the
/// area id's for walkable triangles.
///
/// See the #rcConfig documentation for more information on the configuration parameters.
///
/// @see rcHeightfield, rcClearUnwalkableTriangles, rcRasterizeTriangles
void rcClearUnwalkableTriangles(rcContext* ctx, const float walkableSlopeAngle,
const float* verts, int /*nv*/,
const int* tris, int nt,
unsigned char* areas)
{
rcIgnoreUnused(ctx);

const float walkableThr = cosf(walkableSlopeAngle/180.0f*RC_PI);

float norm[3];

for (int i = 0; i < nt; ++i)
{
const int* tri = &tris[i*3];
calcTriNormal(&verts[tri[0]*3], &verts[tri[1]*3], &verts[tri[2]*3], norm);
// Check if the face is walkable.
if (norm[1] <= walkableThr)
areas[i] = RC_NULL_AREA;
}
}

int rcGetHeightFieldSpanCount(rcContext* ctx, rcHeightfield& hf)
{
rcIgnoreUnused(ctx);

const int w = hf.width;
const int h = hf.height;
int spanCount = 0;
for (int y = 0; y < h; ++y)
{
for (int x = 0; x < w; ++x)
{
for (rcSpan* s = hf.spans[x + y*w]; s; s = s->next)
triAreaIDs[i] = RC_WALKABLE_AREA;
}
}
}

void rcClearUnwalkableTriangles(rcContext* context, const float walkableSlopeAngle,
const float* verts, int numVerts,
const int* tris, int numTris,
unsigned char* triAreaIDs)
{
rcIgnoreUnused(context);
rcIgnoreUnused(numVerts);

// The minimum Y value for a face normal of a triangle with a walkable slope.
const float walkableLimitY = cosf(walkableSlopeAngle / 180.0f * RC_PI);

float faceNormal[3];
for (int i = 0; i < numTris; ++i)
{
const int* tri = &tris[i * 3];
calcTriNormal(&verts[tri[0] * 3], &verts[tri[1] * 3], &verts[tri[2] * 3], faceNormal);
// Check if the face is walkable.
if (faceNormal[1] <= walkableLimitY)
{
triAreaIDs[i] = RC_NULL_AREA;
}
}
}

int rcGetHeightFieldSpanCount(rcContext* context, const rcHeightfield& heightfield)
{
rcIgnoreUnused(context);

const int numCols = heightfield.width * heightfield.height;
int spanCount = 0;
for (int columnIndex = 0; columnIndex < numCols; ++columnIndex)
{
for (rcSpan* span = heightfield.spans[columnIndex]; span != NULL; span = span->next)
{
if (span->area != RC_NULL_AREA)
{
if (s->area != RC_NULL_AREA)
spanCount++;
spanCount++;
}
}
}
return spanCount;
}

/// @par
///
/// This is just the beginning of the process of fully building a compact heightfield.
/// Various filters may be applied, then the distance field and regions built.
/// E.g: #rcBuildDistanceField and #rcBuildRegions
///
/// See the #rcConfig documentation for more information on the configuration parameters.
///
/// @see rcAllocCompactHeightfield, rcHeightfield, rcCompactHeightfield, rcConfig
bool rcBuildCompactHeightfield(rcContext* ctx, const int walkableHeight, const int walkableClimb,
rcHeightfield& hf, rcCompactHeightfield& chf)
bool rcBuildCompactHeightfield(rcContext* context, const int walkableHeight, const int walkableClimb,
const rcHeightfield& heightfield, rcCompactHeightfield& compactHeightfield)
{
rcAssert(ctx);

rcScopedTimer timer(ctx, RC_TIMER_BUILD_COMPACTHEIGHTFIELD);

const int w = hf.width;
const int h = hf.height;
const int spanCount = rcGetHeightFieldSpanCount(ctx, hf);
rcAssert(context);

rcScopedTimer timer(context, RC_TIMER_BUILD_COMPACTHEIGHTFIELD);

const int xSize = heightfield.width;
const int zSize = heightfield.height;
const int spanCount = rcGetHeightFieldSpanCount(context, heightfield);

// Fill in header.
chf.width = w;
chf.height = h;
chf.spanCount = spanCount;
chf.walkableHeight = walkableHeight;
chf.walkableClimb = walkableClimb;
chf.maxRegions = 0;
rcVcopy(chf.bmin, hf.bmin);
rcVcopy(chf.bmax, hf.bmax);
chf.bmax[1] += walkableHeight*hf.ch;
chf.cs = hf.cs;
chf.ch = hf.ch;
chf.cells = (rcCompactCell*)rcAlloc(sizeof(rcCompactCell)*w*h, RC_ALLOC_PERM);
if (!chf.cells)
compactHeightfield.width = xSize;
compactHeightfield.height = zSize;
compactHeightfield.spanCount = spanCount;
compactHeightfield.walkableHeight = walkableHeight;
compactHeightfield.walkableClimb = walkableClimb;
compactHeightfield.maxRegions = 0;
rcVcopy(compactHeightfield.bmin, heightfield.bmin);
rcVcopy(compactHeightfield.bmax, heightfield.bmax);
compactHeightfield.bmax[1] += walkableHeight * heightfield.ch;
compactHeightfield.cs = heightfield.cs;
compactHeightfield.ch = heightfield.ch;
compactHeightfield.cells = (rcCompactCell*)rcAlloc(sizeof(rcCompactCell) * xSize * zSize, RC_ALLOC_PERM);
if (!compactHeightfield.cells)
{
ctx->log(RC_LOG_ERROR, "rcBuildCompactHeightfield: Out of memory 'chf.cells' (%d)", w*h);
context->log(RC_LOG_ERROR, "rcBuildCompactHeightfield: Out of memory 'chf.cells' (%d)", xSize * zSize);
return false;
}
memset(chf.cells, 0, sizeof(rcCompactCell)*w*h);
chf.spans = (rcCompactSpan*)rcAlloc(sizeof(rcCompactSpan)*spanCount, RC_ALLOC_PERM);
if (!chf.spans)
memset(compactHeightfield.cells, 0, sizeof(rcCompactCell) * xSize * zSize);
compactHeightfield.spans = (rcCompactSpan*)rcAlloc(sizeof(rcCompactSpan) * spanCount, RC_ALLOC_PERM);
if (!compactHeightfield.spans)
{
ctx->log(RC_LOG_ERROR, "rcBuildCompactHeightfield: Out of memory 'chf.spans' (%d)", spanCount);
context->log(RC_LOG_ERROR, "rcBuildCompactHeightfield: Out of memory 'chf.spans' (%d)", spanCount);
return false;
}
memset(chf.spans, 0, sizeof(rcCompactSpan)*spanCount);
chf.areas = (unsigned char*)rcAlloc(sizeof(unsigned char)*spanCount, RC_ALLOC_PERM);
if (!chf.areas)
memset(compactHeightfield.spans, 0, sizeof(rcCompactSpan) * spanCount);
compactHeightfield.areas = (unsigned char*)rcAlloc(sizeof(unsigned char) * spanCount, RC_ALLOC_PERM);
if (!compactHeightfield.areas)
{
ctx->log(RC_LOG_ERROR, "rcBuildCompactHeightfield: Out of memory 'chf.areas' (%d)", spanCount);
context->log(RC_LOG_ERROR, "rcBuildCompactHeightfield: Out of memory 'chf.areas' (%d)", spanCount);
return false;
}
memset(chf.areas, RC_NULL_AREA, sizeof(unsigned char)*spanCount);

memset(compactHeightfield.areas, RC_NULL_AREA, sizeof(unsigned char) * spanCount);

const int MAX_HEIGHT = 0xffff;

// Fill in cells and spans.
int idx = 0;
for (int y = 0; y < h; ++y)
int currentCellIndex = 0;
const int numColumns = xSize * zSize;
for (int columnIndex = 0; columnIndex < numColumns; ++columnIndex)
{
for (int x = 0; x < w; ++x)
const rcSpan* span = heightfield.spans[columnIndex];

// If there are no spans at this cell, just leave the data to index=0, count=0.
if (span == NULL)
{
const rcSpan* s = hf.spans[x + y*w];
// If there are no spans at this cell, just leave the data to index=0, count=0.
if (!s) continue;
rcCompactCell& c = chf.cells[x+y*w];
c.index = idx;
c.count = 0;
while (s)
continue;
}

rcCompactCell& cell = compactHeightfield.cells[columnIndex];
cell.index = currentCellIndex;
cell.count = 0;

for (; span != NULL; span = span->next)
{
if (span->area != RC_NULL_AREA)
{
if (s->area != RC_NULL_AREA)
{
const int bot = (int)s->smax;
const int top = s->next ? (int)s->next->smin : MAX_HEIGHT;
chf.spans[idx].y = (unsigned short)rcClamp(bot, 0, 0xffff);
chf.spans[idx].h = (unsigned char)rcClamp(top - bot, 0, 0xff);
chf.areas[idx] = s->area;
idx++;
c.count++;
}
s = s->next;
const int bot = (int)span->smax;
const int top = span->next ? (int)span->next->smin : MAX_HEIGHT;
compactHeightfield.spans[currentCellIndex].y = (unsigned short)rcClamp(bot, 0, 0xffff);
compactHeightfield.spans[currentCellIndex].h = (unsigned char)rcClamp(top - bot, 0, 0xff);
compactHeightfield.areas[currentCellIndex] = span->area;
currentCellIndex++;
cell.count++;
}
}
}

// Find neighbour connections.
const int MAX_LAYERS = RC_NOT_CONNECTED-1;
int tooHighNeighbour = 0;
for (int y = 0; y < h; ++y)
const int MAX_LAYERS = RC_NOT_CONNECTED - 1;
int maxLayerIndex = 0;
const int zStride = xSize; // for readability
for (int z = 0; z < zSize; ++z)
{
for (int x = 0; x < w; ++x)
for (int x = 0; x < xSize; ++x)
{
const rcCompactCell& c = chf.cells[x+y*w];
for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i)
const rcCompactCell& cell = compactHeightfield.cells[x + z * zStride];
for (int i = (int)cell.index, ni = (int)(cell.index + cell.count); i < ni; ++i)
{
rcCompactSpan& s = chf.spans[i];

rcCompactSpan& span = compactHeightfield.spans[i];

for (int dir = 0; dir < 4; ++dir)
{
rcSetCon(s, dir, RC_NOT_CONNECTED);
const int nx = x + rcGetDirOffsetX(dir);
const int ny = y + rcGetDirOffsetY(dir);
rcSetCon(span, dir, RC_NOT_CONNECTED);
const int neighborX = x + rcGetDirOffsetX(dir);
const int neighborZ = z + rcGetDirOffsetY(dir);
// First check that the neighbour cell is in bounds.
if (nx < 0 || ny < 0 || nx >= w || ny >= h)
if (neighborX < 0 || neighborZ < 0 || neighborX >= xSize || neighborZ >= zSize)
{
continue;
}

// Iterate over all neighbour spans and check if any of the is
// accessible from current cell.
const rcCompactCell& nc = chf.cells[nx+ny*w];
for (int k = (int)nc.index, nk = (int)(nc.index+nc.count); k < nk; ++k)
const rcCompactCell& neighborCell = compactHeightfield.cells[neighborX + neighborZ * zStride];
for (int k = (int)neighborCell.index, nk = (int)(neighborCell.index + neighborCell.count); k < nk; ++k)
{
const rcCompactSpan& ns = chf.spans[k];
const int bot = rcMax(s.y, ns.y);
const int top = rcMin(s.y+s.h, ns.y+ns.h);
const rcCompactSpan& neighborSpan = compactHeightfield.spans[k];
const int bot = rcMax(span.y, neighborSpan.y);
const int top = rcMin(span.y + span.h, neighborSpan.y + neighborSpan.h);

// Check that the gap between the spans is walkable,
// and that the climb height between the gaps is not too high.
if ((top - bot) >= walkableHeight && rcAbs((int)ns.y - (int)s.y) <= walkableClimb)
if ((top - bot) >= walkableHeight && rcAbs((int)neighborSpan.y - (int)span.y) <= walkableClimb)
{
// Mark direction as walkable.
const int lidx = k - (int)nc.index;
if (lidx < 0 || lidx > MAX_LAYERS)
const int layerIndex = k - (int)neighborCell.index;
if (layerIndex < 0 || layerIndex > MAX_LAYERS)
{
tooHighNeighbour = rcMax(tooHighNeighbour, lidx);
maxLayerIndex = rcMax(maxLayerIndex, layerIndex);
continue;
}
rcSetCon(s, dir, lidx);
rcSetCon(span, dir, layerIndex);
break;
}
}
}
}
}
}

if (tooHighNeighbour > MAX_LAYERS)

if (maxLayerIndex > MAX_LAYERS)
{
ctx->log(RC_LOG_ERROR, "rcBuildCompactHeightfield: Heightfield has too many layers %d (max: %d)",
tooHighNeighbour, MAX_LAYERS);
context->log(RC_LOG_ERROR, "rcBuildCompactHeightfield: Heightfield has too many layers %d (max: %d)",
maxLayerIndex, MAX_LAYERS);
}

return true;
}

/*
static int getHeightfieldMemoryUsage(const rcHeightfield& hf)
{
int size = 0;
size += sizeof(hf);
size += hf.width * hf.height * sizeof(rcSpan*);

rcSpanPool* pool = hf.pools;
while (pool)
{
size += (sizeof(rcSpanPool) - sizeof(rcSpan)) + sizeof(rcSpan)*RC_SPANS_PER_POOL;
pool = pool->next;
}
return size;
}

static int getCompactHeightFieldMemoryusage(const rcCompactHeightfield& chf)
{
int size = 0;
size += sizeof(rcCompactHeightfield);
size += sizeof(rcCompactSpan) * chf.spanCount;
size += sizeof(rcCompactCell) * chf.width * chf.height;
return size;
}
*/

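Taken together, the renamed entry points above (rcCalcBounds, rcCalcGridSize, rcCreateHeightfield, rcMarkWalkableTriangles, rcBuildCompactHeightfield) keep their old call order. A condensed sketch of that order, using only the signatures visible in this diff; error handling and the rasterization/filter steps in between are elided:

#include "Recast.h"

// Sketch: the front of a typical Recast build, wired to the signatures above.
static bool buildCompactField(rcContext* ctx, const float* verts, int numVerts,
		const int* tris, int numTris, const rcConfig& cfg,
		rcHeightfield& hf, rcCompactHeightfield& chf,
		unsigned char* triAreas)
{
	float bmin[3], bmax[3];
	int width = 0, height = 0;
	rcCalcBounds(verts, numVerts, bmin, bmax);
	rcCalcGridSize(bmin, bmax, cfg.cs, &width, &height);
	if (!rcCreateHeightfield(ctx, hf, width, height, bmin, bmax, cfg.cs, cfg.ch))
		return false;
	rcMarkWalkableTriangles(ctx, cfg.walkableSlopeAngle, verts, numVerts, tris, numTris, triAreas);
	// ... rcRasterizeTriangles() and the rcFilter* passes would run here ...
	return rcBuildCompactHeightfield(ctx, cfg.walkableHeight, cfg.walkableClimb, hf, chf);
}
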
@@ -16,12 +16,9 @@
// 3. This notice may not be removed or altered from any source distribution.
//

#include <stdlib.h>
#include <string.h>
#include "RecastAlloc.h"
#include "RecastAssert.h"

static void *rcAllocDefault(size_t size, rcAllocHint)
static void* rcAllocDefault(size_t size, rcAllocHint)
{
return malloc(size);
}

@@ -34,27 +31,21 @@ static void rcFreeDefault(void *ptr)
static rcAllocFunc* sRecastAllocFunc = rcAllocDefault;
static rcFreeFunc* sRecastFreeFunc = rcFreeDefault;

/// @see rcAlloc, rcFree
void rcAllocSetCustom(rcAllocFunc *allocFunc, rcFreeFunc *freeFunc)
void rcAllocSetCustom(rcAllocFunc* allocFunc, rcFreeFunc* freeFunc)
{
sRecastAllocFunc = allocFunc ? allocFunc : rcAllocDefault;
sRecastFreeFunc = freeFunc ? freeFunc : rcFreeDefault;
}

/// @see rcAllocSetCustom
void* rcAlloc(size_t size, rcAllocHint hint)
{
return sRecastAllocFunc(size, hint);
}

/// @par
///
/// @warning This function leaves the value of @p ptr unchanged. So it still
/// points to the same (now invalid) location, and not to null.
///
/// @see rcAllocSetCustom
void rcFree(void* ptr)
{
if (ptr)
if (ptr != NULL)
{
sRecastFreeFunc(ptr);
}
}

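rcAllocSetCustom() above falls back to malloc/free when either hook is null. A sketch of a simple counting allocator wired through it, assuming the conventional rcAllocFunc/rcFreeFunc typedefs this file compiles against:

#include <cstdlib>
#include "RecastAlloc.h"

// Sketch: track how many Recast allocations are live.
static int gLiveAllocs = 0;

static void* countingAlloc(size_t size, rcAllocHint /*hint*/)
{
	++gLiveAllocs;
	return std::malloc(size);
}

static void countingFree(void* ptr)
{
	if (ptr) --gLiveAllocs;
	std::free(ptr);
}

// Install before any Recast call: rcAllocSetCustom(&countingAlloc, &countingFree);
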
@@ -17,7 +17,6 @@
//

#include <float.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <string.h>
#include <stdlib.h>

@@ -22,7 +22,7 @@
static rcAssertFailFunc* sRecastAssertFailFunc = 0;

void rcAssertFailSetCustom(rcAssertFailFunc *assertFailFunc)
void rcAssertFailSetCustom(rcAssertFailFunc* assertFailFunc)
{
sRecastAssertFailFunc = assertFailFunc;
}

@@ -16,7 +16,6 @@
// 3. This notice may not be removed or altered from any source distribution.
//

#define _USE_MATH_DEFINES
#include <math.h>
#include <string.h>
#include <stdio.h>

@@ -102,7 +101,7 @@ static int getCornerHeight(int x, int y, int i, int dir,
}

static void walkContour(int x, int y, int i,
rcCompactHeightfield& chf,
const rcCompactHeightfield& chf,
unsigned char* flags, rcIntArray& points)
{
// Choose the first non-connected edge

@@ -542,7 +541,7 @@ static bool vequal(const int* a, const int* b)
return a[0] == b[0] && a[2] == b[2];
}

static bool intersectSegCountour(const int* d0, const int* d1, int i, int n, const int* verts)
static bool intersectSegContour(const int* d0, const int* d1, int i, int n, const int* verts)
{
// For each edge (k,k+1) of P
for (int k = 0; k < n; k++)

@@ -778,9 +777,9 @@ static void mergeRegionHoles(rcContext* ctx, rcContourRegion& region)
for (int j = 0; j < ndiags; j++)
{
const int* pt = &outline->verts[diags[j].vert*4];
bool intersect = intersectSegCountour(pt, corner, diags[i].vert, outline->nverts, outline->verts);
bool intersect = intersectSegContour(pt, corner, diags[i].vert, outline->nverts, outline->verts);
for (int k = i; k < region.nholes && !intersect; k++)
intersect |= intersectSegCountour(pt, corner, -1, region.holes[k].contour->nverts, region.holes[k].contour->verts);
intersect |= intersectSegContour(pt, corner, -1, region.holes[k].contour->nverts, region.holes[k].contour->verts);
if (!intersect)
{
index = diags[j].vert;

@@ -821,7 +820,7 @@ static void mergeRegionHoles(rcContext* ctx, rcContourRegion& region)
/// See the #rcConfig documentation for more information on the configuration parameters.
///
/// @see rcAllocContourSet, rcCompactHeightfield, rcContourSet, rcConfig
bool rcBuildContours(rcContext* ctx, rcCompactHeightfield& chf,
bool rcBuildContours(rcContext* ctx, const rcCompactHeightfield& chf,
const float maxError, const int maxEdgeLen,
rcContourSet& cset, const int buildFlags)
{

@@ -16,186 +16,168 @@
// 3. This notice may not be removed or altered from any source distribution.
//

#define _USE_MATH_DEFINES
#include <math.h>
#include <stdio.h>
#include "Recast.h"
#include "RecastAssert.h"

/// @par
///
/// Allows the formation of walkable regions that will flow over low lying
/// objects such as curbs, and up structures such as stairways.
///
/// Two neighboring spans are walkable if: <tt>rcAbs(currentSpan.smax - neighborSpan.smax) < waklableClimb</tt>
///
/// @warning Will override the effect of #rcFilterLedgeSpans. So if both filters are used, call
/// #rcFilterLedgeSpans after calling this filter.
///
/// @see rcHeightfield, rcConfig
void rcFilterLowHangingWalkableObstacles(rcContext* ctx, const int walkableClimb, rcHeightfield& solid)
{
rcAssert(ctx);
#include <stdlib.h>

rcScopedTimer timer(ctx, RC_TIMER_FILTER_LOW_OBSTACLES);

const int w = solid.width;
const int h = solid.height;

for (int y = 0; y < h; ++y)
void rcFilterLowHangingWalkableObstacles(rcContext* context, const int walkableClimb, rcHeightfield& heightfield)
{
rcAssert(context);

rcScopedTimer timer(context, RC_TIMER_FILTER_LOW_OBSTACLES);

const int xSize = heightfield.width;
const int zSize = heightfield.height;

for (int z = 0; z < zSize; ++z)
{
for (int x = 0; x < w; ++x)
for (int x = 0; x < xSize; ++x)
{
rcSpan* ps = 0;
bool previousWalkable = false;
rcSpan* previousSpan = NULL;
bool previousWasWalkable = false;
unsigned char previousArea = RC_NULL_AREA;

for (rcSpan* s = solid.spans[x + y*w]; s; ps = s, s = s->next)

for (rcSpan* span = heightfield.spans[x + z * xSize]; span != NULL; previousSpan = span, span = span->next)
{
const bool walkable = s->area != RC_NULL_AREA;
const bool walkable = span->area != RC_NULL_AREA;
// If current span is not walkable, but there is walkable
// span just below it, mark the span above it walkable too.
if (!walkable && previousWalkable)
if (!walkable && previousWasWalkable)
{
if (rcAbs((int)s->smax - (int)ps->smax) <= walkableClimb)
s->area = previousArea;
if (rcAbs((int)span->smax - (int)previousSpan->smax) <= walkableClimb)
{
span->area = previousArea;
}
}
// Copy walkable flag so that it cannot propagate
// past multiple non-walkable objects.
previousWalkable = walkable;
previousArea = s->area;
previousWasWalkable = walkable;
previousArea = span->area;
}
}
}
}

/// @par
///
/// A ledge is a span with one or more neighbors whose maximum is further away than @p walkableClimb
/// from the current span's maximum.
/// This method removes the impact of the overestimation of conservative voxelization
/// so the resulting mesh will not have regions hanging in the air over ledges.
///
/// A span is a ledge if: <tt>rcAbs(currentSpan.smax - neighborSpan.smax) > walkableClimb</tt>
///
/// @see rcHeightfield, rcConfig
void rcFilterLedgeSpans(rcContext* ctx, const int walkableHeight, const int walkableClimb,
rcHeightfield& solid)
void rcFilterLedgeSpans(rcContext* context, const int walkableHeight, const int walkableClimb,
rcHeightfield& heightfield)
{
rcAssert(ctx);
rcAssert(context);

rcScopedTimer timer(ctx, RC_TIMER_FILTER_BORDER);
rcScopedTimer timer(context, RC_TIMER_FILTER_BORDER);

const int w = solid.width;
const int h = solid.height;
const int MAX_HEIGHT = 0xffff;
const int xSize = heightfield.width;
const int zSize = heightfield.height;
const int MAX_HEIGHT = 0xffff; // TODO (graham): Move this to a more visible constant and update usages.

// Mark border spans.
for (int y = 0; y < h; ++y)
for (int z = 0; z < zSize; ++z)
{
for (int x = 0; x < w; ++x)
for (int x = 0; x < xSize; ++x)
{
for (rcSpan* s = solid.spans[x + y*w]; s; s = s->next)
for (rcSpan* span = heightfield.spans[x + z * xSize]; span; span = span->next)
{
// Skip non walkable spans.
if (s->area == RC_NULL_AREA)
if (span->area == RC_NULL_AREA)
{
continue;

const int bot = (int)(s->smax);
const int top = s->next ? (int)(s->next->smin) : MAX_HEIGHT;

}

const int bot = (int)(span->smax);
const int top = span->next ? (int)(span->next->smin) : MAX_HEIGHT;

// Find neighbours minimum height.
int minh = MAX_HEIGHT;
int minNeighborHeight = MAX_HEIGHT;

// Min and max height of accessible neighbours.
int asmin = s->smax;
int asmax = s->smax;
int accessibleNeighborMinHeight = span->smax;
int accessibleNeighborMaxHeight = span->smax;

for (int dir = 0; dir < 4; ++dir)
for (int direction = 0; direction < 4; ++direction)
{
int dx = x + rcGetDirOffsetX(dir);
int dy = y + rcGetDirOffsetY(dir);
int dx = x + rcGetDirOffsetX(direction);
int dy = z + rcGetDirOffsetY(direction);
// Skip neighbours which are out of bounds.
if (dx < 0 || dy < 0 || dx >= w || dy >= h)
if (dx < 0 || dy < 0 || dx >= xSize || dy >= zSize)
{
minh = rcMin(minh, -walkableClimb - bot);
minNeighborHeight = rcMin(minNeighborHeight, -walkableClimb - bot);
continue;
}

// From minus infinity to the first span.
rcSpan* ns = solid.spans[dx + dy*w];
int nbot = -walkableClimb;
int ntop = ns ? (int)ns->smin : MAX_HEIGHT;
// Skip neightbour if the gap between the spans is too small.
if (rcMin(top,ntop) - rcMax(bot,nbot) > walkableHeight)
minh = rcMin(minh, nbot - bot);
const rcSpan* neighborSpan = heightfield.spans[dx + dy * xSize];
int neighborBot = -walkableClimb;
int neighborTop = neighborSpan ? (int)neighborSpan->smin : MAX_HEIGHT;

// Rest of the spans.
for (ns = solid.spans[dx + dy*w]; ns; ns = ns->next)
// Skip neighbour if the gap between the spans is too small.
if (rcMin(top, neighborTop) - rcMax(bot, neighborBot) > walkableHeight)
{
nbot = (int)ns->smax;
ntop = ns->next ? (int)ns->next->smin : MAX_HEIGHT;
// Skip neightbour if the gap between the spans is too small.
if (rcMin(top,ntop) - rcMax(bot,nbot) > walkableHeight)
{
minh = rcMin(minh, nbot - bot);
minNeighborHeight = rcMin(minNeighborHeight, neighborBot - bot);
}

// Rest of the spans.
for (neighborSpan = heightfield.spans[dx + dy * xSize]; neighborSpan; neighborSpan = neighborSpan->next)
{
neighborBot = (int)neighborSpan->smax;
neighborTop = neighborSpan->next ? (int)neighborSpan->next->smin : MAX_HEIGHT;

// Skip neighbour if the gap between the spans is too small.
if (rcMin(top, neighborTop) - rcMax(bot, neighborBot) > walkableHeight)
{
minNeighborHeight = rcMin(minNeighborHeight, neighborBot - bot);

// Find min/max accessible neighbour height.
if (rcAbs(nbot - bot) <= walkableClimb)
if (rcAbs(neighborBot - bot) <= walkableClimb)
{
if (nbot < asmin) asmin = nbot;
if (nbot > asmax) asmax = nbot;
if (neighborBot < accessibleNeighborMinHeight) accessibleNeighborMinHeight = neighborBot;
if (neighborBot > accessibleNeighborMaxHeight) accessibleNeighborMaxHeight = neighborBot;
}

}
}
}

// The current span is close to a ledge if the drop to any
// neighbour span is less than the walkableClimb.
if (minh < -walkableClimb)
if (minNeighborHeight < -walkableClimb)
{
s->area = RC_NULL_AREA;
span->area = RC_NULL_AREA;
}
// If the difference between all neighbours is too large,
// we are at steep slope, mark the span as ledge.
else if ((asmax - asmin) > walkableClimb)
else if ((accessibleNeighborMaxHeight - accessibleNeighborMinHeight) > walkableClimb)
{
s->area = RC_NULL_AREA;
span->area = RC_NULL_AREA;
}
}
}
}
}

/// @par
///
/// For this filter, the clearance above the span is the distance from the span's
/// maximum to the next higher span's minimum. (Same grid column.)
///
/// @see rcHeightfield, rcConfig
void rcFilterWalkableLowHeightSpans(rcContext* ctx, int walkableHeight, rcHeightfield& solid)
void rcFilterWalkableLowHeightSpans(rcContext* context, const int walkableHeight, rcHeightfield& heightfield)
{
rcAssert(ctx);
rcAssert(context);

rcScopedTimer timer(ctx, RC_TIMER_FILTER_WALKABLE);
rcScopedTimer timer(context, RC_TIMER_FILTER_WALKABLE);

const int w = solid.width;
const int h = solid.height;
const int xSize = heightfield.width;
const int zSize = heightfield.height;
const int MAX_HEIGHT = 0xffff;

// Remove walkable flag from spans which do not have enough
// space above them for the agent to stand there.
for (int y = 0; y < h; ++y)
for (int z = 0; z < zSize; ++z)
{
for (int x = 0; x < w; ++x)
for (int x = 0; x < xSize; ++x)
{
for (rcSpan* s = solid.spans[x + y*w]; s; s = s->next)
for (rcSpan* span = heightfield.spans[x + z*xSize]; span; span = span->next)
{
const int bot = (int)(s->smax);
const int top = s->next ? (int)(s->next->smin) : MAX_HEIGHT;
if ((top - bot) <= walkableHeight)
s->area = RC_NULL_AREA;
const int bot = (int)(span->smax);
const int top = span->next ? (int)(span->next->smin) : MAX_HEIGHT;
if ((top - bot) < walkableHeight)
{
span->area = RC_NULL_AREA;
}
}
}
}

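The three filters above are order-sensitive: the doc comment kept with rcFilterLowHangingWalkableObstacles() warns that rcFilterLedgeSpans() must run after it. A short sketch of the conventional ordering, using the signatures in this file (the config values are illustrative):

#include "Recast.h"

// Sketch: run the span filters in the order the warnings above call for.
static void filterWalkableSurfaces(rcContext* ctx, const rcConfig& cfg, rcHeightfield& heightfield)
{
	rcFilterLowHangingWalkableObstacles(ctx, cfg.walkableClimb, heightfield);
	rcFilterLedgeSpans(ctx, cfg.walkableHeight, cfg.walkableClimb, heightfield);
	rcFilterWalkableLowHeightSpans(ctx, cfg.walkableHeight, heightfield);
}
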