Upgrade Embree to the latest official release.

Since Embree v3.13.0 supports AARCH64, switch back to the official repo
instead of using Embree-aarch64.

`thirdparty/embree/patches/godot-changes.patch` should now contain an
accurate diff of the changes done to the library.

(cherry picked from commit 767e374dce)

commit a69cc9f13d (parent 2660fafcc0)
343 changed files with 12085 additions and 10390 deletions
COPYRIGHT.txt

@@ -137,7 +137,7 @@ License: Expat

 Files: ./thirdparty/embree/
 Comment: Embree
-Copyright: 2009-2021 Intel Corporation
+Copyright: 2009-2021, Intel Corporation
 License: Apache-2.0

 Files: ./thirdparty/enet/
modules/raycast/SCsub

@@ -28,16 +28,6 @@ if env["builtin_embree"]:
         "common/lexers/stringstream.cpp",
         "common/lexers/tokenstream.cpp",
         "common/tasking/taskschedulerinternal.cpp",
-        "common/algorithms/parallel_for.cpp",
-        "common/algorithms/parallel_reduce.cpp",
-        "common/algorithms/parallel_prefix_sum.cpp",
-        "common/algorithms/parallel_for_for.cpp",
-        "common/algorithms/parallel_for_for_prefix_sum.cpp",
-        "common/algorithms/parallel_partition.cpp",
-        "common/algorithms/parallel_sort.cpp",
-        "common/algorithms/parallel_set.cpp",
-        "common/algorithms/parallel_map.cpp",
-        "common/algorithms/parallel_filter.cpp",
         "kernels/common/device.cpp",
         "kernels/common/stat.cpp",
         "kernels/common/acceln.cpp",
@@ -82,13 +72,17 @@ if env["builtin_embree"]:
     if env["platform"] == "windows":
         if env.msvc:
             env.Append(LINKFLAGS=["psapi.lib"])
-            env_raycast.Append(CPPDEFINES=["__SSE2__", "__SSE__"])
         else:
             env.Append(LIBS=["psapi"])

     env_thirdparty = env_raycast.Clone()
     env_thirdparty.disable_warnings()
     env_thirdparty.add_source_files(thirdparty_obj, thirdparty_sources)

+    if not env["arch"] in ["x86", "x86_64"] or env.msvc:
+        # Embree needs those, it will automatically use SSE2NEON in ARM
+        env_thirdparty.Append(CPPDEFINES=["__SSE2__", "__SSE__"])
+
     env.modules_sources += thirdparty_obj
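A note on the arch check above: Embree's SIMD code is written against SSE intrinsics, and on ARM those intrinsics are translated to NEON by the bundled sse2neon header, which is why the build defines __SSE__ and __SSE2__ even on non-x86 targets. The standalone sketch below only illustrates that idea; it assumes sse2neon.h is available on the include path and is not taken from this diff.

// Minimal sketch of the SSE-on-NEON approach used by the vendored Embree
// sources (illustrative only; the header choice and guard are assumptions).
#if defined(__aarch64__)
#include "sse2neon.h"   // maps _mm_* intrinsics onto ARM NEON equivalents
#else
#include <xmmintrin.h>  // native SSE intrinsics on x86
#endif

#include <cstdio>

int main() {
    __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);  // packed single-precision floats
    __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);
    __m128 sum = _mm_add_ps(a, b);                   // element-wise add

    float out[4];
    _mm_storeu_ps(out, sum);                         // unaligned store to memory
    std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
    return 0;
}

Built with an ordinary C++ compiler on either architecture, this should print "6 8 10 12"; the same source compiles for x86 and AArch64 without changes, which is the property the SCsub defines rely on.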
modules/raycast/config.py

@@ -2,8 +2,7 @@ def can_build(env, platform):
     if not env["tools"]:
         return False

-    # Depends on Embree library, which supports only x86_64 (originally)
-    # and aarch64 (thanks to the embree-aarch64 fork).
+    # Depends on Embree library, which only supports x86_64 and aarch64.

     if platform == "android":
         return env["android_arch"] in ["arm64v8", "x86_64"]
modules/raycast/godot_update_embree.py

@@ -11,6 +11,7 @@ include_dirs = [
     "common/algorithms",
     "common/lexers",
     "common/simd",
+    "common/simd/arm",
     "include/embree3",
     "kernels/subdiv",
     "kernels/geometry",
@@ -32,16 +33,6 @@ cpp_files = [
     "common/lexers/stringstream.cpp",
     "common/lexers/tokenstream.cpp",
     "common/tasking/taskschedulerinternal.cpp",
-    "common/algorithms/parallel_for.cpp",
-    "common/algorithms/parallel_reduce.cpp",
-    "common/algorithms/parallel_prefix_sum.cpp",
-    "common/algorithms/parallel_for_for.cpp",
-    "common/algorithms/parallel_for_for_prefix_sum.cpp",
-    "common/algorithms/parallel_partition.cpp",
-    "common/algorithms/parallel_sort.cpp",
-    "common/algorithms/parallel_set.cpp",
-    "common/algorithms/parallel_map.cpp",
-    "common/algorithms/parallel_filter.cpp",
     "kernels/common/device.cpp",
     "kernels/common/stat.cpp",
     "kernels/common/acceln.cpp",
@@ -78,7 +69,7 @@ dir_name = "embree"
 if os.path.exists(dir_name):
     shutil.rmtree(dir_name)

-subprocess.run(["git", "clone", "https://github.com/lighttransport/embree-aarch64.git", "embree-tmp"])
+subprocess.run(["git", "clone", "https://github.com/embree/embree.git", "embree-tmp"])
 os.chdir("embree-tmp")

 commit_hash = str(subprocess.check_output(["git", "rev-parse", "HEAD"], universal_newlines=True)).strip()
@@ -197,7 +188,7 @@ with open("CMakeLists.txt", "r") as cmake_file:
 with open(os.path.join(dest_dir, "include/embree3/rtcore_config.h"), "w") as config_file:
     config_file.write(
         f"""
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0

 #pragma once
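For reference, the prologue shown in this hunk is what the update script writes at the top of the generated header. The sketch below shows roughly what the start of include/embree3/rtcore_config.h can look like; the version defines are an assumption based on Embree 3.13.0 and are not part of this diff, since the real values are filled in from the upstream CMake configuration.

// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

// Assumed version defines for Embree 3.13.0 (illustrative, not from this diff).
#define RTC_VERSION_MAJOR 3
#define RTC_VERSION_MINOR 13
#define RTC_VERSION_PATCH 0
#define RTC_VERSION 31300
#define RTC_VERSION_STRING "3.13.0"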
thirdparty/README.md (vendored, 9 changes)
@@ -39,10 +39,11 @@ Files extracted from upstream source:

 - all .cpp, .h, and .txt files in ConvectionKernels/

 ## embree

-- Upstream: https://github.com/lighttransport/embree-aarch64
-- Version: 3.12.1 (6ef362f99af80c9dfe8dd2bfc582d9067897edc6, 2020)
+- Upstream: https://github.com/embree/embree
+- Version: 3.13.0 (7c53133eb21424f7f0ae1e25bf357e358feaf6ab, 2021)
 - License: Apache 2.0

 Files extracted from upstream:
@@ -50,8 +51,8 @@ Files extracted from upstream:
 - All cpp files listed in `modules/raycast/godot_update_embree.py`
 - All header files in the directories listed in `modules/raycast/godot_update_embree.py`

-The `modules/raycast/godot_update_embree.py`script can be used to pull the
-relevant files from the latest Embree-aarch64 release and apply some automatic changes.
+The `modules/raycast/godot_update_embree.py` script can be used to pull the
+relevant files from the latest Embree release and apply some automatic changes.

 Some changes have been made in order to remove exceptions and fix minor build errors.
 They are marked with `// -- GODOT start --` and `// -- GODOT end --`
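The `// -- GODOT start --` / `// -- GODOT end --` markers mentioned above wrap every local modification made to the vendored sources, so the Godot-side patch can be regenerated after each upgrade. The self-contained sketch below mirrors the exception-removal pattern visible in the parallel_for.h hunks later in this diff; the helper function and its name are stand-ins, not actual Embree code.

// Illustration of the marker pattern used for Godot-side changes.
#include <cstdlib>

static bool wait_for_tasks() { return true; }  // hypothetical stand-in for TaskScheduler::wait()

void run_tasks()
{
  if (!wait_for_tasks())
    // -- GODOT start --
    // Godot builds Embree with exceptions disabled, so the upstream
    // `throw std::runtime_error("task cancelled");` is commented out
    // and replaced with an abort().
    abort();
    // -- GODOT end --
}

int main() { run_tasks(); return 0; }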
@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0

 #pragma once
@ -1,56 +0,0 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "parallel_filter.h"
|
||||
#include "../sys/regression.h"
|
||||
#include <map>
|
||||
|
||||
namespace embree
|
||||
{
|
||||
struct parallel_filter_regression_test : public RegressionTest
|
||||
{
|
||||
parallel_filter_regression_test(const char* name) : RegressionTest(name) {
|
||||
registerRegressionTest(this);
|
||||
}
|
||||
|
||||
bool run ()
|
||||
{
|
||||
bool passed = true;
|
||||
auto pred = [&]( uint32_t v ) { return (v & 0x3) == 0; };
|
||||
|
||||
for (size_t N=10; N<1000000; N=size_t(2.1*N))
|
||||
{
|
||||
size_t N0 = rand() % N;
|
||||
|
||||
/* initialize array with random numbers */
|
||||
std::vector<uint32_t> src(N);
|
||||
std::map<uint32_t,int> m;
|
||||
for (size_t i=0; i<N; i++) src[i] = rand();
|
||||
|
||||
/* count elements up */
|
||||
for (size_t i=N0; i<N; i++)
|
||||
if (pred(src[i]))
|
||||
m[src[i]] = 0;
|
||||
for (size_t i=N0; i<N; i++)
|
||||
if (pred(src[i]))
|
||||
m[src[i]]++;
|
||||
|
||||
/* filter array */
|
||||
//size_t M = sequential_filter(src.data(),N0,N,pred);
|
||||
size_t M = parallel_filter(src.data(),N0,N,size_t(1024),pred);
|
||||
|
||||
/* check if filtered data is correct */
|
||||
for (size_t i=N0; i<M; i++) {
|
||||
passed &= pred(src[i]);
|
||||
m[src[i]]--;
|
||||
}
|
||||
for (size_t i=N0; i<M; i++)
|
||||
passed &= (m[src[i]] == 0);
|
||||
}
|
||||
|
||||
return passed;
|
||||
}
|
||||
};
|
||||
|
||||
parallel_filter_regression_test parallel_filter_regression("parallel_filter_regression");
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
|
@ -1,48 +0,0 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "parallel_for.h"
|
||||
#include "../sys/regression.h"
|
||||
|
||||
namespace embree
|
||||
{
|
||||
struct parallel_for_regression_test : public RegressionTest
|
||||
{
|
||||
parallel_for_regression_test(const char* name) : RegressionTest(name) {
|
||||
registerRegressionTest(this);
|
||||
}
|
||||
|
||||
bool run ()
|
||||
{
|
||||
bool passed = true;
|
||||
|
||||
const size_t M = 10;
|
||||
for (size_t N=10; N<10000000; N=size_t(2.1*N))
|
||||
{
|
||||
/* sequentially calculate sum of squares */
|
||||
size_t sum0 = 0;
|
||||
for (size_t i=0; i<N; i++) {
|
||||
sum0 += i*i;
|
||||
}
|
||||
|
||||
/* parallel calculation of sum of squares */
|
||||
for (size_t m=0; m<M; m++)
|
||||
{
|
||||
std::atomic<size_t> sum1(0);
|
||||
parallel_for( size_t(0), size_t(N), size_t(1024), [&](const range<size_t>& r)
|
||||
{
|
||||
size_t s = 0;
|
||||
for (size_t i=r.begin(); i<r.end(); i++)
|
||||
s += i*i;
|
||||
sum1 += s;
|
||||
});
|
||||
passed = sum0 == sum1;
|
||||
}
|
||||
}
|
||||
|
||||
return passed;
|
||||
}
|
||||
};
|
||||
|
||||
parallel_for_regression_test parallel_for_regression("parallel_for_regression_test");
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
@ -8,12 +8,6 @@
|
|||
#include "../math/math.h"
|
||||
#include "../math/range.h"
|
||||
|
||||
#if defined(TASKING_GCD) && defined(BUILD_IOS)
|
||||
#include <dispatch/dispatch.h>
|
||||
#include <algorithm>
|
||||
#include <type_traits>
|
||||
#endif
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* parallel_for without range */
|
||||
|
@ -29,28 +23,10 @@ namespace embree
|
|||
if (!TaskScheduler::wait())
|
||||
// -- GODOT start --
|
||||
// throw std::runtime_error("task cancelled");
|
||||
abort();
|
||||
abort();
|
||||
// -- GODOT end --
|
||||
}
|
||||
#elif defined(TASKING_GCD) && defined(BUILD_IOS)
|
||||
|
||||
const size_t baselineNumBlocks = (TaskScheduler::threadCount() > 1)? TaskScheduler::threadCount() : 1;
|
||||
const size_t length = N;
|
||||
const size_t blockSize = (length + baselineNumBlocks-1) / baselineNumBlocks;
|
||||
const size_t numBlocks = (length + blockSize-1) / blockSize;
|
||||
|
||||
dispatch_apply(numBlocks, DISPATCH_APPLY_AUTO, ^(size_t currentBlock) {
|
||||
|
||||
const size_t start = (currentBlock * blockSize);
|
||||
const size_t blockLength = std::min(length - start, blockSize);
|
||||
const size_t end = start + blockLength;
|
||||
|
||||
for(size_t i=start; i < end; i++)
|
||||
{
|
||||
func(i);
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
#elif defined(TASKING_TBB)
|
||||
#if TBB_INTERFACE_VERSION >= 12002
|
||||
tbb::task_group_context context;
|
||||
|
@ -60,7 +36,7 @@ namespace embree
|
|||
if (context.is_group_execution_cancelled())
|
||||
// -- GODOT start --
|
||||
// throw std::runtime_error("task cancelled");
|
||||
abort();
|
||||
abort();
|
||||
// -- GODOT end --
|
||||
#else
|
||||
tbb::parallel_for(Index(0),N,Index(1),[&](Index i) {
|
||||
|
@ -69,7 +45,7 @@ namespace embree
|
|||
if (tbb::task::self().is_cancelled())
|
||||
// -- GODOT start --
|
||||
// throw std::runtime_error("task cancelled");
|
||||
abort();
|
||||
abort();
|
||||
// -- GODOT end --
|
||||
#endif
|
||||
|
||||
|
@ -92,28 +68,9 @@ namespace embree
|
|||
if (!TaskScheduler::wait())
|
||||
// -- GODOT start --
|
||||
// throw std::runtime_error("task cancelled");
|
||||
abort();
|
||||
abort();
|
||||
// -- GODOT end --
|
||||
|
||||
#elif defined(TASKING_GCD) && defined(BUILD_IOS)
|
||||
|
||||
const size_t baselineNumBlocks = (TaskScheduler::threadCount() > 1)? 4*TaskScheduler::threadCount() : 1;
|
||||
const size_t length = last - first;
|
||||
const size_t blockSizeByThreads = (length + baselineNumBlocks-1) / baselineNumBlocks;
|
||||
size_t blockSize = std::max<size_t>(minStepSize,blockSizeByThreads);
|
||||
blockSize += blockSize % 4;
|
||||
|
||||
const size_t numBlocks = (length + blockSize-1) / blockSize;
|
||||
|
||||
dispatch_apply(numBlocks, DISPATCH_APPLY_AUTO, ^(size_t currentBlock) {
|
||||
|
||||
const size_t start = first + (currentBlock * blockSize);
|
||||
const size_t end = std::min<size_t>(last, start + blockSize);
|
||||
|
||||
func( embree::range<Index>(start,end) );
|
||||
});
|
||||
|
||||
|
||||
#elif defined(TASKING_TBB)
|
||||
#if TBB_INTERFACE_VERSION >= 12002
|
||||
tbb::task_group_context context;
|
||||
|
@ -123,7 +80,7 @@ namespace embree
|
|||
if (context.is_group_execution_cancelled())
|
||||
// -- GODOT start --
|
||||
// throw std::runtime_error("task cancelled");
|
||||
abort();
|
||||
abort();
|
||||
// -- GODOT end --
|
||||
#else
|
||||
tbb::parallel_for(tbb::blocked_range<Index>(first,last,minStepSize),[&](const tbb::blocked_range<Index>& r) {
|
||||
|
@ -132,7 +89,7 @@ namespace embree
|
|||
if (tbb::task::self().is_cancelled())
|
||||
// -- GODOT start --
|
||||
// throw std::runtime_error("task cancelled");
|
||||
abort();
|
||||
abort();
|
||||
// -- GODOT end --
|
||||
#endif
|
||||
|
||||
|
@ -167,7 +124,7 @@ namespace embree
|
|||
if (context.is_group_execution_cancelled())
|
||||
// -- GODOT start --
|
||||
// throw std::runtime_error("task cancelled");
|
||||
abort();
|
||||
abort();
|
||||
// -- GODOT end --
|
||||
#else
|
||||
tbb::parallel_for(Index(0),N,Index(1),[&](Index i) {
|
||||
|
@ -176,7 +133,7 @@ namespace embree
|
|||
if (tbb::task::self().is_cancelled())
|
||||
// -- GODOT start --
|
||||
// throw std::runtime_error("task cancelled");
|
||||
abort();
|
||||
abort();
|
||||
// -- GODOT end --
|
||||
#endif
|
||||
}
|
||||
|
@ -192,10 +149,10 @@ namespace embree
|
|||
func(i);
|
||||
},ap,context);
|
||||
if (context.is_group_execution_cancelled())
|
||||
// -- GODOT start --
|
||||
// throw std::runtime_error("task cancelled");
|
||||
abort();
|
||||
// -- GODOT end --
|
||||
// -- GODOT start --
|
||||
// throw std::runtime_error("task cancelled");
|
||||
abort();
|
||||
// -- GODOT end --
|
||||
#else
|
||||
tbb::parallel_for(Index(0),N,Index(1),[&](Index i) {
|
||||
func(i);
|
||||
|
@ -203,7 +160,7 @@ namespace embree
|
|||
if (tbb::task::self().is_cancelled())
|
||||
// -- GODOT start --
|
||||
// throw std::runtime_error("task cancelled");
|
||||
abort();
|
||||
abort();
|
||||
// -- GODOT end --
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -1,63 +0,0 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "parallel_for_for.h"
|
||||
#include "../sys/regression.h"
|
||||
|
||||
namespace embree
|
||||
{
|
||||
struct parallel_for_for_regression_test : public RegressionTest
|
||||
{
|
||||
parallel_for_for_regression_test(const char* name) : RegressionTest(name) {
|
||||
registerRegressionTest(this);
|
||||
}
|
||||
|
||||
bool run ()
|
||||
{
|
||||
bool passed = true;
|
||||
|
||||
/* create vector with random numbers */
|
||||
size_t sum0 = 0;
|
||||
size_t K = 0;
|
||||
const size_t M = 1000;
|
||||
std::vector<std::vector<size_t>* > array2(M);
|
||||
for (size_t i=0; i<M; i++) {
|
||||
const size_t N = rand() % 1024;
|
||||
K+=N;
|
||||
array2[i] = new std::vector<size_t>(N);
|
||||
for (size_t j=0; j<N; j++)
|
||||
sum0 += (*array2[i])[j] = rand();
|
||||
}
|
||||
|
||||
/* array to test global index */
|
||||
std::vector<atomic<size_t>> verify_k(K);
|
||||
for (size_t i=0; i<K; i++) verify_k[i].store(0);
|
||||
|
||||
/* add all numbers using parallel_for_for */
|
||||
std::atomic<size_t> sum1(0);
|
||||
parallel_for_for( array2, size_t(1), [&](std::vector<size_t>* v, const range<size_t>& r, size_t k) -> size_t
|
||||
{
|
||||
size_t s = 0;
|
||||
for (size_t i=r.begin(); i<r.end(); i++) {
|
||||
s += (*v)[i];
|
||||
verify_k[k++]++;
|
||||
}
|
||||
sum1 += s;
|
||||
return sum1;
|
||||
});
|
||||
passed &= (sum0 == sum1);
|
||||
|
||||
/* check global index */
|
||||
for (size_t i=0; i<K; i++)
|
||||
passed &= (verify_k[i] == 1);
|
||||
|
||||
/* delete vectors again */
|
||||
for (size_t i=0; i<array2.size(); i++)
|
||||
delete array2[i];
|
||||
|
||||
return passed;
|
||||
}
|
||||
};
|
||||
|
||||
parallel_for_for_regression_test parallel_for_for_regression("parallel_for_for_regression_test");
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
|
@ -1,85 +0,0 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "parallel_for_for_prefix_sum.h"
|
||||
#include "../sys/regression.h"
|
||||
|
||||
namespace embree
|
||||
{
|
||||
struct parallel_for_for_prefix_sum_regression_test : public RegressionTest
|
||||
{
|
||||
parallel_for_for_prefix_sum_regression_test(const char* name) : RegressionTest(name) {
|
||||
registerRegressionTest(this);
|
||||
}
|
||||
|
||||
bool run ()
|
||||
{
|
||||
bool passed = true;
|
||||
|
||||
/* create vector with random numbers */
|
||||
const size_t M = 10;
|
||||
std::vector<atomic<size_t>> flattened;
|
||||
typedef std::vector<std::vector<size_t>* > ArrayArray;
|
||||
ArrayArray array2(M);
|
||||
size_t K = 0;
|
||||
for (size_t i=0; i<M; i++) {
|
||||
const size_t N = rand() % 10;
|
||||
K += N;
|
||||
array2[i] = new std::vector<size_t>(N);
|
||||
for (size_t j=0; j<N; j++)
|
||||
(*array2[i])[j] = rand() % 10;
|
||||
}
|
||||
|
||||
/* array to test global index */
|
||||
std::vector<atomic<size_t>> verify_k(K);
|
||||
for (size_t i=0; i<K; i++) verify_k[i].store(0);
|
||||
|
||||
ParallelForForPrefixSumState<size_t> state(array2,size_t(1));
|
||||
|
||||
/* dry run only counts */
|
||||
size_t S = parallel_for_for_prefix_sum0( state, array2, size_t(0), [&](std::vector<size_t>* v, const range<size_t>& r, size_t k, size_t i) -> size_t
|
||||
{
|
||||
size_t s = 0;
|
||||
for (size_t i=r.begin(); i<r.end(); i++) {
|
||||
s += (*v)[i];
|
||||
verify_k[k++]++;
|
||||
}
|
||||
return s;
|
||||
}, [](size_t v0, size_t v1) { return v0+v1; });
|
||||
|
||||
/* create properly sized output array */
|
||||
flattened.resize(S);
|
||||
for (auto& a : flattened) a.store(0);
|
||||
|
||||
/* now we actually fill the flattened array */
|
||||
parallel_for_for_prefix_sum1( state, array2, size_t(0), [&](std::vector<size_t>* v, const range<size_t>& r, size_t k, size_t i, const size_t base) -> size_t
|
||||
{
|
||||
size_t s = 0;
|
||||
for (size_t i=r.begin(); i<r.end(); i++) {
|
||||
for (size_t j=0; j<(*v)[i]; j++) {
|
||||
flattened[base+s+j]++;
|
||||
}
|
||||
s += (*v)[i];
|
||||
verify_k[k++]++;
|
||||
}
|
||||
return s;
|
||||
}, [](size_t v0, size_t v1) { return v0+v1; });
|
||||
|
||||
/* check global index */
|
||||
for (size_t i=0; i<K; i++)
|
||||
passed &= (verify_k[i] == 2);
|
||||
|
||||
/* check if each element was assigned exactly once */
|
||||
for (size_t i=0; i<flattened.size(); i++)
|
||||
passed &= (flattened[i] == 1);
|
||||
|
||||
/* delete arrays again */
|
||||
for (size_t i=0; i<array2.size(); i++)
|
||||
delete array2[i];
|
||||
|
||||
return passed;
|
||||
}
|
||||
};
|
||||
|
||||
parallel_for_for_prefix_sum_regression_test parallel_for_for_prefix_sum_regression("parallel_for_for_prefix_sum_regression_test");
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
|
@ -1,47 +0,0 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "parallel_map.h"
|
||||
#include "../sys/regression.h"
|
||||
|
||||
namespace embree
|
||||
{
|
||||
struct parallel_map_regression_test : public RegressionTest
|
||||
{
|
||||
parallel_map_regression_test(const char* name) : RegressionTest(name) {
|
||||
registerRegressionTest(this);
|
||||
}
|
||||
|
||||
bool run ()
|
||||
{
|
||||
bool passed = true;
|
||||
|
||||
/* create key/value vectors with random numbers */
|
||||
const size_t N = 10000;
|
||||
std::vector<uint32_t> keys(N);
|
||||
std::vector<uint32_t> vals(N);
|
||||
for (size_t i=0; i<N; i++) keys[i] = 2*unsigned(i)*647382649;
|
||||
for (size_t i=0; i<N; i++) std::swap(keys[i],keys[rand()%N]);
|
||||
for (size_t i=0; i<N; i++) vals[i] = 2*rand();
|
||||
|
||||
/* create map */
|
||||
parallel_map<uint32_t,uint32_t> map;
|
||||
map.init(keys,vals);
|
||||
|
||||
/* check that all keys are properly mapped */
|
||||
for (size_t i=0; i<N; i++) {
|
||||
const uint32_t* val = map.lookup(keys[i]);
|
||||
passed &= val && (*val == vals[i]);
|
||||
}
|
||||
|
||||
/* check that these keys are not in the map */
|
||||
for (size_t i=0; i<N; i++) {
|
||||
passed &= !map.lookup(keys[i]+1);
|
||||
}
|
||||
|
||||
return passed;
|
||||
}
|
||||
};
|
||||
|
||||
parallel_map_regression_test parallel_map_regression("parallel_map_regression_test");
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
|
@ -1,53 +0,0 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "parallel_partition.h"
|
||||
#include "../sys/regression.h"
|
||||
|
||||
namespace embree
|
||||
{
|
||||
struct parallel_partition_regression_test : public RegressionTest
|
||||
{
|
||||
parallel_partition_regression_test(const char* name) : RegressionTest(name) {
|
||||
registerRegressionTest(this);
|
||||
}
|
||||
|
||||
bool run ()
|
||||
{
|
||||
bool passed = true;
|
||||
|
||||
for (size_t i=0; i<100; i++)
|
||||
{
|
||||
/* create random permutation */
|
||||
size_t N = std::rand() % 1000000;
|
||||
std::vector<unsigned> array(N);
|
||||
for (unsigned i=0; i<N; i++) array[i] = i;
|
||||
for (auto& v : array) std::swap(v,array[std::rand()%array.size()]);
|
||||
size_t split = std::rand() % (N+1);
|
||||
|
||||
/* perform parallel partitioning */
|
||||
size_t left_sum = 0, right_sum = 0;
|
||||
size_t mid = parallel_partitioning(array.data(),0,array.size(),0,left_sum,right_sum,
|
||||
[&] ( size_t i ) { return i < split; },
|
||||
[] ( size_t& sum, unsigned v) { sum += v; },
|
||||
[] ( size_t& sum, size_t v) { sum += v; },
|
||||
128);
|
||||
|
||||
/*serial_partitioning(array.data(),0,array.size(),left_sum,right_sum,
|
||||
[&] ( size_t i ) { return i < split; },
|
||||
[] ( size_t& left_sum, int v) { left_sum += v; });*/
|
||||
|
||||
/* verify result */
|
||||
passed &= mid == split;
|
||||
passed &= left_sum == split*(split-1)/2;
|
||||
passed &= right_sum == N*(N-1)/2-left_sum;
|
||||
for (size_t i=0; i<split; i++) passed &= array[i] < split;
|
||||
for (size_t i=split; i<N; i++) passed &= array[i] >= split;
|
||||
}
|
||||
|
||||
return passed;
|
||||
}
|
||||
};
|
||||
|
||||
parallel_partition_regression_test parallel_partition_regression("parallel_partition_regression_test");
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
|
@ -1,48 +0,0 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "parallel_prefix_sum.h"
|
||||
#include "../sys/regression.h"
|
||||
|
||||
namespace embree
|
||||
{
|
||||
struct parallel_prefix_sum_regression_test : public RegressionTest
|
||||
{
|
||||
parallel_prefix_sum_regression_test(const char* name) : RegressionTest(name) {
|
||||
registerRegressionTest(this);
|
||||
}
|
||||
|
||||
bool run ()
|
||||
{
|
||||
bool passed = true;
|
||||
const size_t M = 10;
|
||||
|
||||
for (size_t N=10; N<10000000; N=size_t(2.1*N))
|
||||
{
|
||||
/* initialize array with random numbers */
|
||||
uint32_t sum0 = 0;
|
||||
std::vector<uint32_t> src(N);
|
||||
for (size_t i=0; i<N; i++) {
|
||||
sum0 += src[i] = rand();
|
||||
}
|
||||
|
||||
/* calculate parallel prefix sum */
|
||||
std::vector<uint32_t> dst(N);
|
||||
for (auto& v : dst) v = 0;
|
||||
|
||||
for (size_t i=0; i<M; i++) {
|
||||
uint32_t sum1 = parallel_prefix_sum(src,dst,N,0,std::plus<uint32_t>());
|
||||
passed &= (sum0 == sum1);
|
||||
}
|
||||
|
||||
/* check if prefix sum is correct */
|
||||
for (size_t i=0, sum=0; i<N; sum+=src[i++])
|
||||
passed &= ((uint32_t)sum == dst[i]);
|
||||
}
|
||||
|
||||
return passed;
|
||||
}
|
||||
};
|
||||
|
||||
parallel_prefix_sum_regression_test parallel_prefix_sum_regression("parallel_prefix_sum_regression");
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
|
@ -1,49 +0,0 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "parallel_reduce.h"
|
||||
#include "../sys/regression.h"
|
||||
|
||||
namespace embree
|
||||
{
|
||||
struct parallel_reduce_regression_test : public RegressionTest
|
||||
{
|
||||
parallel_reduce_regression_test(const char* name) : RegressionTest(name) {
|
||||
registerRegressionTest(this);
|
||||
}
|
||||
|
||||
bool run ()
|
||||
{
|
||||
bool passed = true;
|
||||
|
||||
const size_t M = 10;
|
||||
for (size_t N=10; N<10000000; N=size_t(2.1*N))
|
||||
{
|
||||
/* sequentially calculate sum of squares */
|
||||
size_t sum0 = 0;
|
||||
for (size_t i=0; i<N; i++) {
|
||||
sum0 += i*i;
|
||||
}
|
||||
|
||||
/* parallel calculation of sum of squares */
|
||||
for (size_t m=0; m<M; m++)
|
||||
{
|
||||
size_t sum1 = parallel_reduce( size_t(0), size_t(N), size_t(1024), size_t(0), [&](const range<size_t>& r) -> size_t
|
||||
{
|
||||
size_t s = 0;
|
||||
for (size_t i=r.begin(); i<r.end(); i++)
|
||||
s += i*i;
|
||||
return s;
|
||||
},
|
||||
[](const size_t v0, const size_t v1) {
|
||||
return v0+v1;
|
||||
});
|
||||
passed = sum0 == sum1;
|
||||
}
|
||||
}
|
||||
return passed;
|
||||
}
|
||||
};
|
||||
|
||||
parallel_reduce_regression_test parallel_reduce_regression("parallel_reduce_regression_test");
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
@ -43,7 +43,7 @@ namespace embree
|
|||
template<typename Index, typename Value, typename Func, typename Reduction>
|
||||
__forceinline Value parallel_reduce( const Index first, const Index last, const Index minStepSize, const Value& identity, const Func& func, const Reduction& reduction )
|
||||
{
|
||||
#if defined(TASKING_INTERNAL) || (defined(TASKING_GCD) && defined(BUILD_IOS))
|
||||
#if defined(TASKING_INTERNAL)
|
||||
|
||||
/* fast path for small number of iterations */
|
||||
Index taskCount = (last-first+minStepSize-1)/minStepSize;
|
||||
|
|
|
@ -1,43 +0,0 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "parallel_set.h"
|
||||
#include "../sys/regression.h"
|
||||
|
||||
namespace embree
|
||||
{
|
||||
struct parallel_set_regression_test : public RegressionTest
|
||||
{
|
||||
parallel_set_regression_test(const char* name) : RegressionTest(name) {
|
||||
registerRegressionTest(this);
|
||||
}
|
||||
|
||||
bool run ()
|
||||
{
|
||||
bool passed = true;
|
||||
|
||||
/* create vector with random numbers */
|
||||
const size_t N = 10000;
|
||||
std::vector<uint32_t> unsorted(N);
|
||||
for (size_t i=0; i<N; i++) unsorted[i] = 2*rand();
|
||||
|
||||
/* created set from numbers */
|
||||
parallel_set<uint32_t> sorted;
|
||||
sorted.init(unsorted);
|
||||
|
||||
/* check that all elements are in the set */
|
||||
for (size_t i=0; i<N; i++) {
|
||||
passed &= sorted.lookup(unsorted[i]);
|
||||
}
|
||||
|
||||
/* check that these elements are not in the set */
|
||||
for (size_t i=0; i<N; i++) {
|
||||
passed &= !sorted.lookup(unsorted[i]+1);
|
||||
}
|
||||
|
||||
return passed;
|
||||
}
|
||||
};
|
||||
|
||||
parallel_set_regression_test parallel_set_regression("parallel_set_regression_test");
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
|
@ -1,50 +0,0 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "parallel_sort.h"
|
||||
#include "../sys/regression.h"
|
||||
|
||||
namespace embree
|
||||
{
|
||||
template<typename Key>
|
||||
struct RadixSortRegressionTest : public RegressionTest
|
||||
{
|
||||
RadixSortRegressionTest(const char* name) : RegressionTest(name) {
|
||||
registerRegressionTest(this);
|
||||
}
|
||||
|
||||
bool run ()
|
||||
{
|
||||
bool passed = true;
|
||||
const size_t M = 10;
|
||||
|
||||
for (size_t N=10; N<1000000; N=size_t(2.1*N))
|
||||
{
|
||||
std::vector<Key> src(N); memset(src.data(),0,N*sizeof(Key));
|
||||
std::vector<Key> tmp(N); memset(tmp.data(),0,N*sizeof(Key));
|
||||
for (size_t i=0; i<N; i++) src[i] = uint64_t(rand())*uint64_t(rand());
|
||||
|
||||
/* calculate checksum */
|
||||
Key sum0 = 0; for (size_t i=0; i<N; i++) sum0 += src[i];
|
||||
|
||||
/* sort numbers */
|
||||
for (size_t i=0; i<M; i++) {
|
||||
radix_sort<Key>(src.data(),tmp.data(),N);
|
||||
}
|
||||
|
||||
/* calculate checksum */
|
||||
Key sum1 = 0; for (size_t i=0; i<N; i++) sum1 += src[i];
|
||||
if (sum0 != sum1) passed = false;
|
||||
|
||||
/* check if numbers are sorted */
|
||||
for (size_t i=1; i<N; i++)
|
||||
passed &= src[i-1] <= src[i];
|
||||
}
|
||||
|
||||
return passed;
|
||||
}
|
||||
};
|
||||
|
||||
RadixSortRegressionTest<uint32_t> test_u32("RadixSortRegressionTestU32");
|
||||
RadixSortRegressionTest<uint64_t> test_u64("RadixSortRegressionTestU64");
|
||||
}
|
|
@ -1,13 +1,10 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "../simd/simd.h"
|
||||
#include "parallel_for.h"
|
||||
#if defined(TASKING_GCD) && defined(BUILD_IOS)
|
||||
#include "../sys/alloc.h"
|
||||
#endif
|
||||
#include <algorithm>
|
||||
|
||||
namespace embree
|
||||
|
@ -323,7 +320,7 @@ namespace embree
|
|||
#pragma nounroll
|
||||
#endif
|
||||
for (size_t i=startID; i<endID; i++) {
|
||||
#if defined(__X86_64__) || defined(__aarch64__)
|
||||
#if defined(__64BIT__)
|
||||
const size_t index = ((size_t)(Key)src[i] >> (size_t)shift) & (size_t)mask;
|
||||
#else
|
||||
const Key index = ((Key)src[i] >> shift) & mask;
|
||||
|
@ -385,7 +382,7 @@ namespace embree
|
|||
#endif
|
||||
for (size_t i=startID; i<endID; i++) {
|
||||
const Ty elt = src[i];
|
||||
#if defined(__X86_64__) || defined(__aarch64__)
|
||||
#if defined(__64BIT__)
|
||||
const size_t index = ((size_t)(Key)src[i] >> (size_t)shift) & (size_t)mask;
|
||||
#else
|
||||
const size_t index = ((Key)src[i] >> shift) & mask;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
thirdparty/embree/common/lexers/stream.h (vendored, 2 changes)

@@ -1,4 +1,4 @@
-// Copyright 2009-2020 Intel Corporation
+// Copyright 2009-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0

 #pragma once
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "stringstream.h"
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "tokenstream.h"
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
thirdparty/embree/common/math/AVX2NEON.h (vendored, 986 changes, file removed)
@ -1,986 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "SSE2NEON.h"
|
||||
|
||||
|
||||
#define AVX2NEON_ABI static inline __attribute__((always_inline))
|
||||
|
||||
|
||||
struct __m256d;
|
||||
|
||||
struct __m256 {
|
||||
__m128 lo,hi;
|
||||
__m256() {}
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
struct __m256i {
|
||||
__m128i lo,hi;
|
||||
explicit __m256i(const __m256 a) : lo(__m128i(a.lo)),hi(__m128i(a.hi)) {}
|
||||
operator __m256() const {__m256 res; res.lo = __m128(lo);res.hi = __m128(hi); return res;}
|
||||
__m256i() {}
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
struct __m256d {
|
||||
float64x2_t lo,hi;
|
||||
__m256d() {}
|
||||
__m256d(const __m256& a) : lo(float64x2_t(a.lo)),hi(float64x2_t(a.hi)) {}
|
||||
__m256d(const __m256i& a) : lo(float64x2_t(a.lo)),hi(float64x2_t(a.hi)) {}
|
||||
};
|
||||
|
||||
#define UNARY_AVX_OP(type,func,basic_func) AVX2NEON_ABI type func(const type& a) {type res;res.lo=basic_func(a.lo);res.hi=basic_func(a.hi);return res;}
|
||||
|
||||
|
||||
#define BINARY_AVX_OP(type,func,basic_func) AVX2NEON_ABI type func(const type& a,const type& b) {type res;res.lo=basic_func(a.lo,b.lo);res.hi=basic_func(a.hi,b.hi);return res;}
|
||||
#define BINARY_AVX_OP_CAST(type,func,basic_func,bdst,bsrc) AVX2NEON_ABI type func(const type& a,const type& b) {type res;res.lo=bdst(basic_func(bsrc(a.lo),bsrc(b.lo)));res.hi=bdst(basic_func(bsrc(a.hi),bsrc(b.hi)));return res;}
|
||||
|
||||
#define TERNARY_AVX_OP(type,func,basic_func) AVX2NEON_ABI type func(const type& a,const type& b,const type& c) {type res;res.lo=basic_func(a.lo,b.lo,c.lo);res.hi=basic_func(a.hi,b.hi,c.hi);return res;}
|
||||
|
||||
|
||||
#define CAST_SIMD_TYPE(to,name,from,basic_dst) AVX2NEON_ABI to name(const from& a) { to res; res.lo = basic_dst(a.lo); res.hi=basic_dst(a.hi); return res;}
|
||||
|
||||
|
||||
|
||||
#define _mm_stream_load_si128 _mm_load_si128
|
||||
#define _mm256_stream_load_si256 _mm256_load_si256
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128 _mm_blend_ps (__m128 a, __m128 b, const int imm8)
|
||||
{
|
||||
__m128 res;
|
||||
for (int i=0;i<4;i++)
|
||||
{
|
||||
if (imm8 & (1<<i))
|
||||
{
|
||||
res[i] = b[i];
|
||||
}
|
||||
else{
|
||||
res[i] = a[i];
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8)
|
||||
{
|
||||
__m128i res;
|
||||
for (int i=0;i<4;i++)
|
||||
{
|
||||
if (imm8 & (1<<i))
|
||||
{
|
||||
res[i] = b[i];
|
||||
}
|
||||
else{
|
||||
res[i] = a[i];
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128 _mm_cmpngt_ps (__m128 a, __m128 b)
|
||||
{
|
||||
return __m128(vmvnq_s32(__m128i(_mm_cmpgt_ps(a,b))));
|
||||
}
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128i _mm_loadl_epi64 (__m128i const* mem_addr)
|
||||
{
|
||||
int64x2_t y;
|
||||
y[0] = *(int64_t *)mem_addr;
|
||||
y[1] = 0;
|
||||
return __m128i(y);
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
int _mm_movemask_popcnt(__m128 a)
|
||||
{
|
||||
return __builtin_popcount(_mm_movemask_ps(a));
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128 _mm_maskload_ps (float const * mem_addr, __m128i mask)
|
||||
{
|
||||
__m128 res;
|
||||
for (int i=0;i<4;i++) {
|
||||
if (mask[i] & 0x80000000) res[i] = mem_addr[i]; else res[i] = 0;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
void _mm_maskstore_ps (float * mem_addr, __m128i mask, __m128 a)
|
||||
{
|
||||
for (int i=0;i<4;i++) {
|
||||
if (mask[i] & 0x80000000) mem_addr[i] = a[i];
|
||||
}
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
void _mm_maskstore_epi32 (int * mem_addr, __m128i mask, __m128i a)
|
||||
{
|
||||
for (int i=0;i<4;i++) {
|
||||
if (mask[i] & 0x80000000) mem_addr[i] = a[i];
|
||||
}
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128 _mm_fnmsub_ps (__m128 a, __m128 b, __m128 c)
|
||||
{
|
||||
return vnegq_f32(vfmaq_f32(c,a,b));
|
||||
}
|
||||
|
||||
#define _mm_fnmsub_ss _mm_fnmsub_ps
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128 _mm_fnmadd_ps (__m128 a, __m128 b, __m128 c)
|
||||
{
|
||||
return vfmsq_f32(c,a,b);
|
||||
}
|
||||
|
||||
#define _mm_fnmadd_ss _mm_fnmadd_ps
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128 _mm_broadcast_ss (float const * mem_addr)
|
||||
{
|
||||
return vdupq_n_f32(*mem_addr);
|
||||
}
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128 _mm_fmsub_ps (__m128 a, __m128 b, __m128 c)
|
||||
{
|
||||
return vfmaq_f32(vnegq_f32(c),a,b);
|
||||
}
|
||||
|
||||
#define _mm_fmsub_ss _mm_fmsub_ps
|
||||
#define _mm_fmadd_ps _mm_madd_ps
|
||||
#define _mm_fmadd_ss _mm_madd_ps
|
||||
|
||||
|
||||
|
||||
template<int code>
|
||||
AVX2NEON_ABI float32x4_t dpps_neon(const float32x4_t& a,const float32x4_t& b)
|
||||
{
|
||||
float v;
|
||||
v = 0;
|
||||
v += (code & 0x10) ? a[0]*b[0] : 0;
|
||||
v += (code & 0x20) ? a[1]*b[1] : 0;
|
||||
v += (code & 0x40) ? a[2]*b[2] : 0;
|
||||
v += (code & 0x80) ? a[3]*b[3] : 0;
|
||||
float32x4_t res;
|
||||
res[0] = (code & 0x1) ? v : 0;
|
||||
res[1] = (code & 0x2) ? v : 0;
|
||||
res[2] = (code & 0x4) ? v : 0;
|
||||
res[3] = (code & 0x8) ? v : 0;
|
||||
return res;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline float32x4_t dpps_neon<0x7f>(const float32x4_t& a,const float32x4_t& b)
|
||||
{
|
||||
float v;
|
||||
float32x4_t m = _mm_mul_ps(a,b);
|
||||
m[3] = 0;
|
||||
v = vaddvq_f32(m);
|
||||
return _mm_set1_ps(v);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline float32x4_t dpps_neon<0xff>(const float32x4_t& a,const float32x4_t& b)
|
||||
{
|
||||
float v;
|
||||
float32x4_t m = _mm_mul_ps(a,b);
|
||||
v = vaddvq_f32(m);
|
||||
return _mm_set1_ps(v);
|
||||
}
|
||||
|
||||
#define _mm_dp_ps(a,b,c) dpps_neon<c>((a),(b))
|
||||
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128 _mm_cmpnge_ps (__m128 a, __m128 b)
|
||||
{
|
||||
return __m128(vmvnq_s32(__m128i(_mm_cmpge_ps(a,b))));
|
||||
}
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128 _mm_permutevar_ps (__m128 a, __m128i b)
|
||||
{
|
||||
__m128 x;
|
||||
for (int i=0;i<4;i++)
|
||||
{
|
||||
x[i] = a[b[i&3]];
|
||||
}
|
||||
return x;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_setzero_si256()
|
||||
{
|
||||
__m256i res;
|
||||
res.lo = res.hi = vdupq_n_s32(0);
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256 _mm256_setzero_ps()
|
||||
{
|
||||
__m256 res;
|
||||
res.lo = res.hi = vdupq_n_f32(0.0f);
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_undefined_si256()
|
||||
{
|
||||
return _mm256_setzero_si256();
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256 _mm256_undefined_ps()
|
||||
{
|
||||
return _mm256_setzero_ps();
|
||||
}
|
||||
|
||||
CAST_SIMD_TYPE(__m256d,_mm256_castps_pd,__m256,float64x2_t)
|
||||
CAST_SIMD_TYPE(__m256i,_mm256_castps_si256,__m256,__m128i)
|
||||
CAST_SIMD_TYPE(__m256, _mm256_castsi256_ps, __m256i,__m128)
|
||||
CAST_SIMD_TYPE(__m256, _mm256_castpd_ps ,__m256d,__m128)
|
||||
CAST_SIMD_TYPE(__m256d, _mm256_castsi256_pd, __m256i,float64x2_t)
|
||||
CAST_SIMD_TYPE(__m256i, _mm256_castpd_si256, __m256d,__m128i)
|
||||
|
||||
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128 _mm256_castps256_ps128 (__m256 a)
|
||||
{
|
||||
return a.lo;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_castsi128_si256 (__m128i a)
|
||||
{
|
||||
__m256i res;
|
||||
res.lo = a ;
|
||||
res.hi = vdupq_n_s32(0);
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128i _mm256_castsi256_si128 (__m256i a)
|
||||
{
|
||||
return a.lo;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256 _mm256_castps128_ps256 (__m128 a)
|
||||
{
|
||||
__m256 res;
|
||||
res.lo = a;
|
||||
res.hi = vdupq_n_f32(0);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256 _mm256_broadcast_ss (float const * mem_addr)
|
||||
{
|
||||
__m256 res;
|
||||
res.lo = res.hi = vdupq_n_f32(*mem_addr);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_set_epi32 (int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0)
|
||||
{
|
||||
__m128i lo = {e0,e1,e2,e3}, hi = {e4,e5,e6,e7};
|
||||
__m256i res;
|
||||
res.lo = lo; res.hi = hi;
|
||||
return res;
|
||||
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_set1_epi32 (int a)
|
||||
{
|
||||
__m256i res;
|
||||
res.lo = res.hi = vdupq_n_s32(a);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
int _mm256_movemask_ps(const __m256& v)
|
||||
{
|
||||
return (_mm_movemask_ps(v.hi) << 4) | _mm_movemask_ps(v.lo);
|
||||
}
|
||||
|
||||
template<int imm8>
|
||||
AVX2NEON_ABI
|
||||
__m256 __mm256_permute_ps (const __m256& a)
|
||||
{
|
||||
__m256 res;
|
||||
res.lo = _mm_shuffle_ps(a.lo,a.lo,imm8);
|
||||
res.hi = _mm_shuffle_ps(a.hi,a.hi,imm8);
|
||||
return res;
|
||||
|
||||
}
|
||||
|
||||
#define _mm256_permute_ps(a,c) __mm256_permute_ps<c>(a)
|
||||
|
||||
|
||||
template<int imm8>
|
||||
AVX2NEON_ABI
|
||||
__m256 __mm256_shuffle_ps (const __m256 a,const __m256& b)
|
||||
{
|
||||
__m256 res;
|
||||
res.lo = _mm_shuffle_ps(a.lo,b.lo,imm8);
|
||||
res.hi = _mm_shuffle_ps(a.hi,b.hi,imm8);
|
||||
return res;
|
||||
|
||||
}
|
||||
|
||||
#define _mm256_shuffle_ps(a,b,c) __mm256_shuffle_ps<c>(a,b)
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_set1_epi64x (long long a)
|
||||
{
|
||||
__m256i res;
|
||||
int64x2_t t = vdupq_n_s64(a);
|
||||
res.lo = res.hi = __m128i(t);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256 _mm256_permute2f128_ps (__m256 a, __m256 b, int imm8)
|
||||
{
|
||||
__m256 res;
|
||||
__m128 tmp;
|
||||
switch (imm8 & 0x7)
|
||||
{
|
||||
case 0: tmp = a.lo; break;
|
||||
case 1: tmp = a.hi; break;
|
||||
case 2: tmp = b.lo; break;
|
||||
case 3: tmp = b.hi; break;
|
||||
}
|
||||
if (imm8 & 0x8)
|
||||
tmp = _mm_setzero_ps();
|
||||
|
||||
|
||||
|
||||
res.lo = tmp;
|
||||
imm8 >>= 4;
|
||||
|
||||
switch (imm8 & 0x7)
|
||||
{
|
||||
case 0: tmp = a.lo; break;
|
||||
case 1: tmp = a.hi; break;
|
||||
case 2: tmp = b.lo; break;
|
||||
case 3: tmp = b.hi; break;
|
||||
}
|
||||
if (imm8 & 0x8)
|
||||
tmp = _mm_setzero_ps();
|
||||
|
||||
res.hi = tmp;
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256 _mm256_moveldup_ps (__m256 a)
|
||||
{
|
||||
__m256 res;
|
||||
res.lo[0] = res.lo[1] = a.lo[0];
|
||||
res.lo[2] = res.lo[3] = a.lo[2];
|
||||
res.hi[0] = res.hi[1] = a.hi[0];
|
||||
res.hi[2] = res.hi[3] = a.hi[2];
|
||||
return res;
|
||||
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256 _mm256_movehdup_ps (__m256 a)
|
||||
{
|
||||
__m256 res;
|
||||
res.lo[0] = res.lo[1] = a.lo[1];
|
||||
res.lo[2] = res.lo[3] = a.lo[3];
|
||||
res.hi[0] = res.hi[1] = a.hi[1];
|
||||
res.hi[2] = res.hi[3] = a.hi[3];
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256 _mm256_insertf128_ps (__m256 a, __m128 b, int imm8)
|
||||
{
|
||||
__m256 res = a;
|
||||
if (imm8 & 1) res.hi = b;
|
||||
else res.lo = b;
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m128 _mm256_extractf128_ps (__m256 a, const int imm8)
|
||||
{
|
||||
if (imm8 & 1) return a.hi;
|
||||
return a.lo;
|
||||
}
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256d _mm256_movedup_pd (__m256d a)
|
||||
{
|
||||
__m256d res;
|
||||
res.hi = a.hi;
|
||||
res.lo[0] = res.lo[1] = a.lo[0];
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_abs_epi32(__m256i a)
|
||||
{
|
||||
__m256i res;
|
||||
res.lo = vabsq_s32(a.lo);
|
||||
res.hi = vabsq_s32(a.hi);
|
||||
return res;
|
||||
}
|
||||
|
||||
UNARY_AVX_OP(__m256,_mm256_sqrt_ps,_mm_sqrt_ps)
|
||||
UNARY_AVX_OP(__m256,_mm256_rsqrt_ps,_mm_rsqrt_ps)
|
||||
UNARY_AVX_OP(__m256,_mm256_rcp_ps,_mm_rcp_ps)
|
||||
UNARY_AVX_OP(__m256,_mm256_floor_ps,vrndmq_f32)
|
||||
UNARY_AVX_OP(__m256,_mm256_ceil_ps,vrndpq_f32)
|
||||
|
||||
|
||||
BINARY_AVX_OP(__m256i,_mm256_add_epi32,_mm_add_epi32)
|
||||
BINARY_AVX_OP(__m256i,_mm256_sub_epi32,_mm_sub_epi32)
|
||||
BINARY_AVX_OP(__m256i,_mm256_mullo_epi32,_mm_mullo_epi32)
|
||||
|
||||
BINARY_AVX_OP(__m256i,_mm256_min_epi32,_mm_min_epi32)
|
||||
BINARY_AVX_OP(__m256i,_mm256_max_epi32,_mm_max_epi32)
|
||||
BINARY_AVX_OP_CAST(__m256i,_mm256_min_epu32,vminq_u32,__m128i,uint32x4_t)
|
||||
BINARY_AVX_OP_CAST(__m256i,_mm256_max_epu32,vmaxq_u32,__m128i,uint32x4_t)
|
||||
|
||||
BINARY_AVX_OP(__m256,_mm256_min_ps,_mm_min_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_max_ps,_mm_max_ps)
|
||||
|
||||
BINARY_AVX_OP(__m256,_mm256_add_ps,_mm_add_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_mul_ps,_mm_mul_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_sub_ps,_mm_sub_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_div_ps,_mm_div_ps)
|
||||
|
||||
BINARY_AVX_OP(__m256,_mm256_and_ps,_mm_and_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_andnot_ps,_mm_andnot_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_or_ps,_mm_or_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_xor_ps,_mm_xor_ps)
|
||||
|
||||
BINARY_AVX_OP_CAST(__m256d,_mm256_and_pd,vandq_s64,float64x2_t,int64x2_t)
|
||||
BINARY_AVX_OP_CAST(__m256d,_mm256_or_pd,vorrq_s64,float64x2_t,int64x2_t)
|
||||
BINARY_AVX_OP_CAST(__m256d,_mm256_xor_pd,veorq_s64,float64x2_t,int64x2_t)
|
||||
|
||||
|
||||
|
||||
BINARY_AVX_OP(__m256i,_mm256_and_si256,_mm_and_si128)
|
||||
BINARY_AVX_OP(__m256i,_mm256_or_si256,_mm_or_si128)
|
||||
BINARY_AVX_OP(__m256i,_mm256_xor_si256,_mm_xor_si128)
|
||||
|
||||
|
||||
BINARY_AVX_OP(__m256,_mm256_unpackhi_ps,_mm_unpackhi_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_unpacklo_ps,_mm_unpacklo_ps)
|
||||
TERNARY_AVX_OP(__m256,_mm256_blendv_ps,_mm_blendv_ps)
|
||||
|
||||
|
||||
TERNARY_AVX_OP(__m256,_mm256_fmadd_ps,_mm_fmadd_ps)
|
||||
TERNARY_AVX_OP(__m256,_mm256_fnmadd_ps,_mm_fnmadd_ps)
|
||||
TERNARY_AVX_OP(__m256,_mm256_fmsub_ps,_mm_fmsub_ps)
|
||||
TERNARY_AVX_OP(__m256,_mm256_fnmsub_ps,_mm_fnmsub_ps)
|
||||
|
||||
|
||||
BINARY_AVX_OP(__m256i,_mm256_unpackhi_epi32,_mm_unpackhi_epi32)
|
||||
BINARY_AVX_OP(__m256i,_mm256_unpacklo_epi32,_mm_unpacklo_epi32)
|
||||
|
||||
|
||||
BINARY_AVX_OP(__m256i,_mm256_cmpeq_epi32,_mm_cmpeq_epi32)
|
||||
BINARY_AVX_OP(__m256i,_mm256_cmpgt_epi32,_mm_cmpgt_epi32)
|
||||
BINARY_AVX_OP(__m256,_mm256_cmpeq_ps,_mm_cmpeq_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_cmpneq_ps,_mm_cmpneq_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_cmpnlt_ps,_mm_cmpnlt_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_cmpngt_ps,_mm_cmpngt_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_cmpge_ps,_mm_cmpge_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_cmpnge_ps,_mm_cmpnge_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_cmplt_ps,_mm_cmplt_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_cmple_ps,_mm_cmple_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_cmpgt_ps,_mm_cmpgt_ps)
|
||||
BINARY_AVX_OP(__m256,_mm256_cmpnle_ps,_mm_cmpnle_ps)
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_cvtps_epi32 (__m256 a)
|
||||
{
|
||||
__m256i res;
|
||||
res.lo = _mm_cvtps_epi32(a.lo);
|
||||
res.hi = _mm_cvtps_epi32(a.hi);
|
||||
return res;
|
||||
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_cvttps_epi32 (__m256 a)
|
||||
{
|
||||
__m256i res;
|
||||
res.lo = _mm_cvttps_epi32(a.lo);
|
||||
res.hi = _mm_cvttps_epi32(a.hi);
|
||||
return res;
|
||||
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256 _mm256_loadu_ps (float const * mem_addr)
|
||||
{
|
||||
__m256 res;
|
||||
res.lo = *(__m128 *)(mem_addr + 0);
|
||||
res.hi = *(__m128 *)(mem_addr + 4);
|
||||
return res;
|
||||
}
|
||||
#define _mm256_load_ps _mm256_loadu_ps
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
int _mm256_testz_ps (const __m256& a, const __m256& b)
|
||||
{
|
||||
__m256 t = a;
|
||||
if (&a != &b)
|
||||
t = _mm256_and_ps(a,b);
|
||||
|
||||
__m128i l = vshrq_n_s32(__m128i(t.lo),31);
|
||||
__m128i h = vshrq_n_s32(__m128i(t.hi),31);
|
||||
return vaddvq_s32(vaddq_s32(l,h)) == 0;
|
||||
}
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_set_epi64x (int64_t e3, int64_t e2, int64_t e1, int64_t e0)
|
||||
{
|
||||
__m256i res;
|
||||
int64x2_t t0 = {e0,e1};
|
||||
int64x2_t t1 = {e2,e3};
|
||||
res.lo = __m128i(t0);
|
||||
res.hi = __m128i(t1);
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256d _mm256_setzero_pd ()
|
||||
{
|
||||
__m256d res;
|
||||
res.lo = res.hi = vdupq_n_f64(0);
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
int _mm256_movemask_pd (__m256d a)
|
||||
{
|
||||
int res = 0;
|
||||
uint64x2_t x;
|
||||
x = uint64x2_t(a.lo);
|
||||
res |= (x[0] >> 63) ? 1 : 0;
|
||||
res |= (x[0] >> 63) ? 2 : 0;
|
||||
x = uint64x2_t(a.hi);
|
||||
res |= (x[0] >> 63) ? 4 : 0;
|
||||
res |= (x[0] >> 63) ? 8 : 0;
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b)
|
||||
{
|
||||
__m256i res;
|
||||
res.lo = __m128i(vceqq_s64(int64x2_t(a.lo),int64x2_t(b.lo)));
|
||||
res.hi = __m128i(vceqq_s64(int64x2_t(a.hi),int64x2_t(b.hi)));
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_cmpeq_pd (__m256d a, __m256d b)
|
||||
{
|
||||
__m256i res;
|
||||
res.lo = __m128i(vceqq_f64(a.lo,b.lo));
|
||||
res.hi = __m128i(vceqq_f64(a.hi,b.hi));
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
int _mm256_testz_pd (const __m256d& a, const __m256d& b)
|
||||
{
|
||||
__m256d t = a;
|
||||
|
||||
if (&a != &b)
|
||||
t = _mm256_and_pd(a,b);
|
||||
|
||||
return _mm256_movemask_pd(t) == 0;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256d _mm256_blendv_pd (__m256d a, __m256d b, __m256d mask)
|
||||
{
|
||||
__m256d res;
|
||||
uint64x2_t t = uint64x2_t(mask.lo);
|
||||
res.lo[0] = (t[0] >> 63) ? b.lo[0] : a.lo[0];
|
||||
res.lo[1] = (t[1] >> 63) ? b.lo[1] : a.lo[1];
|
||||
t = uint64x2_t(mask.hi);
|
||||
res.hi[0] = (t[0] >> 63) ? b.hi[0] : a.hi[0];
|
||||
res.hi[1] = (t[1] >> 63) ? b.hi[1] : a.hi[1];
|
||||
return res;
|
||||
}
|
||||
|
||||
template<int imm8>
|
||||
__m256 __mm256_dp_ps (__m256 a, __m256 b)
|
||||
{
|
||||
__m256 res;
|
||||
res.lo = _mm_dp_ps(a.lo,b.lo,imm8);
|
||||
res.hi = _mm_dp_ps(a.hi,b.hi,imm8);
|
||||
return res;
|
||||
}
|
||||
|
||||
#define _mm256_dp_ps(a,b,c) __mm256_dp_ps<c>(a,b)
|
||||
|
||||
AVX2NEON_ABI
|
||||
double _mm256_permute4x64_pd_select(__m256d a, const int imm8)
|
||||
{
|
||||
switch (imm8 & 3) {
|
||||
case 0:
|
||||
return a.lo[0];
|
||||
case 1:
|
||||
return a.lo[1];
|
||||
case 2:
|
||||
return a.hi[0];
|
||||
case 3:
|
||||
return a.hi[1];
|
||||
}
|
||||
__builtin_unreachable();
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256d _mm256_permute4x64_pd (__m256d a, const int imm8)
|
||||
{
|
||||
__m256d res;
|
||||
res.lo[0] = _mm256_permute4x64_pd_select(a,imm8 >> 0);
|
||||
res.lo[1] = _mm256_permute4x64_pd_select(a,imm8 >> 2);
|
||||
res.hi[0] = _mm256_permute4x64_pd_select(a,imm8 >> 4);
|
||||
res.hi[1] = _mm256_permute4x64_pd_select(a,imm8 >> 6);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8)
|
||||
{
|
||||
return __m256i(_mm256_insertf128_ps((__m256)a,(__m128)b,imm8));
|
||||
}
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_loadu_si256 (__m256i const * mem_addr)
|
||||
{
|
||||
__m256i res;
|
||||
res.lo = *(__m128i *)((int32_t *)mem_addr + 0);
|
||||
res.hi = *(__m128i *)((int32_t *)mem_addr + 4);
|
||||
return res;
|
||||
}
|
||||
|
||||
#define _mm256_load_si256 _mm256_loadu_si256
|
||||
|
||||
AVX2NEON_ABI
|
||||
void _mm256_storeu_ps (float * mem_addr, __m256 a)
|
||||
{
|
||||
*(__m128 *)(mem_addr + 0) = a.lo;
|
||||
*(__m128 *)(mem_addr + 4) = a.hi;
|
||||
|
||||
}
|
||||
|
||||
#define _mm256_store_ps _mm256_storeu_ps
|
||||
#define _mm256_stream_ps _mm256_storeu_ps
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a)
|
||||
{
|
||||
*(__m128i *)((int *)mem_addr + 0) = a.lo;
|
||||
*(__m128i *)((int *)mem_addr + 4) = a.hi;
|
||||
|
||||
}
|
||||
|
||||
#define _mm256_store_si256 _mm256_storeu_si256
|
||||
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256 _mm256_maskload_ps (float const * mem_addr, __m256i mask)
|
||||
{
|
||||
__m256 res;
|
||||
res.lo = _mm_maskload_ps(mem_addr,mask.lo);
|
||||
res.hi = _mm_maskload_ps(mem_addr + 4,mask.hi);
|
||||
return res;
|
||||
|
||||
}
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_cvtepu8_epi32 (__m128i a)
|
||||
{
|
||||
__m256i res;
|
||||
uint8x16_t x = uint8x16_t(a);
|
||||
for (int i=0;i<4;i++)
|
||||
{
|
||||
res.lo[i] = x[i];
|
||||
res.hi[i] = x[i+4];
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_cvtepi8_epi32 (__m128i a)
|
||||
{
|
||||
__m256i res;
|
||||
int8x16_t x = int8x16_t(a);
|
||||
for (int i=0;i<4;i++)
|
||||
{
|
||||
res.lo[i] = x[i];
|
||||
res.hi[i] = x[i+4];
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
AVX2NEON_ABI
|
||||
__m256i _mm256_cvtepu16_epi32 (__m128i a)
{
    __m256i res;
    uint16x8_t x = uint16x8_t(a);
    for (int i=0;i<4;i++)
    {
        res.lo[i] = x[i];
        res.hi[i] = x[i+4];
    }
    return res;
}

AVX2NEON_ABI
__m256i _mm256_cvtepi16_epi32 (__m128i a)
{
    __m256i res;
    int16x8_t x = int16x8_t(a);
    for (int i=0;i<4;i++)
    {
        res.lo[i] = x[i];
        res.hi[i] = x[i+4];
    }
    return res;
}

AVX2NEON_ABI
void _mm256_maskstore_epi32 (int* mem_addr, __m256i mask, __m256i a)
{
    _mm_maskstore_epi32(mem_addr,mask.lo,a.lo);
    _mm_maskstore_epi32(mem_addr + 4,mask.hi,a.hi);
}

AVX2NEON_ABI
__m256i _mm256_slli_epi32 (__m256i a, int imm8)
{
    __m256i res;
    res.lo = _mm_slli_epi32(a.lo,imm8);
    res.hi = _mm_slli_epi32(a.hi,imm8);
    return res;
}

AVX2NEON_ABI
__m256i _mm256_srli_epi32 (__m256i a, int imm8)
{
    __m256i res;
    res.lo = _mm_srli_epi32(a.lo,imm8);
    res.hi = _mm_srli_epi32(a.hi,imm8);
    return res;
}

AVX2NEON_ABI
__m256i _mm256_srai_epi32 (__m256i a, int imm8)
{
    __m256i res;
    res.lo = _mm_srai_epi32(a.lo,imm8);
    res.hi = _mm_srai_epi32(a.hi,imm8);
    return res;
}

AVX2NEON_ABI
__m256i _mm256_sllv_epi32 (__m256i a, __m256i count)
{
    __m256i res;
    res.lo = vshlq_s32(a.lo,count.lo);
    res.hi = vshlq_s32(a.hi,count.hi);
    return res;
}

AVX2NEON_ABI
__m256i _mm256_srav_epi32 (__m256i a, __m256i count)
{
    __m256i res;
    res.lo = vshlq_s32(a.lo,vnegq_s32(count.lo));
    res.hi = vshlq_s32(a.hi,vnegq_s32(count.hi));
    return res;
}

AVX2NEON_ABI
__m256i _mm256_srlv_epi32 (__m256i a, __m256i count)
{
    __m256i res;
    res.lo = __m128i(vshlq_u32(uint32x4_t(a.lo),vnegq_s32(count.lo)));
    res.hi = __m128i(vshlq_u32(uint32x4_t(a.hi),vnegq_s32(count.hi)));
    return res;
}

AVX2NEON_ABI
__m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8)
{
    return __m256i(_mm256_permute2f128_ps(__m256(a),__m256(b),imm8));
}

AVX2NEON_ABI
__m128i _mm256_extractf128_si256 (__m256i a, const int imm8)
{
    if (imm8 & 1) return a.hi;
    return a.lo;
}

AVX2NEON_ABI
__m256 _mm256_set1_ps(float x)
{
    __m256 res;
    res.lo = res.hi = vdupq_n_f32(x);
    return res;
}

AVX2NEON_ABI
__m256 _mm256_set_ps (float e7, float e6, float e5, float e4, float e3, float e2, float e1, float e0)
{
    __m256 res;
    res.lo = _mm_set_ps(e3,e2,e1,e0);
    res.hi = _mm_set_ps(e7,e6,e5,e4);
    return res;
}

AVX2NEON_ABI
__m256 _mm256_broadcast_ps (__m128 const * mem_addr)
{
    __m256 res;
    res.lo = res.hi = *mem_addr;
    return res;
}

AVX2NEON_ABI
__m256 _mm256_cvtepi32_ps (__m256i a)
{
    __m256 res;
    res.lo = _mm_cvtepi32_ps(a.lo);
    res.hi = _mm_cvtepi32_ps(a.hi);
    return res;
}

AVX2NEON_ABI
void _mm256_maskstore_ps (float * mem_addr, __m256i mask, __m256 a)
{
    for (int i=0;i<4;i++) {
        if (mask.lo[i] & 0x80000000) mem_addr[i] = a.lo[i];
        if (mask.hi[i] & 0x80000000) mem_addr[i+4] = a.hi[i];
    }
}

AVX2NEON_ABI
__m256d _mm256_andnot_pd (__m256d a, __m256d b)
{
    __m256d res;
    res.lo = float64x2_t(_mm_andnot_ps(__m128(a.lo),__m128(b.lo)));
    res.hi = float64x2_t(_mm_andnot_ps(__m128(a.hi),__m128(b.hi)));
    return res;
}

AVX2NEON_ABI
__m256 _mm256_blend_ps (__m256 a, __m256 b, const int imm8)
{
    __m256 res;
    res.lo = _mm_blend_ps(a.lo,b.lo,imm8 & 0xf);
    res.hi = _mm_blend_ps(a.hi,b.hi,imm8 >> 4);
    return res;
}

AVX2NEON_ABI
__m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8)
{
    __m256i res;
    res.lo = _mm_blend_epi32(a.lo,b.lo,imm8 & 0xf);
    res.hi = _mm_blend_epi32(a.hi,b.hi,imm8 >> 4);
    return res;
}

AVX2NEON_ABI
__m256i _mm256_i32gather_epi32 (int const* base_addr, __m256i vindex, const int scale)
{
    __m256i res;
    for (int i=0;i<4;i++)
    {
        res.lo[i] = *(int *)((char *) base_addr + (vindex.lo[i]*scale));
        res.hi[i] = *(int *)((char *) base_addr + (vindex.hi[i]*scale));
    }
    return res;
}

AVX2NEON_ABI
__m256i _mm256_mask_i32gather_epi32 (__m256i src, int const* base_addr, __m256i vindex, __m256i mask, const int scale)
{
    __m256i res = _mm256_setzero_si256();
    for (int i=0;i<4;i++)
    {
        if (mask.lo[i] >> 31) res.lo[i] = *(int *)((char *) base_addr + (vindex.lo[i]*scale));
        if (mask.hi[i] >> 31) res.hi[i] = *(int *)((char *) base_addr + (vindex.hi[i]*scale));
    }
    return res;
}
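Every 256-bit helper above follows the same pattern: the emulated __m256/__m256i type stores two 128-bit halves (lo and hi), and each wide operation is just the corresponding narrow operation applied to both halves. A minimal standalone sketch of that idea, using illustrative array types rather than the emulation layer's own:

    #include <array>
    #include <cstdint>
    #include <iostream>

    // Illustrative stand-ins: a "256-bit" vector is two 128-bit halves,
    // so every wide op becomes two narrow ops.
    struct Vec128i { std::array<int32_t, 4> lane{}; };
    struct Vec256i { Vec128i lo, hi; };

    // Narrow (128-bit) shift-left, applied lane by lane.
    static Vec128i shift_left_128(const Vec128i& a, int imm) {
        Vec128i r;
        for (int i = 0; i < 4; ++i) r.lane[i] = a.lane[i] << imm;
        return r;
    }

    // Wide (256-bit) shift-left is the narrow op on both halves,
    // mirroring the _mm256_slli_epi32 emulation above.
    static Vec256i shift_left_256(const Vec256i& a, int imm) {
        Vec256i r;
        r.lo = shift_left_128(a.lo, imm);
        r.hi = shift_left_128(a.hi, imm);
        return r;
    }

    int main() {
        Vec256i v;
        for (int i = 0; i < 4; ++i) { v.lo.lane[i] = i + 1; v.hi.lane[i] = i + 5; }
        Vec256i s = shift_left_256(v, 2);
        for (int i = 0; i < 4; ++i) std::cout << s.lo.lane[i] << ' ';
        for (int i = 0; i < 4; ++i) std::cout << s.hi.lane[i] << ' ';
        std::cout << '\n';  // prints 4 8 12 16 20 24 28 32
        return 0;
    }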

thirdparty/embree/common/math/SSE2NEON.h (vendored, 1753 changes)
File diff suppressed because it is too large.

thirdparty/embree/common/math/affinespace.h (vendored, 2 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

thirdparty/embree/common/math/bbox.h (vendored, 8 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -77,7 +77,7 @@ namespace embree
return lower > upper;
}

#if defined(__SSE__) || defined(__ARM_NEON)
#if defined(__SSE__)
template<> __forceinline bool BBox<Vec3fa>::empty() const {
return !all(le_mask(lower,upper));
}

@@ -228,11 +228,11 @@ namespace embree
/// SSE / AVX / MIC specializations
////////////////////////////////////////////////////////////////////////////////

#if defined (__SSE__) || defined(__ARM_NEON)
#if defined __SSE__
#include "../simd/sse.h"
#endif

#if defined (__AVX__)
#if defined __AVX__
#include "../simd/avx.h"
#endif

thirdparty/embree/common/math/col3.h (vendored, 4 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -42,6 +42,6 @@ namespace embree
}

/*! default template instantiations */
typedef Col3<uint8_t > Col3uc;
typedef Col3<unsigned char> Col3uc;
typedef Col3<float > Col3f;
}

thirdparty/embree/common/math/col4.h (vendored, 4 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -42,6 +42,6 @@ namespace embree
}

/*! default template instantiations */
typedef Col4<uint8_t > Col4uc;
typedef Col4<unsigned char> Col4uc;
typedef Col4<float > Col4f;
}

thirdparty/embree/common/math/color.h (vendored, 44 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -52,17 +52,17 @@ namespace embree
__forceinline void set(Col3uc& d) const
{
vfloat4 s = clamp(vfloat4(m128))*255.0f;
d.r = (uint8_t)(s[0]);
d.g = (uint8_t)(s[1]);
d.b = (uint8_t)(s[2]);
d.r = (unsigned char)(s[0]);
d.g = (unsigned char)(s[1]);
d.b = (unsigned char)(s[2]);
}
__forceinline void set(Col4uc& d) const
{
vfloat4 s = clamp(vfloat4(m128))*255.0f;
d.r = (uint8_t)(s[0]);
d.g = (uint8_t)(s[1]);
d.b = (uint8_t)(s[2]);
d.a = (uint8_t)(s[3]);
d.r = (unsigned char)(s[0]);
d.g = (unsigned char)(s[1]);
d.b = (unsigned char)(s[2]);
d.a = (unsigned char)(s[3]);
}

////////////////////////////////////////////////////////////////////////////////

@@ -114,16 +114,16 @@ namespace embree
__forceinline void set(Col3uc& d) const
{
vfloat4 s = clamp(vfloat4(m128))*255.0f;
d.r = (uint8_t)(s[0]);
d.g = (uint8_t)(s[1]);
d.b = (uint8_t)(s[2]);
d.r = (unsigned char)(s[0]);
d.g = (unsigned char)(s[1]);
d.b = (unsigned char)(s[2]);
}
__forceinline void set(Col4uc& d) const
{
vfloat4 s = clamp(vfloat4(m128))*255.0f;
d.r = (uint8_t)(s[0]);
d.g = (uint8_t)(s[1]);
d.b = (uint8_t)(s[2]);
d.r = (unsigned char)(s[0]);
d.g = (unsigned char)(s[1]);
d.b = (unsigned char)(s[2]);
d.a = 255;
}

@@ -152,37 +152,21 @@ namespace embree
}
__forceinline const Color rcp ( const Color& a )
{
#if defined(__aarch64__) && defined(BUILD_IOS)
__m128 reciprocal = _mm_rcp_ps(a.m128);
reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
return (const Color)reciprocal;
#else
#if defined(__AVX512VL__)
const Color r = _mm_rcp14_ps(a.m128);
#else
const Color r = _mm_rcp_ps(a.m128);
#endif
return _mm_sub_ps(_mm_add_ps(r, r), _mm_mul_ps(_mm_mul_ps(r, r), a));
#endif //defined(__aarch64__) && defined(BUILD_IOS)
}
__forceinline const Color rsqrt( const Color& a )
{
#if defined(__aarch64__) && defined(BUILD_IOS)
__m128 r = _mm_rsqrt_ps(a.m128);
r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
return r;
#else

#if defined(__AVX512VL__)
__m128 r = _mm_rsqrt14_ps(a.m128);
#else
__m128 r = _mm_rsqrt_ps(a.m128);
#endif
return _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f),r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));

#endif //defined(__aarch64__) && defined(BUILD_IOS)
}
__forceinline const Color sqrt ( const Color& a ) { return _mm_sqrt_ps(a.m128); }
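Both the removed aarch64/iOS path (vrecpsq_f32 / vrsqrtsq_f32) and the retained SSE path refine a rough hardware estimate with Newton-Raphson steps; for the reciprocal the step is r' = r*(2 - a*r), which the SSE line spells as 2r - r*r*a. A scalar sketch of that refinement, for illustration only (not code from the library):

    #include <cstdio>

    // One Newton-Raphson step for 1/a: given an estimate r ~= 1/a,
    // r * (2 - a*r) is a better estimate (the error is roughly squared).
    static float refine_rcp(float a, float r) {
        return r * (2.0f - a * r);
    }

    int main() {
        float a = 3.0f;
        float r = 0.3f;             // crude initial estimate of 1/3
        r = refine_rcp(a, r);       // ~0.333
        r = refine_rcp(a, r);       // ~0.33333
        std::printf("%.7f (exact %.7f)\n", r, 1.0f / a);
        return 0;
    }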

thirdparty/embree/common/math/constants.cpp (vendored, 36 changes)
@@ -1,10 +1,6 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#if defined(__aarch64__)
#include <arm_neon.h>
#endif

#include "constants.h"

namespace embree

@@ -28,34 +24,4 @@ namespace embree
ReverseStepTy reverse_step;
EmptyTy empty;
UndefinedTy undefined;

#if defined(__aarch64__)
const uint32x4_t movemask_mask = { 1, 2, 4, 8 };
const uint32x4_t vzero = { 0, 0, 0, 0 };
const uint32x4_t v0x80000000 = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
const uint32x4_t v0x7fffffff = { 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff };
const uint32x4_t v000F = { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF };
const uint32x4_t v00F0 = { 0x00000000, 0x00000000, 0xFFFFFFFF, 0x00000000 };
const uint32x4_t v00FF = { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF };
const uint32x4_t v0F00 = { 0x00000000, 0xFFFFFFFF, 0x00000000, 0x00000000 };
const uint32x4_t v0F0F = { 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF };
const uint32x4_t v0FF0 = { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 };
const uint32x4_t v0FFF = { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
const uint32x4_t vF000 = { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 };
const uint32x4_t vF00F = { 0xFFFFFFFF, 0x00000000, 0x00000000, 0xFFFFFFFF };
const uint32x4_t vF0F0 = { 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000 };
const uint32x4_t vF0FF = { 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF };
const uint32x4_t vFF00 = { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 };
const uint32x4_t vFF0F = { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF };
const uint32x4_t vFFF0 = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 };
const uint32x4_t vFFFF = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
const uint8x16_t v0022 = {0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11};
const uint8x16_t v1133 = {4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15};
const uint8x16_t v0101 = {0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7};
const float32x4_t vOne = { 1.0f, 1.0f, 1.0f, 1.0f };
const float32x4_t vmOne = { -1.0f, -1.0f, -1.0f, -1.0f };
const float32x4_t vInf = { INFINITY, INFINITY, INFINITY, INFINITY };
const float32x4_t vmInf = { -INFINITY, -INFINITY, -INFINITY, -INFINITY };
#endif

}

thirdparty/embree/common/math/constants.h (vendored, 60 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -12,19 +12,6 @@
#include <cfloat>
#include <climits>

// Math constants may not be defined in libcxx + mingw + strict C++ standard
#if defined(__MINGW32__)

// TODO(LTE): use constexpr
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#ifndef M_1_PI
#define M_1_PI 0.31830988618379067154
#endif

#endif // __MINGW32__

namespace embree
{
static MAYBE_UNUSED const float one_over_255 = 1.0f/255.0f;

@@ -57,8 +44,8 @@ namespace embree
__forceinline operator unsigned int ( ) const { return 0; }
__forceinline operator short ( ) const { return 0; }
__forceinline operator unsigned short ( ) const { return 0; }
__forceinline operator int8_t ( ) const { return 0; }
__forceinline operator uint8_t ( ) const { return 0; }
__forceinline operator char ( ) const { return 0; }
__forceinline operator unsigned char ( ) const { return 0; }
};

extern MAYBE_UNUSED ZeroTy zero;

@@ -75,8 +62,8 @@ namespace embree
__forceinline operator unsigned int ( ) const { return 1; }
__forceinline operator short ( ) const { return 1; }
__forceinline operator unsigned short ( ) const { return 1; }
__forceinline operator int8_t ( ) const { return 1; }
__forceinline operator uint8_t ( ) const { return 1; }
__forceinline operator char ( ) const { return 1; }
__forceinline operator unsigned char ( ) const { return 1; }
};

extern MAYBE_UNUSED OneTy one;

@@ -93,8 +80,8 @@ namespace embree
__forceinline operator unsigned int ( ) const { return std::numeric_limits<unsigned int>::min(); }
__forceinline operator short ( ) const { return std::numeric_limits<short>::min(); }
__forceinline operator unsigned short ( ) const { return std::numeric_limits<unsigned short>::min(); }
__forceinline operator int8_t ( ) const { return std::numeric_limits<int8_t>::min(); }
__forceinline operator uint8_t ( ) const { return std::numeric_limits<uint8_t>::min(); }
__forceinline operator char ( ) const { return std::numeric_limits<char>::min(); }
__forceinline operator unsigned char ( ) const { return std::numeric_limits<unsigned char>::min(); }

};

@@ -112,8 +99,8 @@ namespace embree
__forceinline operator unsigned int ( ) const { return std::numeric_limits<unsigned int>::max(); }
__forceinline operator short ( ) const { return std::numeric_limits<short>::max(); }
__forceinline operator unsigned short ( ) const { return std::numeric_limits<unsigned short>::max(); }
__forceinline operator int8_t ( ) const { return std::numeric_limits<int8_t>::max(); }
__forceinline operator uint8_t ( ) const { return std::numeric_limits<uint8_t>::max(); }
__forceinline operator char ( ) const { return std::numeric_limits<char>::max(); }
__forceinline operator unsigned char ( ) const { return std::numeric_limits<unsigned char>::max(); }
};

extern MAYBE_UNUSED PosInfTy inf;

@@ -207,33 +194,4 @@ namespace embree
};

extern MAYBE_UNUSED UndefinedTy undefined;

#if defined(__aarch64__)
extern const uint32x4_t movemask_mask;
extern const uint32x4_t vzero;
extern const uint32x4_t v0x80000000;
extern const uint32x4_t v0x7fffffff;
extern const uint32x4_t v000F;
extern const uint32x4_t v00F0;
extern const uint32x4_t v00FF;
extern const uint32x4_t v0F00;
extern const uint32x4_t v0F0F;
extern const uint32x4_t v0FF0;
extern const uint32x4_t v0FFF;
extern const uint32x4_t vF000;
extern const uint32x4_t vF00F;
extern const uint32x4_t vF0F0;
extern const uint32x4_t vF0FF;
extern const uint32x4_t vFF00;
extern const uint32x4_t vFF0F;
extern const uint32x4_t vFFF0;
extern const uint32x4_t vFFFF;
extern const uint8x16_t v0022;
extern const uint8x16_t v1133;
extern const uint8x16_t v0101;
extern const float32x4_t vOne;
extern const float32x4_t vmOne;
extern const float32x4_t vInf;
extern const float32x4_t vmInf;
#endif
}
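The ZeroTy/OneTy/NegInfTy/PosInfTy proxies above work by providing an implicit conversion to every numeric type, so a single named constant (zero, one, neg_inf, inf) can initialize or compare against any of them. A small self-contained sketch of the same idiom, with illustrative names rather than the library's:

    #include <cstdio>

    // Sketch of the proxy-constant idiom: a tag type with implicit conversions
    // lets one named constant initialize any numeric type. Names are illustrative.
    struct ZeroLike {
        operator float() const { return 0.0f; }
        operator int()   const { return 0; }
    };
    static const ZeroLike zero_like;

    int main() {
        float f = zero_like;   // picks operator float()
        int   i = zero_like;   // picks operator int()
        std::printf("%f %d\n", f, i);
        return 0;
    }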

thirdparty/embree/common/math/interval.h (vendored, 2 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

thirdparty/embree/common/math/lbbox.h (vendored, 2 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

thirdparty/embree/common/math/linearspace2.h (vendored, 2 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

thirdparty/embree/common/math/linearspace3.h (vendored, 2 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

thirdparty/embree/common/math/math.h (vendored, 140 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -9,18 +9,15 @@
#include <cmath>

#if defined(__ARM_NEON)
#include "SSE2NEON.h"
#if defined(NEON_AVX2_EMULATION)
#include "AVX2NEON.h"
#endif
#include "../simd/arm/emulation.h"
#else
#include <emmintrin.h>
#include <xmmintrin.h>
#include <immintrin.h>
#endif

#if defined(__WIN32__) && !defined(__MINGW32__)
#if (__MSV_VER <= 1700)
#if defined(__WIN32__)
#if defined(_MSC_VER) && (_MSC_VER <= 1700)
namespace std
{
__forceinline bool isinf ( const float x ) { return _finite(x) == 0; }

@@ -47,7 +44,7 @@ namespace embree
__forceinline int toInt (const float& a) { return int(a); }
__forceinline float toFloat(const int& a) { return float(a); }

#if defined(__WIN32__) && !defined(__MINGW32__)
#if defined(__WIN32__)
__forceinline bool finite ( const float x ) { return _finite(x) != 0; }
#endif

@@ -56,16 +53,6 @@ namespace embree

__forceinline float rcp ( const float x )
{
#if defined(__aarch64__)
// Move scalar to vector register and do rcp.
__m128 a;
a[0] = x;
float32x4_t reciprocal = vrecpeq_f32(a);
reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal);
reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal);
return reciprocal[0];
#else

const __m128 a = _mm_set_ss(x);

#if defined(__AVX512VL__)

@@ -79,74 +66,33 @@ namespace embree
#else
return _mm_cvtss_f32(_mm_mul_ss(r,_mm_sub_ss(_mm_set_ss(2.0f), _mm_mul_ss(r, a))));
#endif

#endif //defined(__aarch64__)
}

__forceinline float signmsk ( const float x ) {
#if defined(__aarch64__)
// FP and Neon shares same vector register in arm64
__m128 a;
__m128i b;
a[0] = x;
b[0] = 0x80000000;
a = _mm_and_ps(a, vreinterpretq_f32_s32(b));
return a[0];
#else
return _mm_cvtss_f32(_mm_and_ps(_mm_set_ss(x),_mm_castsi128_ps(_mm_set1_epi32(0x80000000))));
#endif
}
__forceinline float xorf( const float x, const float y ) {
#if defined(__aarch64__)
// FP and Neon shares same vector register in arm64
__m128 a;
__m128 b;
a[0] = x;
b[0] = y;
a = _mm_xor_ps(a, b);
return a[0];
#else
return _mm_cvtss_f32(_mm_xor_ps(_mm_set_ss(x),_mm_set_ss(y)));
#endif
}
__forceinline float andf( const float x, const unsigned y ) {
#if defined(__aarch64__)
// FP and Neon shares same vector register in arm64
__m128 a;
__m128i b;
a[0] = x;
b[0] = y;
a = _mm_and_ps(a, vreinterpretq_f32_s32(b));
return a[0];
#else
return _mm_cvtss_f32(_mm_and_ps(_mm_set_ss(x),_mm_castsi128_ps(_mm_set1_epi32(y))));
#endif
}
__forceinline float rsqrt( const float x )
{
#if defined(__aarch64__)
// FP and Neon shares same vector register in arm64
__m128 a;
a[0] = x;
__m128 value = _mm_rsqrt_ps(a);
value = vmulq_f32(value, vrsqrtsq_f32(vmulq_f32(a, value), value));
value = vmulq_f32(value, vrsqrtsq_f32(vmulq_f32(a, value), value));
return value[0];
#else

const __m128 a = _mm_set_ss(x);
#if defined(__AVX512VL__)
const __m128 r = _mm_rsqrt14_ss(_mm_set_ss(0.0f),a);
__m128 r = _mm_rsqrt14_ss(_mm_set_ss(0.0f),a);
#else
const __m128 r = _mm_rsqrt_ss(a);
__m128 r = _mm_rsqrt_ss(a);
#endif
const __m128 c = _mm_add_ss(_mm_mul_ss(_mm_set_ss(1.5f), r),
_mm_mul_ss(_mm_mul_ss(_mm_mul_ss(a, _mm_set_ss(-0.5f)), r), _mm_mul_ss(r, r)));
return _mm_cvtss_f32(c);
r = _mm_add_ss(_mm_mul_ss(_mm_set_ss(1.5f), r), _mm_mul_ss(_mm_mul_ss(_mm_mul_ss(a, _mm_set_ss(-0.5f)), r), _mm_mul_ss(r, r)));
#if defined(__ARM_NEON)
r = _mm_add_ss(_mm_mul_ss(_mm_set_ss(1.5f), r), _mm_mul_ss(_mm_mul_ss(_mm_mul_ss(a, _mm_set_ss(-0.5f)), r), _mm_mul_ss(r, r)));
#endif
return _mm_cvtss_f32(r);
}

#if defined(__WIN32__) && (__MSC_VER <= 1700) && !defined(__MINGW32__)
#if defined(__WIN32__) && defined(_MSC_VER) && (_MSC_VER <= 1700)
__forceinline float nextafter(float x, float y) { if ((x<y) == (x>0)) return x*(1.1f+float(ulp)); else return x*(0.9f-float(ulp)); }
__forceinline double nextafter(double x, double y) { return _nextafter(x, y); }
__forceinline int roundf(float f) { return (int)(f + 0.5f); }

@@ -200,17 +146,7 @@ namespace embree
__forceinline double floor( const double x ) { return ::floor (x); }
__forceinline double ceil ( const double x ) { return ::ceil (x); }

#if defined(__aarch64__)
__forceinline float mini(float a, float b) {
// FP and Neon shares same vector register in arm64
__m128 x;
__m128 y;
x[0] = a;
y[0] = b;
x = _mm_min_ps(x, y);
return x[0];
}
#elif defined(__SSE4_1__)
#if defined(__SSE4_1__)
__forceinline float mini(float a, float b) {
const __m128i ai = _mm_castps_si128(_mm_set_ss(a));
const __m128i bi = _mm_castps_si128(_mm_set_ss(b));

@@ -219,17 +155,7 @@ namespace embree
}
#endif

#if defined(__aarch64__)
__forceinline float maxi(float a, float b) {
// FP and Neon shares same vector register in arm64
__m128 x;
__m128 y;
x[0] = a;
y[0] = b;
x = _mm_max_ps(x, y);
return x[0];
}
#elif defined(__SSE4_1__)
#if defined(__SSE4_1__)
__forceinline float maxi(float a, float b) {
const __m128i ai = _mm_castps_si128(_mm_set_ss(a));
const __m128i bi = _mm_castps_si128(_mm_set_ss(b));

@@ -246,7 +172,7 @@ namespace embree
__forceinline int64_t min(int64_t a, int64_t b) { return a<b ? a:b; }
__forceinline float min(float a, float b) { return a<b ? a:b; }
__forceinline double min(double a, double b) { return a<b ? a:b; }
#if defined(__X86_64__) || defined(__aarch64__)
#if defined(__64BIT__)
__forceinline size_t min(size_t a, size_t b) { return a<b ? a:b; }
#endif

@@ -263,7 +189,7 @@ namespace embree
__forceinline int64_t max(int64_t a, int64_t b) { return a<b ? b:a; }
__forceinline float max(float a, float b) { return a<b ? b:a; }
__forceinline double max(double a, double b) { return a<b ? b:a; }
#if defined(__X86_64__) || defined(__aarch64__)
#if defined(__64BIT__)
__forceinline size_t max(size_t a, size_t b) { return a<b ? b:a; }
#endif

@@ -305,16 +231,6 @@ namespace embree
__forceinline float msub ( const float a, const float b, const float c) { return _mm_cvtss_f32(_mm_fmsub_ss(_mm_set_ss(a),_mm_set_ss(b),_mm_set_ss(c))); }
__forceinline float nmadd ( const float a, const float b, const float c) { return _mm_cvtss_f32(_mm_fnmadd_ss(_mm_set_ss(a),_mm_set_ss(b),_mm_set_ss(c))); }
__forceinline float nmsub ( const float a, const float b, const float c) { return _mm_cvtss_f32(_mm_fnmsub_ss(_mm_set_ss(a),_mm_set_ss(b),_mm_set_ss(c))); }
#elif defined (__aarch64__) && defined(__clang__)
#pragma clang fp contract(fast)

__forceinline float madd ( const float a, const float b, const float c) { return a*b + c; }
__forceinline float msub ( const float a, const float b, const float c) { return a*b - c; }
__forceinline float nmadd ( const float a, const float b, const float c) { return c - a*b; }
__forceinline float nmsub ( const float a, const float b, const float c) { return -(c + a*b); }

#pragma clang fp contract(on)
#else
__forceinline float madd ( const float a, const float b, const float c) { return a*b+c; }
__forceinline float msub ( const float a, const float b, const float c) { return a*b-c; }

@@ -363,15 +279,17 @@ __forceinline float nmsub ( const float a, const float b, const float c) { retur
/*! exchange */
template<typename T> __forceinline void xchg ( T& a, T& b ) { const T tmp = a; a = b; b = tmp; }

template<typename T> __forceinline T prod_diff(const T& a,const T& b,const T& c,const T& d) {
#if 1//!defined(__aarch64__)
return msub(a,b,c*d);
#else
return nmadd(c,d,a*b);
#endif
}

/* load/store */
template<typename Ty> struct mem;

template<> struct mem<float> {
static __forceinline float load (bool mask, const void* ptr) { return mask ? *(float*)ptr : 0.0f; }
static __forceinline float loadu(bool mask, const void* ptr) { return mask ? *(float*)ptr : 0.0f; }

static __forceinline void store (bool mask, void* ptr, const float v) { if (mask) *(float*)ptr = v; }
static __forceinline void storeu(bool mask, void* ptr, const float v) { if (mask) *(float*)ptr = v; }
};

/*! bit reverse operation */
template<class T>
__forceinline T bitReverse(const T& vin)

@@ -389,7 +307,7 @@ __forceinline float nmsub ( const float a, const float b, const float c) { retur
template<class T>
__forceinline T bitInterleave(const T& xin, const T& yin, const T& zin)
{
T x = xin, y = yin, z = zin;
T x = xin, y = yin, z = zin;
x = (x | (x << 16)) & 0x030000FF;
x = (x | (x << 8)) & 0x0300F00F;
x = (x | (x << 4)) & 0x030C30C3;

@@ -408,7 +326,7 @@ __forceinline float nmsub ( const float a, const float b, const float c) { retur
return x | (y << 1) | (z << 2);
}

#if defined(__AVX2__) && !defined(__aarch64__)
#if defined(__AVX2__)

template<>
__forceinline unsigned int bitInterleave(const unsigned int &xi, const unsigned int& yi, const unsigned int& zi)
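bitInterleave spreads the low 10 bits of each coordinate apart so that x | (y << 1) | (z << 2) forms a 30-bit Morton code. Below is a hypothetical standalone version of that bit spreading: the first three masks are the ones visible in the hunk, while the final (x | (x << 2)) & 0x09249249 step is the standard completion of the pattern and is assumed here, since the hunk is truncated before it.

    #include <cstdint>
    #include <cstdio>

    // Spread the low 10 bits of v so there are two zero bits between each bit.
    static uint32_t spread3(uint32_t v) {
        v = (v | (v << 16)) & 0x030000FF;
        v = (v | (v <<  8)) & 0x0300F00F;
        v = (v | (v <<  4)) & 0x030C30C3;
        v = (v | (v <<  2)) & 0x09249249;  // assumed final step, see note above
        return v;
    }

    // 30-bit Morton code: interleave the spread coordinates.
    static uint32_t morton3(uint32_t x, uint32_t y, uint32_t z) {
        return spread3(x) | (spread3(y) << 1) | (spread3(z) << 2);
    }

    int main() {
        std::printf("0x%x\n", morton3(1, 1, 1)); // bits 0,1,2 -> 0x7
        std::printf("0x%x\n", morton3(3, 0, 0)); // bits 0 and 3 -> 0x9
        return 0;
    }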

thirdparty/embree/common/math/obbox.h (vendored, 2 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

thirdparty/embree/common/math/quaternion.h (vendored, 2 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

thirdparty/embree/common/math/range.h (vendored, 2 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

thirdparty/embree/common/math/transcendental.h (vendored)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -418,7 +418,7 @@ __forceinline void __rangeReduceLog(const T &input,
}

template <typename T> struct ExponentType { };
template <int N> struct ExponentType<vfloat<N>> { typedef vint<N> Ty; };
template <int N> struct ExponentType<vfloat_impl<N>> { typedef vint<N> Ty; };
template <> struct ExponentType<float> { typedef int Ty; };

template <typename T>

thirdparty/embree/common/math/vec2.h (vendored, 8 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -205,11 +205,11 @@ namespace embree

#include "vec2fa.h"

#if defined(__SSE__) || defined(__ARM_NEON)
#if defined __SSE__
#include "../simd/sse.h"
#endif

#if defined(__AVX__)
#if defined __AVX__
#include "../simd/avx.h"
#endif

@@ -221,7 +221,7 @@ namespace embree
{
template<> __forceinline Vec2<float>::Vec2(const Vec2fa& a) : x(a.x), y(a.y) {}

#if defined(__SSE__) || defined(__ARM_NEON)
#if defined(__SSE__)
template<> __forceinline Vec2<vfloat4>::Vec2(const Vec2fa& a) : x(a.x), y(a.y) {}
#endif

thirdparty/embree/common/math/vec2fa.h (vendored, 28 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -97,12 +97,6 @@ namespace embree

__forceinline Vec2fa rcp ( const Vec2fa& a )
{
#if defined(__aarch64__)
__m128 reciprocal = _mm_rcp_ps(a.m128);
reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
return (const Vec2fa)reciprocal;
#else
#if defined(__AVX512VL__)
const Vec2fa r = _mm_rcp14_ps(a.m128);
#else

@@ -117,7 +111,6 @@ namespace embree
#endif

return res;
#endif //defined(__aarch64__)
}

__forceinline Vec2fa sqrt ( const Vec2fa& a ) { return _mm_sqrt_ps(a.m128); }

@@ -125,21 +118,12 @@ namespace embree

__forceinline Vec2fa rsqrt( const Vec2fa& a )
{
#if defined(__aarch64__)
__m128 r = _mm_rsqrt_ps(a.m128);
r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
return r;
#else

#if defined(__AVX512VL__)
__m128 r = _mm_rsqrt14_ps(a.m128);
#else
__m128 r = _mm_rsqrt_ps(a.m128);
#endif
return _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f),r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));

#endif
}

__forceinline Vec2fa zero_fix(const Vec2fa& a) {

@@ -172,7 +156,7 @@ namespace embree
__forceinline Vec2fa min( const Vec2fa& a, const Vec2fa& b ) { return _mm_min_ps(a.m128,b.m128); }
__forceinline Vec2fa max( const Vec2fa& a, const Vec2fa& b ) { return _mm_max_ps(a.m128,b.m128); }

#if defined(__aarch64__) || defined(__SSE4_1__)
#if defined(__SSE4_1__)
__forceinline Vec2fa mini(const Vec2fa& a, const Vec2fa& b) {
const vint4 ai = _mm_castps_si128(a);
const vint4 bi = _mm_castps_si128(b);

@@ -181,7 +165,7 @@ namespace embree
}
#endif

#if defined(__aarch64__) || defined(__SSE4_1__)
#if defined(__SSE4_1__)
__forceinline Vec2fa maxi(const Vec2fa& a, const Vec2fa& b) {
const vint4 ai = _mm_castps_si128(a);
const vint4 bi = _mm_castps_si128(b);

@@ -292,9 +276,9 @@ namespace embree
////////////////////////////////////////////////////////////////////////////////

#if defined(__aarch64__)
__forceinline Vec2fa floor(const Vec2fa& a) { return vrndmq_f32(a); }
__forceinline Vec2fa ceil (const Vec2fa& a) { return vrndpq_f32(a); }
//__forceinline Vec2fa trunc(const Vec2fa& a) { return vrndq_f32(a); }
//__forceinline Vec2fa trunc(const Vec2fa& a) { return vrndq_f32(a); }
__forceinline Vec2fa floor(const Vec2fa& a) { return vrndmq_f32(a); }
__forceinline Vec2fa ceil (const Vec2fa& a) { return vrndpq_f32(a); }
#elif defined (__SSE4_1__)
//__forceinline Vec2fa trunc( const Vec2fa& a ) { return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT); }
__forceinline Vec2fa floor( const Vec2fa& a ) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF ); }

thirdparty/embree/common/math/vec3.h (vendored, 26 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -206,7 +206,8 @@ namespace embree
template<typename T> __forceinline T rcp_length( const Vec3<T>& a ) { return rsqrt(sqr(a)); }
template<typename T> __forceinline Vec3<T> normalize( const Vec3<T>& a ) { return a*rsqrt(sqr(a)); }
template<typename T> __forceinline T distance ( const Vec3<T>& a, const Vec3<T>& b ) { return length(a-b); }
template<typename T> __forceinline Vec3<T> cross ( const Vec3<T>& a, const Vec3<T>& b ) { return Vec3<T>(prod_diff(a.y,b.z,a.z,b.y), prod_diff(a.z,b.x,a.x,b.z), prod_diff(a.x,b.y,a.y,b.x)); }
template<typename T> __forceinline Vec3<T> cross ( const Vec3<T>& a, const Vec3<T>& b ) { return Vec3<T>(msub(a.y,b.z,a.z*b.y), msub(a.z,b.x,a.x*b.z), msub(a.x,b.y,a.y*b.x)); }

template<typename T> __forceinline Vec3<T> stable_triangle_normal( const Vec3<T>& a, const Vec3<T>& b, const Vec3<T>& c )
{
const T ab_x = a.z*b.y, ab_y = a.x*b.z, ab_z = a.y*b.x;

@@ -265,11 +266,11 @@ namespace embree
/// SSE / AVX / MIC specializations
////////////////////////////////////////////////////////////////////////////////

#if defined(__SSE__) || defined(__ARM_NEON)
#if defined __SSE__
#include "../simd/sse.h"
#endif

#if defined(__AVX__)
#if defined __AVX__
#include "../simd/avx.h"
#endif

@@ -290,18 +291,14 @@ namespace embree
template<> __forceinline Vec3<vfloat4>::Vec3(const Vec3fa& a) {
x = a.x; y = a.y; z = a.z;
}
#elif defined(__SSE__) || defined(__ARM_NEON)
#elif defined(__SSE__)
template<>
__forceinline Vec3<vfloat4>::Vec3(const Vec3fa& a) {
const vfloat4 v = vfloat4(a.m128); x = shuffle<0,0,0,0>(v); y = shuffle<1,1,1,1>(v); z = shuffle<2,2,2,2>(v);
}
#endif

#if defined(__SSE__) || defined(__ARM_NEON)
__forceinline Vec3<vfloat4> broadcast4f(const Vec3<vfloat4>& a, const size_t k) {
return Vec3<vfloat4>(vfloat4::broadcast(&a.x[k]), vfloat4::broadcast(&a.y[k]), vfloat4::broadcast(&a.z[k]));
}

#if defined(__SSE__)
template<>
__forceinline Vec3<vfloat4> broadcast<vfloat4,vfloat4>(const Vec3<vfloat4>& a, const size_t k) {
return Vec3<vfloat4>(vfloat4::broadcast(&a.x[k]), vfloat4::broadcast(&a.y[k]), vfloat4::broadcast(&a.z[k]));

@@ -318,15 +315,6 @@ namespace embree
__forceinline Vec3<vfloat8>::Vec3(const Vec3fa& a) {
x = a.x; y = a.y; z = a.z;
}
__forceinline Vec3<vfloat4> broadcast4f(const Vec3<vfloat8>& a, const size_t k) {
return Vec3<vfloat4>(vfloat4::broadcast(&a.x[k]), vfloat4::broadcast(&a.y[k]), vfloat4::broadcast(&a.z[k]));
}
__forceinline Vec3<vfloat8> broadcast8f(const Vec3<vfloat4>& a, const size_t k) {
return Vec3<vfloat8>(vfloat8::broadcast(&a.x[k]), vfloat8::broadcast(&a.y[k]), vfloat8::broadcast(&a.z[k]));
}
__forceinline Vec3<vfloat8> broadcast8f(const Vec3<vfloat8>& a, const size_t k) {
return Vec3<vfloat8>(vfloat8::broadcast(&a.x[k]), vfloat8::broadcast(&a.y[k]), vfloat8::broadcast(&a.z[k]));
}

template<>
__forceinline Vec3<vfloat8> broadcast<vfloat8,vfloat4>(const Vec3<vfloat4>& a, const size_t k) {

thirdparty/embree/common/math/vec3ba.h (vendored, 2 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

thirdparty/embree/common/math/vec3fa.h (vendored, 129 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -55,13 +55,7 @@ namespace embree
////////////////////////////////////////////////////////////////////////////////

static __forceinline Vec3fa load( const void* const a ) {
#if defined(__aarch64__)
__m128 t = _mm_load_ps((float*)a);
t[3] = 0.0f;
return Vec3fa(t);
#else
return Vec3fa(_mm_and_ps(_mm_load_ps((float*)a),_mm_castsi128_ps(_mm_set_epi32(0, -1, -1, -1))));
#endif
}

static __forceinline Vec3fa loadu( const void* const a ) {

@@ -95,42 +89,19 @@ namespace embree

__forceinline Vec3fa operator +( const Vec3fa& a ) { return a; }
__forceinline Vec3fa operator -( const Vec3fa& a ) {
#if defined(__aarch64__)
return vnegq_f32(a.m128);
#else
const __m128 mask = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));

return _mm_xor_ps(a.m128, mask);
#endif
}
__forceinline Vec3fa abs ( const Vec3fa& a ) {
#if defined(__aarch64__)
return _mm_abs_ps(a.m128);
#else
const __m128 mask = _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff));
return _mm_and_ps(a.m128, mask);
#endif
}
__forceinline Vec3fa sign ( const Vec3fa& a ) {
#if defined(__aarch64__)
Vec3fa r = blendv_ps(vOne, vmOne, _mm_cmplt_ps (a.m128,vdupq_n_f32(0.0f)));
return r;
#else
return blendv_ps(Vec3fa(one).m128, (-Vec3fa(one)).m128, _mm_cmplt_ps (a.m128,Vec3fa(zero).m128));
#endif
}

__forceinline Vec3fa rcp ( const Vec3fa& a )
{
#if defined(__aarch64__) && defined(BUILD_IOS)
return vdivq_f32(vdupq_n_f32(1.0f),a.m128);
#elif defined(__aarch64__)
__m128 reciprocal = _mm_rcp_ps(a.m128);
reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal);
return (const Vec3fa)reciprocal;
#else

#if defined(__AVX512VL__)
const Vec3fa r = _mm_rcp14_ps(a.m128);
#else

@@ -145,7 +116,6 @@ namespace embree
#endif

return res;
#endif //defined(__aarch64__)
}

__forceinline Vec3fa sqrt ( const Vec3fa& a ) { return _mm_sqrt_ps(a.m128); }

@@ -153,20 +123,12 @@ namespace embree

__forceinline Vec3fa rsqrt( const Vec3fa& a )
{
#if defined(__aarch64__)
__m128 r = _mm_rsqrt_ps(a.m128);
r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r));
return r;
#else

#if defined(__AVX512VL__)
__m128 r = _mm_rsqrt14_ps(a.m128);
#else
__m128 r = _mm_rsqrt_ps(a.m128);
#endif
return _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f),r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a.m128, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
#endif
}

__forceinline Vec3fa zero_fix(const Vec3fa& a) {

@@ -199,7 +161,7 @@ namespace embree
__forceinline Vec3fa min( const Vec3fa& a, const Vec3fa& b ) { return _mm_min_ps(a.m128,b.m128); }
__forceinline Vec3fa max( const Vec3fa& a, const Vec3fa& b ) { return _mm_max_ps(a.m128,b.m128); }

#if defined(__aarch64__) || defined(__SSE4_1__)
#if defined(__SSE4_1__)
__forceinline Vec3fa mini(const Vec3fa& a, const Vec3fa& b) {
const vint4 ai = _mm_castps_si128(a.m128);
const vint4 bi = _mm_castps_si128(b.m128);

@@ -208,7 +170,7 @@ namespace embree
}
#endif

#if defined(__aarch64__) || defined(__SSE4_1__)
#if defined(__SSE4_1__)
__forceinline Vec3fa maxi(const Vec3fa& a, const Vec3fa& b) {
const vint4 ai = _mm_castps_si128(a.m128);
const vint4 bi = _mm_castps_si128(b.m128);

@@ -230,30 +192,11 @@ namespace embree
__forceinline Vec3fa msub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return _mm_fmsub_ps(a.m128,b.m128,c.m128); }
__forceinline Vec3fa nmadd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return _mm_fnmadd_ps(a.m128,b.m128,c.m128); }
__forceinline Vec3fa nmsub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return _mm_fnmsub_ps(a.m128,b.m128,c.m128); }
#else

#if defined(__aarch64__)
__forceinline Vec3fa madd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) {
return _mm_madd_ps(a.m128, b.m128, c.m128); //a*b+c;
}
__forceinline Vec3fa nmadd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) {
return _mm_msub_ps(a.m128, b.m128, c.m128); //-a*b+c;
}
__forceinline Vec3fa nmsub( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) {
Vec3fa t = _mm_madd_ps(a.m128, b.m128, c.m128);
return -t;
}
__forceinline Vec3fa msub( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) {
return _mm_madd_ps(a.m128,b.m128,vnegq_f32(c.m128)); //a*b-c
}

#else
__forceinline Vec3fa madd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return a*b+c; }
__forceinline Vec3fa msub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return a*b-c; }
__forceinline Vec3fa nmadd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return -a*b+c;}
__forceinline Vec3fa nmsub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return -a*b-c; }
__forceinline Vec3fa msub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return a*b-c; }
#endif

#endif

__forceinline Vec3fa madd ( const float a, const Vec3fa& b, const Vec3fa& c) { return madd(Vec3fa(a),b,c); }

@@ -275,37 +218,18 @@ namespace embree
////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////
#if defined(__aarch64__) && defined(BUILD_IOS)
__forceinline float reduce_add(const Vec3fa& v) {
float32x4_t t = v.m128;
t[3] = 0.0f;
return vaddvq_f32(t);
}

__forceinline float reduce_mul(const Vec3fa& v) { return v.x*v.y*v.z; }
__forceinline float reduce_min(const Vec3fa& v) {
float32x4_t t = v.m128;
t[3] = t[2];
return vminvq_f32(t);
}
__forceinline float reduce_max(const Vec3fa& v) {
float32x4_t t = v.m128;
t[3] = t[2];
return vmaxvq_f32(t);
}
#else
__forceinline float reduce_add(const Vec3fa& v) {

__forceinline float reduce_add(const Vec3fa& v) {
const vfloat4 a(v.m128);
const vfloat4 b = shuffle<1>(a);
const vfloat4 c = shuffle<2>(a);
return _mm_cvtss_f32(a+b+c);
return _mm_cvtss_f32(a+b+c);
}

__forceinline float reduce_mul(const Vec3fa& v) { return v.x*v.y*v.z; }
__forceinline float reduce_min(const Vec3fa& v) { return min(v.x,v.y,v.z); }
__forceinline float reduce_max(const Vec3fa& v) { return max(v.x,v.y,v.z); }
#endif

////////////////////////////////////////////////////////////////////////////////
/// Comparison Operators
////////////////////////////////////////////////////////////////////////////////

@@ -317,13 +241,8 @@ namespace embree
__forceinline Vec3ba neq_mask(const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpneq_ps(a.m128, b.m128); }
__forceinline Vec3ba lt_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmplt_ps (a.m128, b.m128); }
__forceinline Vec3ba le_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmple_ps (a.m128, b.m128); }
#if defined(__aarch64__)
__forceinline Vec3ba gt_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpgt_ps (a.m128, b.m128); }
__forceinline Vec3ba ge_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpge_ps (a.m128, b.m128); }
#else
__forceinline Vec3ba gt_mask(const Vec3fa& a, const Vec3fa& b) { return _mm_cmpnle_ps(a.m128, b.m128); }
__forceinline Vec3ba ge_mask(const Vec3fa& a, const Vec3fa& b) { return _mm_cmpnlt_ps(a.m128, b.m128); }
#endif
__forceinline Vec3ba gt_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpnle_ps(a.m128, b.m128); }
__forceinline Vec3ba ge_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpnlt_ps(a.m128, b.m128); }

__forceinline bool isvalid ( const Vec3fa& v ) {
return all(gt_mask(v,Vec3fa(-FLT_LARGE)) & lt_mask(v,Vec3fa(+FLT_LARGE)));

@@ -361,7 +280,7 @@ namespace embree
vfloat4 b0 = shuffle<1,2,0,3>(vfloat4(b.m128));
vfloat4 a1 = shuffle<1,2,0,3>(vfloat4(a.m128));
vfloat4 b1 = vfloat4(b.m128);
return Vec3fa(shuffle<1,2,0,3>(prod_diff(a0,b0,a1,b1)));
return Vec3fa(shuffle<1,2,0,3>(msub(a0,b0,a1*b1)));
}

__forceinline float sqr_length ( const Vec3fa& a ) { return dot(a,a); }

@@ -416,11 +335,7 @@ namespace embree
/// Rounding Functions
////////////////////////////////////////////////////////////////////////////////

#if defined(__aarch64__)
__forceinline Vec3fa floor(const Vec3fa& a) { return vrndmq_f32(a.m128); }
__forceinline Vec3fa ceil (const Vec3fa& a) { return vrndpq_f32(a.m128); }
__forceinline Vec3fa trunc(const Vec3fa& a) { return vrndq_f32(a.m128); }
#elif defined (__SSE4_1__)
#if defined (__SSE4_1__)
__forceinline Vec3fa trunc( const Vec3fa& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_NEAREST_INT); }
__forceinline Vec3fa floor( const Vec3fa& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_NEG_INF ); }
__forceinline Vec3fa ceil ( const Vec3fa& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_POS_INF ); }

@@ -478,10 +393,8 @@ namespace embree

__forceinline Vec3fx( const Vec3fa& other, const int a1) { m128 = other.m128; a = a1; }
__forceinline Vec3fx( const Vec3fa& other, const unsigned a1) { m128 = other.m128; u = a1; }
__forceinline Vec3fx( const Vec3fa& other, const float w1) {
#if defined (__aarch64__)
m128 = other.m128; m128[3] = w1;
#elif defined (__SSE4_1__)
__forceinline Vec3fx( const Vec3fa& other, const float w1) {
#if defined (__SSE4_1__)
m128 = _mm_insert_ps(other.m128, _mm_set_ss(w1),3 << 4);
#else
const vint4 mask(-1,-1,-1,0);

@@ -613,7 +526,7 @@ namespace embree
__forceinline Vec3fx min( const Vec3fx& a, const Vec3fx& b ) { return _mm_min_ps(a.m128,b.m128); }
__forceinline Vec3fx max( const Vec3fx& a, const Vec3fx& b ) { return _mm_max_ps(a.m128,b.m128); }

#if defined(__SSE4_1__) || defined(__aarch64__)
#if defined(__SSE4_1__)
__forceinline Vec3fx mini(const Vec3fx& a, const Vec3fx& b) {
const vint4 ai = _mm_castps_si128(a.m128);
const vint4 bi = _mm_castps_si128(b.m128);

@@ -622,7 +535,7 @@ namespace embree
}
#endif

#if defined(__SSE4_1__) || defined(__aarch64__)
#if defined(__SSE4_1__)
__forceinline Vec3fx maxi(const Vec3fx& a, const Vec3fx& b) {
const vint4 ai = _mm_castps_si128(a.m128);
const vint4 bi = _mm_castps_si128(b.m128);

@@ -671,11 +584,11 @@ namespace embree
/// Reductions
////////////////////////////////////////////////////////////////////////////////

__forceinline float reduce_add(const Vec3fx& v) {
__forceinline float reduce_add(const Vec3fx& v) {
const vfloat4 a(v.m128);
const vfloat4 b = shuffle<1>(a);
const vfloat4 c = shuffle<2>(a);
return _mm_cvtss_f32(a+b+c);
return _mm_cvtss_f32(a+b+c);
}

__forceinline float reduce_mul(const Vec3fx& v) { return v.x*v.y*v.z; }

@@ -787,7 +700,11 @@ namespace embree
/// Rounding Functions
////////////////////////////////////////////////////////////////////////////////

#if defined (__SSE4_1__) && !defined(__aarch64__)
#if defined(__aarch64__)
__forceinline Vec3fx trunc(const Vec3fx& a) { return vrndq_f32(a.m128); }
__forceinline Vec3fx floor(const Vec3fx& a) { return vrndmq_f32(a.m128); }
__forceinline Vec3fx ceil (const Vec3fx& a) { return vrndpq_f32(a.m128); }
#elif defined (__SSE4_1__)
__forceinline Vec3fx trunc( const Vec3fx& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_NEAREST_INT); }
__forceinline Vec3fx floor( const Vec3fx& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_NEG_INF ); }
__forceinline Vec3fx ceil ( const Vec3fx& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_POS_INF ); }
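The retained gt_mask/ge_mask use the negated predicates (_mm_cmpnle_ps / _mm_cmpnlt_ps), whereas the removed aarch64 branch used direct greater-than comparisons; the two only disagree when a NaN is involved. A scalar sketch of that difference, for illustration only (the real code works per lane):

    #include <cmath>
    #include <cstdio>

    // "not (a <= b)" is what cmpnle expresses; "a > b" is what cmpgt expresses.
    static bool gt_via_not_le(float a, float b) { return !(a <= b); }
    static bool gt_direct(float a, float b)     { return a > b; }

    int main() {
        std::printf("%d %d\n", gt_via_not_le(2.0f, 1.0f), gt_direct(2.0f, 1.0f)); // 1 1
        float nan = std::nanf("");
        // With a NaN operand the negated form is true, the direct form is false.
        std::printf("%d %d\n", gt_via_not_le(nan, 1.0f), gt_direct(nan, 1.0f));   // 1 0
        return 0;
    }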

thirdparty/embree/common/math/vec3ia.h (vendored, 40 changes)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -65,9 +65,7 @@ namespace embree

__forceinline Vec3ia operator +( const Vec3ia& a ) { return a; }
__forceinline Vec3ia operator -( const Vec3ia& a ) { return _mm_sub_epi32(_mm_setzero_si128(), a.m128); }
#if (defined(__aarch64__))
__forceinline Vec3ia abs ( const Vec3ia& a ) { return vabsq_s32(a.m128); }
#elif defined(__SSSE3__)
#if defined(__SSSE3__)
__forceinline Vec3ia abs ( const Vec3ia& a ) { return _mm_abs_epi32(a.m128); }
#endif

@@ -83,7 +81,7 @@ namespace embree
__forceinline Vec3ia operator -( const Vec3ia& a, const int b ) { return a-Vec3ia(b); }
__forceinline Vec3ia operator -( const int a, const Vec3ia& b ) { return Vec3ia(a)-b; }

#if defined(__aarch64__) || defined(__SSE4_1__)
#if defined(__SSE4_1__)
__forceinline Vec3ia operator *( const Vec3ia& a, const Vec3ia& b ) { return _mm_mullo_epi32(a.m128, b.m128); }
__forceinline Vec3ia operator *( const Vec3ia& a, const int b ) { return a * Vec3ia(b); }
__forceinline Vec3ia operator *( const int a, const Vec3ia& b ) { return Vec3ia(a) * b; }

@@ -101,14 +99,12 @@ namespace embree
__forceinline Vec3ia operator ^( const Vec3ia& a, const int b ) { return a ^ Vec3ia(b); }
__forceinline Vec3ia operator ^( const int a, const Vec3ia& b ) { return Vec3ia(a) ^ b; }

#if !defined(__ARM_NEON)
__forceinline Vec3ia operator <<( const Vec3ia& a, const int n ) { return _mm_slli_epi32(a.m128, n); }
__forceinline Vec3ia operator >>( const Vec3ia& a, const int n ) { return _mm_srai_epi32(a.m128, n); }

__forceinline Vec3ia sll ( const Vec3ia& a, const int b ) { return _mm_slli_epi32(a.m128, b); }
__forceinline Vec3ia sra ( const Vec3ia& a, const int b ) { return _mm_srai_epi32(a.m128, b); }
__forceinline Vec3ia srl ( const Vec3ia& a, const int b ) { return _mm_srli_epi32(a.m128, b); }
#endif

////////////////////////////////////////////////////////////////////////////////
/// Assignment Operators

@@ -120,7 +116,7 @@ namespace embree
__forceinline Vec3ia& operator -=( Vec3ia& a, const Vec3ia& b ) { return a = a - b; }
__forceinline Vec3ia& operator -=( Vec3ia& a, const int& b ) { return a = a - b; }

#if defined(__aarch64__) || defined(__SSE4_1__)
#if defined(__SSE4_1__)
__forceinline Vec3ia& operator *=( Vec3ia& a, const Vec3ia& b ) { return a = a * b; }
__forceinline Vec3ia& operator *=( Vec3ia& a, const int& b ) { return a = a * b; }
#endif

@@ -131,38 +127,18 @@ namespace embree
__forceinline Vec3ia& operator |=( Vec3ia& a, const Vec3ia& b ) { return a = a | b; }
__forceinline Vec3ia& operator |=( Vec3ia& a, const int& b ) { return a = a | b; }

#if !defined(__ARM_NEON)
__forceinline Vec3ia& operator <<=( Vec3ia& a, const int& b ) { return a = a << b; }
__forceinline Vec3ia& operator >>=( Vec3ia& a, const int& b ) { return a = a >> b; }
#endif

////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////
#if defined(__aarch64__)
__forceinline int reduce_add(const Vec3ia& v) {
int32x4_t t = v.m128;
t[3] = 0;
return vaddvq_s32(t);
}
__forceinline int reduce_mul(const Vec3ia& v) { return v.x*v.y*v.z; }
__forceinline int reduce_min(const Vec3ia& v) {
int32x4_t t = (__m128i)blendv_ps((__m128)v0x7fffffff, (__m128)v.m128, (__m128)vFFF0);
return vminvq_s32(t);
}
__forceinline int reduce_max(const Vec3ia& v) {
int32x4_t t = (__m128i)blendv_ps((__m128)v0x80000000, (__m128)v.m128, (__m128)vFFF0);
return vmaxvq_s32(t);
}
#else

__forceinline int reduce_add(const Vec3ia& v) { return v.x+v.y+v.z; }
__forceinline int reduce_mul(const Vec3ia& v) { return v.x*v.y*v.z; }
__forceinline int reduce_min(const Vec3ia& v) { return min(v.x,v.y,v.z); }
__forceinline int reduce_max(const Vec3ia& v) { return max(v.x,v.y,v.z); }
#endif

////////////////////////////////////////////////////////////////////////////////
/// Comparison Operators
////////////////////////////////////////////////////////////////////////////////

@@ -185,14 +161,14 @@ namespace embree
////////////////////////////////////////////////////////////////////////////////

__forceinline Vec3ia select( const Vec3ba& m, const Vec3ia& t, const Vec3ia& f ) {
#if defined(__aarch64__) || defined(__SSE4_1__)
#if defined(__SSE4_1__)
return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m));
#else
return _mm_or_si128(_mm_and_si128(_mm_castps_si128(m), t), _mm_andnot_si128(_mm_castps_si128(m), f));
#endif
}

#if defined(__aarch64__) || defined(__SSE4_1__)
#if defined(__SSE4_1__)
__forceinline Vec3ia min( const Vec3ia& a, const Vec3ia& b ) { return _mm_min_epi32(a.m128,b.m128); }
__forceinline Vec3ia max( const Vec3ia& a, const Vec3ia& b ) { return _mm_max_epi32(a.m128,b.m128); }
#else
23
thirdparty/embree/common/math/vec4.h
vendored
23
thirdparty/embree/common/math/vec4.h
vendored
|
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -192,7 +192,7 @@ namespace embree
  ////////////////////////////////////////////////////////////////////////////////

  typedef Vec4<bool > Vec4b;
  typedef Vec4<uint8_t > Vec4uc;
  typedef Vec4<unsigned char> Vec4uc;
  typedef Vec4<int > Vec4i;
  typedef Vec4<float > Vec4f;
}

@@ -205,7 +205,7 @@ namespace embree
  /// SSE / AVX / MIC specializations
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__SSE__) || defined(__ARM_NEON)
#if defined __SSE__
#include "../simd/sse.h"
#endif

@@ -225,31 +225,16 @@ namespace embree
  template<> __forceinline Vec4<vfloat4>::Vec4( const Vec3fx& a ) {
    x = a.x; y = a.y; z = a.z; w = a.w;
  }
#elif defined(__SSE__) || defined(__ARM_NEON)
#elif defined(__SSE__)
  template<> __forceinline Vec4<vfloat4>::Vec4( const Vec3fx& a ) {
    const vfloat4 v = vfloat4(a.m128); x = shuffle<0,0,0,0>(v); y = shuffle<1,1,1,1>(v); z = shuffle<2,2,2,2>(v); w = shuffle<3,3,3,3>(v);
  }
#endif

#if defined(__SSE__) || defined(__ARM_NEON)
  __forceinline Vec4<vfloat4> broadcast4f( const Vec4<vfloat4>& a, const size_t k ) {
    return Vec4<vfloat4>(vfloat4::broadcast(&a.x[k]), vfloat4::broadcast(&a.y[k]), vfloat4::broadcast(&a.z[k]), vfloat4::broadcast(&a.w[k]));
  }
#endif

#if defined(__AVX__)
  template<> __forceinline Vec4<vfloat8>::Vec4( const Vec3fx& a ) {
    x = a.x; y = a.y; z = a.z; w = a.w;
  }
  __forceinline Vec4<vfloat4> broadcast4f( const Vec4<vfloat8>& a, const size_t k ) {
    return Vec4<vfloat4>(vfloat4::broadcast(&a.x[k]), vfloat4::broadcast(&a.y[k]), vfloat4::broadcast(&a.z[k]), vfloat4::broadcast(&a.w[k]));
  }
  __forceinline Vec4<vfloat8> broadcast8f( const Vec4<vfloat4>& a, const size_t k ) {
    return Vec4<vfloat8>(vfloat8::broadcast(&a.x[k]), vfloat8::broadcast(&a.y[k]), vfloat8::broadcast(&a.z[k]), vfloat8::broadcast(&a.w[k]));
  }
  __forceinline Vec4<vfloat8> broadcast8f( const Vec4<vfloat8>& a, const size_t k ) {
    return Vec4<vfloat8>(vfloat8::broadcast(&a.x[k]), vfloat8::broadcast(&a.y[k]), vfloat8::broadcast(&a.z[k]), vfloat8::broadcast(&a.w[k]));
  }
#endif

#if defined(__AVX512F__)
thirdparty/embree/common/simd/arm/emulation.h (vendored, new file, 50 lines)
@@ -0,0 +1,50 @@
// Copyright 2009-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

/* Make precision match SSE, at the cost of some performance */
#if !defined(__aarch64__)
#  define SSE2NEON_PRECISE_DIV 1
#  define SSE2NEON_PRECISE_SQRT 1
#endif

#include "sse2neon.h"

__forceinline __m128 _mm_fmsub_ps(__m128 a, __m128 b, __m128 c) {
  __m128 neg_c = vreinterpretq_m128_f32(vnegq_f32(vreinterpretq_f32_m128(c)));
  return _mm_fmadd_ps(a, b, neg_c);
}

__forceinline __m128 _mm_fnmadd_ps(__m128 a, __m128 b, __m128 c) {
#if defined(__aarch64__)
  return vreinterpretq_m128_f32(vfmsq_f32(vreinterpretq_f32_m128(c),
                                          vreinterpretq_f32_m128(b),
                                          vreinterpretq_f32_m128(a)));
#else
  return _mm_sub_ps(c, _mm_mul_ps(a, b));
#endif
}

__forceinline __m128 _mm_fnmsub_ps(__m128 a, __m128 b, __m128 c) {
  return vreinterpretq_m128_f32(vnegq_f32(vreinterpretq_f32_m128(_mm_fmadd_ps(a,b,c))));
}

/* Dummy defines for floating point control */
#define _MM_MASK_MASK 0x1f80
#define _MM_MASK_DIV_ZERO 0x200
#define _MM_FLUSH_ZERO_ON 0x8000
#define _MM_MASK_DENORM 0x100
#define _MM_SET_EXCEPTION_MASK(x)
#define _MM_SET_FLUSH_ZERO_MODE(x)

__forceinline int _mm_getcsr()
{
  return 0;
}

__forceinline void _mm_mfence()
{
  __sync_synchronize();
}
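The three fused-multiply helpers in emulation.h above follow the usual naming scheme: fmsub computes a*b - c, fnmadd computes c - a*b, and fnmsub computes -(a*b) - c. As a minimal sketch of the intended per-lane semantics (plain C++ for illustration only, not tied to sse2neon or to actual __m128 types):

```cpp
#include <cassert>

// Scalar reference semantics for the emulated SSE FMA helpers above,
// one lane at a time. fma_ref stands in for _mm_fmadd_ps.
inline float fma_ref   (float a, float b, float c) { return a * b + c; }
inline float fmsub_ref (float a, float b, float c) { return a * b - c; }    // _mm_fmsub_ps
inline float fnmadd_ref(float a, float b, float c) { return c - a * b; }    // _mm_fnmadd_ps
inline float fnmsub_ref(float a, float b, float c) { return -(a * b) - c; } // _mm_fnmsub_ps

int main() {
    assert(fma_ref   (2.0f, 3.0f, 1.0f) ==  7.0f);  // 2*3 + 1
    assert(fmsub_ref (2.0f, 3.0f, 1.0f) ==  5.0f);  // 2*3 - 1
    assert(fnmadd_ref(2.0f, 3.0f, 1.0f) == -5.0f);  // 1 - 2*3
    assert(fnmsub_ref(2.0f, 3.0f, 1.0f) == -7.0f);  // -(2*3) - 1
    return 0;
}
```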
thirdparty/embree/common/simd/arm/sse2neon.h (vendored, new file, 6996 lines)
File diff suppressed because it is too large.
thirdparty/embree/common/simd/avx.h (vendored, 2 changed lines)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once
thirdparty/embree/common/simd/avx512.h (vendored, 2 changed lines)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once
thirdparty/embree/common/simd/simd.h (vendored, 4 changed lines)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -6,7 +6,7 @@
#include "../math/math.h"

/* include SSE wrapper classes */
#if defined(__SSE__) || defined(__ARM_NEON)
#if defined(__SSE__)
#  include "sse.h"
#endif
thirdparty/embree/common/simd/sse.cpp (vendored, 2 changed lines)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include "sse.h"
thirdparty/embree/common/simd/sse.h (vendored, 4 changed lines)
@@ -1,4 +1,4 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

@@ -11,7 +11,7 @@
namespace embree
{
#if (defined(__aarch64__) && defined(BUILD_IOS)) || defined(__SSE4_1__)
#if defined(__SSE4_1__)
  __forceinline __m128 blendv_ps(__m128 f, __m128 t, __m128 mask) {
    return _mm_blendv_ps(f,t,mask);
  }
thirdparty/embree/common/simd/varying.h (vendored, 71 changed lines)
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
@ -9,7 +9,7 @@ namespace embree
|
|||
{
|
||||
/* Varying numeric types */
|
||||
template<int N>
|
||||
struct vfloat
|
||||
struct vfloat_impl
|
||||
{
|
||||
union { float f[N]; int i[N]; };
|
||||
__forceinline const float& operator [](size_t index) const { assert(index < N); return f[index]; }
|
||||
|
@ -17,7 +17,7 @@ namespace embree
|
|||
};
|
||||
|
||||
template<int N>
|
||||
struct vdouble
|
||||
struct vdouble_impl
|
||||
{
|
||||
union { double f[N]; long long i[N]; };
|
||||
__forceinline const double& operator [](size_t index) const { assert(index < N); return f[index]; }
|
||||
|
@ -25,7 +25,7 @@ namespace embree
|
|||
};
|
||||
|
||||
template<int N>
|
||||
struct vint
|
||||
struct vint_impl
|
||||
{
|
||||
int i[N];
|
||||
__forceinline const int& operator [](size_t index) const { assert(index < N); return i[index]; }
|
||||
|
@ -33,7 +33,7 @@ namespace embree
|
|||
};
|
||||
|
||||
template<int N>
|
||||
struct vuint
|
||||
struct vuint_impl
|
||||
{
|
||||
unsigned int i[N];
|
||||
__forceinline const unsigned int& operator [](size_t index) const { assert(index < N); return i[index]; }
|
||||
|
@ -41,7 +41,7 @@ namespace embree
|
|||
};
|
||||
|
||||
template<int N>
|
||||
struct vllong
|
||||
struct vllong_impl
|
||||
{
|
||||
long long i[N];
|
||||
__forceinline const long long& operator [](size_t index) const { assert(index < N); return i[index]; }
|
||||
|
@ -49,20 +49,13 @@ namespace embree
|
|||
};
|
||||
|
||||
/* Varying bool types */
|
||||
template<int N> struct vboolf { int i[N]; }; // for float/int
|
||||
template<int N> struct vboold { long long i[N]; }; // for double/long long
|
||||
|
||||
/* Aliases to default types */
|
||||
template<int N> using vreal = vfloat<N>;
|
||||
template<int N> using vbool = vboolf<N>;
|
||||
|
||||
template<int N> struct vboolf_impl { int i[N]; }; // for float/int
|
||||
template<int N> struct vboold_impl { long long i[N]; }; // for double/long long
|
||||
|
||||
/* Varying size constants */
|
||||
#if defined(__AVX512VL__) // SKX
|
||||
const int VSIZEX = 8; // default size
|
||||
const int VSIZEL = 16; // large size
|
||||
#elif defined(__AVX512F__) // KNL
|
||||
const int VSIZEX = 16;
|
||||
const int VSIZEL = 16;
|
||||
#elif defined(__AVX__)
|
||||
const int VSIZEX = 8;
|
||||
const int VSIZEL = 8;
|
||||
|
@@ -71,21 +64,41 @@ namespace embree
    const int VSIZEL = 4;
#endif

    /* Extends varying size N to optimal or up to max(N, N2) */
    template<int N, int N2 = VSIZEX>
    struct vextend
    {
#if defined(__AVX512F__) && !defined(__AVX512VL__) // KNL
      /* use 16-wide SIMD calculations on KNL even for 4 and 8 wide SIMD */
      static const int size = (N2 == VSIZEX) ? VSIZEX : N;
      #define SIMD_MODE(N) N, 16
#else
      /* calculate with same SIMD width otherwise */
      static const int size = N;
      #define SIMD_MODE(N) N, N
#endif
    template<int N>
    struct vtypes {
      using vbool = vboolf_impl<N>;
      using vboolf = vboolf_impl<N>;
      using vboold = vboold_impl<N>;
      using vint = vint_impl<N>;
      using vuint = vuint_impl<N>;
      using vllong = vllong_impl<N>;
      using vfloat = vfloat_impl<N>;
      using vdouble = vdouble_impl<N>;
    };

    template<>
    struct vtypes<1> {
      using vbool = bool;
      using vboolf = bool;
      using vboold = bool;
      using vint = int;
      using vuint = unsigned int;
      using vllong = long long;
      using vfloat = float;
      using vdouble = double;
    };

    /* Aliases to default types */
    template<int N> using vbool = typename vtypes<N>::vbool;
    template<int N> using vboolf = typename vtypes<N>::vboolf;
    template<int N> using vboold = typename vtypes<N>::vboold;
    template<int N> using vint = typename vtypes<N>::vint;
    template<int N> using vuint = typename vtypes<N>::vuint;
    template<int N> using vllong = typename vtypes<N>::vllong;
    template<int N> using vreal = typename vtypes<N>::vfloat;
    template<int N> using vfloat = typename vtypes<N>::vfloat;
    template<int N> using vdouble = typename vtypes<N>::vdouble;

    /* 4-wide shortcuts */
    typedef vfloat<4> vfloat4;
    typedef vdouble<4> vdouble4;
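The varying.h change above renames the generic storage structs to *_impl and routes the user-facing names through a vtypes<N> trait plus alias templates, so that 1-wide types collapse to plain scalars while wider widths keep the struct layout. A minimal standalone sketch of that pattern (names shortened for illustration; this is not the actual Embree header):

```cpp
#include <type_traits>

// Generic N-wide storage, standing in for Embree's vfloat_impl<N>.
template<int N>
struct vfloat_impl { float f[N]; };

// Width-to-type mapping; the 1-wide case collapses to a scalar.
template<int N> struct vtypes    { using vfloat = vfloat_impl<N>; };
template<>      struct vtypes<1> { using vfloat = float; };

// Alias template that user code actually spells: vfloat<N>.
template<int N> using vfloat = typename vtypes<N>::vfloat;

static_assert(std::is_same<vfloat<1>, float>::value, "1-wide degrades to a scalar");
static_assert(sizeof(vfloat<4>) == 4 * sizeof(float), "4-wide packs four floats");

int main() { return 0; }
```

This also explains the #define vfloat vfloat_impl / #undef blocks added to every SIMD header below: inside those headers the short names must bind to the _impl templates being specialized, while everywhere else they resolve to the alias templates above.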
thirdparty/embree/common/simd/vboold4_avx.h (vendored, 29 changed lines)
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 4-wide AVX bool type for 64bit data types*/
|
||||
|
@ -49,19 +57,13 @@ namespace embree
|
|||
#endif
|
||||
}
|
||||
|
||||
__forceinline vboold(__m128d a, __m128d b) : vl(a), vh(b) {}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Constants
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
__forceinline vboold(FalseTy) : v(_mm256_setzero_pd()) {}
|
||||
#if !defined(__aarch64__)
|
||||
__forceinline vboold(TrueTy) : v(_mm256_cmp_pd(_mm256_setzero_pd(), _mm256_setzero_pd(), _CMP_EQ_OQ)) {}
|
||||
#else
|
||||
__forceinline vboold(TrueTy) : v(_mm256_cmpeq_pd(_mm256_setzero_pd(), _mm256_setzero_pd())) {}
|
||||
#endif
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Array Access
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -105,10 +107,9 @@ namespace embree
|
|||
/// Movement/Shifting/Shuffling Functions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#if !defined(__aarch64__)
|
||||
__forceinline vboold4 unpacklo(const vboold4& a, const vboold4& b) { return _mm256_unpacklo_pd(a, b); }
|
||||
__forceinline vboold4 unpackhi(const vboold4& a, const vboold4& b) { return _mm256_unpackhi_pd(a, b); }
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(__AVX2__)
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
|
@ -158,3 +159,11 @@ namespace embree
|
|||
<< a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
thirdparty/embree/common/simd/vboold4_avx512.h (vendored, 18 changed lines)
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 4-wide AVX-512 bool type */
|
||||
|
@ -138,3 +146,11 @@ namespace embree
|
|||
return cout << ">";
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
thirdparty/embree/common/simd/vboold8_avx512.h (vendored, 31 changed lines)
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 8-wide AVX-512 bool type */
|
||||
|
@ -32,25 +40,12 @@ namespace embree
|
|||
|
||||
/* return int8 mask */
|
||||
__forceinline __m128i mask8() const {
|
||||
#if defined(__AVX512BW__)
|
||||
return _mm_movm_epi8(v);
|
||||
#else
|
||||
const __m512i f = _mm512_set1_epi64(0);
|
||||
const __m512i t = _mm512_set1_epi64(-1);
|
||||
const __m512i m = _mm512_mask_or_epi64(f,v,t,t);
|
||||
return _mm512_cvtepi64_epi8(m);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* return int64 mask */
|
||||
__forceinline __m512i mask64() const {
|
||||
#if defined(__AVX512DQ__)
|
||||
return _mm512_movm_epi64(v);
|
||||
#else
|
||||
const __m512i f = _mm512_set1_epi64(0);
|
||||
const __m512i t = _mm512_set1_epi64(-1);
|
||||
return _mm512_mask_or_epi64(f,v,t,t);
|
||||
#endif
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -146,3 +141,11 @@ namespace embree
|
|||
return cout << ">";
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
thirdparty/embree/common/simd/vboolf16_avx512.h (vendored, 31 changed lines)
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 16-wide AVX-512 bool type */
|
||||
|
@ -33,25 +41,12 @@ namespace embree
|
|||
|
||||
/* return int8 mask */
|
||||
__forceinline __m128i mask8() const {
|
||||
#if defined(__AVX512BW__)
|
||||
return _mm_movm_epi8(v);
|
||||
#else
|
||||
const __m512i f = _mm512_set1_epi32(0);
|
||||
const __m512i t = _mm512_set1_epi32(-1);
|
||||
const __m512i m = _mm512_mask_or_epi32(f,v,t,t);
|
||||
return _mm512_cvtepi32_epi8(m);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* return int32 mask */
|
||||
__forceinline __m512i mask32() const {
|
||||
#if defined(__AVX512DQ__)
|
||||
return _mm512_movm_epi32(v);
|
||||
#else
|
||||
const __m512i f = _mm512_set1_epi32(0);
|
||||
const __m512i t = _mm512_set1_epi32(-1);
|
||||
return _mm512_mask_or_epi32(f,v,t,t);
|
||||
#endif
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -148,3 +143,11 @@ namespace embree
|
|||
return cout << ">";
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
thirdparty/embree/common/simd/vboolf4_avx512.h (vendored, 18 changed lines)
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 4-wide AVX-512 bool type */
|
||||
|
@ -141,3 +149,11 @@ namespace embree
|
|||
return cout << ">";
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
thirdparty/embree/common/simd/vboolf4_sse2.h (vendored, 51 changed lines)
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 4-wide SSE bool type */
|
||||
|
@ -37,13 +45,9 @@ namespace embree
|
|||
: v(mm_lookupmask_ps[(size_t(b) << 3) | (size_t(a) << 2) | (size_t(b) << 1) | size_t(a)]) {}
|
||||
__forceinline vboolf(bool a, bool b, bool c, bool d)
|
||||
: v(mm_lookupmask_ps[(size_t(d) << 3) | (size_t(c) << 2) | (size_t(b) << 1) | size_t(a)]) {}
|
||||
#if defined(__aarch64__) && defined(BUILD_IOS)
|
||||
__forceinline vboolf(int mask) { v = mm_lookupmask_ps[mask]; }
|
||||
__forceinline vboolf(unsigned int mask) { v = mm_lookupmask_ps[mask]; }
|
||||
#else
|
||||
__forceinline vboolf(int mask) { assert(mask >= 0 && mask < 16); v = mm_lookupmask_ps[mask]; }
|
||||
__forceinline vboolf(unsigned int mask) { assert(mask < 16); v = mm_lookupmask_ps[mask]; }
|
||||
#endif
|
||||
|
||||
/* return int32 mask */
|
||||
__forceinline __m128i mask32() const {
|
||||
return _mm_castps_si128(v);
|
||||
|
@ -60,13 +64,8 @@ namespace embree
|
|||
/// Array Access
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#if defined(__aarch64__) && defined(BUILD_IOS)
|
||||
__forceinline bool operator [](size_t index) const { return (_mm_movemask_ps(v) >> index) & 1; }
|
||||
__forceinline int& operator [](size_t index) { return i[index]; }
|
||||
#else
|
||||
__forceinline bool operator [](size_t index) const { assert(index < 4); return (_mm_movemask_ps(v) >> index) & 1; }
|
||||
__forceinline int& operator [](size_t index) { assert(index < 4); return i[index]; }
|
||||
#endif
|
||||
};
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -101,7 +100,7 @@ namespace embree
|
|||
__forceinline vboolf4 operator ==(const vboolf4& a, const vboolf4& b) { return _mm_castsi128_ps(_mm_cmpeq_epi32(a, b)); }
|
||||
|
||||
__forceinline vboolf4 select(const vboolf4& m, const vboolf4& t, const vboolf4& f) {
|
||||
#if (defined(__aarch64__) && defined(BUILD_IOS)) || defined(__SSE4_1__)
|
||||
#if defined(__SSE4_1__)
|
||||
return _mm_blendv_ps(f, t, m);
|
||||
#else
|
||||
return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
|
||||
|
@ -115,17 +114,6 @@ namespace embree
|
|||
__forceinline vboolf4 unpacklo(const vboolf4& a, const vboolf4& b) { return _mm_unpacklo_ps(a, b); }
|
||||
__forceinline vboolf4 unpackhi(const vboolf4& a, const vboolf4& b) { return _mm_unpackhi_ps(a, b); }
|
||||
|
||||
#if defined(__aarch64__)
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vboolf4 shuffle(const vboolf4& v) {
|
||||
return vreinterpretq_f32_u8(vqtbl1q_u8( vreinterpretq_u8_s32(v), _MN_SHUFFLE(i0, i1, i2, i3)));
|
||||
}
|
||||
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vboolf4 shuffle(const vboolf4& a, const vboolf4& b) {
|
||||
return vreinterpretq_f32_u8(vqtbl2q_u8( (uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3)));
|
||||
}
|
||||
#else
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vboolf4 shuffle(const vboolf4& v) {
|
||||
return _mm_castsi128_ps(_mm_shuffle_epi32(v, _MM_SHUFFLE(i3, i2, i1, i0)));
|
||||
|
@ -135,8 +123,7 @@ namespace embree
|
|||
__forceinline vboolf4 shuffle(const vboolf4& a, const vboolf4& b) {
|
||||
return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
template<int i0>
|
||||
__forceinline vboolf4 shuffle(const vboolf4& v) {
|
||||
return shuffle<i0,i0,i0,i0>(v);
|
||||
|
@ -148,7 +135,7 @@ namespace embree
|
|||
template<> __forceinline vboolf4 shuffle<0, 1, 0, 1>(const vboolf4& v) { return _mm_castpd_ps(_mm_movedup_pd(v)); }
|
||||
#endif
|
||||
|
||||
#if defined(__SSE4_1__) && !defined(__aarch64__)
|
||||
#if defined(__SSE4_1__)
|
||||
template<int dst, int src, int clr> __forceinline vboolf4 insert(const vboolf4& a, const vboolf4& b) { return _mm_insert_ps(a, b, (dst << 4) | (src << 6) | clr); }
|
||||
template<int dst, int src> __forceinline vboolf4 insert(const vboolf4& a, const vboolf4& b) { return insert<dst, src, 0>(a, b); }
|
||||
template<int dst> __forceinline vboolf4 insert(const vboolf4& a, const bool b) { return insert<dst, 0>(a, vboolf4(b)); }
|
||||
|
@ -170,14 +157,10 @@ namespace embree
|
|||
__forceinline bool none(const vboolf4& valid, const vboolf4& b) { return none(valid & b); }
|
||||
|
||||
__forceinline size_t movemask(const vboolf4& a) { return _mm_movemask_ps(a); }
|
||||
#if defined(__aarch64__) && defined(BUILD_IOS)
|
||||
__forceinline size_t popcnt(const vboolf4& a) { return _mm_movemask_popcnt_ps(a); }
|
||||
#else
|
||||
#if defined(__SSE4_2__)
|
||||
__forceinline size_t popcnt(const vboolf4& a) { return popcnt((size_t)_mm_movemask_ps(a)); }
|
||||
#else
|
||||
__forceinline size_t popcnt(const vboolf4& a) { return bool(a[0])+bool(a[1])+bool(a[2])+bool(a[3]); }
|
||||
#endif
|
||||
#endif
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -196,3 +179,11 @@ __forceinline size_t popcnt(const vboolf4& a) { return _mm_movemask_popcnt_ps(a)
|
|||
return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ">";
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
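Throughout vboolf4_sse2.h above (and the other SSE headers), the SSE4.1 _mm_blendv_ps select is paired with an SSE2 fallback of the form (m & t) | (~m & f). The two agree whenever every mask lane is either all-zeros or all-ones, which is how these comparison masks are produced. A scalar sketch of that equivalence, assuming 32-bit lanes (illustration only):

```cpp
#include <cassert>
#include <cstdint>

// One 32-bit lane of select(); mask must be 0x00000000 or 0xFFFFFFFF.
inline uint32_t select_lane(uint32_t m, uint32_t t, uint32_t f) {
    return (m & t) | (~m & f);   // same per-lane result _mm_blendv_ps would give
}

int main() {
    assert(select_lane(0xFFFFFFFFu, 0x11111111u, 0x22222222u) == 0x11111111u); // mask set: take t
    assert(select_lane(0x00000000u, 0x11111111u, 0x22222222u) == 0x22222222u); // mask clear: take f
    return 0;
}
```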
|
thirdparty/embree/common/simd/vboolf8_avx.h (vendored, 23 changed lines)
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 8-wide AVX bool type */
|
||||
|
@ -68,11 +76,8 @@ namespace embree
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
__forceinline vboolf(FalseTy) : v(_mm256_setzero_ps()) {}
|
||||
#if !defined(__aarch64__)
|
||||
__forceinline vboolf(TrueTy) : v(_mm256_cmp_ps(_mm256_setzero_ps(), _mm256_setzero_ps(), _CMP_EQ_OQ)) {}
|
||||
#else
|
||||
__forceinline vboolf(TrueTy) : v(_mm256_cmpeq_ps(_mm256_setzero_ps(), _mm256_setzero_ps())) {}
|
||||
#endif
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Array Access
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -187,3 +192,11 @@ namespace embree
|
|||
<< a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
thirdparty/embree/common/simd/vboolf8_avx512.h (vendored, 18 changed lines)
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 8-wide AVX-512 bool type */
|
||||
|
@ -141,3 +149,11 @@ namespace embree
|
|||
return cout << ">";
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
thirdparty/embree/common/simd/vdouble4_avx.h (vendored, 39 changed lines)
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 4-wide AVX 64-bit double type */
|
||||
|
@ -181,20 +189,13 @@ namespace embree
|
|||
__forceinline vboold4 operator >=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd_mask(a, b, _MM_CMPINT_GE); }
|
||||
__forceinline vboold4 operator > (const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd_mask(a, b, _MM_CMPINT_GT); }
|
||||
__forceinline vboold4 operator <=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd_mask(a, b, _MM_CMPINT_LE); }
|
||||
#elif !defined(__aarch64__)
|
||||
#else
|
||||
__forceinline vboold4 operator ==(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_EQ_OQ); }
|
||||
__forceinline vboold4 operator !=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_NEQ_UQ); }
|
||||
__forceinline vboold4 operator < (const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_LT_OS); }
|
||||
__forceinline vboold4 operator >=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_NLT_US); }
|
||||
__forceinline vboold4 operator > (const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_NLE_US); }
|
||||
__forceinline vboold4 operator <=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_LE_OS); }
|
||||
#else
|
||||
__forceinline vboold4 operator ==(const vdouble4& a, const vdouble4& b) { return _mm256_cmpeq_pd(a, b); }
|
||||
__forceinline vboold4 operator !=(const vdouble4& a, const vdouble4& b) { return _mm256_cmpneq_pd(a, b); }
|
||||
__forceinline vboold4 operator < (const vdouble4& a, const vdouble4& b) { return _mm256_cmplt_pd(a, b); }
|
||||
__forceinline vboold4 operator >=(const vdouble4& a, const vdouble4& b) { return _mm256_cmpnlt_pd(a, b); }
|
||||
__forceinline vboold4 operator > (const vdouble4& a, const vdouble4& b) { return _mm256_cmpnle_pd(a, b); }
|
||||
__forceinline vboold4 operator <=(const vdouble4& a, const vdouble4& b) { return _mm256_cmple_pd(a, b); }
|
||||
#endif
|
||||
|
||||
__forceinline vboold4 operator ==(const vdouble4& a, double b) { return a == vdouble4(b); }
|
||||
|
@ -246,18 +247,6 @@ namespace embree
|
|||
#endif
|
||||
}
|
||||
|
||||
__forceinline void xchg(const vboold4& m, vdouble4& a, vdouble4& b) {
|
||||
const vdouble4 c = a; a = select(m,b,a); b = select(m,c,b);
|
||||
}
|
||||
|
||||
__forceinline vboold4 test(const vdouble4& a, const vdouble4& b) {
|
||||
#if defined(__AVX512VL__)
|
||||
return _mm256_test_epi64_mask(_mm256_castpd_si256(a),_mm256_castpd_si256(b));
|
||||
#else
|
||||
return _mm256_testz_si256(_mm256_castpd_si256(a),_mm256_castpd_si256(b));
|
||||
#endif
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Movement/Shifting/Shuffling Functions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -322,3 +311,11 @@ namespace embree
|
|||
return cout;
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
thirdparty/embree/common/simd/vdouble8_avx512.h (vendored, 39 changed lines)
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 8-wide AVX-512 64-bit double type */
|
||||
|
@ -91,15 +99,6 @@ namespace embree
|
|||
_mm512_mask_store_pd(addr, mask, v2);
|
||||
}
|
||||
|
||||
/* pass by value to avoid compiler generating inefficient code */
|
||||
static __forceinline void storeu_compact(const vboold8 mask,void * addr, const vdouble8& reg) {
|
||||
_mm512_mask_compressstoreu_pd(addr, mask, reg);
|
||||
}
|
||||
|
||||
static __forceinline vdouble8 compact64bit(const vboold8& mask, vdouble8& v) {
|
||||
return _mm512_mask_compress_pd(v, mask, v);
|
||||
}
|
||||
|
||||
static __forceinline vdouble8 compact(const vboold8& mask, vdouble8& v) {
|
||||
return _mm512_mask_compress_pd(v, mask, v);
|
||||
}
|
||||
|
@ -260,18 +259,6 @@ namespace embree
|
|||
return _mm512_mask_or_pd(f,m,t,t);
|
||||
}
|
||||
|
||||
__forceinline void xchg(const vboold8& m, vdouble8& a, vdouble8& b) {
|
||||
const vdouble8 c = a; a = select(m,b,a); b = select(m,c,b);
|
||||
}
|
||||
|
||||
__forceinline vboold8 test(const vboold8& m, const vdouble8& a, const vdouble8& b) {
|
||||
return _mm512_mask_test_epi64_mask(m,_mm512_castpd_si512(a),_mm512_castpd_si512(b));
|
||||
}
|
||||
|
||||
__forceinline vboold8 test(const vdouble8& a, const vdouble8& b) {
|
||||
return _mm512_test_epi64_mask(_mm512_castpd_si512(a),_mm512_castpd_si512(b));
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Movement/Shifting/Shuffling Functions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -354,3 +341,11 @@ namespace embree
|
|||
return cout;
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
thirdparty/embree/common/simd/vfloat16_avx512.h (vendored, 206 changed lines)
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 16-wide AVX-512 float type */
|
||||
|
@ -73,11 +81,11 @@ namespace embree
|
|||
}
|
||||
|
||||
/* WARNING: due to f64x4 the mask is considered as an 8bit mask */
|
||||
__forceinline vfloat(const vboolf16& mask, const vfloat8& a, const vfloat8& b) {
|
||||
/*__forceinline vfloat(const vboolf16& mask, const vfloat8& a, const vfloat8& b) {
|
||||
__m512d aa = _mm512_broadcast_f64x4(_mm256_castps_pd(a));
|
||||
aa = _mm512_mask_broadcast_f64x4(aa,mask,_mm256_castps_pd(b));
|
||||
v = _mm512_castpd_ps(aa);
|
||||
}
|
||||
}*/
|
||||
|
||||
__forceinline explicit vfloat(const vint16& a) {
|
||||
v = _mm512_cvtepi32_ps(a);
|
||||
|
@ -123,30 +131,6 @@ namespace embree
|
|||
return _mm512_set1_ps(*f);
|
||||
}
|
||||
|
||||
static __forceinline vfloat16 compact(const vboolf16& mask, vfloat16 &v) {
|
||||
return _mm512_mask_compress_ps(v, mask, v);
|
||||
}
|
||||
static __forceinline vfloat16 compact(const vboolf16& mask, vfloat16 &a, const vfloat16& b) {
|
||||
return _mm512_mask_compress_ps(a, mask, b);
|
||||
}
|
||||
|
||||
static __forceinline vfloat16 expand(const vboolf16& mask, const vfloat16& a, vfloat16& b) {
|
||||
return _mm512_mask_expand_ps(b, mask, a);
|
||||
}
|
||||
|
||||
static __forceinline vfloat16 loadu_compact(const vboolf16& mask, const void* ptr) {
|
||||
return _mm512_mask_expandloadu_ps(_mm512_setzero_ps(), mask, (float*)ptr);
|
||||
}
|
||||
|
||||
static __forceinline void storeu_compact(const vboolf16& mask, float *addr, const vfloat16 reg) {
|
||||
_mm512_mask_compressstoreu_ps(addr, mask, reg);
|
||||
}
|
||||
|
||||
static __forceinline void storeu_compact_single(const vboolf16& mask, float * addr, const vfloat16& reg) {
|
||||
//_mm512_mask_compressstoreu_ps(addr,mask,reg);
|
||||
*addr = mm512_cvtss_f32(_mm512_mask_compress_ps(reg, mask, reg));
|
||||
}
|
||||
|
||||
template<int scale = 4>
|
||||
static __forceinline vfloat16 gather(const float* ptr, const vint16& index) {
|
||||
return _mm512_i32gather_ps(index, ptr, scale);
|
||||
|
@ -194,12 +178,8 @@ namespace embree
|
|||
__forceinline vfloat16 signmsk(const vfloat16& a) { return _mm512_castsi512_ps(_mm512_and_epi32(_mm512_castps_si512(a),_mm512_set1_epi32(0x80000000))); }
|
||||
|
||||
__forceinline vfloat16 rcp(const vfloat16& a) {
|
||||
#if defined(__AVX512ER__)
|
||||
return _mm512_rcp28_ps(a);
|
||||
#else
|
||||
const vfloat16 r = _mm512_rcp14_ps(a);
|
||||
return _mm512_mul_ps(r, _mm512_fnmadd_ps(r, a, vfloat16(2.0f)));
|
||||
#endif
|
||||
}
|
||||
|
||||
__forceinline vfloat16 sqr (const vfloat16& a) { return _mm512_mul_ps(a,a); }
|
||||
|
@ -207,13 +187,9 @@ namespace embree
|
|||
|
||||
__forceinline vfloat16 rsqrt(const vfloat16& a)
|
||||
{
|
||||
#if defined(__AVX512VL__)
|
||||
const vfloat16 r = _mm512_rsqrt14_ps(a);
|
||||
return _mm512_fmadd_ps(_mm512_set1_ps(1.5f), r,
|
||||
_mm512_mul_ps(_mm512_mul_ps(_mm512_mul_ps(a, _mm512_set1_ps(-0.5f)), r), _mm512_mul_ps(r, r)));
|
||||
#else
|
||||
return _mm512_rsqrt28_ps(a);
|
||||
#endif
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -242,54 +218,26 @@ namespace embree
|
|||
return _mm512_castsi512_ps(_mm512_xor_epi32(_mm512_castps_si512(a),_mm512_castps_si512(b)));
|
||||
}
|
||||
|
||||
__forceinline vfloat16 min(const vfloat16& a, const vfloat16& b) {
|
||||
return _mm512_min_ps(a,b);
|
||||
}
|
||||
__forceinline vfloat16 min(const vfloat16& a, float b) {
|
||||
return _mm512_min_ps(a,vfloat16(b));
|
||||
}
|
||||
__forceinline vfloat16 min(const float& a, const vfloat16& b) {
|
||||
return _mm512_min_ps(vfloat16(a),b);
|
||||
}
|
||||
__forceinline vfloat16 min(const vfloat16& a, const vfloat16& b) { return _mm512_min_ps(a,b); }
|
||||
__forceinline vfloat16 min(const vfloat16& a, float b) { return _mm512_min_ps(a,vfloat16(b)); }
|
||||
__forceinline vfloat16 min(const float& a, const vfloat16& b) { return _mm512_min_ps(vfloat16(a),b); }
|
||||
|
||||
__forceinline vfloat16 max(const vfloat16& a, const vfloat16& b) {
|
||||
return _mm512_max_ps(a,b);
|
||||
}
|
||||
__forceinline vfloat16 max(const vfloat16& a, float b) {
|
||||
return _mm512_max_ps(a,vfloat16(b));
|
||||
}
|
||||
__forceinline vfloat16 max(const float& a, const vfloat16& b) {
|
||||
return _mm512_max_ps(vfloat16(a),b);
|
||||
}
|
||||
|
||||
__forceinline vfloat16 mask_add(const vboolf16& mask, const vfloat16& c, const vfloat16& a, const vfloat16& b) { return _mm512_mask_add_ps (c,mask,a,b); }
|
||||
__forceinline vfloat16 mask_min(const vboolf16& mask, const vfloat16& c, const vfloat16& a, const vfloat16& b) {
|
||||
return _mm512_mask_min_ps(c,mask,a,b);
|
||||
};
|
||||
__forceinline vfloat16 mask_max(const vboolf16& mask, const vfloat16& c, const vfloat16& a, const vfloat16& b) {
|
||||
return _mm512_mask_max_ps(c,mask,a,b);
|
||||
};
|
||||
__forceinline vfloat16 max(const vfloat16& a, const vfloat16& b) { return _mm512_max_ps(a,b); }
|
||||
__forceinline vfloat16 max(const vfloat16& a, float b) { return _mm512_max_ps(a,vfloat16(b)); }
|
||||
__forceinline vfloat16 max(const float& a, const vfloat16& b) { return _mm512_max_ps(vfloat16(a),b); }
|
||||
|
||||
__forceinline vfloat16 mini(const vfloat16& a, const vfloat16& b) {
|
||||
#if !defined(__AVX512ER__) // SKX
|
||||
const vint16 ai = _mm512_castps_si512(a);
|
||||
const vint16 bi = _mm512_castps_si512(b);
|
||||
const vint16 ci = _mm512_min_epi32(ai,bi);
|
||||
return _mm512_castsi512_ps(ci);
|
||||
#else // KNL
|
||||
return min(a,b);
|
||||
#endif
|
||||
}
|
||||
|
||||
__forceinline vfloat16 maxi(const vfloat16& a, const vfloat16& b) {
|
||||
#if !defined(__AVX512ER__) // SKX
|
||||
const vint16 ai = _mm512_castps_si512(a);
|
||||
const vint16 bi = _mm512_castps_si512(b);
|
||||
const vint16 ci = _mm512_max_epi32(ai,bi);
|
||||
return _mm512_castsi512_ps(ci);
|
||||
#else // KNL
|
||||
return max(a,b);
|
||||
#endif
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -300,43 +248,6 @@ namespace embree
|
|||
__forceinline vfloat16 msub (const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmsub_ps(a,b,c); }
|
||||
__forceinline vfloat16 nmadd(const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fnmadd_ps(a,b,c); }
|
||||
__forceinline vfloat16 nmsub(const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fnmsub_ps(a,b,c); }
|
||||
|
||||
__forceinline vfloat16 mask_msub(const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_fmsub_ps(a,mask,b,c); }
|
||||
|
||||
__forceinline vfloat16 madd231 (const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmadd_ps(c,b,a); }
|
||||
__forceinline vfloat16 msub213 (const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmsub_ps(a,b,c); }
|
||||
__forceinline vfloat16 msub231 (const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmsub_ps(c,b,a); }
|
||||
__forceinline vfloat16 msubr231(const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fnmadd_ps(c,b,a); }
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Operators with rounding
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
__forceinline vfloat16 madd_round_down(const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmadd_round_ps(a,b,c,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
|
||||
__forceinline vfloat16 madd_round_up (const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmadd_round_ps(a,b,c,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
|
||||
|
||||
__forceinline vfloat16 mul_round_down(const vfloat16& a, const vfloat16& b) { return _mm512_mul_round_ps(a,b,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
|
||||
__forceinline vfloat16 mul_round_up (const vfloat16& a, const vfloat16& b) { return _mm512_mul_round_ps(a,b,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
|
||||
|
||||
__forceinline vfloat16 add_round_down(const vfloat16& a, const vfloat16& b) { return _mm512_add_round_ps(a,b,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
|
||||
__forceinline vfloat16 add_round_up (const vfloat16& a, const vfloat16& b) { return _mm512_add_round_ps(a,b,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
|
||||
|
||||
__forceinline vfloat16 sub_round_down(const vfloat16& a, const vfloat16& b) { return _mm512_sub_round_ps(a,b,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
|
||||
__forceinline vfloat16 sub_round_up (const vfloat16& a, const vfloat16& b) { return _mm512_sub_round_ps(a,b,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
|
||||
|
||||
__forceinline vfloat16 div_round_down(const vfloat16& a, const vfloat16& b) { return _mm512_div_round_ps(a,b,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
|
||||
__forceinline vfloat16 div_round_up (const vfloat16& a, const vfloat16& b) { return _mm512_div_round_ps(a,b,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
|
||||
|
||||
__forceinline vfloat16 mask_msub_round_down(const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_fmsub_round_ps(a,mask,b,c,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
|
||||
__forceinline vfloat16 mask_msub_round_up (const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_fmsub_round_ps(a,mask,b,c,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
|
||||
|
||||
__forceinline vfloat16 mask_mul_round_down(const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_mul_round_ps(a,mask,b,c,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
|
||||
__forceinline vfloat16 mask_mul_round_up (const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_mul_round_ps(a,mask,b,c,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
|
||||
|
||||
__forceinline vfloat16 mask_sub_round_down(const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_sub_round_ps(a,mask,b,c,_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); }
|
||||
__forceinline vfloat16 mask_sub_round_up (const vboolf16& mask,const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_mask_sub_round_ps(a,mask,b,c,_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); }
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Assignment Operators
|
||||
|
@ -404,13 +315,6 @@ namespace embree
|
|||
return madd(t,b-a,a);
|
||||
}
|
||||
|
||||
__forceinline void xchg(vboolf16 m, vfloat16& a, vfloat16& b)
|
||||
{
|
||||
vfloat16 c = a;
|
||||
a = select(m,b,a);
|
||||
b = select(m,c,b);
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Rounding Functions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -455,24 +359,6 @@ namespace embree
|
|||
return _mm512_shuffle_f32x4(v, v, _MM_SHUFFLE(i3, i2, i1, i0));
|
||||
}
|
||||
|
||||
__forceinline vfloat16 interleave_even(const vfloat16& a, const vfloat16& b) {
|
||||
return _mm512_castsi512_ps(_mm512_mask_shuffle_epi32(_mm512_castps_si512(a), mm512_int2mask(0xaaaa), _mm512_castps_si512(b), (_MM_PERM_ENUM)0xb1));
|
||||
}
|
||||
|
||||
__forceinline vfloat16 interleave_odd(const vfloat16& a, const vfloat16& b) {
|
||||
return _mm512_castsi512_ps(_mm512_mask_shuffle_epi32(_mm512_castps_si512(b), mm512_int2mask(0x5555), _mm512_castps_si512(a), (_MM_PERM_ENUM)0xb1));
|
||||
}
|
||||
|
||||
__forceinline vfloat16 interleave2_even(const vfloat16& a, const vfloat16& b) {
|
||||
/* mask should be 8-bit but is 16-bit to reuse for interleave_even */
|
||||
return _mm512_castsi512_ps(_mm512_mask_permutex_epi64(_mm512_castps_si512(a), mm512_int2mask(0xaaaa), _mm512_castps_si512(b), (_MM_PERM_ENUM)0xb1));
|
||||
}
|
||||
|
||||
__forceinline vfloat16 interleave2_odd(const vfloat16& a, const vfloat16& b) {
|
||||
/* mask should be 8-bit but is 16-bit to reuse for interleave_odd */
|
||||
return _mm512_castsi512_ps(_mm512_mask_permutex_epi64(_mm512_castps_si512(b), mm512_int2mask(0x5555), _mm512_castps_si512(a), (_MM_PERM_ENUM)0xb1));
|
||||
}
|
||||
|
||||
__forceinline vfloat16 interleave4_even(const vfloat16& a, const vfloat16& b) {
|
||||
return _mm512_castsi512_ps(_mm512_mask_permutex_epi64(_mm512_castps_si512(a), mm512_int2mask(0xcc), _mm512_castps_si512(b), (_MM_PERM_ENUM)0x4e));
|
||||
}
|
||||
|
@ -537,17 +423,6 @@ namespace embree
|
|||
__forceinline void transpose(const vfloat16& r0, const vfloat16& r1, const vfloat16& r2, const vfloat16& r3,
|
||||
vfloat16& c0, vfloat16& c1, vfloat16& c2, vfloat16& c3)
|
||||
{
|
||||
#if defined(__AVX512F__) && !defined(__AVX512VL__) // KNL
|
||||
vfloat16 a0a1_c0c1 = interleave_even(r0, r1);
|
||||
vfloat16 a2a3_c2c3 = interleave_even(r2, r3);
|
||||
vfloat16 b0b1_d0d1 = interleave_odd (r0, r1);
|
||||
vfloat16 b2b3_d2d3 = interleave_odd (r2, r3);
|
||||
|
||||
c0 = interleave2_even(a0a1_c0c1, a2a3_c2c3);
|
||||
c1 = interleave2_even(b0b1_d0d1, b2b3_d2d3);
|
||||
c2 = interleave2_odd (a0a1_c0c1, a2a3_c2c3);
|
||||
c3 = interleave2_odd (b0b1_d0d1, b2b3_d2d3);
|
||||
#else
|
||||
vfloat16 a0a2_b0b2 = unpacklo(r0, r2);
|
||||
vfloat16 c0c2_d0d2 = unpackhi(r0, r2);
|
||||
vfloat16 a1a3_b1b3 = unpacklo(r1, r3);
|
||||
|
@ -557,7 +432,6 @@ namespace embree
|
|||
c1 = unpackhi(a0a2_b0b2, a1a3_b1b3);
|
||||
c2 = unpacklo(c0c2_d0d2, c1c3_d1d3);
|
||||
c3 = unpackhi(c0c2_d0d2, c1c3_d1d3);
|
||||
#endif
|
||||
}
|
||||
|
||||
__forceinline void transpose(const vfloat4& r0, const vfloat4& r1, const vfloat4& r2, const vfloat4& r3,
|
||||
|
@ -715,44 +589,6 @@ namespace embree
|
|||
return v;
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Memory load and store operations
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
__forceinline vfloat16 loadAOS4to16f(const float& x, const float& y, const float& z)
|
||||
{
|
||||
vfloat16 f = zero;
|
||||
f = select(0x1111,vfloat16::broadcast(&x),f);
|
||||
f = select(0x2222,vfloat16::broadcast(&y),f);
|
||||
f = select(0x4444,vfloat16::broadcast(&z),f);
|
||||
return f;
|
||||
}
|
||||
|
||||
__forceinline vfloat16 loadAOS4to16f(unsigned int index,
|
||||
const vfloat16& x,
|
||||
const vfloat16& y,
|
||||
const vfloat16& z)
|
||||
{
|
||||
vfloat16 f = zero;
|
||||
f = select(0x1111,vfloat16::broadcast((float*)&x + index),f);
|
||||
f = select(0x2222,vfloat16::broadcast((float*)&y + index),f);
|
||||
f = select(0x4444,vfloat16::broadcast((float*)&z + index),f);
|
||||
return f;
|
||||
}
|
||||
|
||||
__forceinline vfloat16 loadAOS4to16f(unsigned int index,
|
||||
const vfloat16& x,
|
||||
const vfloat16& y,
|
||||
const vfloat16& z,
|
||||
const vfloat16& fill)
|
||||
{
|
||||
vfloat16 f = fill;
|
||||
f = select(0x1111,vfloat16::broadcast((float*)&x + index),f);
|
||||
f = select(0x2222,vfloat16::broadcast((float*)&y + index),f);
|
||||
f = select(0x4444,vfloat16::broadcast((float*)&z + index),f);
|
||||
return f;
|
||||
}
|
||||
|
||||
__forceinline vfloat16 rcp_safe(const vfloat16& a) {
|
||||
return rcp(select(a != vfloat16(zero), a, vfloat16(min_rcp_input)));
|
||||
}
|
||||
|
@ -769,3 +605,11 @@ namespace embree
|
|||
return cout;
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
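The rcp and rsqrt paths in vfloat16_avx512.h above refine the low-precision hardware estimates (_mm512_rcp14_ps, _mm512_rsqrt14_ps) with one Newton-Raphson step: r' = r*(2 - a*r) for 1/a, and r' = r*(1.5 - 0.5*a*r^2) for 1/sqrt(a). A scalar sketch of those refinement formulas (illustration only; the vector code applies the same update per lane):

```cpp
#include <cmath>
#include <cstdio>

// One Newton-Raphson step for a reciprocal estimate r ~ 1/a.
inline float refine_rcp(float a, float r)   { return r * (2.0f - a * r); }

// One Newton-Raphson step for a reciprocal-square-root estimate r ~ 1/sqrt(a).
inline float refine_rsqrt(float a, float r) { return r * (1.5f - 0.5f * a * r * r); }

int main() {
    float a = 3.0f;
    float r = 0.3f;               // deliberately rough estimate of 1/3
    r = refine_rcp(a, r);         // 0.33 after one step, vs. 0.3333... exact
    std::printf("rcp  : %f (exact %f)\n", r, 1.0f / a);

    float s = 0.5f;               // rough estimate of 1/sqrt(3) ~ 0.5774
    s = refine_rsqrt(a, s);       // 0.5625 after one step, noticeably closer
    std::printf("rsqrt: %f (exact %f)\n", s, 1.0f / std::sqrt(a));
    return 0;
}
```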
thirdparty/embree/common/simd/vfloat4_sse2.h (vendored, 425 changed lines)
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 4-wide SSE float type */
|
||||
|
@ -10,18 +18,18 @@ namespace embree
|
|||
struct vfloat<4>
|
||||
{
|
||||
ALIGNED_STRUCT_(16);
|
||||
|
||||
|
||||
typedef vboolf4 Bool;
|
||||
typedef vint4 Int;
|
||||
typedef vfloat4 Float;
|
||||
|
||||
|
||||
enum { size = 4 }; // number of SIMD elements
|
||||
union { __m128 v; float f[4]; int i[4]; }; // data
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Constructors, Assignment & Cast Operators
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
__forceinline vfloat() {}
|
||||
__forceinline vfloat(const vfloat4& other) { v = other.v; }
|
||||
__forceinline vfloat4& operator =(const vfloat4& other) { v = other.v; return *this; }
|
||||
|
@ -34,19 +42,14 @@ namespace embree
|
|||
__forceinline vfloat(float a, float b, float c, float d) : v(_mm_set_ps(d, c, b, a)) {}
|
||||
|
||||
__forceinline explicit vfloat(const vint4& a) : v(_mm_cvtepi32_ps(a)) {}
|
||||
#if defined(__aarch64__)
|
||||
__forceinline explicit vfloat(const vuint4& x) {
|
||||
v = vcvtq_f32_u32(vreinterpretq_u32_s32(x.v));
|
||||
}
|
||||
#else
|
||||
__forceinline explicit vfloat(const vuint4& x) {
|
||||
const __m128i a = _mm_and_si128(x,_mm_set1_epi32(0x7FFFFFFF));
|
||||
const __m128i b = _mm_and_si128(_mm_srai_epi32(x,31),_mm_set1_epi32(0x4F000000)); //0x4F000000 = 2^31
|
||||
const __m128i b = _mm_and_si128(_mm_srai_epi32(x,31),_mm_set1_epi32(0x4F000000)); //0x4F000000 = 2^31
|
||||
const __m128 af = _mm_cvtepi32_ps(a);
|
||||
const __m128 bf = _mm_castsi128_ps(b);
|
||||
const __m128 bf = _mm_castsi128_ps(b);
|
||||
v = _mm_add_ps(af,bf);
|
||||
}
|
||||
#endif
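The SSE2 branch of the vuint4-to-vfloat4 constructor above converts unsigned 32-bit values without an unsigned-convert instruction: the low 31 bits go through the signed convert, and if the top bit was set the constant 0x4F000000 (the IEEE-754 bit pattern of 2^31) is reinterpreted as a float and added back. A scalar sketch of the same trick, assuming a 32-bit unsigned input (illustration only):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Scalar version of the SSE2 unsigned->float trick used above.
inline float uint_to_float(uint32_t x) {
    float lo = static_cast<float>(static_cast<int32_t>(x & 0x7FFFFFFFu)); // low 31 bits, signed-safe
    float hi = 0.0f;
    if (x & 0x80000000u) {
        const uint32_t bits = 0x4F000000u;   // bit pattern of 2^31 as a float
        std::memcpy(&hi, &bits, sizeof(hi));
    }
    return lo + hi;
}

int main() {
    assert(uint_to_float(5u) == 5.0f);
    assert(uint_to_float(0x80000000u) == 2147483648.0f);  // 2^31
    assert(uint_to_float(0x80000005u) == 2147483648.0f);  // +5 is below float precision at 2^31
    return 0;
}
```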
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Constants
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -71,13 +74,6 @@ namespace embree
|
|||
|
||||
#if defined(__AVX512VL__)
|
||||
|
||||
static __forceinline vfloat4 compact(const vboolf4& mask, vfloat4 &v) {
|
||||
return _mm_mask_compress_ps(v, mask, v);
|
||||
}
|
||||
static __forceinline vfloat4 compact(const vboolf4& mask, vfloat4 &a, const vfloat4& b) {
|
||||
return _mm_mask_compress_ps(a, mask, b);
|
||||
}
|
||||
|
||||
static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return _mm_mask_load_ps (_mm_setzero_ps(),mask,(float*)ptr); }
|
||||
static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return _mm_mask_loadu_ps(_mm_setzero_ps(),mask,(float*)ptr); }
|
||||
|
||||
|
@ -107,44 +103,32 @@ namespace embree
|
|||
#if defined (__SSE4_1__)
|
||||
return _mm_castsi128_ps(_mm_stream_load_si128((__m128i*)ptr));
|
||||
#else
|
||||
return _mm_load_ps(ptr);
|
||||
return _mm_load_ps(ptr);
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(__aarch64__)
|
||||
static __forceinline vfloat4 load(const int8_t* ptr) {
|
||||
return __m128(_mm_load4epi8_f32(((__m128i*)ptr)));
|
||||
}
|
||||
#elif defined(__SSE4_1__)
|
||||
static __forceinline vfloat4 load(const int8_t* ptr) {
|
||||
#if defined(__SSE4_1__)
|
||||
static __forceinline vfloat4 load(const char* ptr) {
|
||||
return _mm_cvtepi32_ps(_mm_cvtepi8_epi32(_mm_loadu_si128((__m128i*)ptr)));
|
||||
}
|
||||
#else
|
||||
static __forceinline vfloat4 load(const int8_t* ptr) {
|
||||
static __forceinline vfloat4 load(const char* ptr) {
|
||||
return vfloat4(ptr[0],ptr[1],ptr[2],ptr[3]);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(__aarch64__)
|
||||
static __forceinline vfloat4 load(const uint8_t* ptr) {
|
||||
return __m128(_mm_load4epu8_f32(((__m128i*)ptr)));
|
||||
}
|
||||
#elif defined(__SSE4_1__)
|
||||
static __forceinline vfloat4 load(const uint8_t* ptr) {
|
||||
#if defined(__SSE4_1__)
|
||||
static __forceinline vfloat4 load(const unsigned char* ptr) {
|
||||
return _mm_cvtepi32_ps(_mm_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)));
|
||||
}
|
||||
#else
|
||||
static __forceinline vfloat4 load(const uint8_t* ptr) {
|
||||
static __forceinline vfloat4 load(const unsigned char* ptr) {
|
||||
//return _mm_cvtpu8_ps(*(__m64*)ptr); // don't enable, will use MMX instructions
|
||||
return vfloat4(ptr[0],ptr[1],ptr[2],ptr[3]);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(__aarch64__)
|
||||
static __forceinline vfloat4 load(const short* ptr) {
|
||||
return __m128(_mm_load4epi16_f32(((__m128i*)ptr)));
|
||||
}
|
||||
#elif defined(__SSE4_1__)
|
||||
#if defined(__SSE4_1__)
|
||||
static __forceinline vfloat4 load(const short* ptr) {
|
||||
return _mm_cvtepi32_ps(_mm_cvtepi16_epi32(_mm_loadu_si128((__m128i*)ptr)));
|
||||
}
|
||||
|
@ -157,15 +141,11 @@ namespace embree
|
|||
static __forceinline vfloat4 load(const unsigned short* ptr) {
|
||||
return _mm_mul_ps(vfloat4(vint4::load(ptr)),vfloat4(1.0f/65535.0f));
|
||||
}
|
||||
|
||||
|
||||
static __forceinline void store_nt(void* ptr, const vfloat4& v)
|
||||
{
|
||||
#if defined (__SSE4_1__)
|
||||
#if defined(__aarch64__)
|
||||
_mm_stream_ps((float*)ptr,vreinterpretq_s32_f32(v.v));
|
||||
#else
|
||||
_mm_stream_ps((float*)ptr,v);
|
||||
#endif
|
||||
#else
|
||||
_mm_store_ps((float*)ptr,v);
|
||||
#endif
|
||||
|
@ -173,14 +153,14 @@ namespace embree
|
|||
|
||||
template<int scale = 4>
|
||||
static __forceinline vfloat4 gather(const float* ptr, const vint4& index) {
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
return _mm_i32gather_ps(ptr, index, scale);
|
||||
#else
|
||||
return vfloat4(
|
||||
*(float*)(((int8_t*)ptr)+scale*index[0]),
|
||||
*(float*)(((int8_t*)ptr)+scale*index[1]),
|
||||
*(float*)(((int8_t*)ptr)+scale*index[2]),
|
||||
*(float*)(((int8_t*)ptr)+scale*index[3]));
|
||||
*(float*)(((char*)ptr)+scale*index[0]),
|
||||
*(float*)(((char*)ptr)+scale*index[1]),
|
||||
*(float*)(((char*)ptr)+scale*index[2]),
|
||||
*(float*)(((char*)ptr)+scale*index[3]));
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -189,13 +169,13 @@ namespace embree
|
|||
vfloat4 r = zero;
|
||||
#if defined(__AVX512VL__)
|
||||
return _mm_mmask_i32gather_ps(r, mask, index, ptr, scale);
|
||||
#elif defined(__AVX2__) && !defined(__aarch64__)
|
||||
#elif defined(__AVX2__)
|
||||
return _mm_mask_i32gather_ps(r, ptr, index, mask, scale);
|
||||
#else
|
||||
if (likely(mask[0])) r[0] = *(float*)(((int8_t*)ptr)+scale*index[0]);
|
||||
if (likely(mask[1])) r[1] = *(float*)(((int8_t*)ptr)+scale*index[1]);
|
||||
if (likely(mask[2])) r[2] = *(float*)(((int8_t*)ptr)+scale*index[2]);
|
||||
if (likely(mask[3])) r[3] = *(float*)(((int8_t*)ptr)+scale*index[3]);
|
||||
if (likely(mask[0])) r[0] = *(float*)(((char*)ptr)+scale*index[0]);
|
||||
if (likely(mask[1])) r[1] = *(float*)(((char*)ptr)+scale*index[1]);
|
||||
if (likely(mask[2])) r[2] = *(float*)(((char*)ptr)+scale*index[2]);
|
||||
if (likely(mask[3])) r[3] = *(float*)(((char*)ptr)+scale*index[3]);
|
||||
return r;
|
||||
#endif
|
||||
}
|
||||
|
@ -206,10 +186,10 @@ namespace embree
|
|||
#if defined(__AVX512VL__)
|
||||
_mm_i32scatter_ps((float*)ptr, index, v, scale);
|
||||
#else
|
||||
*(float*)(((int8_t*)ptr)+scale*index[0]) = v[0];
|
||||
*(float*)(((int8_t*)ptr)+scale*index[1]) = v[1];
|
||||
*(float*)(((int8_t*)ptr)+scale*index[2]) = v[2];
|
||||
*(float*)(((int8_t*)ptr)+scale*index[3]) = v[3];
|
||||
*(float*)(((char*)ptr)+scale*index[0]) = v[0];
|
||||
*(float*)(((char*)ptr)+scale*index[1]) = v[1];
|
||||
*(float*)(((char*)ptr)+scale*index[2]) = v[2];
|
||||
*(float*)(((char*)ptr)+scale*index[3]) = v[3];
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -219,20 +199,20 @@ namespace embree
|
|||
#if defined(__AVX512VL__)
|
||||
_mm_mask_i32scatter_ps((float*)ptr ,mask, index, v, scale);
|
||||
#else
|
||||
if (likely(mask[0])) *(float*)(((int8_t*)ptr)+scale*index[0]) = v[0];
|
||||
if (likely(mask[1])) *(float*)(((int8_t*)ptr)+scale*index[1]) = v[1];
|
||||
if (likely(mask[2])) *(float*)(((int8_t*)ptr)+scale*index[2]) = v[2];
|
||||
if (likely(mask[3])) *(float*)(((int8_t*)ptr)+scale*index[3]) = v[3];
|
||||
if (likely(mask[0])) *(float*)(((char*)ptr)+scale*index[0]) = v[0];
|
||||
if (likely(mask[1])) *(float*)(((char*)ptr)+scale*index[1]) = v[1];
|
||||
if (likely(mask[2])) *(float*)(((char*)ptr)+scale*index[2]) = v[2];
|
||||
if (likely(mask[3])) *(float*)(((char*)ptr)+scale*index[3]) = v[3];
|
||||
#endif
|
||||
}
|
||||
|
||||
static __forceinline void store(const vboolf4& mask, int8_t* ptr, const vint4& ofs, const vfloat4& v) {
|
||||
static __forceinline void store(const vboolf4& mask, char* ptr, const vint4& ofs, const vfloat4& v) {
|
||||
scatter<1>(mask,ptr,ofs,v);
|
||||
}
|
||||
static __forceinline void store(const vboolf4& mask, float* ptr, const vint4& ofs, const vfloat4& v) {
|
||||
scatter<4>(mask,ptr,ofs,v);
|
||||
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Array Access
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -243,15 +223,27 @@ namespace embree
|
|||
friend __forceinline vfloat4 select(const vboolf4& m, const vfloat4& t, const vfloat4& f) {
|
||||
#if defined(__AVX512VL__)
|
||||
return _mm_mask_blend_ps(m, f, t);
|
||||
#elif defined(__SSE4_1__) || (defined(__aarch64__))
|
||||
return _mm_blendv_ps(f, t, m);
|
||||
#elif defined(__SSE4_1__)
|
||||
return _mm_blendv_ps(f, t, m);
|
||||
#else
|
||||
return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
|
||||
return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Load/Store
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
template<> struct mem<vfloat4>
|
||||
{
|
||||
static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return vfloat4::load (mask,ptr); }
|
||||
static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return vfloat4::loadu(mask,ptr); }
|
||||
|
||||
static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { vfloat4::store (mask,ptr,v); }
|
||||
static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { vfloat4::storeu(mask,ptr,v); }
|
||||
};
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Unary Operators
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -264,47 +256,18 @@ namespace embree
|
|||
__forceinline vfloat4 toFloat(const vint4& a) { return vfloat4(a); }
|
||||
|
||||
__forceinline vfloat4 operator +(const vfloat4& a) { return a; }
|
||||
#if defined(__aarch64__)
|
||||
__forceinline vfloat4 operator -(const vfloat4& a) {
|
||||
return vnegq_f32(a);
|
||||
}
|
||||
#else
|
||||
__forceinline vfloat4 operator -(const vfloat4& a) { return _mm_xor_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x80000000))); }
|
||||
#endif
|
||||
|
||||
#if defined(__aarch64__)
|
||||
__forceinline vfloat4 abs(const vfloat4& a) { return _mm_abs_ps(a); }
|
||||
#else
|
||||
__forceinline vfloat4 abs(const vfloat4& a) { return _mm_and_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff))); }
|
||||
#endif
|
||||
|
||||
#if defined(__AVX512VL__)
|
||||
__forceinline vfloat4 sign(const vfloat4& a) { return _mm_mask_blend_ps(_mm_cmp_ps_mask(a, vfloat4(zero), _CMP_LT_OQ), vfloat4(one), -vfloat4(one)); }
|
||||
#else
|
||||
__forceinline vfloat4 sign(const vfloat4& a) { return blendv_ps(vfloat4(one), -vfloat4(one), _mm_cmplt_ps(a, vfloat4(zero))); }
|
||||
#endif
|
||||
|
||||
#if defined(__aarch64__)
|
||||
__forceinline vfloat4 signmsk(const vfloat4& a) { return _mm_and_ps(a, vreinterpretq_f32_u32(v0x80000000)); }
|
||||
#else
|
||||
__forceinline vfloat4 signmsk(const vfloat4& a) { return _mm_and_ps(a,_mm_castsi128_ps(_mm_set1_epi32(0x80000000))); }
|
||||
#endif
|
||||
|
||||
|
||||
__forceinline vfloat4 rcp(const vfloat4& a)
|
||||
{
|
||||
#if defined(__aarch64__)
|
||||
#if defined(BUILD_IOS)
|
||||
return vfloat4(vdivq_f32(vdupq_n_f32(1.0f),a.v));
|
||||
#else //BUILD_IOS
|
||||
__m128 reciprocal = _mm_rcp_ps(a);
|
||||
reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal);
|
||||
reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal);
|
||||
// +1 round since NEON's reciprocal estimate instruction has less accuracy than SSE2's rcp.
|
||||
reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal);
|
||||
return (const vfloat4)reciprocal;
|
||||
#endif // BUILD_IOS
|
||||
#else
|
||||
|
||||
#if defined(__AVX512VL__)
|
||||
const vfloat4 r = _mm_rcp14_ps(a);
|
||||
#else
|
||||
|
@ -316,45 +279,31 @@ namespace embree
|
|||
#else
|
||||
return _mm_mul_ps(r,_mm_sub_ps(vfloat4(2.0f), _mm_mul_ps(r, a)));
|
||||
#endif
|
||||
|
||||
#endif //defined(__aarch64__)
|
||||
}
|
||||
__forceinline vfloat4 sqr (const vfloat4& a) { return _mm_mul_ps(a,a); }
|
||||
__forceinline vfloat4 sqrt(const vfloat4& a) { return _mm_sqrt_ps(a); }
|
||||
|
||||
__forceinline vfloat4 rsqrt(const vfloat4& a)
|
||||
{
|
||||
#if defined(__aarch64__)
|
||||
vfloat4 r = _mm_rsqrt_ps(a);
|
||||
r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r));
|
||||
r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r));
|
||||
r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r));
|
||||
return r;
|
||||
#else
|
||||
|
||||
#if defined(__AVX512VL__)
|
||||
const vfloat4 r = _mm_rsqrt14_ps(a);
|
||||
vfloat4 r = _mm_rsqrt14_ps(a);
|
||||
#else
|
||||
const vfloat4 r = _mm_rsqrt_ps(a);
|
||||
vfloat4 r = _mm_rsqrt_ps(a);
|
||||
#endif
|
||||
|
||||
#if defined(__AVX2__)
|
||||
return _mm_fmadd_ps(_mm_set1_ps(1.5f), r,
|
||||
_mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
|
||||
#if defined(__ARM_NEON)
|
||||
r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
|
||||
r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
|
||||
#elif defined(__AVX2__)
|
||||
r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
|
||||
#else
|
||||
return _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f), r),
|
||||
_mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
|
||||
#endif
|
||||
|
||||
r = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f), r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
|
||||
#endif
|
||||
return r;
|
||||
}
|
||||
|
||||
__forceinline vboolf4 isnan(const vfloat4& a) {
|
||||
#if defined(__aarch64__)
|
||||
const vfloat4 b = _mm_and_ps(a, vreinterpretq_f32_u32(v0x7fffffff));
|
||||
#else
|
||||
const vfloat4 b = _mm_and_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)));
|
||||
#endif
|
||||
#if defined(__AVX512VL__)
|
||||
return _mm_cmp_epi32_mask(_mm_castps_si128(b), _mm_set1_epi32(0x7f800000), _MM_CMPINT_GT);
|
||||
#else
|
||||
|
@ -395,8 +344,7 @@ namespace embree
|
|||
__forceinline vfloat4 max(const vfloat4& a, float b) { return _mm_max_ps(a,vfloat4(b)); }
|
||||
__forceinline vfloat4 max(float a, const vfloat4& b) { return _mm_max_ps(vfloat4(a),b); }
|
||||
|
||||
#if defined(__SSE4_1__) || defined(__aarch64__)
|
||||
|
||||
#if defined(__SSE4_1__)
|
||||
__forceinline vfloat4 mini(const vfloat4& a, const vfloat4& b) {
|
||||
const vint4 ai = _mm_castps_si128(a);
|
||||
const vint4 bi = _mm_castps_si128(b);
|
||||
|
@ -438,30 +386,16 @@ namespace embree
|
|||
/// Ternary Operators
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#if defined(__AVX2__)
|
||||
#if defined(__AVX2__) || defined(__ARM_NEON)
|
||||
__forceinline vfloat4 madd (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fmadd_ps(a,b,c); }
|
||||
__forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fmsub_ps(a,b,c); }
|
||||
__forceinline vfloat4 nmadd(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fnmadd_ps(a,b,c); }
|
||||
__forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fnmsub_ps(a,b,c); }
|
||||
#else
|
||||
|
||||
#if defined(__aarch64__)
|
||||
__forceinline vfloat4 madd (const vfloat4& a, const vfloat4& b, const vfloat4& c) {
|
||||
return _mm_madd_ps(a, b, c); //a*b+c;
|
||||
}
|
||||
__forceinline vfloat4 nmadd(const vfloat4& a, const vfloat4& b, const vfloat4& c) {
|
||||
return _mm_msub_ps(a, b, c); //-a*b+c;
|
||||
}
|
||||
__forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) {
|
||||
return vnegq_f32(vfmaq_f32(c,a, b));
|
||||
}
|
||||
#else
|
||||
__forceinline vfloat4 madd (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return a*b+c; }
|
||||
__forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return a*b-c; }
|
||||
__forceinline vfloat4 nmadd(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return -a*b+c;}
|
||||
__forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return -a*b-c; }
|
||||
#endif
|
||||
__forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return a*b-c; }
|
||||
|
||||
#endif
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -495,13 +429,8 @@ namespace embree
|
|||
__forceinline vboolf4 operator ==(const vfloat4& a, const vfloat4& b) { return _mm_cmpeq_ps (a, b); }
|
||||
__forceinline vboolf4 operator !=(const vfloat4& a, const vfloat4& b) { return _mm_cmpneq_ps(a, b); }
|
||||
__forceinline vboolf4 operator < (const vfloat4& a, const vfloat4& b) { return _mm_cmplt_ps (a, b); }
|
||||
#if defined(__aarch64__)
|
||||
__forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmpge_ps (a, b); }
|
||||
__forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmpgt_ps (a, b); }
|
||||
#else
|
||||
__forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmpnlt_ps(a, b); }
|
||||
__forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmpnle_ps(a, b); }
|
||||
#endif
|
||||
__forceinline vboolf4 operator <=(const vfloat4& a, const vfloat4& b) { return _mm_cmple_ps (a, b); }
|
||||
#endif
|
||||
|
||||
|
@ -513,7 +442,7 @@ namespace embree
|
|||
|
||||
__forceinline vboolf4 operator < (const vfloat4& a, float b) { return a < vfloat4(b); }
|
||||
__forceinline vboolf4 operator < (float a, const vfloat4& b) { return vfloat4(a) < b; }
|
||||
|
||||
|
||||
__forceinline vboolf4 operator >=(const vfloat4& a, float b) { return a >= vfloat4(b); }
|
||||
__forceinline vboolf4 operator >=(float a, const vfloat4& b) { return vfloat4(a) >= b; }
|
||||
|
||||
|
@ -549,68 +478,17 @@ namespace embree
|
|||
template<int mask>
|
||||
__forceinline vfloat4 select(const vfloat4& t, const vfloat4& f)
|
||||
{
|
||||
#if defined(__SSE4_1__)
|
||||
#if defined(__SSE4_1__)
|
||||
return _mm_blend_ps(f, t, mask);
|
||||
#else
|
||||
return select(vboolf4(mask), t, f);
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(__aarch64__)
|
||||
template<> __forceinline vfloat4 select<0>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vzero));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<1>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v000F));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<2>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v00F0));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<3>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v00FF));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<4>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v0F00));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<5>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v0F0F));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<6>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v0FF0));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<7>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(v0FFF));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<8>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vF000));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<9>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vF00F));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<10>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vF0F0));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<11>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vF0FF));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<12>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vFF00));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<13>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vFF0F));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<14>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vFFF0));
|
||||
}
|
||||
template<> __forceinline vfloat4 select<15>(const vfloat4& t, const vfloat4& f) {
|
||||
return _mm_blendv_ps(f, t, vreinterpretq_f32_u32(vFFFF));
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
__forceinline vfloat4 lerp(const vfloat4& a, const vfloat4& b, const vfloat4& t) {
|
||||
return madd(t,b-a,a);
|
||||
}
|
||||
|
||||
|
||||
__forceinline bool isvalid(const vfloat4& v) {
|
||||
return all((v > vfloat4(-FLT_LARGE)) & (v < vfloat4(+FLT_LARGE)));
|
||||
}
|
||||
|
@ -622,21 +500,21 @@ namespace embree
|
|||
__forceinline bool is_finite(const vboolf4& valid, const vfloat4& a) {
|
||||
return all(valid, (a >= vfloat4(-FLT_MAX)) & (a <= vfloat4(+FLT_MAX)));
|
||||
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Rounding Functions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#if defined(__aarch64__)
|
||||
__forceinline vfloat4 floor(const vfloat4& a) { return vrndmq_f32(a.v); } // towards -inf
|
||||
__forceinline vfloat4 ceil (const vfloat4& a) { return vrndpq_f32(a.v); } // toward +inf
|
||||
__forceinline vfloat4 trunc(const vfloat4& a) { return vrndq_f32(a.v); } // towards 0
|
||||
__forceinline vfloat4 round(const vfloat4& a) { return vrndnq_f32(a.v); } // to nearest, ties to even. NOTE(LTE): arm clang uses vrndnq, old gcc uses vrndqn?
|
||||
__forceinline vfloat4 floor(const vfloat4& a) { return vrndmq_f32(a.v); }
|
||||
__forceinline vfloat4 ceil (const vfloat4& a) { return vrndpq_f32(a.v); }
|
||||
__forceinline vfloat4 trunc(const vfloat4& a) { return vrndq_f32(a.v); }
|
||||
__forceinline vfloat4 round(const vfloat4& a) { return vrndnq_f32(a.v); }
|
||||
#elif defined (__SSE4_1__)
|
||||
__forceinline vfloat4 floor(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF ); }
|
||||
__forceinline vfloat4 ceil (const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_POS_INF ); }
|
||||
__forceinline vfloat4 trunc(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_ZERO ); }
|
||||
__forceinline vfloat4 round(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT); } // (even) https://www.felixcloutier.com/x86/roundpd
|
||||
__forceinline vfloat4 floor(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF ); }
|
||||
__forceinline vfloat4 ceil (const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_POS_INF ); }
|
||||
__forceinline vfloat4 trunc(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_ZERO ); }
|
||||
__forceinline vfloat4 round(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT); }
|
||||
#else
|
||||
__forceinline vfloat4 floor(const vfloat4& a) { return vfloat4(floorf(a[0]),floorf(a[1]),floorf(a[2]),floorf(a[3])); }
|
||||
__forceinline vfloat4 ceil (const vfloat4& a) { return vfloat4(ceilf (a[0]),ceilf (a[1]),ceilf (a[2]),ceilf (a[3])); }
|
||||
|
@ -646,9 +524,7 @@ namespace embree
|
|||
__forceinline vfloat4 frac(const vfloat4& a) { return a-floor(a); }
|
||||
|
||||
__forceinline vint4 floori(const vfloat4& a) {
|
||||
#if defined(__aarch64__)
|
||||
return vcvtq_s32_f32(floor(a));
|
||||
#elif defined(__SSE4_1__)
|
||||
#if defined(__SSE4_1__)
|
||||
return vint4(floor(a));
|
||||
#else
|
||||
return vint4(a-vfloat4(0.5f));
|
||||
|
@ -662,16 +538,6 @@ namespace embree
|
|||
__forceinline vfloat4 unpacklo(const vfloat4& a, const vfloat4& b) { return _mm_unpacklo_ps(a, b); }
|
||||
__forceinline vfloat4 unpackhi(const vfloat4& a, const vfloat4& b) { return _mm_unpackhi_ps(a, b); }
|
||||
|
||||
#if defined(__aarch64__)
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vfloat4 shuffle(const vfloat4& v) {
|
||||
return vreinterpretq_f32_u8(vqtbl1q_u8( (uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3)));
|
||||
}
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vfloat4 shuffle(const vfloat4& a, const vfloat4& b) {
|
||||
return vreinterpretq_f32_u8(vqtbl2q_u8( (uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3)));
|
||||
}
|
||||
#else
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vfloat4 shuffle(const vfloat4& v) {
|
||||
return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v), _MM_SHUFFLE(i3, i2, i1, i0)));
|
||||
|
@ -681,19 +547,8 @@ namespace embree
|
|||
__forceinline vfloat4 shuffle(const vfloat4& a, const vfloat4& b) {
|
||||
return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined (__SSSE3__)
|
||||
__forceinline vfloat4 shuffle8(const vfloat4& a, const vint4& shuf) {
|
||||
return _mm_castsi128_ps(_mm_shuffle_epi8(_mm_castps_si128(a), shuf));
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(__aarch64__)
|
||||
template<> __forceinline vfloat4 shuffle<0, 0, 2, 2>(const vfloat4& v) { return __m128(vqtbl1q_u8( uint8x16_t(v.v), v0022 )); }
|
||||
template<> __forceinline vfloat4 shuffle<1, 1, 3, 3>(const vfloat4& v) { return __m128(vqtbl1q_u8( uint8x16_t(v.v), v1133)); }
|
||||
template<> __forceinline vfloat4 shuffle<0, 1, 0, 1>(const vfloat4& v) { return __m128(vqtbl1q_u8( uint8x16_t(v.v), v0101)); }
|
||||
#elif defined(__SSE3__)
|
||||
#if defined(__SSE3__)
|
||||
template<> __forceinline vfloat4 shuffle<0, 0, 2, 2>(const vfloat4& v) { return _mm_moveldup_ps(v); }
|
||||
template<> __forceinline vfloat4 shuffle<1, 1, 3, 3>(const vfloat4& v) { return _mm_movehdup_ps(v); }
|
||||
template<> __forceinline vfloat4 shuffle<0, 1, 0, 1>(const vfloat4& v) { return _mm_castpd_ps(_mm_movedup_pd(_mm_castps_pd(v))); }
|
||||
|
@ -704,56 +559,10 @@ namespace embree
|
|||
return shuffle<i,i,i,i>(v);
|
||||
}
|
||||
|
||||
#if defined(__aarch64__)
|
||||
template<int i> __forceinline float extract(const vfloat4& a);
|
||||
template<> __forceinline float extract<0>(const vfloat4& b) {
|
||||
return b[0];
|
||||
}
|
||||
template<> __forceinline float extract<1>(const vfloat4& b) {
|
||||
return b[1];
|
||||
}
|
||||
template<> __forceinline float extract<2>(const vfloat4& b) {
|
||||
return b[2];
|
||||
}
|
||||
template<> __forceinline float extract<3>(const vfloat4& b) {
|
||||
return b[3];
|
||||
}
|
||||
#elif defined (__SSE4_1__) && !defined(__GNUC__)
|
||||
template<int i> __forceinline float extract(const vfloat4& a) { return _mm_cvtss_f32(_mm_extract_ps(a,i)); }
|
||||
template<> __forceinline float extract<0>(const vfloat4& a) { return _mm_cvtss_f32(a); }
|
||||
#else
|
||||
template<int i> __forceinline float extract(const vfloat4& a) { return _mm_cvtss_f32(shuffle<i,i,i,i>(a)); }
|
||||
template<> __forceinline float extract<0>(const vfloat4& a) { return _mm_cvtss_f32(a); }
|
||||
#endif
|
||||
template<int i> __forceinline float extract (const vfloat4& a) { return _mm_cvtss_f32(shuffle<i>(a)); }
|
||||
template<> __forceinline float extract<0>(const vfloat4& a) { return _mm_cvtss_f32(a); }
|
||||
|
||||
|
||||
#if defined(__aarch64__)
|
||||
template<int dst> __forceinline vfloat4 insert(const vfloat4& a, float b);
|
||||
template<> __forceinline vfloat4 insert<0>(const vfloat4& a, float b)
|
||||
{
|
||||
vfloat4 c = a;
|
||||
c[0] = b;
|
||||
return c;
|
||||
}
|
||||
template<> __forceinline vfloat4 insert<1>(const vfloat4& a, float b)
|
||||
{
|
||||
vfloat4 c = a;
|
||||
c[1] = b;
|
||||
return c;
|
||||
}
|
||||
template<> __forceinline vfloat4 insert<2>(const vfloat4& a, float b)
|
||||
{
|
||||
vfloat4 c = a;
|
||||
c[2] = b;
|
||||
return c;
|
||||
}
|
||||
template<> __forceinline vfloat4 insert<3>(const vfloat4& a, float b)
|
||||
{
|
||||
vfloat4 c = a;
|
||||
c[3] = b;
|
||||
return c;
|
||||
}
|
||||
#elif defined (__SSE4_1__)
|
||||
#if defined (__SSE4_1__)
|
||||
template<int dst, int src, int clr> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { return _mm_insert_ps(a, b, (dst << 4) | (src << 6) | clr); }
|
||||
template<int dst, int src> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { return insert<dst, src, 0>(a, b); }
|
||||
template<int dst> __forceinline vfloat4 insert(const vfloat4& a, const float b) { return insert<dst, 0>(a, _mm_set_ss(b)); }
|
||||
|
@ -762,19 +571,10 @@ namespace embree
|
|||
template<int dst> __forceinline vfloat4 insert(const vfloat4& a, float b) { vfloat4 c = a; c[dst&3] = b; return c; }
|
||||
#endif
|
||||
|
||||
#if defined(__aarch64__)
|
||||
__forceinline float toScalar(const vfloat4& v) {
|
||||
return v[0];
|
||||
}
|
||||
#else
|
||||
__forceinline float toScalar(const vfloat4& v) { return _mm_cvtss_f32(v); }
|
||||
#endif
|
||||
__forceinline vfloat4 broadcast4f(const vfloat4& a, size_t k) {
|
||||
return vfloat4::broadcast(&a[k]);
|
||||
}
|
||||
|
||||
__forceinline vfloat4 shift_right_1(const vfloat4& x) {
|
||||
return _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(x), 4));
|
||||
return _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(x), 4));
|
||||
}
|
||||
|
||||
#if defined (__AVX2__)
|
||||
|
@ -790,7 +590,7 @@ namespace embree
|
|||
template<int i>
|
||||
__forceinline vfloat4 align_shift_right(const vfloat4& a, const vfloat4& b) {
|
||||
return _mm_castsi128_ps(_mm_alignr_epi32(_mm_castps_si128(a), _mm_castps_si128(b), i));
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
|
@ -864,39 +664,28 @@ namespace embree
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Reductions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
#if defined(__aarch64__)
|
||||
__forceinline vfloat4 vreduce_min(const vfloat4& v) { float h = vminvq_f32(v); return vdupq_n_f32(h); }
|
||||
__forceinline vfloat4 vreduce_max(const vfloat4& v) { float h = vmaxvq_f32(v); return vdupq_n_f32(h); }
|
||||
__forceinline vfloat4 vreduce_add(const vfloat4& v) { float h = vaddvq_f32(v); return vdupq_n_f32(h); }
|
||||
#else
|
||||
|
||||
__forceinline vfloat4 vreduce_min(const vfloat4& v) { vfloat4 h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); }
|
||||
__forceinline vfloat4 vreduce_max(const vfloat4& v) { vfloat4 h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); }
|
||||
__forceinline vfloat4 vreduce_add(const vfloat4& v) { vfloat4 h = shuffle<1,0,3,2>(v) + v ; return shuffle<2,3,0,1>(h) + h ; }
|
||||
#endif
|
||||
|
||||
#if defined(__aarch64__)
|
||||
__forceinline float reduce_min(const vfloat4& v) { return vminvq_f32(v); }
|
||||
__forceinline float reduce_max(const vfloat4& v) { return vmaxvq_f32(v); }
|
||||
__forceinline float reduce_add(const vfloat4& v) { return vaddvq_f32(v); }
|
||||
#else
|
||||
__forceinline float reduce_min(const vfloat4& v) { return _mm_cvtss_f32(vreduce_min(v)); }
|
||||
__forceinline float reduce_max(const vfloat4& v) { return _mm_cvtss_f32(vreduce_max(v)); }
|
||||
__forceinline float reduce_add(const vfloat4& v) { return _mm_cvtss_f32(vreduce_add(v)); }
|
||||
#endif
|
||||
|
||||
__forceinline size_t select_min(const vboolf4& valid, const vfloat4& v)
|
||||
{
|
||||
const vfloat4 a = select(valid,v,vfloat4(pos_inf));
|
||||
__forceinline size_t select_min(const vboolf4& valid, const vfloat4& v)
|
||||
{
|
||||
const vfloat4 a = select(valid,v,vfloat4(pos_inf));
|
||||
const vbool4 valid_min = valid & (a == vreduce_min(a));
|
||||
return bsf(movemask(any(valid_min) ? valid_min : valid));
|
||||
return bsf(movemask(any(valid_min) ? valid_min : valid));
|
||||
}
|
||||
__forceinline size_t select_max(const vboolf4& valid, const vfloat4& v)
|
||||
{
|
||||
const vfloat4 a = select(valid,v,vfloat4(neg_inf));
|
||||
__forceinline size_t select_max(const vboolf4& valid, const vfloat4& v)
|
||||
{
|
||||
const vfloat4 a = select(valid,v,vfloat4(neg_inf));
|
||||
const vbool4 valid_max = valid & (a == vreduce_max(a));
|
||||
return bsf(movemask(any(valid_max) ? valid_max : valid));
|
||||
return bsf(movemask(any(valid_max) ? valid_max : valid));
|
||||
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Euclidian Space Operators
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -911,7 +700,7 @@ namespace embree
|
|||
const vfloat4 b0 = shuffle<1,2,0,3>(b);
|
||||
const vfloat4 a1 = shuffle<1,2,0,3>(a);
|
||||
const vfloat4 b1 = b;
|
||||
return shuffle<1,2,0,3>(prod_diff(a0,b0,a1,b1));
|
||||
return shuffle<1,2,0,3>(msub(a0,b0,a1*b1));
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -923,3 +712,11 @@ namespace embree
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
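Editor's note (not part of the commit): the rcp()/rsqrt() hunks above all follow the same estimate-then-refine pattern, where a hardware approximation (_mm_rcp_ps / _mm_rsqrt_ps on x86, or the NEON vrecpeq/vrsqrteq equivalents in the removed fork code) is sharpened by one or more Newton-Raphson steps. A minimal scalar sketch of the two update formulas, assuming nothing beyond standard C++ and standing in for the intrinsics, looks like this:

#include <cstdio>

// One Newton-Raphson step for a reciprocal estimate r ~= 1/a:  r' = r*(2 - a*r)
static float refine_rcp(float a, float r) { return r * (2.0f - a * r); }

// One Newton-Raphson step for a reciprocal square-root estimate r ~= 1/sqrt(a):
// r' = r*(1.5 - 0.5*a*r*r), i.e. 1.5*r + (-0.5*a)*r*(r*r) as written with fmadd above.
static float refine_rsqrt(float a, float r) { return r * (1.5f - 0.5f * a * r * r); }

int main() {
    const float a = 3.0f;
    float r = 0.3f;   // crude seed for 1/3, standing in for _mm_rcp_ps
    float s = 0.5f;   // crude seed for 1/sqrt(3), standing in for _mm_rsqrt_ps
    for (int i = 0; i < 3; ++i) { r = refine_rcp(a, r); s = refine_rsqrt(a, s); }
    std::printf("1/a ~= %f  1/sqrt(a) ~= %f\n", r, s);  // converge toward 0.333333 and 0.577350
    return 0;
}

The extra iteration kept in the NEON path (the "+1 round" comment above) exists because vrecpeq/vrsqrteq start from a coarser estimate than the SSE instructions, so one more refinement step is needed to reach comparable precision.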
231 thirdparty/embree/common/simd/vfloat8_avx.h vendored
|
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 8-wide AVX float type */
|
||||
|
@ -33,7 +41,7 @@ namespace embree
|
|||
__forceinline explicit vfloat(const vfloat4& a) : v(_mm256_insertf128_ps(_mm256_castps128_ps256(a),a,1)) {}
|
||||
__forceinline vfloat(const vfloat4& a, const vfloat4& b) : v(_mm256_insertf128_ps(_mm256_castps128_ps256(a),b,1)) {}
|
||||
|
||||
__forceinline explicit vfloat(const int8_t* a) : v(_mm256_loadu_ps((const float*)a)) {}
|
||||
__forceinline explicit vfloat(const char* a) : v(_mm256_loadu_ps((const float*)a)) {}
|
||||
__forceinline vfloat(float a) : v(_mm256_set1_ps(a)) {}
|
||||
__forceinline vfloat(float a, float b) : v(_mm256_set_ps(b, a, b, a, b, a, b, a)) {}
|
||||
__forceinline vfloat(float a, float b, float c, float d) : v(_mm256_set_ps(d, c, b, a, d, c, b, a)) {}
|
||||
|
@ -61,21 +69,7 @@ namespace embree
|
|||
return _mm256_broadcast_ss((float*)a);
|
||||
}
|
||||
|
||||
static __forceinline vfloat8 broadcast2(const float* a, const float* b) {
|
||||
#if defined(__INTEL_COMPILER)
|
||||
const vfloat8 v0 = _mm256_broadcast_ss(a);
|
||||
const vfloat8 v1 = _mm256_broadcast_ss(b);
|
||||
return _mm256_blend_ps(v1, v0, 0xf);
|
||||
#else
|
||||
return _mm256_set_ps(*b,*b,*b,*b,*a,*a,*a,*a);
|
||||
#endif
|
||||
}
|
||||
|
||||
static __forceinline vfloat8 broadcast4f(const vfloat4* ptr) {
|
||||
return _mm256_broadcast_ps((__m128*)ptr);
|
||||
}
|
||||
|
||||
static __forceinline vfloat8 load(const int8_t* ptr) {
|
||||
static __forceinline vfloat8 load(const char* ptr) {
|
||||
#if defined(__AVX2__)
|
||||
return _mm256_cvtepi32_ps(_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)ptr)));
|
||||
#else
|
||||
|
@ -83,7 +77,7 @@ namespace embree
|
|||
#endif
|
||||
}
|
||||
|
||||
static __forceinline vfloat8 load(const uint8_t* ptr) {
|
||||
static __forceinline vfloat8 load(const unsigned char* ptr) {
|
||||
#if defined(__AVX2__)
|
||||
return _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)));
|
||||
#else
|
||||
|
@ -107,24 +101,11 @@ namespace embree
|
|||
|
||||
#if defined(__AVX512VL__)
|
||||
|
||||
static __forceinline vfloat8 compact(const vboolf8& mask, vfloat8 &v) {
|
||||
return _mm256_mask_compress_ps(v, mask, v);
|
||||
}
|
||||
static __forceinline vfloat8 compact(const vboolf8& mask, vfloat8 &a, const vfloat8& b) {
|
||||
return _mm256_mask_compress_ps(a, mask, b);
|
||||
}
|
||||
|
||||
static __forceinline vfloat8 load (const vboolf8& mask, const void* ptr) { return _mm256_mask_load_ps (_mm256_setzero_ps(),mask,(float*)ptr); }
|
||||
static __forceinline vfloat8 loadu(const vboolf8& mask, const void* ptr) { return _mm256_mask_loadu_ps(_mm256_setzero_ps(),mask,(float*)ptr); }
|
||||
|
||||
static __forceinline void store (const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_mask_store_ps ((float*)ptr,mask,v); }
|
||||
static __forceinline void storeu(const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_mask_storeu_ps((float*)ptr,mask,v); }
|
||||
#elif defined(__aarch64__)
|
||||
static __forceinline vfloat8 load (const vboolf8& mask, const void* ptr) { return _mm256_maskload_ps((float*)ptr,(__m256i)mask.v); }
|
||||
static __forceinline vfloat8 loadu(const vboolf8& mask, const void* ptr) { return _mm256_maskload_ps((float*)ptr,(__m256i)mask.v); }
|
||||
|
||||
static __forceinline void store (const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask.v,v); }
|
||||
static __forceinline void storeu(const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask.v,v); }
|
||||
#else
|
||||
static __forceinline vfloat8 load (const vboolf8& mask, const void* ptr) { return _mm256_maskload_ps((float*)ptr,(__m256i)mask); }
|
||||
static __forceinline vfloat8 loadu(const vboolf8& mask, const void* ptr) { return _mm256_maskload_ps((float*)ptr,(__m256i)mask); }
|
||||
|
@ -145,18 +126,18 @@ namespace embree
|
|||
|
||||
template<int scale = 4>
|
||||
static __forceinline vfloat8 gather(const float* ptr, const vint8& index) {
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
return _mm256_i32gather_ps(ptr, index ,scale);
|
||||
#else
|
||||
return vfloat8(
|
||||
*(float*)(((int8_t*)ptr)+scale*index[0]),
|
||||
*(float*)(((int8_t*)ptr)+scale*index[1]),
|
||||
*(float*)(((int8_t*)ptr)+scale*index[2]),
|
||||
*(float*)(((int8_t*)ptr)+scale*index[3]),
|
||||
*(float*)(((int8_t*)ptr)+scale*index[4]),
|
||||
*(float*)(((int8_t*)ptr)+scale*index[5]),
|
||||
*(float*)(((int8_t*)ptr)+scale*index[6]),
|
||||
*(float*)(((int8_t*)ptr)+scale*index[7]));
|
||||
*(float*)(((char*)ptr)+scale*index[0]),
|
||||
*(float*)(((char*)ptr)+scale*index[1]),
|
||||
*(float*)(((char*)ptr)+scale*index[2]),
|
||||
*(float*)(((char*)ptr)+scale*index[3]),
|
||||
*(float*)(((char*)ptr)+scale*index[4]),
|
||||
*(float*)(((char*)ptr)+scale*index[5]),
|
||||
*(float*)(((char*)ptr)+scale*index[6]),
|
||||
*(float*)(((char*)ptr)+scale*index[7]));
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -165,17 +146,17 @@ namespace embree
|
|||
vfloat8 r = zero;
|
||||
#if defined(__AVX512VL__)
|
||||
return _mm256_mmask_i32gather_ps(r, mask, index, ptr, scale);
|
||||
#elif defined(__AVX2__) && !defined(__aarch64__)
|
||||
#elif defined(__AVX2__)
|
||||
return _mm256_mask_i32gather_ps(r, ptr, index, mask, scale);
|
||||
#else
|
||||
if (likely(mask[0])) r[0] = *(float*)(((int8_t*)ptr)+scale*index[0]);
|
||||
if (likely(mask[1])) r[1] = *(float*)(((int8_t*)ptr)+scale*index[1]);
|
||||
if (likely(mask[2])) r[2] = *(float*)(((int8_t*)ptr)+scale*index[2]);
|
||||
if (likely(mask[3])) r[3] = *(float*)(((int8_t*)ptr)+scale*index[3]);
|
||||
if (likely(mask[4])) r[4] = *(float*)(((int8_t*)ptr)+scale*index[4]);
|
||||
if (likely(mask[5])) r[5] = *(float*)(((int8_t*)ptr)+scale*index[5]);
|
||||
if (likely(mask[6])) r[6] = *(float*)(((int8_t*)ptr)+scale*index[6]);
|
||||
if (likely(mask[7])) r[7] = *(float*)(((int8_t*)ptr)+scale*index[7]);
|
||||
if (likely(mask[0])) r[0] = *(float*)(((char*)ptr)+scale*index[0]);
|
||||
if (likely(mask[1])) r[1] = *(float*)(((char*)ptr)+scale*index[1]);
|
||||
if (likely(mask[2])) r[2] = *(float*)(((char*)ptr)+scale*index[2]);
|
||||
if (likely(mask[3])) r[3] = *(float*)(((char*)ptr)+scale*index[3]);
|
||||
if (likely(mask[4])) r[4] = *(float*)(((char*)ptr)+scale*index[4]);
|
||||
if (likely(mask[5])) r[5] = *(float*)(((char*)ptr)+scale*index[5]);
|
||||
if (likely(mask[6])) r[6] = *(float*)(((char*)ptr)+scale*index[6]);
|
||||
if (likely(mask[7])) r[7] = *(float*)(((char*)ptr)+scale*index[7]);
|
||||
return r;
|
||||
#endif
|
||||
}
|
||||
|
@ -186,14 +167,14 @@ namespace embree
|
|||
#if defined(__AVX512VL__)
|
||||
_mm256_i32scatter_ps((float*)ptr, ofs, v, scale);
|
||||
#else
|
||||
*(float*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
|
||||
*(float*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
|
||||
*(float*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
|
||||
*(float*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
|
||||
*(float*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
|
||||
*(float*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
|
||||
*(float*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
|
||||
*(float*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
|
||||
*(float*)(((char*)ptr)+scale*ofs[0]) = v[0];
|
||||
*(float*)(((char*)ptr)+scale*ofs[1]) = v[1];
|
||||
*(float*)(((char*)ptr)+scale*ofs[2]) = v[2];
|
||||
*(float*)(((char*)ptr)+scale*ofs[3]) = v[3];
|
||||
*(float*)(((char*)ptr)+scale*ofs[4]) = v[4];
|
||||
*(float*)(((char*)ptr)+scale*ofs[5]) = v[5];
|
||||
*(float*)(((char*)ptr)+scale*ofs[6]) = v[6];
|
||||
*(float*)(((char*)ptr)+scale*ofs[7]) = v[7];
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -203,24 +184,17 @@ namespace embree
|
|||
#if defined(__AVX512VL__)
|
||||
_mm256_mask_i32scatter_ps((float*)ptr, mask, ofs, v, scale);
|
||||
#else
|
||||
if (likely(mask[0])) *(float*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
|
||||
if (likely(mask[1])) *(float*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
|
||||
if (likely(mask[2])) *(float*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
|
||||
if (likely(mask[3])) *(float*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
|
||||
if (likely(mask[4])) *(float*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
|
||||
if (likely(mask[5])) *(float*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
|
||||
if (likely(mask[6])) *(float*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
|
||||
if (likely(mask[7])) *(float*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
|
||||
if (likely(mask[0])) *(float*)(((char*)ptr)+scale*ofs[0]) = v[0];
|
||||
if (likely(mask[1])) *(float*)(((char*)ptr)+scale*ofs[1]) = v[1];
|
||||
if (likely(mask[2])) *(float*)(((char*)ptr)+scale*ofs[2]) = v[2];
|
||||
if (likely(mask[3])) *(float*)(((char*)ptr)+scale*ofs[3]) = v[3];
|
||||
if (likely(mask[4])) *(float*)(((char*)ptr)+scale*ofs[4]) = v[4];
|
||||
if (likely(mask[5])) *(float*)(((char*)ptr)+scale*ofs[5]) = v[5];
|
||||
if (likely(mask[6])) *(float*)(((char*)ptr)+scale*ofs[6]) = v[6];
|
||||
if (likely(mask[7])) *(float*)(((char*)ptr)+scale*ofs[7]) = v[7];
|
||||
#endif
|
||||
}
|
||||
|
||||
static __forceinline void store(const vboolf8& mask, int8_t* ptr, const vint8& ofs, const vfloat8& v) {
|
||||
scatter<1>(mask,ptr,ofs,v);
|
||||
}
|
||||
static __forceinline void store(const vboolf8& mask, float* ptr, const vint8& ofs, const vfloat8& v) {
|
||||
scatter<4>(mask,ptr,ofs,v);
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Array Access
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -241,60 +215,27 @@ namespace embree
|
|||
__forceinline vfloat8 toFloat(const vint8& a) { return vfloat8(a); }
|
||||
|
||||
__forceinline vfloat8 operator +(const vfloat8& a) { return a; }
|
||||
#if !defined(__aarch64__)
|
||||
__forceinline vfloat8 operator -(const vfloat8& a) {
|
||||
const __m256 mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x80000000));
|
||||
return _mm256_xor_ps(a, mask);
|
||||
}
|
||||
#else
|
||||
__forceinline vfloat8 operator -(const vfloat8& a) {
|
||||
__m256 res;
|
||||
res.lo = vnegq_f32(a.v.lo);
|
||||
res.hi = vnegq_f32(a.v.hi);
|
||||
return res;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if !defined(__aarch64__)
|
||||
__forceinline vfloat8 abs(const vfloat8& a) {
|
||||
const __m256 mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x7fffffff));
|
||||
return _mm256_and_ps(a, mask);
|
||||
}
|
||||
#else
|
||||
__forceinline vfloat8 abs(const vfloat8& a) {
|
||||
__m256 res;
|
||||
res.lo = vabsq_f32(a.v.lo);
|
||||
res.hi = vabsq_f32(a.v.hi);
|
||||
return res;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if !defined(__aarch64__)
|
||||
__forceinline vfloat8 abs(const vfloat8& a) {
|
||||
const __m256 mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x7fffffff));
|
||||
return _mm256_and_ps(a, mask);
|
||||
}
|
||||
__forceinline vfloat8 sign (const vfloat8& a) { return _mm256_blendv_ps(vfloat8(one), -vfloat8(one), _mm256_cmp_ps(a, vfloat8(zero), _CMP_NGE_UQ)); }
|
||||
#else
|
||||
__forceinline vfloat8 sign (const vfloat8& a) { return _mm256_blendv_ps(vfloat8(one), -vfloat8(one), _mm256_cmplt_ps(a, vfloat8(zero))); }
|
||||
#endif
|
||||
__forceinline vfloat8 signmsk(const vfloat8& a) { return _mm256_and_ps(a,_mm256_castsi256_ps(_mm256_set1_epi32(0x80000000))); }
|
||||
|
||||
|
||||
static __forceinline vfloat8 rcp(const vfloat8& a)
|
||||
{
|
||||
#if defined(BUILD_IOS) && defined(__aarch64__)
|
||||
// ios devices are faster doing full divide, no need for NR fixup
|
||||
vfloat8 ret;
|
||||
const float32x4_t one = vdupq_n_f32(1.0f);
|
||||
ret.v.lo = vdivq_f32(one, a.v.lo);
|
||||
ret.v.hi = vdivq_f32(one, a.v.hi);
|
||||
return ret;
|
||||
#endif
|
||||
|
||||
#if defined(__AVX512VL__)
|
||||
const vfloat8 r = _mm256_rcp14_ps(a);
|
||||
#else
|
||||
const vfloat8 r = _mm256_rcp_ps(a);
|
||||
#endif
|
||||
|
||||
#if defined(__AVX2__) //&& !defined(aarch64)
|
||||
|
||||
#if defined(__AVX2__)
|
||||
return _mm256_mul_ps(r, _mm256_fnmadd_ps(r, a, vfloat8(2.0f)));
|
||||
#else
|
||||
return _mm256_mul_ps(r, _mm256_sub_ps(vfloat8(2.0f), _mm256_mul_ps(r, a)));
|
||||
|
@ -443,29 +384,17 @@ __forceinline vfloat8 abs(const vfloat8& a) {
|
|||
static __forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) {
|
||||
return _mm256_mask_blend_ps(m, f, t);
|
||||
}
|
||||
#elif !defined(__aarch64__)
|
||||
__forceinline vboolf8 operator ==(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_EQ_OQ); }
|
||||
__forceinline vboolf8 operator !=(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_NEQ_UQ); }
|
||||
__forceinline vboolf8 operator < (const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_LT_OS); }
|
||||
__forceinline vboolf8 operator >=(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_NLT_US); }
|
||||
__forceinline vboolf8 operator > (const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_NLE_US); }
|
||||
__forceinline vboolf8 operator <=(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_LE_OS); }
|
||||
#else
|
||||
static __forceinline vboolf8 operator ==(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_EQ_OQ); }
|
||||
static __forceinline vboolf8 operator !=(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_NEQ_UQ); }
|
||||
static __forceinline vboolf8 operator < (const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_LT_OS); }
|
||||
static __forceinline vboolf8 operator >=(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_NLT_US); }
|
||||
static __forceinline vboolf8 operator > (const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_NLE_US); }
|
||||
static __forceinline vboolf8 operator <=(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_LE_OS); }
|
||||
|
||||
__forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) {
|
||||
static __forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) {
|
||||
return _mm256_blendv_ps(f, t, m);
|
||||
}
|
||||
#else
|
||||
__forceinline vboolf8 operator ==(const vfloat8& a, const vfloat8& b) { return _mm256_cmpeq_ps(a, b); }
|
||||
__forceinline vboolf8 operator !=(const vfloat8& a, const vfloat8& b) { return _mm256_cmpneq_ps(a, b); }
|
||||
__forceinline vboolf8 operator < (const vfloat8& a, const vfloat8& b) { return _mm256_cmplt_ps(a, b); }
|
||||
__forceinline vboolf8 operator >=(const vfloat8& a, const vfloat8& b) { return _mm256_cmpge_ps(a, b); }
|
||||
__forceinline vboolf8 operator > (const vfloat8& a, const vfloat8& b) { return _mm256_cmpgt_ps(a, b); }
|
||||
__forceinline vboolf8 operator <=(const vfloat8& a, const vfloat8& b) { return _mm256_cmple_ps(a, b); }
|
||||
|
||||
__forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) {
|
||||
return _mm256_blendv_ps(f, t, m);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
template<int mask>
|
||||
|
@ -534,17 +463,10 @@ __forceinline vfloat8 abs(const vfloat8& a) {
|
|||
/// Rounding Functions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#if !defined(__aarch64__)
|
||||
__forceinline vfloat8 floor(const vfloat8& a) { return _mm256_round_ps(a, _MM_FROUND_TO_NEG_INF ); }
|
||||
__forceinline vfloat8 ceil (const vfloat8& a) { return _mm256_round_ps(a, _MM_FROUND_TO_POS_INF ); }
|
||||
__forceinline vfloat8 trunc(const vfloat8& a) { return _mm256_round_ps(a, _MM_FROUND_TO_ZERO ); }
|
||||
__forceinline vfloat8 round(const vfloat8& a) { return _mm256_round_ps(a, _MM_FROUND_TO_NEAREST_INT); }
|
||||
#else
|
||||
__forceinline vfloat8 floor(const vfloat8& a) { return _mm256_floor_ps(a); }
|
||||
__forceinline vfloat8 ceil (const vfloat8& a) { return _mm256_ceil_ps(a); }
|
||||
#endif
|
||||
|
||||
|
||||
__forceinline vfloat8 frac (const vfloat8& a) { return a-floor(a); }
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -579,11 +501,9 @@ __forceinline vfloat8 abs(const vfloat8& a) {
|
|||
return _mm256_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
|
||||
}
|
||||
|
||||
#if !defined(__aarch64__)
|
||||
template<> __forceinline vfloat8 shuffle<0, 0, 2, 2>(const vfloat8& v) { return _mm256_moveldup_ps(v); }
|
||||
template<> __forceinline vfloat8 shuffle<1, 1, 3, 3>(const vfloat8& v) { return _mm256_movehdup_ps(v); }
|
||||
template<> __forceinline vfloat8 shuffle<0, 1, 0, 1>(const vfloat8& v) { return _mm256_castpd_ps(_mm256_movedup_pd(_mm256_castps_pd(v))); }
|
||||
#endif
|
||||
|
||||
__forceinline vfloat8 broadcast(const float* ptr) { return _mm256_broadcast_ss(ptr); }
|
||||
template<size_t i> __forceinline vfloat8 insert4(const vfloat8& a, const vfloat4& b) { return _mm256_insertf128_ps(a, b, i); }
|
||||
|
@ -592,10 +512,8 @@ __forceinline vfloat8 abs(const vfloat8& a) {
|
|||
|
||||
__forceinline float toScalar(const vfloat8& v) { return _mm_cvtss_f32(_mm256_castps256_ps128(v)); }
|
||||
|
||||
__forceinline vfloat8 assign(const vfloat4& a) { return _mm256_castps128_ps256(a); }
|
||||
|
||||
#if defined (__AVX2__) && !defined(__aarch64__)
|
||||
__forceinline vfloat8 permute(const vfloat8& a, const __m256i& index) {
|
||||
#if defined (__AVX2__)
|
||||
static __forceinline vfloat8 permute(const vfloat8& a, const __m256i& index) {
|
||||
return _mm256_permutevar8x32_ps(a, index);
|
||||
}
|
||||
#endif
|
||||
|
@ -618,14 +536,6 @@ __forceinline vfloat8 abs(const vfloat8& a) {
|
|||
}
|
||||
#endif
|
||||
|
||||
__forceinline vfloat4 broadcast4f(const vfloat8& a, const size_t k) {
|
||||
return vfloat4::broadcast(&a[k]);
|
||||
}
|
||||
|
||||
__forceinline vfloat8 broadcast8f(const vfloat8& a, const size_t k) {
|
||||
return vfloat8::broadcast(&a[k]);
|
||||
}
|
||||
|
||||
#if defined(__AVX512VL__)
|
||||
static __forceinline vfloat8 shift_right_1(const vfloat8& x) {
|
||||
return align_shift_right<1>(zero,x);
|
||||
|
@ -699,7 +609,7 @@ __forceinline vfloat8 abs(const vfloat8& a) {
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Reductions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
#if !defined(__aarch64__)
|
||||
|
||||
__forceinline vfloat8 vreduce_min2(const vfloat8& v) { return min(v,shuffle<1,0,3,2>(v)); }
|
||||
__forceinline vfloat8 vreduce_min4(const vfloat8& v) { vfloat8 v1 = vreduce_min2(v); return min(v1,shuffle<2,3,0,1>(v1)); }
|
||||
__forceinline vfloat8 vreduce_min (const vfloat8& v) { vfloat8 v1 = vreduce_min4(v); return min(v1,shuffle4<1,0>(v1)); }
|
||||
|
@ -715,14 +625,7 @@ __forceinline vfloat8 abs(const vfloat8& a) {
|
|||
__forceinline float reduce_min(const vfloat8& v) { return toScalar(vreduce_min(v)); }
|
||||
__forceinline float reduce_max(const vfloat8& v) { return toScalar(vreduce_max(v)); }
|
||||
__forceinline float reduce_add(const vfloat8& v) { return toScalar(vreduce_add(v)); }
|
||||
#else
|
||||
__forceinline float reduce_min(const vfloat8& v) { return vminvq_f32(_mm_min_ps(v.v.lo,v.v.hi)); }
|
||||
__forceinline float reduce_max(const vfloat8& v) { return vmaxvq_f32(_mm_max_ps(v.v.lo,v.v.hi)); }
|
||||
__forceinline vfloat8 vreduce_min(const vfloat8& v) { return vfloat8(reduce_min(v)); }
|
||||
__forceinline vfloat8 vreduce_max(const vfloat8& v) { return vfloat8(reduce_max(v)); }
|
||||
__forceinline float reduce_add(const vfloat8& v) { return vaddvq_f32(_mm_add_ps(v.v.lo,v.v.hi)); }
|
||||
|
||||
#endif
|
||||
__forceinline size_t select_min(const vboolf8& valid, const vfloat8& v)
|
||||
{
|
||||
const vfloat8 a = select(valid,v,vfloat8(pos_inf));
|
||||
|
@ -845,3 +748,11 @@ __forceinline vfloat8 abs(const vfloat8& a) {
|
|||
return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ", " << a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
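Editor's note (not part of the commit): where no AVX2 gather instruction is available, the gather/scatter fallbacks above address each lane as a byte offset, casting the base pointer to char* and adding scale*index[i] before reinterpreting the location as float. A small scalar illustration, assuming only the standard library (the helper name gather4 is hypothetical, not Embree API):

#include <array>
#include <cstdio>

// Hypothetical helper: scalar 4-lane gather using byte offsets, mirroring
// *(float*)(((char*)ptr)+scale*index[i]) in the fallback paths above.
template <int Scale = 4>
std::array<float, 4> gather4(const float* ptr, const std::array<int, 4>& index) {
    std::array<float, 4> r{};
    const char* base = reinterpret_cast<const char*>(ptr);  // byte addressing
    for (int i = 0; i < 4; ++i)
        r[i] = *reinterpret_cast<const float*>(base + Scale * index[i]);
    return r;
}

int main() {
    float data[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    const auto g = gather4<4>(data, {7, 2, 5, 0});  // Scale=4 selects whole floats
    std::printf("%g %g %g %g\n", g[0], g[1], g[2], g[3]);  // prints: 7 2 5 0
    return 0;
}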
56 thirdparty/embree/common/simd/vint16_avx512.h vendored
|
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 16-wide AVX-512 integer type */
|
||||
|
@ -90,10 +98,10 @@ namespace embree
|
|||
|
||||
static __forceinline vint16 load (const void* addr) { return _mm512_load_si512((int*)addr); }
|
||||
|
||||
static __forceinline vint16 load(const uint8_t* ptr) { return _mm512_cvtepu8_epi32(_mm_load_si128((__m128i*)ptr)); }
|
||||
static __forceinline vint16 load(const unsigned char* ptr) { return _mm512_cvtepu8_epi32(_mm_load_si128((__m128i*)ptr)); }
|
||||
static __forceinline vint16 load(const unsigned short* ptr) { return _mm512_cvtepu16_epi32(_mm256_load_si256((__m256i*)ptr)); }
|
||||
|
||||
static __forceinline vint16 loadu(const uint8_t* ptr) { return _mm512_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)); }
|
||||
static __forceinline vint16 loadu(const unsigned char* ptr) { return _mm512_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)); }
|
||||
static __forceinline vint16 loadu(const unsigned short* ptr) { return _mm512_cvtepu16_epi32(_mm256_loadu_si256((__m256i*)ptr)); }
|
||||
|
||||
static __forceinline vint16 loadu(const void* addr) { return _mm512_loadu_si512(addr); }
|
||||
|
@ -109,20 +117,6 @@ namespace embree
|
|||
|
||||
static __forceinline void store_nt(void* __restrict__ ptr, const vint16& a) { _mm512_stream_si512((__m512i*)ptr,a); }
|
||||
|
||||
/* pass by value to avoid compiler generating inefficient code */
|
||||
static __forceinline void storeu_compact(const vboolf16 mask, void* addr, vint16 reg) {
|
||||
_mm512_mask_compressstoreu_epi32(addr,mask,reg);
|
||||
}
|
||||
|
||||
static __forceinline void storeu_compact_single(const vboolf16 mask, void* addr, vint16 reg) {
|
||||
//_mm512_mask_compressstoreu_epi32(addr,mask,reg);
|
||||
*(float*)addr = mm512_cvtss_f32(_mm512_mask_compress_ps(_mm512_castsi512_ps(reg),mask,_mm512_castsi512_ps(reg)));
|
||||
}
|
||||
|
||||
static __forceinline vint16 compact64bit(const vboolf16& mask, vint16 &v) {
|
||||
return _mm512_mask_compress_epi64(v,mask,v);
|
||||
}
|
||||
|
||||
static __forceinline vint16 compact(const vboolf16& mask, vint16 &v) {
|
||||
return _mm512_mask_compress_epi32(v,mask,v);
|
||||
}
|
||||
|
@ -160,10 +154,6 @@ namespace embree
|
|||
_mm512_mask_i32scatter_epi32((int*)ptr,mask,index,v,scale);
|
||||
}
|
||||
|
||||
static __forceinline vint16 broadcast64bit(size_t v) {
|
||||
return _mm512_set1_epi64(v);
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Array Access
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -313,18 +303,6 @@ namespace embree
|
|||
return _mm512_mask_or_epi32(f,m,t,t);
|
||||
}
|
||||
|
||||
__forceinline void xchg(const vboolf16& m, vint16& a, vint16& b) {
|
||||
const vint16 c = a; a = select(m,b,a); b = select(m,c,b);
|
||||
}
|
||||
|
||||
__forceinline vboolf16 test(const vboolf16& m, const vint16& a, const vint16& b) {
|
||||
return _mm512_mask_test_epi32_mask(m,a,b);
|
||||
}
|
||||
|
||||
__forceinline vboolf16 test(const vint16& a, const vint16& b) {
|
||||
return _mm512_test_epi32_mask(a,b);
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Movement/Shifting/Shuffling Functions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -363,10 +341,6 @@ namespace embree
|
|||
|
||||
template<int i> __forceinline vint16 insert4(const vint16& a, const vint4& b) { return _mm512_inserti32x4(a, b, i); }
|
||||
|
||||
__forceinline size_t extract64bit(const vint16& v) {
|
||||
return _mm_cvtsi128_si64(_mm512_castsi512_si128(v));
|
||||
}
|
||||
|
||||
template<int N, int i>
|
||||
vint<N> extractN(const vint16& v);
|
||||
|
||||
|
@ -488,3 +462,11 @@ namespace embree
|
|||
return cout;
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
243 thirdparty/embree/common/simd/vint4_sse2.h vendored
|
@ -1,10 +1,18 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "../math/math.h"
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 4-wide SSE integer type */
|
||||
|
@ -23,7 +31,7 @@ namespace embree
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Constructors, Assignment & Cast Operators
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
__forceinline vint() {}
|
||||
__forceinline vint(const vint4& a) { v = a.v; }
|
||||
__forceinline vint4& operator =(const vint4& a) { v = a.v; return *this; }
|
||||
|
@ -68,7 +76,7 @@ namespace embree
|
|||
|
||||
static __forceinline void store (void* ptr, const vint4& v) { _mm_store_si128((__m128i*)ptr,v); }
|
||||
static __forceinline void storeu(void* ptr, const vint4& v) { _mm_storeu_si128((__m128i*)ptr,v); }
|
||||
|
||||
|
||||
#if defined(__AVX512VL__)
|
||||
|
||||
static __forceinline vint4 compact(const vboolf4& mask, vint4 &v) {
|
||||
|
@ -98,81 +106,61 @@ namespace embree
|
|||
#endif
|
||||
|
||||
|
||||
#if defined(__aarch64__)
|
||||
static __forceinline vint4 load(const uint8_t* ptr) {
|
||||
return _mm_load4epu8_epi32(((__m128i*)ptr));
|
||||
}
|
||||
static __forceinline vint4 loadu(const uint8_t* ptr) {
|
||||
return _mm_load4epu8_epi32(((__m128i*)ptr));
|
||||
}
|
||||
#elif defined(__SSE4_1__)
|
||||
static __forceinline vint4 load(const uint8_t* ptr) {
|
||||
#if defined(__SSE4_1__)
|
||||
static __forceinline vint4 load(const unsigned char* ptr) {
|
||||
return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
|
||||
}
|
||||
|
||||
static __forceinline vint4 loadu(const uint8_t* ptr) {
|
||||
static __forceinline vint4 loadu(const unsigned char* ptr) {
|
||||
return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
|
||||
}
|
||||
#else
|
||||
|
||||
static __forceinline vint4 load(const uint8_t* ptr) {
|
||||
static __forceinline vint4 load(const unsigned char* ptr) {
|
||||
return vint4(ptr[0],ptr[1],ptr[2],ptr[3]);
|
||||
}
|
||||
}
|
||||
|
||||
static __forceinline vint4 loadu(const uint8_t* ptr) {
|
||||
static __forceinline vint4 loadu(const unsigned char* ptr) {
|
||||
return vint4(ptr[0],ptr[1],ptr[2],ptr[3]);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
static __forceinline vint4 load(const unsigned short* ptr) {
|
||||
#if defined(__aarch64__)
|
||||
return __m128i(vmovl_u16(vld1_u16(ptr)));
|
||||
#elif defined (__SSE4_1__)
|
||||
#if defined (__SSE4_1__)
|
||||
return _mm_cvtepu16_epi32(_mm_loadu_si128((__m128i*)ptr));
|
||||
#else
|
||||
return vint4(ptr[0],ptr[1],ptr[2],ptr[3]);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
static __forceinline void store(uint8_t* ptr, const vint4& v) {
|
||||
#if defined(__aarch64__)
|
||||
int32x4_t x = v;
|
||||
uint16x4_t y = vqmovn_u32(uint32x4_t(x));
|
||||
uint8x8_t z = vqmovn_u16(vcombine_u16(y, y));
|
||||
vst1_lane_u32((uint32_t *)ptr,uint32x2_t(z), 0);
|
||||
#elif defined(__SSE4_1__)
|
||||
static __forceinline void store(unsigned char* ptr, const vint4& v) {
|
||||
#if defined(__SSE4_1__)
|
||||
__m128i x = v;
|
||||
x = _mm_packus_epi32(x, x);
|
||||
x = _mm_packus_epi16(x, x);
|
||||
*(int*)ptr = _mm_cvtsi128_si32(x);
|
||||
#else
|
||||
for (size_t i=0;i<4;i++)
|
||||
ptr[i] = (uint8_t)v[i];
|
||||
ptr[i] = (unsigned char)v[i];
|
||||
#endif
|
||||
}
|
||||
|
||||
static __forceinline void store(unsigned short* ptr, const vint4& v) {
|
||||
#if defined(__aarch64__)
|
||||
uint32x4_t x = uint32x4_t(v.v);
|
||||
uint16x4_t y = vqmovn_u32(x);
|
||||
vst1_u16(ptr, y);
|
||||
#else
|
||||
for (size_t i=0;i<4;i++)
|
||||
ptr[i] = (unsigned short)v[i];
|
||||
#endif
|
||||
}
|
||||
|
||||
static __forceinline vint4 load_nt(void* ptr) {
|
||||
#if defined(__aarch64__) || defined(__SSE4_1__)
|
||||
return _mm_stream_load_si128((__m128i*)ptr);
|
||||
#if defined(__SSE4_1__)
|
||||
return _mm_stream_load_si128((__m128i*)ptr);
|
||||
#else
|
||||
return _mm_load_si128((__m128i*)ptr);
|
||||
return _mm_load_si128((__m128i*)ptr);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
static __forceinline void store_nt(void* ptr, const vint4& v) {
|
||||
#if !defined(__aarch64__) && defined(__SSE4_1__)
|
||||
#if defined(__SSE4_1__)
|
||||
_mm_stream_ps((float*)ptr, _mm_castsi128_ps(v));
|
||||
#else
|
||||
_mm_store_si128((__m128i*)ptr,v);
|
||||
|
@ -181,14 +169,14 @@ namespace embree
|
|||
|
||||
template<int scale = 4>
|
||||
static __forceinline vint4 gather(const int* ptr, const vint4& index) {
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
return _mm_i32gather_epi32(ptr, index, scale);
|
||||
#else
|
||||
return vint4(
|
||||
*(int*)(((int8_t*)ptr)+scale*index[0]),
|
||||
*(int*)(((int8_t*)ptr)+scale*index[1]),
|
||||
*(int*)(((int8_t*)ptr)+scale*index[2]),
|
||||
*(int*)(((int8_t*)ptr)+scale*index[3]));
|
||||
*(int*)(((char*)ptr)+scale*index[0]),
|
||||
*(int*)(((char*)ptr)+scale*index[1]),
|
||||
*(int*)(((char*)ptr)+scale*index[2]),
|
||||
*(int*)(((char*)ptr)+scale*index[3]));
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -197,13 +185,13 @@ namespace embree
|
|||
vint4 r = zero;
|
||||
#if defined(__AVX512VL__)
|
||||
return _mm_mmask_i32gather_epi32(r, mask, index, ptr, scale);
|
||||
#elif defined(__AVX2__) && !defined(__aarch64__)
|
||||
#elif defined(__AVX2__)
|
||||
return _mm_mask_i32gather_epi32(r, ptr, index, mask, scale);
|
||||
#else
|
||||
if (likely(mask[0])) r[0] = *(int*)(((int8_t*)ptr)+scale*index[0]);
|
||||
if (likely(mask[1])) r[1] = *(int*)(((int8_t*)ptr)+scale*index[1]);
|
||||
if (likely(mask[2])) r[2] = *(int*)(((int8_t*)ptr)+scale*index[2]);
|
||||
if (likely(mask[3])) r[3] = *(int*)(((int8_t*)ptr)+scale*index[3]);
|
||||
if (likely(mask[0])) r[0] = *(int*)(((char*)ptr)+scale*index[0]);
|
||||
if (likely(mask[1])) r[1] = *(int*)(((char*)ptr)+scale*index[1]);
|
||||
if (likely(mask[2])) r[2] = *(int*)(((char*)ptr)+scale*index[2]);
|
||||
if (likely(mask[3])) r[3] = *(int*)(((char*)ptr)+scale*index[3]);
|
||||
return r;
|
||||
#endif
|
||||
}
|
||||
|
@ -214,10 +202,10 @@ namespace embree
|
|||
#if defined(__AVX512VL__)
|
||||
_mm_i32scatter_epi32((int*)ptr, index, v, scale);
|
||||
#else
|
||||
*(int*)(((int8_t*)ptr)+scale*index[0]) = v[0];
|
||||
*(int*)(((int8_t*)ptr)+scale*index[1]) = v[1];
|
||||
*(int*)(((int8_t*)ptr)+scale*index[2]) = v[2];
|
||||
*(int*)(((int8_t*)ptr)+scale*index[3]) = v[3];
|
||||
*(int*)(((char*)ptr)+scale*index[0]) = v[0];
|
||||
*(int*)(((char*)ptr)+scale*index[1]) = v[1];
|
||||
*(int*)(((char*)ptr)+scale*index[2]) = v[2];
|
||||
*(int*)(((char*)ptr)+scale*index[3]) = v[3];
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -227,14 +215,14 @@ namespace embree
|
|||
#if defined(__AVX512VL__)
|
||||
_mm_mask_i32scatter_epi32((int*)ptr, mask, index, v, scale);
|
||||
#else
|
||||
if (likely(mask[0])) *(int*)(((int8_t*)ptr)+scale*index[0]) = v[0];
|
||||
if (likely(mask[1])) *(int*)(((int8_t*)ptr)+scale*index[1]) = v[1];
|
||||
if (likely(mask[2])) *(int*)(((int8_t*)ptr)+scale*index[2]) = v[2];
|
||||
if (likely(mask[3])) *(int*)(((int8_t*)ptr)+scale*index[3]) = v[3];
|
||||
if (likely(mask[0])) *(int*)(((char*)ptr)+scale*index[0]) = v[0];
|
||||
if (likely(mask[1])) *(int*)(((char*)ptr)+scale*index[1]) = v[1];
|
||||
if (likely(mask[2])) *(int*)(((char*)ptr)+scale*index[2]) = v[2];
|
||||
if (likely(mask[3])) *(int*)(((char*)ptr)+scale*index[3]) = v[3];
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(__x86_64__) || defined(__aarch64__)
|
||||
#if defined(__x86_64__)
|
||||
static __forceinline vint4 broadcast64(long long a) { return _mm_set1_epi64x(a); }
|
||||
#endif
|
||||
|
||||
|
@@ -248,12 +236,10 @@ namespace embree
friend __forceinline vint4 select(const vboolf4& m, const vint4& t, const vint4& f) {
#if defined(__AVX512VL__)
return _mm_mask_blend_epi32(m, (__m128i)f, (__m128i)t);
#elif defined(__aarch64__)
return _mm_castps_si128(_mm_blendv_ps((__m128)f.v,(__m128) t.v, (__m128)m.v));
#elif defined(__SSE4_1__)
return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m));
return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m));
#else
return _mm_or_si128(_mm_and_si128(m, t), _mm_andnot_si128(m, f));
return _mm_or_si128(_mm_and_si128(m, t), _mm_andnot_si128(m, f));
#endif
}
};
@ -270,9 +256,7 @@ namespace embree
|
|||
|
||||
__forceinline vint4 operator +(const vint4& a) { return a; }
|
||||
__forceinline vint4 operator -(const vint4& a) { return _mm_sub_epi32(_mm_setzero_si128(), a); }
|
||||
#if defined(__aarch64__)
|
||||
__forceinline vint4 abs(const vint4& a) { return vabsq_s32(a.v); }
|
||||
#elif defined(__SSSE3__)
|
||||
#if defined(__SSSE3__)
|
||||
__forceinline vint4 abs(const vint4& a) { return _mm_abs_epi32(a); }
|
||||
#endif
|
||||
|
||||
|
@ -288,7 +272,7 @@ namespace embree
|
|||
__forceinline vint4 operator -(const vint4& a, int b) { return a - vint4(b); }
|
||||
__forceinline vint4 operator -(int a, const vint4& b) { return vint4(a) - b; }
|
||||
|
||||
#if (defined(__aarch64__)) || defined(__SSE4_1__)
|
||||
#if defined(__SSE4_1__)
|
||||
__forceinline vint4 operator *(const vint4& a, const vint4& b) { return _mm_mullo_epi32(a, b); }
|
||||
#else
|
||||
__forceinline vint4 operator *(const vint4& a, const vint4& b) { return vint4(a[0]*b[0],a[1]*b[1],a[2]*b[2],a[3]*b[3]); }
|
||||
|
@ -308,34 +292,34 @@ namespace embree
|
|||
__forceinline vint4 operator ^(const vint4& a, int b) { return a ^ vint4(b); }
|
||||
__forceinline vint4 operator ^(int a, const vint4& b) { return vint4(a) ^ b; }
|
||||
|
||||
__forceinline vint4 operator <<(const vint4& a, const int n) { return _mm_slli_epi32(a, n); }
|
||||
__forceinline vint4 operator >>(const vint4& a, const int n) { return _mm_srai_epi32(a, n); }
|
||||
__forceinline vint4 operator <<(const vint4& a, int n) { return _mm_slli_epi32(a, n); }
|
||||
__forceinline vint4 operator >>(const vint4& a, int n) { return _mm_srai_epi32(a, n); }
|
||||
|
||||
__forceinline vint4 sll (const vint4& a, int b) { return _mm_slli_epi32(a, b); }
|
||||
__forceinline vint4 sra (const vint4& a, int b) { return _mm_srai_epi32(a, b); }
|
||||
__forceinline vint4 srl (const vint4& a, int b) { return _mm_srli_epi32(a, b); }
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Assignment Operators
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
__forceinline vint4& operator +=(vint4& a, const vint4& b) { return a = a + b; }
|
||||
__forceinline vint4& operator +=(vint4& a, int b) { return a = a + b; }
|
||||
|
||||
|
||||
__forceinline vint4& operator -=(vint4& a, const vint4& b) { return a = a - b; }
|
||||
__forceinline vint4& operator -=(vint4& a, int b) { return a = a - b; }
|
||||
|
||||
#if (defined(__aarch64__)) || defined(__SSE4_1__)
|
||||
#if defined(__SSE4_1__)
|
||||
__forceinline vint4& operator *=(vint4& a, const vint4& b) { return a = a * b; }
|
||||
__forceinline vint4& operator *=(vint4& a, int b) { return a = a * b; }
|
||||
#endif
|
||||
|
||||
|
||||
__forceinline vint4& operator &=(vint4& a, const vint4& b) { return a = a & b; }
|
||||
__forceinline vint4& operator &=(vint4& a, int b) { return a = a & b; }
|
||||
|
||||
|
||||
__forceinline vint4& operator |=(vint4& a, const vint4& b) { return a = a | b; }
|
||||
__forceinline vint4& operator |=(vint4& a, int b) { return a = a | b; }
|
||||
|
||||
|
||||
__forceinline vint4& operator <<=(vint4& a, int b) { return a = a << b; }
|
||||
__forceinline vint4& operator >>=(vint4& a, int b) { return a = a >> b; }
|
||||
|
||||
|
@ -402,15 +386,14 @@ namespace embree
|
|||
|
||||
template<int mask>
|
||||
__forceinline vint4 select(const vint4& t, const vint4& f) {
|
||||
#if defined(__SSE4_1__)
|
||||
#if defined(__SSE4_1__)
|
||||
return _mm_castps_si128(_mm_blend_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), mask));
|
||||
#else
|
||||
return select(vboolf4(mask), t, f);
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
#if defined(__aarch64__) || defined(__SSE4_1__)
|
||||
#if defined(__SSE4_1__)
|
||||
__forceinline vint4 min(const vint4& a, const vint4& b) { return _mm_min_epi32(a, b); }
|
||||
__forceinline vint4 max(const vint4& a, const vint4& b) { return _mm_max_epi32(a, b); }
|
||||
|
||||
|
@ -434,25 +417,16 @@ namespace embree
|
|||
__forceinline vint4 unpacklo(const vint4& a, const vint4& b) { return _mm_castps_si128(_mm_unpacklo_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }
|
||||
__forceinline vint4 unpackhi(const vint4& a, const vint4& b) { return _mm_castps_si128(_mm_unpackhi_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }
|
||||
|
||||
#if defined(__aarch64__)
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vint4 shuffle(const vint4& v) {
|
||||
return vreinterpretq_s32_u8(vqtbl1q_u8( (uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3)));
|
||||
}
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vint4 shuffle(const vint4& a, const vint4& b) {
|
||||
return vreinterpretq_s32_u8(vqtbl2q_u8( (uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3)));
|
||||
}
|
||||
#else
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vint4 shuffle(const vint4& v) {
|
||||
return _mm_shuffle_epi32(v, _MM_SHUFFLE(i3, i2, i1, i0));
|
||||
}
|
||||
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vint4 shuffle(const vint4& a, const vint4& b) {
|
||||
return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0)));
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(__SSE3__)
|
||||
template<> __forceinline vint4 shuffle<0, 0, 2, 2>(const vint4& v) { return _mm_castps_si128(_mm_moveldup_ps(_mm_castsi128_ps(v))); }
|
||||
template<> __forceinline vint4 shuffle<1, 1, 3, 3>(const vint4& v) { return _mm_castps_si128(_mm_movehdup_ps(_mm_castsi128_ps(v))); }
|
||||
|
@ -464,10 +438,7 @@ namespace embree
|
|||
return shuffle<i,i,i,i>(v);
|
||||
}
|
||||
|
||||
#if defined(__aarch64__)
|
||||
template<int src> __forceinline int extract(const vint4& b);
|
||||
template<int dst> __forceinline vint4 insert(const vint4& a, const int b);
|
||||
#elif defined(__SSE4_1__)
|
||||
#if defined(__SSE4_1__)
|
||||
template<int src> __forceinline int extract(const vint4& b) { return _mm_extract_epi32(b, src); }
|
||||
template<int dst> __forceinline vint4 insert(const vint4& a, const int b) { return _mm_insert_epi32(a, b, dst); }
|
||||
#else
|
||||
|
@@ -475,69 +446,19 @@ namespace embree
template<int dst> __forceinline vint4 insert(const vint4& a, int b) { vint4 c = a; c[dst&3] = b; return c; }
#endif

#if defined(__aarch64__)
template<> __forceinline int extract<0>(const vint4& b) {
return b.v[0];
}
template<> __forceinline int extract<1>(const vint4& b) {
return b.v[1];
}
template<> __forceinline int extract<2>(const vint4& b) {
return b.v[2];
}
template<> __forceinline int extract<3>(const vint4& b) {
return b.v[3];
}
template<> __forceinline vint4 insert<0>(const vint4& a, int b)
{
vint4 c = a;
c[0] = b;
return c;
}
template<> __forceinline vint4 insert<1>(const vint4& a, int b)
{
vint4 c = a;
c[1] = b;
return c;
}
template<> __forceinline vint4 insert<2>(const vint4& a, int b)
{
vint4 c = a;
c[2] = b;
return c;
}
template<> __forceinline vint4 insert<3>(const vint4& a, int b)
{
vint4 c = a;
c[3] = b;
return c;
}

__forceinline int toScalar(const vint4& v) {
return v[0];
}

__forceinline size_t toSizeT(const vint4& v) {
uint64x2_t x = uint64x2_t(v.v);
return x[0];
}
#else

template<> __forceinline int extract<0>(const vint4& b) { return _mm_cvtsi128_si32(b); }

__forceinline int toScalar(const vint4& v) { return _mm_cvtsi128_si32(v); }

__forceinline size_t toSizeT(const vint4& v) {
__forceinline size_t toSizeT(const vint4& v) {
#if defined(__WIN32__) && !defined(__X86_64__) // win32 workaround
return toScalar(v);
#elif defined(__ARM_NEON)
// FIXME(LTE): Do we need a swap(i.e. use lane 1)?
return vgetq_lane_u64(*(reinterpret_cast<const uint64x2_t *>(&v)), 0);
#else
return _mm_cvtsi128_si64(v);
return _mm_cvtsi128_si64(v);
#endif
}
#endif


#if defined(__AVX512VL__)

__forceinline vint4 permute(const vint4 &a, const vint4 &index) {
@ -546,25 +467,15 @@ namespace embree
|
|||
|
||||
template<int i>
|
||||
__forceinline vint4 align_shift_right(const vint4& a, const vint4& b) {
|
||||
return _mm_alignr_epi32(a, b, i);
|
||||
}
|
||||
return _mm_alignr_epi32(a, b, i);
|
||||
}
|
||||
#endif
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Reductions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#if defined(__aarch64__) || defined(__SSE4_1__)
|
||||
|
||||
#if defined(__aarch64__)
|
||||
__forceinline vint4 vreduce_min(const vint4& v) { int h = vminvq_s32(v); return vdupq_n_s32(h); }
|
||||
__forceinline vint4 vreduce_max(const vint4& v) { int h = vmaxvq_s32(v); return vdupq_n_s32(h); }
|
||||
__forceinline vint4 vreduce_add(const vint4& v) { int h = vaddvq_s32(v); return vdupq_n_s32(h); }
|
||||
|
||||
__forceinline int reduce_min(const vint4& v) { return vminvq_s32(v); }
|
||||
__forceinline int reduce_max(const vint4& v) { return vmaxvq_s32(v); }
|
||||
__forceinline int reduce_add(const vint4& v) { return vaddvq_s32(v); }
|
||||
#else
|
||||
#if defined(__SSE4_1__)
|
||||
__forceinline vint4 vreduce_min(const vint4& v) { vint4 h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); }
|
||||
__forceinline vint4 vreduce_max(const vint4& v) { vint4 h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); }
|
||||
__forceinline vint4 vreduce_add(const vint4& v) { vint4 h = shuffle<1,0,3,2>(v) + v ; return shuffle<2,3,0,1>(h) + h ; }
|
||||
|
@ -572,8 +483,7 @@ namespace embree
|
|||
__forceinline int reduce_min(const vint4& v) { return toScalar(vreduce_min(v)); }
|
||||
__forceinline int reduce_max(const vint4& v) { return toScalar(vreduce_max(v)); }
|
||||
__forceinline int reduce_add(const vint4& v) { return toScalar(vreduce_add(v)); }
|
||||
#endif
|
||||
|
||||
|
||||
__forceinline size_t select_min(const vint4& v) { return bsf(movemask(v == vreduce_min(v))); }
|
||||
__forceinline size_t select_max(const vint4& v) { return bsf(movemask(v == vreduce_max(v))); }
|
||||
|
||||
|
@ -592,7 +502,7 @@ namespace embree
|
|||
/// Sorting networks
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#if (defined(__aarch64__)) || defined(__SSE4_1__)
|
||||
#if defined(__SSE4_1__)
|
||||
|
||||
__forceinline vint4 usort_ascending(const vint4& v)
|
||||
{
|
||||
|
@@ -679,3 +589,10 @@ namespace embree
}
}

#undef vboolf
#undef vboold
#undef vint
#undef vuint
#undef vllong
#undef vfloat
#undef vdouble
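A minimal standalone sketch (not part of the diff) of the SSE4.1 byte-store sequence used in store(unsigned char*, const vint4&) above; the helper name and the memcpy-based scalar write are ours, assuming an SSE4.1 target:

#include <smmintrin.h>
#include <cstdint>
#include <cstring>

// Pack four signed 32-bit lanes into four bytes with unsigned saturation,
// then write the low 4 bytes of the register to memory.
static inline void store4_u8(uint8_t* dst, __m128i v) {
  v = _mm_packus_epi32(v, v);               // 32-bit -> 16-bit, unsigned saturation (SSE4.1)
  v = _mm_packus_epi16(v, v);               // 16-bit -> 8-bit, unsigned saturation
  const int packed = _mm_cvtsi128_si32(v);  // low 32 bits now hold the 4 bytes
  std::memcpy(dst, &packed, 4);
}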
thirdparty/embree/common/simd/vint8_avx.h (vendored, 98 changes)
@@ -1,8 +1,16 @@
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 8-wide AVX integer type */
|
||||
|
@ -71,25 +79,20 @@ namespace embree
|
|||
static __forceinline void store (void* ptr, const vint8& f) { _mm256_store_ps((float*)ptr,_mm256_castsi256_ps(f)); }
|
||||
static __forceinline void storeu(void* ptr, const vint8& f) { _mm256_storeu_ps((float*)ptr,_mm256_castsi256_ps(f)); }
|
||||
|
||||
#if !defined(__aarch64__)
|
||||
static __forceinline void store (const vboolf8& mask, void* ptr, const vint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,_mm256_castsi256_ps(f)); }
|
||||
static __forceinline void storeu(const vboolf8& mask, void* ptr, const vint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,_mm256_castsi256_ps(f)); }
|
||||
#else
|
||||
static __forceinline void store (const vboolf8& mask, void* ptr, const vint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask.v,_mm256_castsi256_ps(f)); }
|
||||
static __forceinline void storeu(const vboolf8& mask, void* ptr, const vint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask.v,_mm256_castsi256_ps(f)); }
|
||||
#endif
|
||||
|
||||
static __forceinline void store_nt(void* ptr, const vint8& v) {
|
||||
_mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(v));
|
||||
}
|
||||
|
||||
static __forceinline vint8 load(const uint8_t* ptr) {
|
||||
static __forceinline vint8 load(const unsigned char* ptr) {
|
||||
vint4 il = vint4::load(ptr+0);
|
||||
vint4 ih = vint4::load(ptr+4);
|
||||
return vint8(il,ih);
|
||||
}
|
||||
|
||||
static __forceinline vint8 loadu(const uint8_t* ptr) {
|
||||
static __forceinline vint8 loadu(const unsigned char* ptr) {
|
||||
vint4 il = vint4::loadu(ptr+0);
|
||||
vint4 ih = vint4::loadu(ptr+4);
|
||||
return vint8(il,ih);
|
||||
|
@ -107,7 +110,7 @@ namespace embree
|
|||
return vint8(il,ih);
|
||||
}
|
||||
|
||||
static __forceinline void store(uint8_t* ptr, const vint8& i) {
|
||||
static __forceinline void store(unsigned char* ptr, const vint8& i) {
|
||||
vint4 il(i.vl);
|
||||
vint4 ih(i.vh);
|
||||
vint4::store(ptr + 0,il);
|
||||
|
@ -122,54 +125,54 @@ namespace embree
|
|||
template<int scale = 4>
|
||||
static __forceinline vint8 gather(const int* ptr, const vint8& index) {
|
||||
return vint8(
|
||||
*(int*)(((int8_t*)ptr)+scale*index[0]),
|
||||
*(int*)(((int8_t*)ptr)+scale*index[1]),
|
||||
*(int*)(((int8_t*)ptr)+scale*index[2]),
|
||||
*(int*)(((int8_t*)ptr)+scale*index[3]),
|
||||
*(int*)(((int8_t*)ptr)+scale*index[4]),
|
||||
*(int*)(((int8_t*)ptr)+scale*index[5]),
|
||||
*(int*)(((int8_t*)ptr)+scale*index[6]),
|
||||
*(int*)(((int8_t*)ptr)+scale*index[7]));
|
||||
*(int*)(((char*)ptr)+scale*index[0]),
|
||||
*(int*)(((char*)ptr)+scale*index[1]),
|
||||
*(int*)(((char*)ptr)+scale*index[2]),
|
||||
*(int*)(((char*)ptr)+scale*index[3]),
|
||||
*(int*)(((char*)ptr)+scale*index[4]),
|
||||
*(int*)(((char*)ptr)+scale*index[5]),
|
||||
*(int*)(((char*)ptr)+scale*index[6]),
|
||||
*(int*)(((char*)ptr)+scale*index[7]));
|
||||
}
|
||||
|
||||
template<int scale = 4>
|
||||
static __forceinline vint8 gather(const vboolf8& mask, const int* ptr, const vint8& index) {
|
||||
vint8 r = zero;
|
||||
if (likely(mask[0])) r[0] = *(int*)(((int8_t*)ptr)+scale*index[0]);
|
||||
if (likely(mask[1])) r[1] = *(int*)(((int8_t*)ptr)+scale*index[1]);
|
||||
if (likely(mask[2])) r[2] = *(int*)(((int8_t*)ptr)+scale*index[2]);
|
||||
if (likely(mask[3])) r[3] = *(int*)(((int8_t*)ptr)+scale*index[3]);
|
||||
if (likely(mask[4])) r[4] = *(int*)(((int8_t*)ptr)+scale*index[4]);
|
||||
if (likely(mask[5])) r[5] = *(int*)(((int8_t*)ptr)+scale*index[5]);
|
||||
if (likely(mask[6])) r[6] = *(int*)(((int8_t*)ptr)+scale*index[6]);
|
||||
if (likely(mask[7])) r[7] = *(int*)(((int8_t*)ptr)+scale*index[7]);
|
||||
if (likely(mask[0])) r[0] = *(int*)(((char*)ptr)+scale*index[0]);
|
||||
if (likely(mask[1])) r[1] = *(int*)(((char*)ptr)+scale*index[1]);
|
||||
if (likely(mask[2])) r[2] = *(int*)(((char*)ptr)+scale*index[2]);
|
||||
if (likely(mask[3])) r[3] = *(int*)(((char*)ptr)+scale*index[3]);
|
||||
if (likely(mask[4])) r[4] = *(int*)(((char*)ptr)+scale*index[4]);
|
||||
if (likely(mask[5])) r[5] = *(int*)(((char*)ptr)+scale*index[5]);
|
||||
if (likely(mask[6])) r[6] = *(int*)(((char*)ptr)+scale*index[6]);
|
||||
if (likely(mask[7])) r[7] = *(int*)(((char*)ptr)+scale*index[7]);
|
||||
return r;
|
||||
}
|
||||
|
||||
template<int scale = 4>
|
||||
static __forceinline void scatter(void* ptr, const vint8& ofs, const vint8& v)
|
||||
{
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
|
||||
*(int*)(((char*)ptr)+scale*ofs[0]) = v[0];
|
||||
*(int*)(((char*)ptr)+scale*ofs[1]) = v[1];
|
||||
*(int*)(((char*)ptr)+scale*ofs[2]) = v[2];
|
||||
*(int*)(((char*)ptr)+scale*ofs[3]) = v[3];
|
||||
*(int*)(((char*)ptr)+scale*ofs[4]) = v[4];
|
||||
*(int*)(((char*)ptr)+scale*ofs[5]) = v[5];
|
||||
*(int*)(((char*)ptr)+scale*ofs[6]) = v[6];
|
||||
*(int*)(((char*)ptr)+scale*ofs[7]) = v[7];
|
||||
}
|
||||
|
||||
template<int scale = 4>
|
||||
static __forceinline void scatter(const vboolf8& mask, void* ptr, const vint8& ofs, const vint8& v)
|
||||
{
|
||||
if (likely(mask[0])) *(int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
|
||||
if (likely(mask[1])) *(int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
|
||||
if (likely(mask[2])) *(int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
|
||||
if (likely(mask[3])) *(int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
|
||||
if (likely(mask[4])) *(int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
|
||||
if (likely(mask[5])) *(int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
|
||||
if (likely(mask[6])) *(int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
|
||||
if (likely(mask[7])) *(int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
|
||||
if (likely(mask[0])) *(int*)(((char*)ptr)+scale*ofs[0]) = v[0];
|
||||
if (likely(mask[1])) *(int*)(((char*)ptr)+scale*ofs[1]) = v[1];
|
||||
if (likely(mask[2])) *(int*)(((char*)ptr)+scale*ofs[2]) = v[2];
|
||||
if (likely(mask[3])) *(int*)(((char*)ptr)+scale*ofs[3]) = v[3];
|
||||
if (likely(mask[4])) *(int*)(((char*)ptr)+scale*ofs[4]) = v[4];
|
||||
if (likely(mask[5])) *(int*)(((char*)ptr)+scale*ofs[5]) = v[5];
|
||||
if (likely(mask[6])) *(int*)(((char*)ptr)+scale*ofs[6]) = v[6];
|
||||
if (likely(mask[7])) *(int*)(((char*)ptr)+scale*ofs[7]) = v[7];
|
||||
}
|
||||
|
||||
|
||||
|
@ -315,11 +318,6 @@ namespace embree
|
|||
return _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(f), _mm256_castsi256_ps(t), m));
|
||||
}
|
||||
|
||||
__forceinline vint8 notand(const vboolf8& m, const vint8& f) {
|
||||
return _mm256_castps_si256(_mm256_andnot_ps(m, _mm256_castsi256_ps(f)));
|
||||
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Movement/Shifting/Shuffling Functions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -462,3 +460,11 @@ namespace embree
|
|||
return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ", " << a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
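A minimal standalone sketch (not part of the diff) of the scalar gather fallback pattern above: scale*index[i] is treated as a byte offset from ptr, matching the scale argument of _mm_i32gather_epi32; the helper name is ours:

#include <cstring>

// Gather four ints from byte offsets scale*index[i] relative to ptr.
template <int scale = 4>
static inline void gather4(int out[4], const int* ptr, const int index[4]) {
  const char* base = reinterpret_cast<const char*>(ptr);
  for (int i = 0; i < 4; ++i)
    std::memcpy(&out[i], base + scale * index[i], sizeof(int));
}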
thirdparty/embree/common/simd/vint8_avx2.h (vendored, 68 changes)
@@ -1,8 +1,16 @@
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 8-wide AVX integer type */
|
||||
|
@ -67,8 +75,8 @@ namespace embree
|
|||
/// Loads and Stores
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static __forceinline vint8 load(const uint8_t* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
|
||||
static __forceinline vint8 loadu(const uint8_t* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
|
||||
static __forceinline vint8 load(const unsigned char* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
|
||||
static __forceinline vint8 loadu(const unsigned char* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
|
||||
static __forceinline vint8 load(const unsigned short* ptr) { return _mm256_cvtepu16_epi32(_mm_load_si128((__m128i*)ptr)); }
|
||||
static __forceinline vint8 loadu(const unsigned short* ptr) { return _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i*)ptr)); }
|
||||
|
||||
|
@ -108,7 +116,7 @@ namespace embree
|
|||
_mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(v));
|
||||
}
|
||||
|
||||
static __forceinline void store(uint8_t* ptr, const vint8& i)
|
||||
static __forceinline void store(unsigned char* ptr, const vint8& i)
|
||||
{
|
||||
for (size_t j=0; j<8; j++)
|
||||
ptr[j] = i[j];
|
||||
|
@ -140,14 +148,14 @@ namespace embree
|
|||
#if defined(__AVX512VL__)
|
||||
_mm256_i32scatter_epi32((int*)ptr, ofs, v, scale);
|
||||
#else
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
|
||||
*(int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
|
||||
*(int*)(((char*)ptr)+scale*ofs[0]) = v[0];
|
||||
*(int*)(((char*)ptr)+scale*ofs[1]) = v[1];
|
||||
*(int*)(((char*)ptr)+scale*ofs[2]) = v[2];
|
||||
*(int*)(((char*)ptr)+scale*ofs[3]) = v[3];
|
||||
*(int*)(((char*)ptr)+scale*ofs[4]) = v[4];
|
||||
*(int*)(((char*)ptr)+scale*ofs[5]) = v[5];
|
||||
*(int*)(((char*)ptr)+scale*ofs[6]) = v[6];
|
||||
*(int*)(((char*)ptr)+scale*ofs[7]) = v[7];
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -157,14 +165,14 @@ namespace embree
|
|||
#if defined(__AVX512VL__)
|
||||
_mm256_mask_i32scatter_epi32((int*)ptr, mask, ofs, v, scale);
|
||||
#else
|
||||
if (likely(mask[0])) *(int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
|
||||
if (likely(mask[1])) *(int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
|
||||
if (likely(mask[2])) *(int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
|
||||
if (likely(mask[3])) *(int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
|
||||
if (likely(mask[4])) *(int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
|
||||
if (likely(mask[5])) *(int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
|
||||
if (likely(mask[6])) *(int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
|
||||
if (likely(mask[7])) *(int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
|
||||
if (likely(mask[0])) *(int*)(((char*)ptr)+scale*ofs[0]) = v[0];
|
||||
if (likely(mask[1])) *(int*)(((char*)ptr)+scale*ofs[1]) = v[1];
|
||||
if (likely(mask[2])) *(int*)(((char*)ptr)+scale*ofs[2]) = v[2];
|
||||
if (likely(mask[3])) *(int*)(((char*)ptr)+scale*ofs[3]) = v[3];
|
||||
if (likely(mask[4])) *(int*)(((char*)ptr)+scale*ofs[4]) = v[4];
|
||||
if (likely(mask[5])) *(int*)(((char*)ptr)+scale*ofs[5]) = v[5];
|
||||
if (likely(mask[6])) *(int*)(((char*)ptr)+scale*ofs[6]) = v[6];
|
||||
if (likely(mask[7])) *(int*)(((char*)ptr)+scale*ofs[7]) = v[7];
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -385,9 +393,7 @@ namespace embree
|
|||
|
||||
__forceinline int toScalar(const vint8& v) { return _mm_cvtsi128_si32(_mm256_castsi256_si128(v)); }
|
||||
|
||||
#if !defined(__aarch64__)
|
||||
|
||||
__forceinline vint8 permute(const vint8& v, const __m256i& index) {
|
||||
__forceinline vint8 permute(const vint8& v, const __m256i& index) {
|
||||
return _mm256_permutevar8x32_epi32(v, index);
|
||||
}
|
||||
|
||||
|
@ -395,8 +401,6 @@ __forceinline vint8 permute(const vint8& v, const __m256i& index) {
|
|||
return _mm256_castps_si256(_mm256_permutevar_ps(_mm256_castsi256_ps(v), index));
|
||||
}
|
||||
|
||||
|
||||
|
||||
template<int i>
|
||||
static __forceinline vint8 align_shift_right(const vint8& a, const vint8& b) {
|
||||
#if defined(__AVX512VL__)
|
||||
|
@ -406,9 +410,6 @@ __forceinline vint8 permute(const vint8& v, const __m256i& index) {
|
|||
#endif
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Reductions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -435,9 +436,6 @@ __forceinline vint8 permute(const vint8& v, const __m256i& index) {
|
|||
__forceinline size_t select_min(const vboolf8& valid, const vint8& v) { const vint8 a = select(valid,v,vint8(pos_inf)); return bsf(movemask(valid & (a == vreduce_min(a)))); }
|
||||
__forceinline size_t select_max(const vboolf8& valid, const vint8& v) { const vint8 a = select(valid,v,vint8(neg_inf)); return bsf(movemask(valid & (a == vreduce_max(a)))); }
|
||||
|
||||
|
||||
__forceinline vint8 assign(const vint4& a) { return _mm256_castsi128_si256(a); }
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Sorting networks
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -510,3 +508,11 @@ __forceinline vint8 permute(const vint8& v, const __m256i& index) {
|
|||
return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ", " << a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
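A minimal standalone sketch (not part of the diff) of the masked-scatter fallback shape used above when AVX-512VL scatter instructions are unavailable; plain arrays stand in for the vector types:

#include <cstring>

// Write only the lanes whose mask entry is set, each at byte offset scale*ofs[i].
template <int scale = 4>
static inline void mask_scatter8(void* ptr, const bool mask[8], const int ofs[8], const int v[8]) {
  char* base = static_cast<char*>(ptr);
  for (int i = 0; i < 8; ++i)
    if (mask[i])
      std::memcpy(base + scale * ofs[i], &v[i], sizeof(int));
}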
thirdparty/embree/common/simd/vllong4_avx2.h (vendored, 40 changes)
@@ -1,8 +1,16 @@
// Copyright 2009-2020 Intel Corporation
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

#define vboolf vboolf_impl
#define vboold vboold_impl
#define vint vint_impl
#define vuint vuint_impl
#define vllong vllong_impl
#define vfloat vfloat_impl
#define vdouble vdouble_impl

namespace embree
{
/* 4-wide AVX2 64-bit long long type */

@@ -95,16 +103,6 @@ namespace embree
#endif
}

static __forceinline vllong4 broadcast64bit(size_t v) {
return _mm256_set1_epi64x(v);
}

static __forceinline size_t extract64bit(const vllong4& v)
{
return _mm_cvtsi128_si64(_mm256_castsi256_si128(v));
}


////////////////////////////////////////////////////////////////////////////////
/// Array Access
////////////////////////////////////////////////////////////////////////////////

@@ -276,18 +274,6 @@ namespace embree
__forceinline vboold4 le(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a <= b); }
#endif

__forceinline void xchg(const vboold4& m, vllong4& a, vllong4& b) {
const vllong4 c = a; a = select(m,b,a); b = select(m,c,b);
}

__forceinline vboold4 test(const vllong4& a, const vllong4& b) {
#if defined(__AVX512VL__)
return _mm256_test_epi64_mask(a,b);
#else
return _mm256_testz_si256(a,b);
#endif
}

////////////////////////////////////////////////////////////////////////////////
// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////

@@ -356,3 +342,11 @@ namespace embree
return cout;
}
}

#undef vboolf
#undef vboold
#undef vint
#undef vuint
#undef vllong
#undef vfloat
#undef vdouble
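A plain-array sketch (not part of the diff) of the masked-swap idea behind the xchg helper above, which exchanges only the lanes selected by the mask; the helper name is ours:

// Swap a[i] and b[i] wherever the mask lane is set, leave other lanes untouched.
static inline void masked_swap4(const bool m[4], long long a[4], long long b[4]) {
  for (int i = 0; i < 4; ++i)
    if (m[i]) { const long long c = a[i]; a[i] = b[i]; b[i] = c; }
}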
thirdparty/embree/common/simd/vllong8_avx512.h (vendored, 59 changes)
@@ -1,8 +1,16 @@
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 8-wide AVX-512 64-bit long long type */
|
||||
|
@ -78,7 +86,7 @@ namespace embree
|
|||
return _mm512_load_si512(addr);
|
||||
}
|
||||
|
||||
static __forceinline vllong8 load(const uint8_t* ptr) {
|
||||
static __forceinline vllong8 load(const unsigned char* ptr) {
|
||||
return _mm512_cvtepu8_epi64(*(__m128i*)ptr);
|
||||
}
|
||||
|
||||
|
@ -98,19 +106,6 @@ namespace embree
|
|||
_mm512_mask_store_epi64(addr,mask,v2);
|
||||
}
|
||||
|
||||
/* pass by value to avoid compiler generating inefficient code */
|
||||
static __forceinline void storeu_compact(const vboold8 mask, void* addr, const vllong8& reg) {
|
||||
_mm512_mask_compressstoreu_epi64(addr,mask,reg);
|
||||
}
|
||||
|
||||
static __forceinline vllong8 compact64bit(const vboold8& mask, vllong8& v) {
|
||||
return _mm512_mask_compress_epi64(v,mask,v);
|
||||
}
|
||||
|
||||
static __forceinline vllong8 compact64bit(const vboold8& mask, vllong8& dest, const vllong8& source) {
|
||||
return _mm512_mask_compress_epi64(dest,mask,source);
|
||||
}
|
||||
|
||||
static __forceinline vllong8 compact(const vboold8& mask, vllong8& v) {
|
||||
return _mm512_mask_compress_epi64(v,mask,v);
|
||||
}
|
||||
|
@ -123,16 +118,6 @@ namespace embree
|
|||
return _mm512_mask_expand_epi64(b,mask,a);
|
||||
}
|
||||
|
||||
static __forceinline vllong8 broadcast64bit(size_t v) {
|
||||
return _mm512_set1_epi64(v);
|
||||
}
|
||||
|
||||
static __forceinline size_t extract64bit(const vllong8& v)
|
||||
{
|
||||
return _mm_cvtsi128_si64(_mm512_castsi512_si128(v));
|
||||
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Array Access
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -271,18 +256,6 @@ namespace embree
|
|||
return _mm512_mask_or_epi64(f,m,t,t);
|
||||
}
|
||||
|
||||
__forceinline void xchg(const vboold8& m, vllong8& a, vllong8& b) {
|
||||
const vllong8 c = a; a = select(m,b,a); b = select(m,c,b);
|
||||
}
|
||||
|
||||
__forceinline vboold8 test(const vboold8& m, const vllong8& a, const vllong8& b) {
|
||||
return _mm512_mask_test_epi64_mask(m,a,b);
|
||||
}
|
||||
|
||||
__forceinline vboold8 test(const vllong8& a, const vllong8& b) {
|
||||
return _mm512_test_epi64_mask(a,b);
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Movement/Shifting/Shuffling Functions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -321,10 +294,6 @@ namespace embree
|
|||
return _mm_cvtsi128_si64(_mm512_castsi512_si128(v));
|
||||
}
|
||||
|
||||
__forceinline vllong8 zeroExtend32Bit(const __m512i& a) {
|
||||
return _mm512_cvtepu32_epi64(_mm512_castsi512_si256(a));
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Reductions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -379,3 +348,11 @@ namespace embree
|
|||
return cout;
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
thirdparty/embree/common/simd/vuint16_avx512.h (vendored, 55 changes)
@@ -1,8 +1,16 @@
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 16-wide AVX-512 unsigned integer type */
|
||||
|
@ -83,7 +91,7 @@ namespace embree
|
|||
return _mm512_loadu_si512(addr);
|
||||
}
|
||||
|
||||
static __forceinline vuint16 loadu(const uint8_t* ptr) { return _mm512_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)); }
|
||||
static __forceinline vuint16 loadu(const unsigned char* ptr) { return _mm512_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)); }
|
||||
static __forceinline vuint16 loadu(const unsigned short* ptr) { return _mm512_cvtepu16_epi32(_mm256_loadu_si256((__m256i*)ptr)); }
|
||||
|
||||
static __forceinline vuint16 load(const vuint16* addr) {
|
||||
|
@ -113,20 +121,6 @@ namespace embree
|
|||
_mm512_mask_store_epi32(addr,mask,v2);
|
||||
}
|
||||
|
||||
/* pass by value to avoid compiler generating inefficient code */
|
||||
static __forceinline void storeu_compact(const vboolf16 mask, void* addr, const vuint16 reg) {
|
||||
_mm512_mask_compressstoreu_epi32(addr,mask,reg);
|
||||
}
|
||||
|
||||
static __forceinline void storeu_compact_single(const vboolf16 mask, void* addr, vuint16 reg) {
|
||||
//_mm512_mask_compressstoreu_epi32(addr,mask,reg);
|
||||
*(float*)addr = mm512_cvtss_f32(_mm512_mask_compress_ps(_mm512_castsi512_ps(reg),mask,_mm512_castsi512_ps(reg)));
|
||||
}
|
||||
|
||||
static __forceinline vuint16 compact64bit(const vboolf16& mask, vuint16& v) {
|
||||
return _mm512_mask_compress_epi64(v,mask,v);
|
||||
}
|
||||
|
||||
static __forceinline vuint16 compact(const vboolf16& mask, vuint16& v) {
|
||||
return _mm512_mask_compress_epi32(v,mask,v);
|
||||
}
|
||||
|
@ -164,15 +158,6 @@ namespace embree
|
|||
_mm512_mask_i32scatter_epi32((int*)ptr,mask,index,v,scale);
|
||||
}
|
||||
|
||||
static __forceinline vuint16 broadcast64bit(size_t v) {
|
||||
return _mm512_set1_epi64(v);
|
||||
}
|
||||
|
||||
static __forceinline size_t extract64bit(const vuint16& v)
|
||||
{
|
||||
return _mm_cvtsi128_si64(_mm512_castsi512_si128(v));
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Array Access
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -315,18 +300,6 @@ namespace embree
|
|||
return _mm512_mask_or_epi32(f,m,t,t);
|
||||
}
|
||||
|
||||
__forceinline void xchg(const vboolf16& m, vuint16& a, vuint16& b) {
|
||||
const vuint16 c = a; a = select(m,b,a); b = select(m,c,b);
|
||||
}
|
||||
|
||||
__forceinline vboolf16 test(const vboolf16& m, const vuint16& a, const vuint16& b) {
|
||||
return _mm512_mask_test_epi32_mask(m,a,b);
|
||||
}
|
||||
|
||||
__forceinline vboolf16 test(const vuint16& a, const vuint16& b) {
|
||||
return _mm512_test_epi32_mask(a,b);
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Movement/Shifting/Shuffling Functions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -441,3 +414,11 @@ namespace embree
|
|||
return cout;
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
thirdparty/embree/common/simd/vuint4_sse2.h (vendored, 149 changes)
@@ -1,10 +1,18 @@
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "../math/math.h"
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 4-wide SSE integer type */
|
||||
|
@@ -87,64 +95,27 @@ namespace embree
static __forceinline void storeu(const vboolf4& mask, void* ptr, const vuint4& i) { storeu(ptr,select(mask,i,loadu(ptr))); }
#endif

#if defined(__aarch64__)
static __forceinline vuint4 load(const uint8_t* ptr) {
return _mm_load4epu8_epi32(((__m128i*)ptr));
}
static __forceinline vuint4 loadu(const uint8_t* ptr) {
return _mm_load4epu8_epi32(((__m128i*)ptr));
}
#elif defined(__SSE4_1__)
static __forceinline vuint4 load(const uint8_t* ptr) {
#if defined(__SSE4_1__)
static __forceinline vuint4 load(const unsigned char* ptr) {
return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
}

static __forceinline vuint4 loadu(const uint8_t* ptr) {
static __forceinline vuint4 loadu(const unsigned char* ptr) {
return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
}

#endif

static __forceinline vuint4 load(const unsigned short* ptr) {
#if defined(__aarch64__)
return _mm_load4epu16_epi32(((__m128i*)ptr));
#elif defined (__SSE4_1__)
#if defined (__SSE4_1__)
return _mm_cvtepu16_epi32(_mm_loadu_si128((__m128i*)ptr));
#else
return vuint4(ptr[0],ptr[1],ptr[2],ptr[3]);
#endif
}

static __forceinline void store_uint8(uint8_t* ptr, const vuint4& v) {
#if defined(__aarch64__)
uint32x4_t x = uint32x4_t(v.v);
uint16x4_t y = vqmovn_u32(x);
uint8x8_t z = vqmovn_u16(vcombine_u16(y, y));
vst1_lane_u32((uint32_t *)ptr, uint32x2_t(z), 0);
#elif defined(__SSE4_1__)
__m128i x = v;
x = _mm_packus_epi32(x, x);
x = _mm_packus_epi16(x, x);
*(unsigned*)ptr = _mm_cvtsi128_si32(x);
#else
for (size_t i=0;i<4;i++)
ptr[i] = (uint8_t)v[i];
#endif
}

static __forceinline void store_uint8(unsigned short* ptr, const vuint4& v) {
#if defined(__aarch64__)
uint32x4_t x = (uint32x4_t)v.v;
uint16x4_t y = vqmovn_u32(x);
vst1_u16(ptr, y);
#else
for (size_t i=0;i<4;i++)
ptr[i] = (unsigned short)v[i];
#endif
}

static __forceinline vuint4 load_nt(void* ptr) {
#if (defined(__aarch64__)) || defined(__SSE4_1__)
#if defined(__SSE4_1__)
return _mm_stream_load_si128((__m128i*)ptr);
#else
return _mm_load_si128((__m128i*)ptr);
@ -152,8 +123,8 @@ namespace embree
|
|||
}
|
||||
|
||||
static __forceinline void store_nt(void* ptr, const vuint4& v) {
|
||||
#if !defined(__aarch64__) && defined(__SSE4_1__)
|
||||
_mm_stream_ps((float*)ptr, _mm_castsi128_ps(v));
|
||||
#if defined(__SSE4_1__)
|
||||
_mm_stream_ps((float*)ptr,_mm_castsi128_ps(v));
|
||||
#else
|
||||
_mm_store_si128((__m128i*)ptr,v);
|
||||
#endif
|
||||
|
@ -161,14 +132,14 @@ namespace embree
|
|||
|
||||
template<int scale = 4>
|
||||
static __forceinline vuint4 gather(const unsigned int* ptr, const vint4& index) {
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
return _mm_i32gather_epi32((const int*)ptr, index, scale);
|
||||
#else
|
||||
return vuint4(
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*index[0]),
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*index[1]),
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*index[2]),
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*index[3]));
|
||||
*(unsigned int*)(((char*)ptr)+scale*index[0]),
|
||||
*(unsigned int*)(((char*)ptr)+scale*index[1]),
|
||||
*(unsigned int*)(((char*)ptr)+scale*index[2]),
|
||||
*(unsigned int*)(((char*)ptr)+scale*index[3]));
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -177,13 +148,13 @@ namespace embree
|
|||
vuint4 r = zero;
|
||||
#if defined(__AVX512VL__)
|
||||
return _mm_mmask_i32gather_epi32(r, mask, index, ptr, scale);
|
||||
#elif defined(__AVX2__) && !defined(__aarch64__)
|
||||
#elif defined(__AVX2__)
|
||||
return _mm_mask_i32gather_epi32(r, (const int*)ptr, index, mask, scale);
|
||||
#else
|
||||
if (likely(mask[0])) r[0] = *(unsigned int*)(((int8_t*)ptr)+scale*index[0]);
|
||||
if (likely(mask[1])) r[1] = *(unsigned int*)(((int8_t*)ptr)+scale*index[1]);
|
||||
if (likely(mask[2])) r[2] = *(unsigned int*)(((int8_t*)ptr)+scale*index[2]);
|
||||
if (likely(mask[3])) r[3] = *(unsigned int*)(((int8_t*)ptr)+scale*index[3]);
|
||||
if (likely(mask[0])) r[0] = *(unsigned int*)(((char*)ptr)+scale*index[0]);
|
||||
if (likely(mask[1])) r[1] = *(unsigned int*)(((char*)ptr)+scale*index[1]);
|
||||
if (likely(mask[2])) r[2] = *(unsigned int*)(((char*)ptr)+scale*index[2]);
|
||||
if (likely(mask[3])) r[3] = *(unsigned int*)(((char*)ptr)+scale*index[3]);
|
||||
return r;
|
||||
#endif
|
||||
}
|
||||
|
@ -373,25 +344,16 @@ namespace embree
|
|||
__forceinline vuint4 unpacklo(const vuint4& a, const vuint4& b) { return _mm_castps_si128(_mm_unpacklo_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }
|
||||
__forceinline vuint4 unpackhi(const vuint4& a, const vuint4& b) { return _mm_castps_si128(_mm_unpackhi_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }
|
||||
|
||||
#if defined(__aarch64__)
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vuint4 shuffle(const vuint4& v) {
|
||||
return vreinterpretq_s32_u8(vqtbl1q_u8( (uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3)));
|
||||
}
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vuint4 shuffle(const vuint4& a, const vuint4& b) {
|
||||
return vreinterpretq_s32_u8(vqtbl2q_u8( (uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3)));
|
||||
}
|
||||
#else
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vuint4 shuffle(const vuint4& v) {
|
||||
return _mm_shuffle_epi32(v, _MM_SHUFFLE(i3, i2, i1, i0));
|
||||
}
|
||||
|
||||
template<int i0, int i1, int i2, int i3>
|
||||
__forceinline vuint4 shuffle(const vuint4& a, const vuint4& b) {
|
||||
return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0)));
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(__SSE3__)
|
||||
template<> __forceinline vuint4 shuffle<0, 0, 2, 2>(const vuint4& v) { return _mm_castps_si128(_mm_moveldup_ps(_mm_castsi128_ps(v))); }
|
||||
template<> __forceinline vuint4 shuffle<1, 1, 3, 3>(const vuint4& v) { return _mm_castps_si128(_mm_movehdup_ps(_mm_castsi128_ps(v))); }
|
||||
|
@ -403,10 +365,7 @@ namespace embree
|
|||
return shuffle<i,i,i,i>(v);
|
||||
}
|
||||
|
||||
#if defined(__aarch64__)
|
||||
template<int src> __forceinline unsigned int extract(const vuint4& b);
|
||||
template<int dst> __forceinline vuint4 insert(const vuint4& a, const unsigned b);
|
||||
#elif defined(__SSE4_1__)
|
||||
#if defined(__SSE4_1__)
|
||||
template<int src> __forceinline unsigned int extract(const vuint4& b) { return _mm_extract_epi32(b, src); }
|
||||
template<int dst> __forceinline vuint4 insert(const vuint4& a, const unsigned b) { return _mm_insert_epi32(a, b, dst); }
|
||||
#else
|
||||
|
@ -414,50 +373,11 @@ namespace embree
|
|||
template<int dst> __forceinline vuint4 insert(const vuint4& a, const unsigned b) { vuint4 c = a; c[dst&3] = b; return c; }
|
||||
#endif
|
||||
|
||||
#if defined(__aarch64__)
|
||||
template<> __forceinline unsigned int extract<0>(const vuint4& b) {
|
||||
return b[0];
|
||||
}
|
||||
template<> __forceinline unsigned int extract<1>(const vuint4& b) {
|
||||
return b[1];
|
||||
}
|
||||
template<> __forceinline unsigned int extract<2>(const vuint4& b) {
|
||||
return b[2];
|
||||
}
|
||||
template<> __forceinline unsigned int extract<3>(const vuint4& b) {
|
||||
return b[3];
|
||||
}
|
||||
|
||||
template<> __forceinline vuint4 insert<0>(const vuint4& a, unsigned b){
|
||||
vuint4 c = a;
|
||||
c[0] = b;
|
||||
return c;
|
||||
}
|
||||
template<> __forceinline vuint4 insert<1>(const vuint4& a, unsigned b){
|
||||
vuint4 c = a;
|
||||
c[1] = b;
|
||||
return c;
|
||||
}
|
||||
template<> __forceinline vuint4 insert<2>(const vuint4& a, unsigned b){
|
||||
vuint4 c = a;
|
||||
c[2] = b;
|
||||
return c;
|
||||
}
|
||||
template<> __forceinline vuint4 insert<3>(const vuint4& a, unsigned b){
|
||||
vuint4 c = a;
|
||||
c[3] = b;
|
||||
return c;
|
||||
}
|
||||
|
||||
__forceinline unsigned int toScalar(const vuint4& v) {
|
||||
return v[0];
|
||||
}
|
||||
#else
|
||||
|
||||
template<> __forceinline unsigned int extract<0>(const vuint4& b) { return _mm_cvtsi128_si32(b); }
|
||||
|
||||
__forceinline unsigned int toScalar(const vuint4& v) { return _mm_cvtsi128_si32(v); }
|
||||
#endif
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Reductions
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -497,3 +417,10 @@ namespace embree
|
|||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
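A minimal standalone sketch (not part of the diff) of how the shuffle<i0,i1,i2,i3> templates above map to _MM_SHUFFLE, which packs four 2-bit lane selectors in reversed argument order; the helper name is ours, assuming SSE2:

#include <emmintrin.h>

// Lane k of the result takes lane ik of v, e.g. shuffle4<0,0,2,2> duplicates the even lanes.
template <int i0, int i1, int i2, int i3>
static inline __m128i shuffle4(__m128i v) {
  return _mm_shuffle_epi32(v, _MM_SHUFFLE(i3, i2, i1, i0));
}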
thirdparty/embree/common/simd/vuint8_avx.h (vendored, 99 changes)
@@ -1,8 +1,16 @@
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 8-wide AVX integer type */
|
||||
|
@ -69,24 +77,20 @@ namespace embree
|
|||
static __forceinline void store (void* ptr, const vuint8& f) { _mm256_store_ps((float*)ptr,_mm256_castsi256_ps(f)); }
|
||||
static __forceinline void storeu(void* ptr, const vuint8& f) { _mm256_storeu_ps((float*)ptr,_mm256_castsi256_ps(f)); }
|
||||
|
||||
#if !defined(__aarch64__)
|
||||
static __forceinline void store (const vboolf8& mask, void* ptr, const vuint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,_mm256_castsi256_ps(f)); }
|
||||
static __forceinline void storeu(const vboolf8& mask, void* ptr, const vuint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,_mm256_castsi256_ps(f)); }
|
||||
#else
|
||||
static __forceinline void store (const vboolf8& mask, void* ptr, const vuint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask.v,_mm256_castsi256_ps(f)); }
|
||||
static __forceinline void storeu(const vboolf8& mask, void* ptr, const vuint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask.v,_mm256_castsi256_ps(f)); }
|
||||
#endif
|
||||
|
||||
static __forceinline void store_nt(void* ptr, const vuint8& v) {
|
||||
_mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(v));
|
||||
}
|
||||
|
||||
static __forceinline vuint8 load(const uint8_t* ptr) {
|
||||
static __forceinline vuint8 load(const unsigned char* ptr) {
|
||||
vuint4 il = vuint4::load(ptr+0);
|
||||
vuint4 ih = vuint4::load(ptr+4);
|
||||
return vuint8(il,ih);
|
||||
}
|
||||
|
||||
static __forceinline vuint8 loadu(const uint8_t* ptr) {
|
||||
static __forceinline vuint8 loadu(const unsigned char* ptr) {
|
||||
vuint4 il = vuint4::loadu(ptr+0);
|
||||
vuint4 ih = vuint4::loadu(ptr+4);
|
||||
return vuint8(il,ih);
|
||||
|
@ -104,7 +108,7 @@ namespace embree
|
|||
return vuint8(il,ih);
|
||||
}
|
||||
|
||||
static __forceinline void store(uint8_t* ptr, const vuint8& i) {
|
||||
static __forceinline void store(unsigned char* ptr, const vuint8& i) {
|
||||
vuint4 il(i.vl);
|
||||
vuint4 ih(i.vh);
|
||||
vuint4::store(ptr + 0,il);
|
||||
|
@ -119,54 +123,54 @@ namespace embree
|
|||
template<int scale = 4>
|
||||
static __forceinline vuint8 gather(const unsigned int* ptr, const vint8& index) {
|
||||
return vuint8(
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*index[0]),
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*index[1]),
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*index[2]),
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*index[3]),
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*index[4]),
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*index[5]),
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*index[6]),
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*index[7]));
|
||||
*(unsigned int*)(((char*)ptr)+scale*index[0]),
|
||||
*(unsigned int*)(((char*)ptr)+scale*index[1]),
|
||||
*(unsigned int*)(((char*)ptr)+scale*index[2]),
|
||||
*(unsigned int*)(((char*)ptr)+scale*index[3]),
|
||||
*(unsigned int*)(((char*)ptr)+scale*index[4]),
|
||||
*(unsigned int*)(((char*)ptr)+scale*index[5]),
|
||||
*(unsigned int*)(((char*)ptr)+scale*index[6]),
|
||||
*(unsigned int*)(((char*)ptr)+scale*index[7]));
|
||||
}
|
||||
|
||||
template<int scale = 4>
|
||||
static __forceinline vuint8 gather(const vboolf8& mask, const unsigned int* ptr, const vint8& index) {
|
||||
vuint8 r = zero;
|
||||
if (likely(mask[0])) r[0] = *(unsigned int*)(((int8_t*)ptr)+scale*index[0]);
|
||||
if (likely(mask[1])) r[1] = *(unsigned int*)(((int8_t*)ptr)+scale*index[1]);
|
||||
if (likely(mask[2])) r[2] = *(unsigned int*)(((int8_t*)ptr)+scale*index[2]);
|
||||
if (likely(mask[3])) r[3] = *(unsigned int*)(((int8_t*)ptr)+scale*index[3]);
|
||||
if (likely(mask[4])) r[4] = *(unsigned int*)(((int8_t*)ptr)+scale*index[4]);
|
||||
if (likely(mask[5])) r[5] = *(unsigned int*)(((int8_t*)ptr)+scale*index[5]);
|
||||
if (likely(mask[6])) r[6] = *(unsigned int*)(((int8_t*)ptr)+scale*index[6]);
|
||||
if (likely(mask[7])) r[7] = *(unsigned int*)(((int8_t*)ptr)+scale*index[7]);
|
||||
if (likely(mask[0])) r[0] = *(unsigned int*)(((char*)ptr)+scale*index[0]);
|
||||
if (likely(mask[1])) r[1] = *(unsigned int*)(((char*)ptr)+scale*index[1]);
|
||||
if (likely(mask[2])) r[2] = *(unsigned int*)(((char*)ptr)+scale*index[2]);
|
||||
if (likely(mask[3])) r[3] = *(unsigned int*)(((char*)ptr)+scale*index[3]);
|
||||
if (likely(mask[4])) r[4] = *(unsigned int*)(((char*)ptr)+scale*index[4]);
|
||||
if (likely(mask[5])) r[5] = *(unsigned int*)(((char*)ptr)+scale*index[5]);
|
||||
if (likely(mask[6])) r[6] = *(unsigned int*)(((char*)ptr)+scale*index[6]);
|
||||
if (likely(mask[7])) r[7] = *(unsigned int*)(((char*)ptr)+scale*index[7]);
|
||||
return r;
|
||||
}
|
||||
|
||||
template<int scale = 4>
|
||||
static __forceinline void scatter(void* ptr, const vint8& ofs, const vuint8& v)
|
||||
{
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
|
||||
*(unsigned int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[0]) = v[0];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[1]) = v[1];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[2]) = v[2];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[3]) = v[3];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[4]) = v[4];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[5]) = v[5];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[6]) = v[6];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[7]) = v[7];
|
||||
}
|
||||
|
||||
template<int scale = 4>
|
||||
static __forceinline void scatter(const vboolf8& mask, void* ptr, const vint8& ofs, const vuint8& v)
|
||||
{
|
||||
if (likely(mask[0])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
|
||||
if (likely(mask[1])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
|
||||
if (likely(mask[2])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
|
||||
if (likely(mask[3])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
|
||||
if (likely(mask[4])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
|
||||
if (likely(mask[5])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
|
||||
if (likely(mask[6])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
|
||||
if (likely(mask[7])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
|
||||
if (likely(mask[0])) *(unsigned int*)(((char*)ptr)+scale*ofs[0]) = v[0];
|
||||
if (likely(mask[1])) *(unsigned int*)(((char*)ptr)+scale*ofs[1]) = v[1];
|
||||
if (likely(mask[2])) *(unsigned int*)(((char*)ptr)+scale*ofs[2]) = v[2];
|
||||
if (likely(mask[3])) *(unsigned int*)(((char*)ptr)+scale*ofs[3]) = v[3];
|
||||
if (likely(mask[4])) *(unsigned int*)(((char*)ptr)+scale*ofs[4]) = v[4];
|
||||
if (likely(mask[5])) *(unsigned int*)(((char*)ptr)+scale*ofs[5]) = v[5];
|
||||
if (likely(mask[6])) *(unsigned int*)(((char*)ptr)+scale*ofs[6]) = v[6];
|
||||
if (likely(mask[7])) *(unsigned int*)(((char*)ptr)+scale*ofs[7]) = v[7];
|
||||
}
|
||||
|
||||
|
||||
|
@ -294,10 +298,6 @@ namespace embree
|
|||
return _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(f), _mm256_castsi256_ps(t), m));
|
||||
}
|
||||
|
||||
__forceinline vuint8 notand(const vboolf8& m, const vuint8& f) {
|
||||
return _mm256_castps_si256(_mm256_andnot_ps(m, _mm256_castsi256_ps(f)));
|
||||
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Movement/Shifting/Shuffling Functions
|
||||
|
@ -335,7 +335,6 @@ namespace embree
|
|||
template<> __forceinline vuint8 shuffle<1, 1, 3, 3>(const vuint8& v) { return _mm256_castps_si256(_mm256_movehdup_ps(_mm256_castsi256_ps(v))); }
|
||||
template<> __forceinline vuint8 shuffle<0, 1, 0, 1>(const vuint8& v) { return _mm256_castps_si256(_mm256_castpd_ps(_mm256_movedup_pd(_mm256_castps_pd(_mm256_castsi256_ps(v))))); }
|
||||
|
||||
__forceinline vuint8 broadcast(const unsigned int* ptr) { return _mm256_castps_si256(_mm256_broadcast_ss((const float*)ptr)); }
|
||||
template<int i> __forceinline vuint8 insert4(const vuint8& a, const vuint4& b) { return _mm256_insertf128_si256(a, b, i); }
|
||||
template<int i> __forceinline vuint4 extract4(const vuint8& a) { return _mm256_extractf128_si256(a, i); }
|
||||
template<> __forceinline vuint4 extract4<0>(const vuint8& a) { return _mm256_castsi256_si128(a); }
|
||||
|
@ -377,3 +376,11 @@ namespace embree
|
|||
return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ", " << a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
67
thirdparty/embree/common/simd/vuint8_avx2.h
vendored

|
@ -1,8 +1,16 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
||||
#define vboolf vboolf_impl
|
||||
#define vboold vboold_impl
|
||||
#define vint vint_impl
|
||||
#define vuint vuint_impl
|
||||
#define vllong vllong_impl
|
||||
#define vfloat vfloat_impl
|
||||
#define vdouble vdouble_impl
|
||||
|
||||
namespace embree
|
||||
{
|
||||
/* 8-wide AVX integer type */
|
||||
|
@ -66,8 +74,8 @@ namespace embree
|
|||
/// Loads and Stores
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static __forceinline vuint8 load(const uint8_t* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
|
||||
static __forceinline vuint8 loadu(const uint8_t* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
|
||||
static __forceinline vuint8 load(const unsigned char* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
|
||||
static __forceinline vuint8 loadu(const unsigned char* ptr) { return _mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); }
|
||||
static __forceinline vuint8 load(const unsigned short* ptr) { return _mm256_cvtepu16_epi32(_mm_load_si128((__m128i*)ptr)); }
|
||||
static __forceinline vuint8 loadu(const unsigned short* ptr) { return _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i*)ptr)); }
|
||||
|
||||
|
@ -107,7 +115,7 @@ namespace embree
|
|||
_mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(v));
|
||||
}
|
||||
|
||||
static __forceinline void store(uint8_t* ptr, const vuint8& i)
|
||||
static __forceinline void store(unsigned char* ptr, const vuint8& i)
|
||||
{
|
||||
for (size_t j=0; j<8; j++)
|
||||
ptr[j] = i[j];
|
||||
|
@ -139,14 +147,14 @@ namespace embree
|
|||
#if defined(__AVX512VL__)
|
||||
_mm256_i32scatter_epi32((int*)ptr, ofs, v, scale);
|
||||
#else
|
||||
*(unsigned int*)(((int8_t*)ptr) + scale * ofs[0]) = v[0];
|
||||
*(unsigned int*)(((int8_t*)ptr) + scale * ofs[1]) = v[1];
|
||||
*(unsigned int*)(((int8_t*)ptr) + scale * ofs[2]) = v[2];
|
||||
*(unsigned int*)(((int8_t*)ptr) + scale * ofs[3]) = v[3];
|
||||
*(unsigned int*)(((int8_t*)ptr) + scale * ofs[4]) = v[4];
|
||||
*(unsigned int*)(((int8_t*)ptr) + scale * ofs[5]) = v[5];
|
||||
*(unsigned int*)(((int8_t*)ptr) + scale * ofs[6]) = v[6];
|
||||
*(unsigned int*)(((int8_t*)ptr) + scale * ofs[7]) = v[7];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[0]) = v[0];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[1]) = v[1];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[2]) = v[2];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[3]) = v[3];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[4]) = v[4];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[5]) = v[5];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[6]) = v[6];
|
||||
*(unsigned int*)(((char*)ptr)+scale*ofs[7]) = v[7];
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -156,14 +164,14 @@ namespace embree
|
|||
#if defined(__AVX512VL__)
|
||||
_mm256_mask_i32scatter_epi32((int*)ptr, mask, ofs, v, scale);
|
||||
#else
|
||||
if (likely(mask[0])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[0]) = v[0];
|
||||
if (likely(mask[1])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[1]) = v[1];
|
||||
if (likely(mask[2])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[2]) = v[2];
|
||||
if (likely(mask[3])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[3]) = v[3];
|
||||
if (likely(mask[4])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[4]) = v[4];
|
||||
if (likely(mask[5])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[5]) = v[5];
|
||||
if (likely(mask[6])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[6]) = v[6];
|
||||
if (likely(mask[7])) *(unsigned int*)(((int8_t*)ptr)+scale*ofs[7]) = v[7];
|
||||
if (likely(mask[0])) *(unsigned int*)(((char*)ptr)+scale*ofs[0]) = v[0];
|
||||
if (likely(mask[1])) *(unsigned int*)(((char*)ptr)+scale*ofs[1]) = v[1];
|
||||
if (likely(mask[2])) *(unsigned int*)(((char*)ptr)+scale*ofs[2]) = v[2];
|
||||
if (likely(mask[3])) *(unsigned int*)(((char*)ptr)+scale*ofs[3]) = v[3];
|
||||
if (likely(mask[4])) *(unsigned int*)(((char*)ptr)+scale*ofs[4]) = v[4];
|
||||
if (likely(mask[5])) *(unsigned int*)(((char*)ptr)+scale*ofs[5]) = v[5];
|
||||
if (likely(mask[6])) *(unsigned int*)(((char*)ptr)+scale*ofs[6]) = v[6];
|
||||
if (likely(mask[7])) *(unsigned int*)(((char*)ptr)+scale*ofs[7]) = v[7];
|
||||
#endif
|
||||
}
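On targets with AVX-512VL the same masked scatter is a single instruction, _mm256_mask_i32scatter_epi32, and the per-lane stores above are only the #else fallback. A hedged usage sketch of the hardware path (the wrapper name and the fixed scale of 4 bytes are illustrative assumptions):

// Illustrative wrapper, AVX-512VL only: store the lanes of 'values' selected by 'm'
// at dst[offsets[lane]] in one instruction; requires <immintrin.h>.
#if defined(__AVX512VL__)
static inline void scatter8_hw(int* dst, __m256i offsets, __m256i values, __mmask8 m)
{
  _mm256_mask_i32scatter_epi32(dst, m, offsets, values, 4); // scale = sizeof(int)
}
#endif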
|
||||
|
||||
|
@ -371,16 +379,12 @@ namespace embree
|
|||
template<> __forceinline vuint8 shuffle<1, 1, 3, 3>(const vuint8& v) { return _mm256_castps_si256(_mm256_movehdup_ps(_mm256_castsi256_ps(v))); }
|
||||
template<> __forceinline vuint8 shuffle<0, 1, 0, 1>(const vuint8& v) { return _mm256_castps_si256(_mm256_castpd_ps(_mm256_movedup_pd(_mm256_castps_pd(_mm256_castsi256_ps(v))))); }
|
||||
|
||||
__forceinline vuint8 broadcast(const unsigned int* ptr) { return _mm256_castps_si256(_mm256_broadcast_ss((const float*)ptr)); }
|
||||
|
||||
template<int i> __forceinline vuint8 insert4(const vuint8& a, const vuint4& b) { return _mm256_insertf128_si256(a, b, i); }
|
||||
template<int i> __forceinline vuint4 extract4(const vuint8& a) { return _mm256_extractf128_si256(a, i); }
|
||||
template<> __forceinline vuint4 extract4<0>(const vuint8& a) { return _mm256_castsi256_si128(a); }
|
||||
|
||||
__forceinline int toScalar(const vuint8& v) { return _mm_cvtsi128_si32(_mm256_castsi256_si128(v)); }
|
||||
|
||||
#if !defined(__aarch64__)
|
||||
|
||||
__forceinline vuint8 permute(const vuint8& v, const __m256i& index) {
|
||||
return _mm256_permutevar8x32_epi32(v, index);
|
||||
}
|
||||
|
@ -396,10 +400,7 @@ namespace embree
|
|||
#else
|
||||
return _mm256_alignr_epi8(a, b, 4*i);
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Reductions
|
||||
|
@ -427,8 +428,6 @@ namespace embree
|
|||
//__forceinline size_t select_min(const vboolf8& valid, const vuint8& v) { const vuint8 a = select(valid,v,vuint8(pos_inf)); return bsf(movemask(valid & (a == vreduce_min(a)))); }
|
||||
//__forceinline size_t select_max(const vboolf8& valid, const vuint8& v) { const vuint8 a = select(valid,v,vuint8(neg_inf)); return bsf(movemask(valid & (a == vreduce_max(a)))); }
|
||||
|
||||
__forceinline vuint8 assign(const vuint4& a) { return _mm256_castsi128_si256(a); }
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Output Operators
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -437,3 +436,11 @@ namespace embree
|
|||
return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ", " << a[4] << ", " << a[5] << ", " << a[6] << ", " << a[7] << ">";
|
||||
}
|
||||
}
|
||||
|
||||
#undef vboolf
|
||||
#undef vboold
|
||||
#undef vint
|
||||
#undef vuint
|
||||
#undef vllong
|
||||
#undef vfloat
|
||||
#undef vdouble
|
||||
|
|
4
thirdparty/embree/common/sys/alloc.cpp
vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "alloc.h"
|
||||
|
@ -23,7 +23,7 @@ namespace embree
|
|||
if (size != 0 && ptr == nullptr)
|
||||
// -- GODOT start --
|
||||
// throw std::bad_alloc();
|
||||
abort();
|
||||
abort();
|
||||
// -- GODOT end --
|
||||
|
||||
return ptr;
|
||||
|
|
2
thirdparty/embree/common/sys/alloc.h
vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
6
thirdparty/embree/common/sys/array.h
vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
@ -139,7 +139,7 @@ namespace embree
|
|||
__forceinline Ty& operator[](const unsigned i) { assert(i<N); return data[i]; }
|
||||
__forceinline const Ty& operator[](const unsigned i) const { assert(i<N); return data[i]; }
|
||||
|
||||
#if defined(__X86_64__) || defined(__aarch64__)
|
||||
#if defined(__64BIT__)
|
||||
__forceinline Ty& operator[](const size_t i) { assert(i<N); return data[i]; }
|
||||
__forceinline const Ty& operator[](const size_t i) const { assert(i<N); return data[i]; }
|
||||
#endif
|
||||
|
@ -196,7 +196,7 @@ namespace embree
|
|||
__forceinline Ty& operator[](const int i) { assert(i>=0 && i<max_total_elements); resize(i+1); return data[i]; }
|
||||
__forceinline Ty& operator[](const unsigned i) { assert(i<max_total_elements); resize(i+1); return data[i]; }
|
||||
|
||||
#if defined(__X86_64__) || defined(__aarch64__)
|
||||
#if defined(__64BIT__)
|
||||
__forceinline Ty& operator[](const size_t i) { assert(i<max_total_elements); resize(i+1); return data[i]; }
|
||||
#endif
|
||||
|
||||
|
|
2
thirdparty/embree/common/sys/atomic.h
vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
2
thirdparty/embree/common/sys/barrier.cpp
vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "barrier.h"
|
||||
|
|
2
thirdparty/embree/common/sys/barrier.h
vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
16
thirdparty/embree/common/sys/condition.cpp
vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "condition.h"
|
||||
|
@ -40,19 +40,23 @@ namespace embree
|
|||
struct ConditionImplementation
|
||||
{
|
||||
__forceinline ConditionImplementation () {
|
||||
pthread_cond_init(&cond,nullptr);
|
||||
if (pthread_cond_init(&cond,nullptr) != 0)
|
||||
THROW_RUNTIME_ERROR("pthread_cond_init failed");
|
||||
}
|
||||
|
||||
__forceinline ~ConditionImplementation() {
|
||||
pthread_cond_destroy(&cond);
|
||||
}
|
||||
MAYBE_UNUSED bool ok = pthread_cond_destroy(&cond) == 0;
|
||||
assert(ok);
|
||||
}
|
||||
|
||||
__forceinline void wait(MutexSys& mutex) {
|
||||
pthread_cond_wait(&cond, (pthread_mutex_t*)mutex.mutex);
|
||||
if (pthread_cond_wait(&cond, (pthread_mutex_t*)mutex.mutex) != 0)
|
||||
THROW_RUNTIME_ERROR("pthread_cond_wait failed");
|
||||
}
|
||||
|
||||
__forceinline void notify_all() {
|
||||
pthread_cond_broadcast(&cond);
|
||||
if (pthread_cond_broadcast(&cond) != 0)
|
||||
THROW_RUNTIME_ERROR("pthread_cond_broadcast failed");
|
||||
}
|
||||
|
||||
public:
|
||||
|
|
2
thirdparty/embree/common/sys/condition.h
vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
|
2
thirdparty/embree/common/sys/filename.cpp
vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#include "filename.h"
|
||||
|
|
4
thirdparty/embree/common/sys/filename.h
vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
@ -73,7 +73,7 @@ namespace embree
|
|||
friend bool operator!=(const FileName& a, const FileName& b);
|
||||
|
||||
/*! output operator */
|
||||
friend embree_ostream operator<<(embree_ostream cout, const FileName& filename);
|
||||
friend std::ostream& operator<<(std::ostream& cout, const FileName& filename);
|
||||
|
||||
private:
|
||||
std::string filename;
|
||||
|
|
380
thirdparty/embree/common/sys/intrinsics.h
vendored
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2009-2020 Intel Corporation
|
||||
// Copyright 2009-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#pragma once
|
||||
|
@ -10,10 +10,7 @@
|
|||
#endif
|
||||
|
||||
#if defined(__ARM_NEON)
|
||||
#include "../math/SSE2NEON.h"
|
||||
#if defined(NEON_AVX2_EMULATION)
|
||||
#include "../math/AVX2NEON.h"
|
||||
#endif
|
||||
#include "../simd/arm/emulation.h"
|
||||
#else
|
||||
#include <immintrin.h>
|
||||
#endif
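On ARM the single include above now routes everything through common/simd/arm/emulation.h instead of pulling SSE2NEON/AVX2NEON in directly; that header maps the x86 intrinsics Embree uses onto NEON. A greatly simplified, hedged illustration of what such a shim looks like (the type and function names below are made up for the example and are not the contents of emulation.h):

// Illustration only: an SSE-on-NEON shim maps each x86 intrinsic to its NEON equivalent.
#if defined(__ARM_NEON)
#include <arm_neon.h>
typedef float32x4_t simd4f;                                                      // stand-in for __m128
static inline simd4f simd4f_add(simd4f a, simd4f b) { return vaddq_f32(a, b); }  // ~ _mm_add_ps
static inline simd4f simd4f_mul(simd4f a, simd4f b) { return vmulq_f32(a, b); }  // ~ _mm_mul_ps
#endif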
|
||||
|
@ -27,14 +24,6 @@
|
|||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(__aarch64__)
|
||||
#if !defined(_lzcnt_u32)
|
||||
#define _lzcnt_u32 __builtin_clz
|
||||
#endif
|
||||
#if !defined(_lzcnt_u64)
|
||||
#define _lzcnt_u64 __builtin_clzll
|
||||
#endif
|
||||
#else
|
||||
#if defined(__LZCNT__)
|
||||
#if !defined(_lzcnt_u32)
|
||||
#define _lzcnt_u32 __lzcnt32
|
||||
|
@ -43,13 +32,16 @@
|
|||
#define _lzcnt_u64 __lzcnt64
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(__WIN32__)
|
||||
# ifndef NOMINMAX
|
||||
# define NOMINMAX
|
||||
# endif
|
||||
# include <windows.h>
|
||||
// -- GODOT start --
|
||||
#if !defined(NOMINMAX)
|
||||
// -- GODOT end --
|
||||
#define NOMINMAX
|
||||
// -- GODOT start --
|
||||
#endif
|
||||
#include "windows.h"
|
||||
// -- GODOT end --
|
||||
#endif
|
||||
|
||||
/* normally defined in pmmintrin.h, but we always need this */
|
||||
|
@ -62,133 +54,133 @@
|
|||
|
||||
namespace embree
|
||||
{
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Windows Platform
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
#if defined(__WIN32__)
|
||||
|
||||
__forceinline size_t read_tsc()
|
||||
|
||||
__forceinline size_t read_tsc()
|
||||
{
|
||||
LARGE_INTEGER li;
|
||||
QueryPerformanceCounter(&li);
|
||||
return (size_t)li.QuadPart;
|
||||
}
|
||||
|
||||
|
||||
__forceinline int bsf(int v) {
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
return _tzcnt_u32(v);
|
||||
#else
|
||||
unsigned long r = 0; _BitScanForward(&r,v); return r;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
__forceinline unsigned bsf(unsigned v) {
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
return _tzcnt_u32(v);
|
||||
#else
|
||||
unsigned long r = 0; _BitScanForward(&r,v); return r;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
#if defined(__X86_64__)
|
||||
__forceinline size_t bsf(size_t v) {
|
||||
#if defined(__AVX2__)
|
||||
#if defined(__AVX2__)
|
||||
return _tzcnt_u64(v);
|
||||
#else
|
||||
unsigned long r = 0; _BitScanForward64(&r,v); return r;
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
__forceinline int bscf(int& v)
|
||||
|
||||
__forceinline int bscf(int& v)
|
||||
{
|
||||
int i = bsf(v);
|
||||
v &= v-1;
|
||||
return i;
|
||||
}
|
||||
|
||||
__forceinline unsigned bscf(unsigned& v)
|
||||
|
||||
__forceinline unsigned bscf(unsigned& v)
|
||||
{
|
||||
unsigned i = bsf(v);
|
||||
v &= v-1;
|
||||
return i;
|
||||
}
|
||||
|
||||
|
||||
#if defined(__X86_64__)
|
||||
__forceinline size_t bscf(size_t& v)
|
||||
__forceinline size_t bscf(size_t& v)
|
||||
{
|
||||
size_t i = bsf(v);
|
||||
v &= v-1;
|
||||
return i;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
__forceinline int bsr(int v) {
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
return 31 - _lzcnt_u32(v);
|
||||
#else
|
||||
unsigned long r = 0; _BitScanReverse(&r,v); return r;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
__forceinline unsigned bsr(unsigned v) {
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
return 31 - _lzcnt_u32(v);
|
||||
#else
|
||||
unsigned long r = 0; _BitScanReverse(&r,v); return r;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
#if defined(__X86_64__)
|
||||
__forceinline size_t bsr(size_t v) {
|
||||
#if defined(__AVX2__)
|
||||
#if defined(__AVX2__)
|
||||
return 63 -_lzcnt_u64(v);
|
||||
#else
|
||||
unsigned long r = 0; _BitScanReverse64(&r, v); return r;
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
__forceinline int lzcnt(const int x)
|
||||
{
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
return _lzcnt_u32(x);
|
||||
#else
|
||||
if (unlikely(x == 0)) return 32;
|
||||
return 31 - bsr(x);
|
||||
return 31 - bsr(x);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
__forceinline int btc(int v, int i) {
|
||||
long r = v; _bittestandcomplement(&r,i); return r;
|
||||
}
|
||||
|
||||
|
||||
__forceinline int bts(int v, int i) {
|
||||
long r = v; _bittestandset(&r,i); return r;
|
||||
}
|
||||
|
||||
|
||||
__forceinline int btr(int v, int i) {
|
||||
long r = v; _bittestandreset(&r,i); return r;
|
||||
}
|
||||
|
||||
|
||||
#if defined(__X86_64__)
|
||||
|
||||
|
||||
__forceinline size_t btc(size_t v, size_t i) {
|
||||
size_t r = v; _bittestandcomplement64((__int64*)&r,i); return r;
|
||||
}
|
||||
|
||||
|
||||
__forceinline size_t bts(size_t v, size_t i) {
|
||||
__int64 r = v; _bittestandset64(&r,i); return r;
|
||||
}
|
||||
|
||||
|
||||
__forceinline size_t btr(size_t v, size_t i) {
|
||||
__int64 r = v; _bittestandreset64(&r,i); return r;
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
__forceinline int32_t atomic_cmpxchg(volatile int32_t* p, const int32_t c, const int32_t v) {
|
||||
return _InterlockedCompareExchange((volatile long*)p,v,c);
|
||||
}
|
||||
|
@ -196,174 +188,160 @@ namespace embree
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// Unix Platform
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
#else
|
||||
|
||||
|
||||
#if defined(__i386__) && defined(__PIC__)
|
||||
|
||||
__forceinline void __cpuid(int out[4], int op)
|
||||
|
||||
__forceinline void __cpuid(int out[4], int op)
|
||||
{
|
||||
asm volatile ("xchg{l}\t{%%}ebx, %1\n\t"
|
||||
"cpuid\n\t"
|
||||
"xchg{l}\t{%%}ebx, %1\n\t"
|
||||
: "=a"(out[0]), "=r"(out[1]), "=c"(out[2]), "=d"(out[3])
|
||||
: "0"(op));
|
||||
: "=a"(out[0]), "=r"(out[1]), "=c"(out[2]), "=d"(out[3])
|
||||
: "0"(op));
|
||||
}
|
||||
|
||||
__forceinline void __cpuid_count(int out[4], int op1, int op2)
|
||||
|
||||
__forceinline void __cpuid_count(int out[4], int op1, int op2)
|
||||
{
|
||||
asm volatile ("xchg{l}\t{%%}ebx, %1\n\t"
|
||||
"cpuid\n\t"
|
||||
"xchg{l}\t{%%}ebx, %1\n\t"
|
||||
: "=a" (out[0]), "=r" (out[1]), "=c" (out[2]), "=d" (out[3])
|
||||
: "0" (op1), "2" (op2));
|
||||
: "0" (op1), "2" (op2));
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
#elif defined(__X86_ASM__)
|
||||
|
||||
__forceinline void __cpuid(int out[4], int op) {
|
||||
#if defined(__ARM_NEON)
|
||||
if (op == 0) { // Get CPU name
|
||||
out[0] = 0x41524d20;
|
||||
out[1] = 0x41524d20;
|
||||
out[2] = 0x41524d20;
|
||||
out[3] = 0x41524d20;
|
||||
}
|
||||
#else
|
||||
asm volatile ("cpuid" : "=a"(out[0]), "=b"(out[1]), "=c"(out[2]), "=d"(out[3]) : "a"(op));
|
||||
#endif
|
||||
asm volatile ("cpuid" : "=a"(out[0]), "=b"(out[1]), "=c"(out[2]), "=d"(out[3]) : "a"(op));
|
||||
}
|
||||
|
||||
#if !defined(__ARM_NEON)
|
||||
|
||||
__forceinline void __cpuid_count(int out[4], int op1, int op2) {
|
||||
asm volatile ("cpuid" : "=a"(out[0]), "=b"(out[1]), "=c"(out[2]), "=d"(out[3]) : "a"(op1), "c"(op2));
|
||||
asm volatile ("cpuid" : "=a"(out[0]), "=b"(out[1]), "=c"(out[2]), "=d"(out[3]) : "a"(op1), "c"(op2));
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
__forceinline uint64_t read_tsc() {
|
||||
#if defined(__ARM_NEON)
|
||||
return 0; // FIXME(LTE): mimic rdtsc
|
||||
#else
|
||||
#if defined(__X86_ASM__)
|
||||
uint32_t high,low;
|
||||
asm volatile ("rdtsc" : "=d"(high), "=a"(low));
|
||||
return (((uint64_t)high) << 32) + (uint64_t)low;
|
||||
#else
|
||||
/* Not supported yet, meaning measuring traversal cost per pixel does not work. */
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
__forceinline int bsf(int v) {
|
||||
#if defined(__ARM_NEON)
|
||||
return __builtin_ctz(v);
|
||||
#else
|
||||
#if defined(__AVX2__)
|
||||
#if defined(__AVX2__)
|
||||
return _tzcnt_u32(v);
|
||||
#else
|
||||
#elif defined(__X86_ASM__)
|
||||
int r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r;
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(__X86_64__) || defined(__aarch64__)
|
||||
__forceinline unsigned bsf(unsigned v)
|
||||
{
|
||||
#if defined(__ARM_NEON)
|
||||
#else
|
||||
return __builtin_ctz(v);
|
||||
#else
|
||||
#if defined(__AVX2__)
|
||||
return _tzcnt_u32(v);
|
||||
#else
|
||||
unsigned r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r;
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(__64BIT__)
|
||||
__forceinline unsigned bsf(unsigned v)
|
||||
{
|
||||
#if defined(__AVX2__)
|
||||
return _tzcnt_u32(v);
|
||||
#elif defined(__X86_ASM__)
|
||||
unsigned r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r;
|
||||
#else
|
||||
return __builtin_ctz(v);
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
__forceinline size_t bsf(size_t v) {
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
#if defined(__X86_64__)
|
||||
return _tzcnt_u64(v);
|
||||
#else
|
||||
return _tzcnt_u32(v);
|
||||
#endif
|
||||
#elif defined(__ARM_NEON)
|
||||
return __builtin_ctzl(v);
|
||||
#else
|
||||
#elif defined(__X86_ASM__)
|
||||
size_t r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r;
|
||||
#else
|
||||
return __builtin_ctzl(v);
|
||||
#endif
|
||||
}
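After this change every bsf overload above selects its implementation in the same order: the BMI/AVX2 tzcnt intrinsic when available, inline x86 assembly when __X86_ASM__ is defined, and otherwise the compiler builtin, which is the path the aarch64 build takes. A compact hedged sketch of that chain for the 32-bit case (the wrapper name is illustrative, and <immintrin.h> is assumed to be included as it is in this header):

// Sketch of the selection chain; v must be non-zero on the asm/builtin paths.
static inline int trailing_zeros32(unsigned int v)
{
#if defined(__AVX2__)
  return (int)_tzcnt_u32(v);                                          // BMI1, also defined for v == 0
#elif defined(__X86_ASM__)
  unsigned int r; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return (int)r;
#else
  return __builtin_ctz(v);                                            // GCC/Clang builtin
#endif
}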
|
||||
|
||||
__forceinline int bscf(int& v)
|
||||
__forceinline int bscf(int& v)
|
||||
{
|
||||
int i = bsf(v);
|
||||
v &= v-1;
|
||||
return i;
|
||||
}
|
||||
|
||||
#if defined(__X86_64__) || defined(__aarch64__)
|
||||
__forceinline unsigned int bscf(unsigned int& v)
|
||||
|
||||
#if defined(__64BIT__)
|
||||
__forceinline unsigned int bscf(unsigned int& v)
|
||||
{
|
||||
unsigned int i = bsf(v);
|
||||
v &= v-1;
|
||||
return i;
|
||||
}
|
||||
#endif
|
||||
|
||||
__forceinline size_t bscf(size_t& v)
|
||||
|
||||
__forceinline size_t bscf(size_t& v)
|
||||
{
|
||||
size_t i = bsf(v);
|
||||
v &= v-1;
|
||||
return i;
|
||||
}
|
||||
|
||||
|
||||
__forceinline int bsr(int v) {
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
return 31 - _lzcnt_u32(v);
|
||||
#elif defined(__ARM_NEON)
|
||||
return __builtin_clz(v)^31;
|
||||
#else
|
||||
#elif defined(__X86_ASM__)
|
||||
int r = 0; asm ("bsr %1,%0" : "=r"(r) : "r"(v)); return r;
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(__X86_64__) || defined(__aarch64__)
|
||||
__forceinline unsigned bsr(unsigned v) {
|
||||
#if defined(__AVX2__)
|
||||
return 31 - _lzcnt_u32(v);
|
||||
#elif defined(__ARM_NEON)
|
||||
return __builtin_clz(v)^31;
|
||||
#else
|
||||
return __builtin_clz(v) ^ 31;
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(__64BIT__)
|
||||
__forceinline unsigned bsr(unsigned v) {
|
||||
#if defined(__AVX2__)
|
||||
return 31 - _lzcnt_u32(v);
|
||||
#elif defined(__X86_ASM__)
|
||||
unsigned r = 0; asm ("bsr %1,%0" : "=r"(r) : "r"(v)); return r;
|
||||
#else
|
||||
return __builtin_clz(v) ^ 31;
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
__forceinline size_t bsr(size_t v) {
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
#if defined(__X86_64__)
|
||||
return 63 - _lzcnt_u64(v);
|
||||
#else
|
||||
return 31 - _lzcnt_u32(v);
|
||||
#endif
|
||||
#elif defined(__aarch64__)
|
||||
return (sizeof(v) * 8 - 1) - __builtin_clzl(v);
|
||||
#else
|
||||
#elif defined(__X86_ASM__)
|
||||
size_t r = 0; asm ("bsr %1,%0" : "=r"(r) : "r"(v)); return r;
|
||||
#else
|
||||
return (sizeof(v) * 8 - 1) - __builtin_clzl(v);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
__forceinline int lzcnt(const int x)
|
||||
{
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
return _lzcnt_u32(x);
|
||||
#else
|
||||
if (unlikely(x == 0)) return 32;
|
||||
return 31 - bsr(x);
|
||||
return 31 - bsr(x);
|
||||
#endif
|
||||
}
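The explicit x == 0 check in the fallback exists because bsr (and the builtin it maps to) is undefined for a zero input, while the lzcnt instruction defines lzcnt(0) == 32; the early return reproduces that contract. A small standalone self-check of the fallback's arithmetic (the reference helpers are assumptions for the test, not Embree code):

// Hedged self-check: 31 - bsr(x) equals the leading-zero count for non-zero x,
// and the zero case must be special-cased to return 32.
#include <cassert>
static inline int bsr_ref(unsigned int x) { return 31 - __builtin_clz(x); }    // undefined for x == 0
static inline int lzcnt_ref(int x) { return x == 0 ? 32 : 31 - bsr_ref((unsigned int)x); }
static inline void lzcnt_selftest()
{
  assert(lzcnt_ref(0) == 32);
  assert(lzcnt_ref(1) == 31);          // bsr(1) == 0
  assert(lzcnt_ref(1 << 30) == 1);     // bsr(2^30) == 30
}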
|
||||
|
||||
__forceinline size_t blsr(size_t v) {
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
#if defined(__INTEL_COMPILER)
|
||||
return _blsr_u64(v);
|
||||
#else
|
||||
|
@ -377,79 +355,65 @@ namespace embree
|
|||
return v & (v-1);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
__forceinline int btc(int v, int i) {
|
||||
#if defined(__aarch64__)
|
||||
// _bittestandcomplement(long *a, long b) {
|
||||
// unsigned char x = (*a >> b) & 1;
|
||||
// *a = *a ^ (1 << b);
|
||||
// return x;
|
||||
|
||||
// We only need `*a`
|
||||
return (v ^ (1 << i));
|
||||
#else
|
||||
#if defined(__X86_ASM__)
|
||||
int r = 0; asm ("btc %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags" ); return r;
|
||||
#endif
|
||||
}
|
||||
|
||||
__forceinline int bts(int v, int i) {
|
||||
#if defined(__aarch64__)
|
||||
// _bittestandset(long *a, long b) {
|
||||
// unsigned char x = (*a >> b) & 1;
|
||||
// *a = *a | (1 << b);
|
||||
// return x;
|
||||
return (v | (v << i));
|
||||
#else
|
||||
int r = 0; asm ("bts %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
|
||||
#endif
|
||||
}
|
||||
|
||||
__forceinline int btr(int v, int i) {
|
||||
#if defined(__aarch64__)
|
||||
// _bittestandreset(long *a, long b) {
|
||||
// unsigned char x = (*a >> b) & 1;
|
||||
// *a = *a & ~(1 << b);
|
||||
// return x;
|
||||
return (v & ~(v << i));
|
||||
#else
|
||||
int r = 0; asm ("btr %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
|
||||
#endif
|
||||
}
|
||||
|
||||
__forceinline size_t btc(size_t v, size_t i) {
|
||||
#if defined(__aarch64__)
|
||||
return (v ^ (1 << i));
|
||||
#else
|
||||
size_t r = 0; asm ("btc %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags" ); return r;
|
||||
#endif
|
||||
}
|
||||
|
||||
__forceinline size_t bts(size_t v, size_t i) {
|
||||
#if defined(__aarch64__)
|
||||
|
||||
__forceinline int bts(int v, int i) {
|
||||
#if defined(__X86_ASM__)
|
||||
int r = 0; asm ("bts %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
|
||||
#else
|
||||
return (v | (v << i));
|
||||
#else
|
||||
size_t r = 0; asm ("bts %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
|
||||
#endif
|
||||
}
|
||||
|
||||
__forceinline size_t btr(size_t v, size_t i) {
|
||||
#if defined(__ARM_NEON)
|
||||
return (v & ~(v << i));
|
||||
|
||||
__forceinline int btr(int v, int i) {
|
||||
#if defined(__X86_ASM__)
|
||||
int r = 0; asm ("btr %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
|
||||
#else
|
||||
return (v & ~(v << i));
|
||||
#endif
|
||||
}
|
||||
|
||||
__forceinline size_t btc(size_t v, size_t i) {
|
||||
#if defined(__X86_ASM__)
|
||||
size_t r = 0; asm ("btc %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags" ); return r;
|
||||
#else
|
||||
return (v ^ (1 << i));
|
||||
#endif
|
||||
}
|
||||
|
||||
__forceinline size_t bts(size_t v, size_t i) {
|
||||
#if defined(__X86_ASM__)
|
||||
size_t r = 0; asm ("bts %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
|
||||
#else
|
||||
return (v | (v << i));
|
||||
#endif
|
||||
}
|
||||
|
||||
__forceinline size_t btr(size_t v, size_t i) {
|
||||
#if defined(__X86_ASM__)
|
||||
size_t r = 0; asm ("btr %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r;
|
||||
#else
|
||||
return (v & ~(v << i));
|
||||
#endif
|
||||
}
|
||||
|
||||
__forceinline int32_t atomic_cmpxchg(int32_t volatile* value, int32_t comparand, const int32_t input) {
|
||||
return __sync_val_compare_and_swap(value, comparand, input);
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// All Platforms
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
#if defined(__clang__) || defined(__GNUC__)
|
||||
#if !defined(_mm_undefined_ps)
|
||||
__forceinline __m128 _mm_undefined_ps() { return _mm_setzero_ps(); }
|
||||
|
@ -471,39 +435,41 @@ namespace embree
|
|||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(__SSE4_2__) || defined(__ARM_NEON)
|
||||
|
||||
#if defined(__SSE4_2__)
|
||||
|
||||
__forceinline int popcnt(int in) {
|
||||
return _mm_popcnt_u32(in);
|
||||
}
|
||||
|
||||
|
||||
__forceinline unsigned popcnt(unsigned in) {
|
||||
return _mm_popcnt_u32(in);
|
||||
}
|
||||
|
||||
#if defined(__X86_64__) || defined(__ARM_NEON)
|
||||
|
||||
#if defined(__64BIT__)
|
||||
__forceinline size_t popcnt(size_t in) {
|
||||
return _mm_popcnt_u64(in);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(__X86_ASM__)
|
||||
__forceinline uint64_t rdtsc()
|
||||
{
|
||||
int dummy[4];
|
||||
__cpuid(dummy,0);
|
||||
uint64_t clock = read_tsc();
|
||||
__cpuid(dummy,0);
|
||||
int dummy[4];
|
||||
__cpuid(dummy,0);
|
||||
uint64_t clock = read_tsc();
|
||||
__cpuid(dummy,0);
|
||||
return clock;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
__forceinline void pause_cpu(const size_t N = 8)
|
||||
{
|
||||
for (size_t i=0; i<N; i++)
|
||||
_mm_pause();
|
||||
_mm_pause();
|
||||
}
|
||||
|
||||
|
||||
/* prefetches */
|
||||
__forceinline void prefetchL1 (const void* ptr) { _mm_prefetch((const char*)ptr,_MM_HINT_T0); }
|
||||
__forceinline void prefetchL2 (const void* ptr) { _mm_prefetch((const char*)ptr,_MM_HINT_T1); }
|
||||
|
@ -513,18 +479,18 @@ namespace embree
|
|||
#if defined(__INTEL_COMPILER)
|
||||
_mm_prefetch((const char*)ptr,_MM_HINT_ET0);
|
||||
#else
|
||||
_mm_prefetch((const char*)ptr,_MM_HINT_T0);
|
||||
_mm_prefetch((const char*)ptr,_MM_HINT_T0);
|
||||
#endif
|
||||
}
|
||||
|
||||
__forceinline void prefetchL1EX(const void* ptr) {
|
||||
prefetchEX(ptr);
|
||||
__forceinline void prefetchL1EX(const void* ptr) {
|
||||
prefetchEX(ptr);
|
||||
}
|
||||
|
||||
__forceinline void prefetchL2EX(const void* ptr) {
|
||||
prefetchEX(ptr);
|
||||
|
||||
__forceinline void prefetchL2EX(const void* ptr) {
|
||||
prefetchEX(ptr);
|
||||
}
|
||||
#if defined(__AVX2__) && !defined(__aarch64__)
|
||||
#if defined(__AVX2__)
|
||||
__forceinline unsigned int pext(unsigned int a, unsigned int b) { return _pext_u32(a, b); }
|
||||
__forceinline unsigned int pdep(unsigned int a, unsigned int b) { return _pdep_u32(a, b); }
|
||||
#if defined(__X86_64__)
|
||||
|
|
Some files were not shown because too many files have changed in this diff.