vhacd: Recommit unmodified upstream code without style changes

Godot-specific changes will then be redone without touching upstream formatting. Also documented the current state in thirdparty/README.md, added LICENSE, and added vhacd to COPYRIGHT.txt.
parent 7f2ad8bd3f
commit 531b158897

11 changed files with 3302 additions and 3050 deletions
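The "Godot-specific changes" mentioned above are, as the hunks below show, mainly a namespace wrapper around the upstream Bullet-derived types, marked with //GODOT ADDITION comments, presumably so that the bundled bt* symbols do not clash with Godot's own copy of Bullet. A minimal sketch of that pattern, for context only; the marker lines are copied from the headers in this commit, and the btGetVersion() body is just an example taken from btScalar.h:

    //GODOT ADDITION
    namespace VHACD {
    //

    // ... unmodified upstream code, e.g.:
    inline int32_t btGetVersion()
    {
        return BT_BULLET_VERSION;
    }

    //GODOT ADDITION
    }; // namespace VHACD
    //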
COPYRIGHT.txt

@@ -385,6 +385,12 @@ Copyright: 2014-2018, Syoyo Fujita
   2002, Industrial Light & Magic, a division of Lucas Digital Ltd. LLC
 License: BSD-3-clause
 
+Files: ./thirdparty/vhacd/
+Comment: V-HACD
+Copyright: 2011, Khaled Mamou
+  2003-2009, Erwin Coumans
+License: BSD-3-clause
+
 Files: ./thirdparty/zlib/
 Comment: zlib
 Copyright: 1995-2017, Jean-loup Gailly and Mark Adler
thirdparty/README.md (vendored, 21 changed lines)
@@ -1,5 +1,6 @@
 # Third party libraries
 
+
 ## assimp
 
 - Upstream: http://github.com/assimp/assimp

@@ -294,8 +295,12 @@ Godot build configurations, check them out when updating.
 File extracted from upstream release tarball `mbedtls-2.16.0-apache.tgz`:
 - All `*.h` from `include/mbedtls/` to `thirdparty/mbedtls/include/mbedtls/`
 - All `*.c` from `library/` to `thirdparty/mbedtls/library/`
-- Applied the patch in `thirdparty/mbedtls/1453.diff` (PR 1453). Soon to be merged upstream. Check it out at next update.
-- Applied the patch in `thirdparty/mbedtls/padlock.diff`. This disables VIA padlock support which defines a symbol `unsupported` which clashses with a symbol in libwebsockets.
+- Applied the patch in `thirdparty/mbedtls/1453.diff` (PR 1453).
+  Soon to be merged upstream. Check it out at next update.
+- Applied the patch in `thirdparty/mbedtls/padlock.diff`. This disables VIA
+  padlock support which defines a symbol `unsupported` which clashes with
+  a symbol in libwebsockets.
+
 
 ## miniupnpc
 

@@ -523,6 +528,18 @@ Files extracted from upstream source:
 - `tinyexr.{cc,h}`
 
 
+## vhacd
+
+- Upstream: https://github.com/kmammou/v-hacd
+- Version: git (2297aa1, 2018)
+- License: BSD-3-Clause
+
+Files extracted from upstream source:
+
+- From `src/VHACD_Lib/`: `inc`, `public` and `src`
+- `LICENSE`
+
+
 ## zlib
 
 - Upstream: http://www.zlib.net
thirdparty/vhacd/LICENSE (vendored, new file, 29 lines)
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2011, Khaled Mamou (kmamou at gmail dot com)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
thirdparty/vhacd/inc/btAlignedAllocator.h (vendored, 15 changed lines)
@@ -21,11 +21,6 @@ subject to the following restrictions:
 ///that is better portable and more predictable
 
 #include "btScalar.h"
 
-//GODOT ADDITION
-namespace VHACD {
-//
-
-
 //#define BT_DEBUG_MEMORY_ALLOCATIONS 1
 #ifdef BT_DEBUG_MEMORY_ALLOCATIONS

@@ -84,12 +79,14 @@ public:
 
     pointer address(reference ref) const { return &ref; }
     const_pointer address(const_reference ref) const { return &ref; }
-    pointer allocate(size_type n, const_pointer *hint = 0) {
+    pointer allocate(size_type n, const_pointer* hint = 0)
+    {
         (void)hint;
         return reinterpret_cast<pointer>(btAlignedAlloc(sizeof(value_type) * n, Alignment));
     }
     void construct(pointer ptr, const value_type& value) { new (ptr) value_type(value); }
-    void deallocate(pointer ptr) {
+    void deallocate(pointer ptr)
+    {
         btAlignedFree(reinterpret_cast<void*>(ptr));
     }
     void destroy(pointer ptr) { ptr->~value_type(); }

@@ -104,8 +101,4 @@ public:
     friend bool operator==(const self_type&, const self_type&) { return true; }
 };
 
-//GODOT ADDITION
-}; // namespace VHACD
-//
-
 #endif //BT_ALIGNED_ALLOCATOR
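The allocator in this header forwards to the btAlignedAlloc/btAlignedFree pair visible in the hunks above. A hedged sketch of direct use, with the call forms inferred only from what this diff shows:

    #include "btAlignedAllocator.h"

    void useScratchBuffer()
    {
        // 16-byte aligned raw allocation, mirroring what
        // btAlignedAllocator<T, 16>::allocate() does via btAlignedAlloc().
        void* mem = btAlignedAlloc(1024, 16);
        // ... fill and use the buffer ...
        btAlignedFree(mem); // matches the deallocate() path shown above
    }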
thirdparty/vhacd/inc/btAlignedObjectArray.h (vendored, 116 changed lines)
@ -38,10 +38,6 @@ subject to the following restrictions:
|
|||
#include <new> //for placement new
|
||||
#endif //BT_USE_PLACEMENT_NEW
|
||||
|
||||
//GODOT ADDITION
|
||||
namespace VHACD {
|
||||
//
|
||||
|
||||
///The btAlignedObjectArray template class uses a subset of the stl::vector interface for its methods
|
||||
///It is developed to replace stl::vector to avoid portability issues, including STL alignment issues to add SIMD/SSE data
|
||||
template <typename T>
|
||||
|
@ -57,7 +53,8 @@ class btAlignedObjectArray {
|
|||
|
||||
#ifdef BT_ALLOW_ARRAY_COPY_OPERATOR
|
||||
public:
|
||||
SIMD_FORCE_INLINE btAlignedObjectArray<T> &operator=(const btAlignedObjectArray<T> &other) {
|
||||
SIMD_FORCE_INLINE btAlignedObjectArray<T>& operator=(const btAlignedObjectArray<T>& other)
|
||||
{
|
||||
copyFromArray(other);
|
||||
return *this;
|
||||
}
|
||||
|
@ -67,10 +64,12 @@ private:
|
|||
#endif //BT_ALLOW_ARRAY_COPY_OPERATOR
|
||||
|
||||
protected:
|
||||
SIMD_FORCE_INLINE int32_t allocSize(int32_t size) {
|
||||
SIMD_FORCE_INLINE int32_t allocSize(int32_t size)
|
||||
{
|
||||
return (size ? size * 2 : 1);
|
||||
}
|
||||
SIMD_FORCE_INLINE void copy(int32_t start, int32_t end, T *dest) const {
|
||||
SIMD_FORCE_INLINE void copy(int32_t start, int32_t end, T* dest) const
|
||||
{
|
||||
int32_t i;
|
||||
for (i = start; i < end; ++i)
|
||||
#ifdef BT_USE_PLACEMENT_NEW
|
||||
|
@ -80,27 +79,31 @@ protected:
|
|||
#endif //BT_USE_PLACEMENT_NEW
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void init() {
|
||||
SIMD_FORCE_INLINE void init()
|
||||
{
|
||||
//PCK: added this line
|
||||
m_ownsMemory = true;
|
||||
m_data = 0;
|
||||
m_size = 0;
|
||||
m_capacity = 0;
|
||||
}
|
||||
SIMD_FORCE_INLINE void destroy(int32_t first, int32_t last) {
|
||||
SIMD_FORCE_INLINE void destroy(int32_t first, int32_t last)
|
||||
{
|
||||
int32_t i;
|
||||
for (i = first; i < last; i++) {
|
||||
m_data[i].~T();
|
||||
}
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void *allocate(int32_t size) {
|
||||
SIMD_FORCE_INLINE void* allocate(int32_t size)
|
||||
{
|
||||
if (size)
|
||||
return m_allocator.allocate(size);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void deallocate() {
|
||||
SIMD_FORCE_INLINE void deallocate()
|
||||
{
|
||||
if (m_data) {
|
||||
//PCK: enclosed the deallocation in this block
|
||||
if (m_ownsMemory) {
|
||||
|
@ -111,16 +114,19 @@ protected:
|
|||
}
|
||||
|
||||
public:
|
||||
btAlignedObjectArray() {
|
||||
btAlignedObjectArray()
|
||||
{
|
||||
init();
|
||||
}
|
||||
|
||||
~btAlignedObjectArray() {
|
||||
~btAlignedObjectArray()
|
||||
{
|
||||
clear();
|
||||
}
|
||||
|
||||
///Generally it is best to avoid using the copy constructor of an btAlignedObjectArray, and use a (const) reference to the array instead.
|
||||
btAlignedObjectArray(const btAlignedObjectArray &otherArray) {
|
||||
btAlignedObjectArray(const btAlignedObjectArray& otherArray)
|
||||
{
|
||||
init();
|
||||
|
||||
int32_t otherSize = otherArray.size();
|
||||
|
@ -129,36 +135,42 @@ public:
|
|||
}
|
||||
|
||||
/// return the number of elements in the array
|
||||
SIMD_FORCE_INLINE int32_t size() const {
|
||||
SIMD_FORCE_INLINE int32_t size() const
|
||||
{
|
||||
return m_size;
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE const T &at(int32_t n) const {
|
||||
SIMD_FORCE_INLINE const T& at(int32_t n) const
|
||||
{
|
||||
btAssert(n >= 0);
|
||||
btAssert(n < size());
|
||||
return m_data[n];
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE T &at(int32_t n) {
|
||||
SIMD_FORCE_INLINE T& at(int32_t n)
|
||||
{
|
||||
btAssert(n >= 0);
|
||||
btAssert(n < size());
|
||||
return m_data[n];
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE const T &operator[](int32_t n) const {
|
||||
SIMD_FORCE_INLINE const T& operator[](int32_t n) const
|
||||
{
|
||||
btAssert(n >= 0);
|
||||
btAssert(n < size());
|
||||
return m_data[n];
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE T &operator[](int32_t n) {
|
||||
SIMD_FORCE_INLINE T& operator[](int32_t n)
|
||||
{
|
||||
btAssert(n >= 0);
|
||||
btAssert(n < size());
|
||||
return m_data[n];
|
||||
}
|
||||
|
||||
///clear the array, deallocated memory. Generally it is better to use array.resize(0), to reduce performance overhead of run-time memory (de)allocations.
|
||||
SIMD_FORCE_INLINE void clear() {
|
||||
SIMD_FORCE_INLINE void clear()
|
||||
{
|
||||
destroy(0, size());
|
||||
|
||||
deallocate();
|
||||
|
@ -166,7 +178,8 @@ public:
|
|||
init();
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void pop_back() {
|
||||
SIMD_FORCE_INLINE void pop_back()
|
||||
{
|
||||
btAssert(m_size > 0);
|
||||
m_size--;
|
||||
m_data[m_size].~T();
|
||||
|
@ -174,14 +187,16 @@ public:
|
|||
|
||||
///resize changes the number of elements in the array. If the new size is larger, the new elements will be constructed using the optional second argument.
|
||||
///when the new number of elements is smaller, the destructor will be called, but memory will not be freed, to reduce performance overhead of run-time memory (de)allocations.
|
||||
SIMD_FORCE_INLINE void resize(int32_t newsize, const T &fillData = T()) {
|
||||
SIMD_FORCE_INLINE void resize(int32_t newsize, const T& fillData = T())
|
||||
{
|
||||
int32_t curSize = size();
|
||||
|
||||
if (newsize < curSize) {
|
||||
for (int32_t i = newsize; i < curSize; i++) {
|
||||
m_data[i].~T();
|
||||
}
|
||||
} else {
|
||||
}
|
||||
else {
|
||||
if (newsize > size()) {
|
||||
reserve(newsize);
|
||||
}
|
||||
|
@ -195,7 +210,8 @@ public:
|
|||
m_size = newsize;
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE T &expandNonInitializing() {
|
||||
SIMD_FORCE_INLINE T& expandNonInitializing()
|
||||
{
|
||||
int32_t sz = size();
|
||||
if (sz == capacity()) {
|
||||
reserve(allocSize(size()));
|
||||
|
@ -205,7 +221,8 @@ public:
|
|||
return m_data[sz];
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE T &expand(const T &fillValue = T()) {
|
||||
SIMD_FORCE_INLINE T& expand(const T& fillValue = T())
|
||||
{
|
||||
int32_t sz = size();
|
||||
if (sz == capacity()) {
|
||||
reserve(allocSize(size()));
|
||||
|
@ -218,7 +235,8 @@ public:
|
|||
return m_data[sz];
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void push_back(const T &_Val) {
|
||||
SIMD_FORCE_INLINE void push_back(const T& _Val)
|
||||
{
|
||||
int32_t sz = size();
|
||||
if (sz == capacity()) {
|
||||
reserve(allocSize(size()));
|
||||
|
@ -234,11 +252,13 @@ public:
|
|||
}
|
||||
|
||||
/// return the pre-allocated (reserved) elements, this is at least as large as the total number of elements,see size() and reserve()
|
||||
SIMD_FORCE_INLINE int32_t capacity() const {
|
||||
SIMD_FORCE_INLINE int32_t capacity() const
|
||||
{
|
||||
return m_capacity;
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void reserve(int32_t _Count) { // determine new minimum length of allocated storage
|
||||
SIMD_FORCE_INLINE void reserve(int32_t _Count)
|
||||
{ // determine new minimum length of allocated storage
|
||||
if (capacity() < _Count) { // not enough room, reallocate
|
||||
T* s = (T*)allocate(_Count);
|
||||
|
||||
|
@ -259,13 +279,15 @@ public:
|
|||
|
||||
class less {
|
||||
public:
|
||||
bool operator()(const T &a, const T &b) {
|
||||
bool operator()(const T& a, const T& b)
|
||||
{
|
||||
return (a < b);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename L>
|
||||
void quickSortInternal(const L &CompareFunc, int32_t lo, int32_t hi) {
|
||||
void quickSortInternal(const L& CompareFunc, int32_t lo, int32_t hi)
|
||||
{
|
||||
// lo is the lower index, hi is the upper index
|
||||
// of the region of array a that is to be sorted
|
||||
int32_t i = lo, j = hi;
|
||||
|
@ -292,7 +314,8 @@ public:
|
|||
}
|
||||
|
||||
template <typename L>
|
||||
void quickSort(const L &CompareFunc) {
|
||||
void quickSort(const L& CompareFunc)
|
||||
{
|
||||
//don't sort 0 or 1 elements
|
||||
if (size() > 1) {
|
||||
quickSortInternal(CompareFunc, 0, size() - 1);
|
||||
|
@ -301,7 +324,8 @@ public:
|
|||
|
||||
///heap sort from http://www.csse.monash.edu.au/~lloyd/tildeAlgDS/Sort/Heap/
|
||||
template <typename L>
|
||||
void downHeap(T *pArr, int32_t k, int32_t n, const L &CompareFunc) {
|
||||
void downHeap(T* pArr, int32_t k, int32_t n, const L& CompareFunc)
|
||||
{
|
||||
/* PRE: a[k+1..N] is a heap */
|
||||
/* POST: a[k..N] is a heap */
|
||||
|
||||
|
@ -318,14 +342,16 @@ public:
|
|||
/* move child up */
|
||||
pArr[k - 1] = pArr[child - 1];
|
||||
k = child;
|
||||
} else {
|
||||
}
|
||||
else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
pArr[k - 1] = temp;
|
||||
} /*downHeap*/
|
||||
|
||||
void swap(int32_t index0, int32_t index1) {
|
||||
void swap(int32_t index0, int32_t index1)
|
||||
{
|
||||
#ifdef BT_USE_MEMCPY
|
||||
char temp[sizeof(T)];
|
||||
memcpy(temp, &m_data[index0], sizeof(T));
|
||||
|
@ -339,7 +365,8 @@ public:
|
|||
}
|
||||
|
||||
template <typename L>
|
||||
void heapSort(const L &CompareFunc) {
|
||||
void heapSort(const L& CompareFunc)
|
||||
{
|
||||
/* sort a[0..N-1], N.B. 0 to N-1 */
|
||||
int32_t k;
|
||||
int32_t n = m_size;
|
||||
|
@ -358,7 +385,8 @@ public:
|
|||
}
|
||||
|
||||
///non-recursive binary search, assumes sorted array
|
||||
int32_t findBinarySearch(const T &key) const {
|
||||
int32_t findBinarySearch(const T& key) const
|
||||
{
|
||||
int32_t first = 0;
|
||||
int32_t last = size() - 1;
|
||||
|
||||
|
@ -375,7 +403,8 @@ public:
|
|||
return size(); // failed to find key
|
||||
}
|
||||
|
||||
int32_t findLinearSearch(const T &key) const {
|
||||
int32_t findLinearSearch(const T& key) const
|
||||
{
|
||||
int32_t index = size();
|
||||
int32_t i;
|
||||
|
||||
|
@ -388,7 +417,8 @@ public:
|
|||
return index;
|
||||
}
|
||||
|
||||
void remove(const T &key) {
|
||||
void remove(const T& key)
|
||||
{
|
||||
|
||||
int32_t findIndex = findLinearSearch(key);
|
||||
if (findIndex < size()) {
|
||||
|
@ -398,7 +428,8 @@ public:
|
|||
}
|
||||
|
||||
//PCK: whole function
|
||||
void initializeFromBuffer(void *buffer, int32_t size, int32_t capacity) {
|
||||
void initializeFromBuffer(void* buffer, int32_t size, int32_t capacity)
|
||||
{
|
||||
clear();
|
||||
m_ownsMemory = false;
|
||||
m_data = (T*)buffer;
|
||||
|
@ -406,15 +437,12 @@ public:
|
|||
m_capacity = capacity;
|
||||
}
|
||||
|
||||
void copyFromArray(const btAlignedObjectArray &otherArray) {
|
||||
void copyFromArray(const btAlignedObjectArray& otherArray)
|
||||
{
|
||||
int32_t otherSize = otherArray.size();
|
||||
resize(otherSize);
|
||||
otherArray.copy(0, otherSize, m_data);
|
||||
}
|
||||
};
|
||||
|
||||
//GODOT ADDITION
|
||||
}; // namespace VHACD
|
||||
//
|
||||
|
||||
#endif //BT_OBJECT_ARRAY__
|
||||
|
|
thirdparty/vhacd/inc/btConvexHullComputer.h (vendored, 23 changed lines)
@@ -18,10 +18,6 @@ subject to the following restrictions:
 #include "btAlignedObjectArray.h"
 #include "btVector3.h"
 
-//GODOT ADDITION
-namespace VHACD {
-//
-
 /// Convex hull implementation based on Preparata and Hong
 /// See http://code.google.com/p/bullet/issues/detail?id=275
 /// Ole Kniemeyer, MAXON Computer GmbH

@@ -39,11 +35,13 @@ public:
         friend class btConvexHullComputer;
 
     public:
-        int32_t getSourceVertex() const {
+        int32_t getSourceVertex() const
+        {
             return (this + reverse)->targetVertex;
         }
 
-        int32_t getTargetVertex() const {
+        int32_t getTargetVertex() const
+        {
             return targetVertex;
         }
 

@@ -57,7 +55,8 @@ public:
             return (this + reverse)->getNextEdgeOfVertex();
         }
 
-        const Edge *getReverseEdge() const {
+        const Edge* getReverseEdge() const
+        {
             return this + reverse;
         }
     };

@@ -83,18 +82,16 @@ public:
 
     The output convex hull can be found in the member variables "vertices", "edges", "faces".
     */
-    btScalar compute(const float *coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) {
+    btScalar compute(const float* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp)
+    {
         return compute(coords, false, stride, count, shrink, shrinkClamp);
     }
 
     // same as above, but double precision
-    btScalar compute(const double *coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) {
+    btScalar compute(const double* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp)
+    {
         return compute(coords, true, stride, count, shrink, shrinkClamp);
     }
 };
 
-//GODOT ADDITION
-}; // namespace VHACD
-//
-
 #endif //BT_CONVEX_HULL_COMPUTER_H
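The class comment above already documents where the results are stored ("vertices", "edges", "faces"). A hedged usage sketch, based only on the compute() overloads visible in this hunk; the member types are not part of this excerpt, so they are referenced by name only, and the stride argument is assumed to be the byte offset between consecutive points:

    #include "btConvexHullComputer.h"

    // points is assumed to be a tightly packed array of pointCount float triplets.
    void buildHull(const float* points, int32_t pointCount)
    {
        btConvexHullComputer hull;
        hull.compute(points, int32_t(3 * sizeof(float)), pointCount,
                     /*shrink=*/0.0, /*shrinkClamp=*/0.0);
        // Results are now in hull.vertices, hull.edges and hull.faces,
        // as stated in the class documentation quoted above.
    }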
thirdparty/vhacd/inc/btMinMax.h (vendored, 29 changed lines)
@@ -17,50 +17,49 @@ subject to the following restrictions:
 
 #include "btScalar.h"
 
-//GODOT ADDITION
-namespace VHACD {
-//
-
 template <class T>
-SIMD_FORCE_INLINE const T &btMin(const T &a, const T &b) {
+SIMD_FORCE_INLINE const T& btMin(const T& a, const T& b)
+{
     return a < b ? a : b;
 }
 
 template <class T>
-SIMD_FORCE_INLINE const T &btMax(const T &a, const T &b) {
+SIMD_FORCE_INLINE const T& btMax(const T& a, const T& b)
+{
     return a > b ? a : b;
 }
 
 template <class T>
-SIMD_FORCE_INLINE const T &btClamped(const T &a, const T &lb, const T &ub) {
+SIMD_FORCE_INLINE const T& btClamped(const T& a, const T& lb, const T& ub)
+{
     return a < lb ? lb : (ub < a ? ub : a);
 }
 
 template <class T>
-SIMD_FORCE_INLINE void btSetMin(T &a, const T &b) {
+SIMD_FORCE_INLINE void btSetMin(T& a, const T& b)
+{
     if (b < a) {
         a = b;
     }
 }
 
 template <class T>
-SIMD_FORCE_INLINE void btSetMax(T &a, const T &b) {
+SIMD_FORCE_INLINE void btSetMax(T& a, const T& b)
+{
     if (a < b) {
         a = b;
     }
 }
 
 template <class T>
-SIMD_FORCE_INLINE void btClamp(T &a, const T &lb, const T &ub) {
+SIMD_FORCE_INLINE void btClamp(T& a, const T& lb, const T& ub)
+{
     if (a < lb) {
         a = lb;
-    } else if (ub < a) {
+    }
+    else if (ub < a) {
         a = ub;
     }
 }
 
-//GODOT ADDITION
-}; // namespace VHACD
-//
-
 #endif //BT_GEN_MINMAX_H
thirdparty/vhacd/inc/btScalar.h (vendored, 206 changed lines)
@ -22,24 +22,17 @@ subject to the following restrictions:
|
|||
|
||||
#include <float.h>
|
||||
#include <math.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h> //size_t for MSVC 6.0
|
||||
#include <stdint.h>
|
||||
|
||||
/* SVN $Revision$ on $Date$ from http://bullet.googlecode.com*/
|
||||
#define BT_BULLET_VERSION 279
|
||||
|
||||
//GODOT ADDITION
|
||||
namespace VHACD {
|
||||
//
|
||||
|
||||
inline int32_t btGetVersion() {
|
||||
inline int32_t btGetVersion()
|
||||
{
|
||||
return BT_BULLET_VERSION;
|
||||
}
|
||||
|
||||
//GODOT ADDITION
|
||||
}; // namespace VHACD
|
||||
//
|
||||
|
||||
#if defined(DEBUG) || defined(_DEBUG)
|
||||
#define BT_DEBUG
|
||||
#endif
|
||||
|
@ -206,10 +199,6 @@ inline int32_t btGetVersion() {
|
|||
#endif //__CELLOS_LV2__
|
||||
#endif
|
||||
|
||||
//GODOT ADDITION
|
||||
namespace VHACD {
|
||||
//
|
||||
|
||||
///The btScalar type abstracts floating point numbers, to easily switch between double and single floating point precision.
|
||||
#if defined(BT_USE_DOUBLE_PRECISION)
|
||||
typedef double btScalar;
|
||||
|
@ -233,57 +222,41 @@ typedef float btScalar;
|
|||
|
||||
#if defined(BT_USE_DOUBLE_PRECISION) || defined(BT_FORCE_DOUBLE_FUNCTIONS)
|
||||
|
||||
SIMD_FORCE_INLINE btScalar btSqrt(btScalar x) {
|
||||
SIMD_FORCE_INLINE btScalar btSqrt(btScalar x)
|
||||
{
|
||||
return sqrt(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btFabs(btScalar x) {
|
||||
return fabs(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btCos(btScalar x) {
|
||||
return cos(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btSin(btScalar x) {
|
||||
return sin(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btTan(btScalar x) {
|
||||
return tan(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAcos(btScalar x) {
|
||||
SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabs(x); }
|
||||
SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cos(x); }
|
||||
SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sin(x); }
|
||||
SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tan(x); }
|
||||
SIMD_FORCE_INLINE btScalar btAcos(btScalar x)
|
||||
{
|
||||
if (x < btScalar(-1))
|
||||
x = btScalar(-1);
|
||||
if (x > btScalar(1))
|
||||
x = btScalar(1);
|
||||
return acos(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAsin(btScalar x) {
|
||||
SIMD_FORCE_INLINE btScalar btAsin(btScalar x)
|
||||
{
|
||||
if (x < btScalar(-1))
|
||||
x = btScalar(-1);
|
||||
if (x > btScalar(1))
|
||||
x = btScalar(1);
|
||||
return asin(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAtan(btScalar x) {
|
||||
return atan(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) {
|
||||
return atan2(x, y);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btExp(btScalar x) {
|
||||
return exp(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btLog(btScalar x) {
|
||||
return log(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) {
|
||||
return pow(x, y);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) {
|
||||
return fmod(x, y);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atan(x); }
|
||||
SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2(x, y); }
|
||||
SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return exp(x); }
|
||||
SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return log(x); }
|
||||
SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return pow(x, y); }
|
||||
SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { return fmod(x, y); }
|
||||
|
||||
#else
|
||||
|
||||
SIMD_FORCE_INLINE btScalar btSqrt(btScalar y) {
|
||||
SIMD_FORCE_INLINE btScalar btSqrt(btScalar y)
|
||||
{
|
||||
#ifdef USE_APPROXIMATION
|
||||
double x, z, tempf;
|
||||
unsigned long* tfptr = ((unsigned long*)&tempf) + 1;
|
||||
|
@ -302,50 +275,32 @@ SIMD_FORCE_INLINE btScalar btSqrt(btScalar y) {
|
|||
return sqrtf(y);
|
||||
#endif
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btFabs(btScalar x) {
|
||||
return fabsf(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btCos(btScalar x) {
|
||||
return cosf(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btSin(btScalar x) {
|
||||
return sinf(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btTan(btScalar x) {
|
||||
return tanf(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAcos(btScalar x) {
|
||||
SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabsf(x); }
|
||||
SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cosf(x); }
|
||||
SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sinf(x); }
|
||||
SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tanf(x); }
|
||||
SIMD_FORCE_INLINE btScalar btAcos(btScalar x)
|
||||
{
|
||||
if (x < btScalar(-1))
|
||||
x = btScalar(-1);
|
||||
if (x > btScalar(1))
|
||||
x = btScalar(1);
|
||||
return acosf(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAsin(btScalar x) {
|
||||
SIMD_FORCE_INLINE btScalar btAsin(btScalar x)
|
||||
{
|
||||
if (x < btScalar(-1))
|
||||
x = btScalar(-1);
|
||||
if (x > btScalar(1))
|
||||
x = btScalar(1);
|
||||
return asinf(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAtan(btScalar x) {
|
||||
return atanf(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) {
|
||||
return atan2f(x, y);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btExp(btScalar x) {
|
||||
return expf(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btLog(btScalar x) {
|
||||
return logf(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) {
|
||||
return powf(x, y);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) {
|
||||
return fmodf(x, y);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atanf(x); }
|
||||
SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2f(x, y); }
|
||||
SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return expf(x); }
|
||||
SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return logf(x); }
|
||||
SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return powf(x, y); }
|
||||
SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { return fmodf(x, y); }
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -366,7 +321,8 @@ SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) {
|
|||
#define SIMD_INFINITY FLT_MAX
|
||||
#endif
|
||||
|
||||
SIMD_FORCE_INLINE btScalar btAtan2Fast(btScalar y, btScalar x) {
|
||||
SIMD_FORCE_INLINE btScalar btAtan2Fast(btScalar y, btScalar x)
|
||||
{
|
||||
btScalar coeff_1 = SIMD_PI / 4.0f;
|
||||
btScalar coeff_2 = 3.0f * coeff_1;
|
||||
btScalar abs_y = btFabs(y);
|
||||
|
@ -374,34 +330,32 @@ SIMD_FORCE_INLINE btScalar btAtan2Fast(btScalar y, btScalar x) {
|
|||
if (x >= 0.0f) {
|
||||
btScalar r = (x - abs_y) / (x + abs_y);
|
||||
angle = coeff_1 - coeff_1 * r;
|
||||
} else {
|
||||
}
|
||||
else {
|
||||
btScalar r = (x + abs_y) / (abs_y - x);
|
||||
angle = coeff_2 - coeff_1 * r;
|
||||
}
|
||||
return (y < 0.0f) ? -angle : angle;
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE bool btFuzzyZero(btScalar x) {
|
||||
return btFabs(x) < SIMD_EPSILON;
|
||||
}
|
||||
SIMD_FORCE_INLINE bool btFuzzyZero(btScalar x) { return btFabs(x) < SIMD_EPSILON; }
|
||||
|
||||
SIMD_FORCE_INLINE bool btEqual(btScalar a, btScalar eps) {
|
||||
SIMD_FORCE_INLINE bool btEqual(btScalar a, btScalar eps)
|
||||
{
|
||||
return (((a) <= eps) && !((a) < -eps));
|
||||
}
|
||||
SIMD_FORCE_INLINE bool btGreaterEqual(btScalar a, btScalar eps) {
|
||||
SIMD_FORCE_INLINE bool btGreaterEqual(btScalar a, btScalar eps)
|
||||
{
|
||||
return (!((a) <= eps));
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE int32_t btIsNegative(btScalar x) {
|
||||
SIMD_FORCE_INLINE int32_t btIsNegative(btScalar x)
|
||||
{
|
||||
return x < btScalar(0.0) ? 1 : 0;
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE btScalar btRadians(btScalar x) {
|
||||
return x * SIMD_RADS_PER_DEG;
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btDegrees(btScalar x) {
|
||||
return x * SIMD_DEGS_PER_RAD;
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btRadians(btScalar x) { return x * SIMD_RADS_PER_DEG; }
|
||||
SIMD_FORCE_INLINE btScalar btDegrees(btScalar x) { return x * SIMD_DEGS_PER_RAD; }
|
||||
|
||||
#define BT_DECLARE_HANDLE(name) \
|
||||
typedef struct name##__ { \
|
||||
|
@ -409,13 +363,15 @@ SIMD_FORCE_INLINE btScalar btDegrees(btScalar x) {
|
|||
} * name
|
||||
|
||||
#ifndef btFsel
|
||||
SIMD_FORCE_INLINE btScalar btFsel(btScalar a, btScalar b, btScalar c) {
|
||||
SIMD_FORCE_INLINE btScalar btFsel(btScalar a, btScalar b, btScalar c)
|
||||
{
|
||||
return a >= 0 ? b : c;
|
||||
}
|
||||
#endif
|
||||
#define btFsels(a, b, c) (btScalar) btFsel(a, b, c)
|
||||
|
||||
SIMD_FORCE_INLINE bool btMachineIsLittleEndian() {
|
||||
SIMD_FORCE_INLINE bool btMachineIsLittleEndian()
|
||||
{
|
||||
long int i = 1;
|
||||
const char* p = (const char*)&i;
|
||||
if (p[0] == 1) // Lowest address contains the least significant byte
|
||||
|
@ -426,7 +382,8 @@ SIMD_FORCE_INLINE bool btMachineIsLittleEndian() {
|
|||
|
||||
///btSelect avoids branches, which makes performance much better for consoles like Playstation 3 and XBox 360
|
||||
///Thanks Phil Knight. See also http://www.cellperformance.com/articles/2006/04/more_techniques_for_eliminatin_1.html
|
||||
SIMD_FORCE_INLINE unsigned btSelect(unsigned condition, unsigned valueIfConditionNonZero, unsigned valueIfConditionZero) {
|
||||
SIMD_FORCE_INLINE unsigned btSelect(unsigned condition, unsigned valueIfConditionNonZero, unsigned valueIfConditionZero)
|
||||
{
|
||||
// Set testNz to 0xFFFFFFFF if condition is nonzero, 0x00000000 if condition is zero
|
||||
// Rely on positive value or'ed with its negative having sign bit on
|
||||
// and zero value or'ed with its negative (which is still zero) having sign bit off
|
||||
|
@ -435,12 +392,14 @@ SIMD_FORCE_INLINE unsigned btSelect(unsigned condition, unsigned valueIfConditio
|
|||
unsigned testEqz = ~testNz;
|
||||
return ((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz));
|
||||
}
|
||||
SIMD_FORCE_INLINE int32_t btSelect(unsigned condition, int32_t valueIfConditionNonZero, int32_t valueIfConditionZero) {
|
||||
SIMD_FORCE_INLINE int32_t btSelect(unsigned condition, int32_t valueIfConditionNonZero, int32_t valueIfConditionZero)
|
||||
{
|
||||
unsigned testNz = (unsigned)(((int32_t)condition | -(int32_t)condition) >> 31);
|
||||
unsigned testEqz = ~testNz;
|
||||
return static_cast<int32_t>((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz));
|
||||
}
|
||||
SIMD_FORCE_INLINE float btSelect(unsigned condition, float valueIfConditionNonZero, float valueIfConditionZero) {
|
||||
SIMD_FORCE_INLINE float btSelect(unsigned condition, float valueIfConditionNonZero, float valueIfConditionZero)
|
||||
{
|
||||
#ifdef BT_HAVE_NATIVE_FSEL
|
||||
return (float)btFsel((btScalar)condition - btScalar(1.0f), valueIfConditionNonZero, valueIfConditionZero);
|
||||
#else
|
||||
|
@ -449,26 +408,31 @@ SIMD_FORCE_INLINE float btSelect(unsigned condition, float valueIfConditionNonZe
|
|||
}
|
||||
|
||||
template <typename T>
|
||||
SIMD_FORCE_INLINE void btSwap(T &a, T &b) {
|
||||
SIMD_FORCE_INLINE void btSwap(T& a, T& b)
|
||||
{
|
||||
T tmp = a;
|
||||
a = b;
|
||||
b = tmp;
|
||||
}
|
||||
|
||||
//PCK: endian swapping functions
|
||||
SIMD_FORCE_INLINE unsigned btSwapEndian(unsigned val) {
|
||||
SIMD_FORCE_INLINE unsigned btSwapEndian(unsigned val)
|
||||
{
|
||||
return (((val & 0xff000000) >> 24) | ((val & 0x00ff0000) >> 8) | ((val & 0x0000ff00) << 8) | ((val & 0x000000ff) << 24));
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE unsigned short btSwapEndian(unsigned short val) {
|
||||
SIMD_FORCE_INLINE unsigned short btSwapEndian(unsigned short val)
|
||||
{
|
||||
return static_cast<unsigned short>(((val & 0xff00) >> 8) | ((val & 0x00ff) << 8));
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE unsigned btSwapEndian(int32_t val) {
|
||||
SIMD_FORCE_INLINE unsigned btSwapEndian(int32_t val)
|
||||
{
|
||||
return btSwapEndian((unsigned)val);
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE unsigned short btSwapEndian(short val) {
|
||||
SIMD_FORCE_INLINE unsigned short btSwapEndian(short val)
|
||||
{
|
||||
return btSwapEndian((unsigned short)val);
|
||||
}
|
||||
|
||||
|
@ -478,7 +442,8 @@ SIMD_FORCE_INLINE unsigned short btSwapEndian(short val) {
|
|||
///When a floating point unit is faced with an invalid value, it may actually change the value, or worse, throw an exception.
|
||||
///In most systems, running user mode code, you wouldn't get an exception, but instead the hardware/os/runtime will 'fix' the number for you.
|
||||
///so instead of returning a float/double, we return integer/long long integer
|
||||
SIMD_FORCE_INLINE uint32_t btSwapEndianFloat(float d) {
|
||||
SIMD_FORCE_INLINE uint32_t btSwapEndianFloat(float d)
|
||||
{
|
||||
uint32_t a = 0;
|
||||
unsigned char* dst = (unsigned char*)&a;
|
||||
unsigned char* src = (unsigned char*)&d;
|
||||
|
@ -491,7 +456,8 @@ SIMD_FORCE_INLINE uint32_t btSwapEndianFloat(float d) {
|
|||
}
|
||||
|
||||
// unswap using char pointers
|
||||
SIMD_FORCE_INLINE float btUnswapEndianFloat(uint32_t a) {
|
||||
SIMD_FORCE_INLINE float btUnswapEndianFloat(uint32_t a)
|
||||
{
|
||||
float d = 0.0f;
|
||||
unsigned char* src = (unsigned char*)&a;
|
||||
unsigned char* dst = (unsigned char*)&d;
|
||||
|
@ -505,7 +471,8 @@ SIMD_FORCE_INLINE float btUnswapEndianFloat(uint32_t a) {
|
|||
}
|
||||
|
||||
// swap using char pointers
|
||||
SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char *dst) {
|
||||
SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char* dst)
|
||||
{
|
||||
unsigned char* src = (unsigned char*)&d;
|
||||
|
||||
dst[0] = src[7];
|
||||
|
@ -519,7 +486,8 @@ SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char *dst) {
|
|||
}
|
||||
|
||||
// unswap using char pointers
|
||||
SIMD_FORCE_INLINE double btUnswapEndianDouble(const unsigned char *src) {
|
||||
SIMD_FORCE_INLINE double btUnswapEndianDouble(const unsigned char* src)
|
||||
{
|
||||
double d = 0.0;
|
||||
unsigned char* dst = (unsigned char*)&d;
|
||||
|
||||
|
@ -536,30 +504,30 @@ SIMD_FORCE_INLINE double btUnswapEndianDouble(const unsigned char *src) {
|
|||
}
|
||||
|
||||
// returns normalized value in range [-SIMD_PI, SIMD_PI]
|
||||
SIMD_FORCE_INLINE btScalar btNormalizeAngle(btScalar angleInRadians) {
|
||||
SIMD_FORCE_INLINE btScalar btNormalizeAngle(btScalar angleInRadians)
|
||||
{
|
||||
angleInRadians = btFmod(angleInRadians, SIMD_2_PI);
|
||||
if (angleInRadians < -SIMD_PI) {
|
||||
return angleInRadians + SIMD_2_PI;
|
||||
} else if (angleInRadians > SIMD_PI) {
|
||||
}
|
||||
else if (angleInRadians > SIMD_PI) {
|
||||
return angleInRadians - SIMD_2_PI;
|
||||
} else {
|
||||
}
|
||||
else {
|
||||
return angleInRadians;
|
||||
}
|
||||
}
|
||||
|
||||
///rudimentary class to provide type info
|
||||
struct btTypedObject {
|
||||
btTypedObject(int32_t objectType) :
|
||||
m_objectType(objectType) {
|
||||
btTypedObject(int32_t objectType)
|
||||
: m_objectType(objectType)
|
||||
{
|
||||
}
|
||||
int32_t m_objectType;
|
||||
inline int32_t getObjectType() const {
|
||||
inline int32_t getObjectType() const
|
||||
{
|
||||
return m_objectType;
|
||||
}
|
||||
};
|
||||
|
||||
//GODOT ADDITION
|
||||
}; // namespace VHACD
|
||||
//
|
||||
|
||||
#endif //BT_SCALAR_H
|
||||
|
|
thirdparty/vhacd/inc/btVector3.h (vendored, 220 changed lines)
@ -30,18 +30,16 @@ subject to the following restrictions:
|
|||
* It has an un-used w component to suit 16-byte alignment when btVector3 is stored in containers. This extra component can be used by derived classes (Quaternion?) or by user
|
||||
* Ideally, this class should be replaced by a platform optimized SIMD version that keeps the data in registers
|
||||
*/
|
||||
//GODOT ADDITION
|
||||
namespace VHACD {
|
||||
//
|
||||
|
||||
ATTRIBUTE_ALIGNED16(class)
|
||||
btVector3 {
|
||||
btVector3
|
||||
{
|
||||
public:
|
||||
#if defined(__SPU__) && defined(__CELLOS_LV2__)
|
||||
btScalar m_floats[4];
|
||||
|
||||
public:
|
||||
SIMD_FORCE_INLINE const vec_float4 &get128() const {
|
||||
SIMD_FORCE_INLINE const vec_float4& get128() const
|
||||
{
|
||||
return *((const vec_float4*)&m_floats[0]);
|
||||
}
|
||||
|
||||
|
@ -52,10 +50,12 @@ public:
|
|||
__m128 mVec128;
|
||||
btScalar m_floats[4];
|
||||
};
|
||||
SIMD_FORCE_INLINE __m128 get128() const {
|
||||
SIMD_FORCE_INLINE __m128 get128() const
|
||||
{
|
||||
return mVec128;
|
||||
}
|
||||
SIMD_FORCE_INLINE void set128(__m128 v128) {
|
||||
SIMD_FORCE_INLINE void set128(__m128 v128)
|
||||
{
|
||||
mVec128 = v128;
|
||||
}
|
||||
#else
|
||||
|
@ -72,7 +72,8 @@ public:
|
|||
* @param y Y value
|
||||
* @param z Z value
|
||||
*/
|
||||
SIMD_FORCE_INLINE btVector3(const btScalar &x, const btScalar &y, const btScalar &z) {
|
||||
SIMD_FORCE_INLINE btVector3(const btScalar& x, const btScalar& y, const btScalar& z)
|
||||
{
|
||||
m_floats[0] = x;
|
||||
m_floats[1] = y;
|
||||
m_floats[2] = z;
|
||||
|
@ -81,7 +82,8 @@ public:
|
|||
|
||||
/**@brief Add a vector to this one
|
||||
* @param The vector to add to this one */
|
||||
SIMD_FORCE_INLINE btVector3 &operator+=(const btVector3 &v) {
|
||||
SIMD_FORCE_INLINE btVector3& operator+=(const btVector3& v)
|
||||
{
|
||||
|
||||
m_floats[0] += v.m_floats[0];
|
||||
m_floats[1] += v.m_floats[1];
|
||||
|
@ -91,7 +93,8 @@ public:
|
|||
|
||||
/**@brief Subtract a vector from this one
|
||||
* @param The vector to subtract */
|
||||
SIMD_FORCE_INLINE btVector3 &operator-=(const btVector3 &v) {
|
||||
SIMD_FORCE_INLINE btVector3& operator-=(const btVector3& v)
|
||||
{
|
||||
m_floats[0] -= v.m_floats[0];
|
||||
m_floats[1] -= v.m_floats[1];
|
||||
m_floats[2] -= v.m_floats[2];
|
||||
|
@ -99,7 +102,8 @@ public:
|
|||
}
|
||||
/**@brief Scale the vector
|
||||
* @param s Scale factor */
|
||||
SIMD_FORCE_INLINE btVector3 &operator*=(const btScalar &s) {
|
||||
SIMD_FORCE_INLINE btVector3& operator*=(const btScalar& s)
|
||||
{
|
||||
m_floats[0] *= s;
|
||||
m_floats[1] *= s;
|
||||
m_floats[2] *= s;
|
||||
|
@ -108,24 +112,28 @@ public:
|
|||
|
||||
/**@brief Inversely scale the vector
|
||||
* @param s Scale factor to divide by */
|
||||
SIMD_FORCE_INLINE btVector3 &operator/=(const btScalar &s) {
|
||||
SIMD_FORCE_INLINE btVector3& operator/=(const btScalar& s)
|
||||
{
|
||||
btFullAssert(s != btScalar(0.0));
|
||||
return * this *= btScalar(1.0) / s;
|
||||
}
|
||||
|
||||
/**@brief Return the dot product
|
||||
* @param v The other vector in the dot product */
|
||||
SIMD_FORCE_INLINE btScalar dot(const btVector3 &v) const {
|
||||
SIMD_FORCE_INLINE btScalar dot(const btVector3& v) const
|
||||
{
|
||||
return m_floats[0] * v.m_floats[0] + m_floats[1] * v.m_floats[1] + m_floats[2] * v.m_floats[2];
|
||||
}
|
||||
|
||||
/**@brief Return the length of the vector squared */
|
||||
SIMD_FORCE_INLINE btScalar length2() const {
|
||||
SIMD_FORCE_INLINE btScalar length2() const
|
||||
{
|
||||
return dot(*this);
|
||||
}
|
||||
|
||||
/**@brief Return the length of the vector */
|
||||
SIMD_FORCE_INLINE btScalar length() const {
|
||||
SIMD_FORCE_INLINE btScalar length() const
|
||||
{
|
||||
return btSqrt(length2());
|
||||
}
|
||||
|
||||
|
@ -137,7 +145,8 @@ public:
|
|||
* This is symantically treating the vector like a point */
|
||||
SIMD_FORCE_INLINE btScalar distance(const btVector3& v) const;
|
||||
|
||||
SIMD_FORCE_INLINE btVector3 &safeNormalize() {
|
||||
SIMD_FORCE_INLINE btVector3& safeNormalize()
|
||||
{
|
||||
btVector3 absVec = this->absolute();
|
||||
int32_t maxIndex = absVec.maxAxis();
|
||||
if (absVec[maxIndex] > 0) {
|
||||
|
@ -150,7 +159,8 @@ public:
|
|||
|
||||
/**@brief Normalize this vector
|
||||
* x^2 + y^2 + z^2 = 1 */
|
||||
SIMD_FORCE_INLINE btVector3 &normalize() {
|
||||
SIMD_FORCE_INLINE btVector3& normalize()
|
||||
{
|
||||
return * this /= length();
|
||||
}
|
||||
|
||||
|
@ -164,13 +174,15 @@ public:
|
|||
|
||||
/**@brief Return the angle between this and another vector
|
||||
* @param v The other vector */
|
||||
SIMD_FORCE_INLINE btScalar angle(const btVector3 &v) const {
|
||||
SIMD_FORCE_INLINE btScalar angle(const btVector3& v) const
|
||||
{
|
||||
btScalar s = btSqrt(length2() * v.length2());
|
||||
btFullAssert(s != btScalar(0.0));
|
||||
return btAcos(dot(v) / s);
|
||||
}
|
||||
/**@brief Return a vector will the absolute values of each element */
|
||||
SIMD_FORCE_INLINE btVector3 absolute() const {
|
||||
SIMD_FORCE_INLINE btVector3 absolute() const
|
||||
{
|
||||
return btVector3(
|
||||
btFabs(m_floats[0]),
|
||||
btFabs(m_floats[1]),
|
||||
|
@ -178,38 +190,45 @@ public:
|
|||
}
|
||||
/**@brief Return the cross product between this and another vector
|
||||
* @param v The other vector */
|
||||
SIMD_FORCE_INLINE btVector3 cross(const btVector3 &v) const {
|
||||
SIMD_FORCE_INLINE btVector3 cross(const btVector3& v) const
|
||||
{
|
||||
return btVector3(
|
||||
m_floats[1] * v.m_floats[2] - m_floats[2] * v.m_floats[1],
|
||||
m_floats[2] * v.m_floats[0] - m_floats[0] * v.m_floats[2],
|
||||
m_floats[0] * v.m_floats[1] - m_floats[1] * v.m_floats[0]);
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE btScalar triple(const btVector3 &v1, const btVector3 &v2) const {
|
||||
SIMD_FORCE_INLINE btScalar triple(const btVector3& v1, const btVector3& v2) const
|
||||
{
|
||||
return m_floats[0] * (v1.m_floats[1] * v2.m_floats[2] - v1.m_floats[2] * v2.m_floats[1]) + m_floats[1] * (v1.m_floats[2] * v2.m_floats[0] - v1.m_floats[0] * v2.m_floats[2]) + m_floats[2] * (v1.m_floats[0] * v2.m_floats[1] - v1.m_floats[1] * v2.m_floats[0]);
|
||||
}
|
||||
|
||||
/**@brief Return the axis with the smallest value
|
||||
* Note return values are 0,1,2 for x, y, or z */
|
||||
SIMD_FORCE_INLINE int32_t minAxis() const {
|
||||
SIMD_FORCE_INLINE int32_t minAxis() const
|
||||
{
|
||||
return m_floats[0] < m_floats[1] ? (m_floats[0] < m_floats[2] ? 0 : 2) : (m_floats[1] < m_floats[2] ? 1 : 2);
|
||||
}
|
||||
|
||||
/**@brief Return the axis with the largest value
|
||||
* Note return values are 0,1,2 for x, y, or z */
|
||||
SIMD_FORCE_INLINE int32_t maxAxis() const {
|
||||
SIMD_FORCE_INLINE int32_t maxAxis() const
|
||||
{
|
||||
return m_floats[0] < m_floats[1] ? (m_floats[1] < m_floats[2] ? 2 : 1) : (m_floats[0] < m_floats[2] ? 2 : 0);
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE int32_t furthestAxis() const {
|
||||
SIMD_FORCE_INLINE int32_t furthestAxis() const
|
||||
{
|
||||
return absolute().minAxis();
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE int32_t closestAxis() const {
|
||||
SIMD_FORCE_INLINE int32_t closestAxis() const
|
||||
{
|
||||
return absolute().maxAxis();
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void setInterpolate3(const btVector3 &v0, const btVector3 &v1, btScalar rt) {
|
||||
SIMD_FORCE_INLINE void setInterpolate3(const btVector3& v0, const btVector3& v1, btScalar rt)
|
||||
{
|
||||
btScalar s = btScalar(1.0) - rt;
|
||||
m_floats[0] = s * v0.m_floats[0] + rt * v1.m_floats[0];
|
||||
m_floats[1] = s * v0.m_floats[1] + rt * v1.m_floats[1];
|
||||
|
@ -221,7 +240,8 @@ public:
|
|||
/**@brief Return the linear interpolation between this and another vector
|
||||
* @param v The other vector
|
||||
* @param t The ration of this to v (t = 0 => return this, t=1 => return other) */
|
||||
SIMD_FORCE_INLINE btVector3 lerp(const btVector3 &v, const btScalar &t) const {
|
||||
SIMD_FORCE_INLINE btVector3 lerp(const btVector3& v, const btScalar& t) const
|
||||
{
|
||||
return btVector3(m_floats[0] + (v.m_floats[0] - m_floats[0]) * t,
|
||||
m_floats[1] + (v.m_floats[1] - m_floats[1]) * t,
|
||||
m_floats[2] + (v.m_floats[2] - m_floats[2]) * t);
|
||||
|
@ -229,7 +249,8 @@ public:
|
|||
|
||||
/**@brief Elementwise multiply this vector by the other
|
||||
* @param v The other vector */
|
||||
SIMD_FORCE_INLINE btVector3 &operator*=(const btVector3 &v) {
|
||||
SIMD_FORCE_INLINE btVector3& operator*=(const btVector3& v)
|
||||
{
|
||||
m_floats[0] *= v.m_floats[0];
|
||||
m_floats[1] *= v.m_floats[1];
|
||||
m_floats[2] *= v.m_floats[2];
|
||||
|
@ -265,18 +286,21 @@ public:
|
|||
SIMD_FORCE_INLINE operator btScalar*() { return &m_floats[0]; }
|
||||
SIMD_FORCE_INLINE operator const btScalar*() const { return &m_floats[0]; }
|
||||
|
||||
SIMD_FORCE_INLINE bool operator==(const btVector3 &other) const {
|
||||
SIMD_FORCE_INLINE bool operator==(const btVector3& other) const
|
||||
{
|
||||
return ((m_floats[3] == other.m_floats[3]) && (m_floats[2] == other.m_floats[2]) && (m_floats[1] == other.m_floats[1]) && (m_floats[0] == other.m_floats[0]));
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE bool operator!=(const btVector3 &other) const {
|
||||
SIMD_FORCE_INLINE bool operator!=(const btVector3& other) const
|
||||
{
|
||||
return !(*this == other);
|
||||
}
|
||||
|
||||
/**@brief Set each element to the max of the current values and the values of another btVector3
|
||||
* @param other The other btVector3 to compare with
|
||||
*/
|
||||
SIMD_FORCE_INLINE void setMax(const btVector3 &other) {
|
||||
SIMD_FORCE_INLINE void setMax(const btVector3& other)
|
||||
{
|
||||
btSetMax(m_floats[0], other.m_floats[0]);
|
||||
btSetMax(m_floats[1], other.m_floats[1]);
|
||||
btSetMax(m_floats[2], other.m_floats[2]);
|
||||
|
@ -285,35 +309,41 @@ public:
|
|||
/**@brief Set each element to the min of the current values and the values of another btVector3
|
||||
* @param other The other btVector3 to compare with
|
||||
*/
|
||||
SIMD_FORCE_INLINE void setMin(const btVector3 &other) {
|
||||
SIMD_FORCE_INLINE void setMin(const btVector3& other)
|
||||
{
|
||||
btSetMin(m_floats[0], other.m_floats[0]);
|
||||
btSetMin(m_floats[1], other.m_floats[1]);
|
||||
btSetMin(m_floats[2], other.m_floats[2]);
|
||||
btSetMin(m_floats[3], other.w());
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void setValue(const btScalar &x, const btScalar &y, const btScalar &z) {
|
||||
SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z)
|
||||
{
|
||||
m_floats[0] = x;
|
||||
m_floats[1] = y;
|
||||
m_floats[2] = z;
|
||||
m_floats[3] = btScalar(0.);
|
||||
}
|
||||
|
||||
void getSkewSymmetricMatrix(btVector3 * v0, btVector3 * v1, btVector3 * v2) const {
|
||||
void getSkewSymmetricMatrix(btVector3 * v0, btVector3 * v1, btVector3 * v2) const
|
||||
{
|
||||
v0->setValue(0., -z(), y());
|
||||
v1->setValue(z(), 0., -x());
|
||||
v2->setValue(-y(), x(), 0.);
|
||||
}
|
||||
|
||||
void setZero() {
|
||||
void setZero()
|
||||
{
|
||||
setValue(btScalar(0.), btScalar(0.), btScalar(0.));
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE bool isZero() const {
|
||||
SIMD_FORCE_INLINE bool isZero() const
|
||||
{
|
||||
return m_floats[0] == btScalar(0) && m_floats[1] == btScalar(0) && m_floats[2] == btScalar(0);
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE bool fuzzyZero() const {
|
||||
SIMD_FORCE_INLINE bool fuzzyZero() const
|
||||
{
|
||||
return length2() < SIMD_EPSILON;
|
||||
}
|
||||
|
||||
|
@ -332,84 +362,98 @@ public:
|
|||
|
||||
/**@brief Return the sum of two vectors (Point symantics)*/
|
||||
SIMD_FORCE_INLINE btVector3
|
||||
operator+(const btVector3 &v1, const btVector3 &v2) {
|
||||
operator+(const btVector3& v1, const btVector3& v2)
|
||||
{
|
||||
return btVector3(v1.m_floats[0] + v2.m_floats[0], v1.m_floats[1] + v2.m_floats[1], v1.m_floats[2] + v2.m_floats[2]);
|
||||
}
|
||||
|
||||
/**@brief Return the elementwise product of two vectors */
|
||||
SIMD_FORCE_INLINE btVector3
|
||||
operator*(const btVector3 &v1, const btVector3 &v2) {
|
||||
operator*(const btVector3& v1, const btVector3& v2)
|
||||
{
|
||||
return btVector3(v1.m_floats[0] * v2.m_floats[0], v1.m_floats[1] * v2.m_floats[1], v1.m_floats[2] * v2.m_floats[2]);
|
||||
}
|
||||
|
||||
/**@brief Return the difference between two vectors */
|
||||
SIMD_FORCE_INLINE btVector3
|
||||
operator-(const btVector3 &v1, const btVector3 &v2) {
|
||||
operator-(const btVector3& v1, const btVector3& v2)
|
||||
{
|
||||
return btVector3(v1.m_floats[0] - v2.m_floats[0], v1.m_floats[1] - v2.m_floats[1], v1.m_floats[2] - v2.m_floats[2]);
|
||||
}
|
||||
/**@brief Return the negative of the vector */
|
||||
SIMD_FORCE_INLINE btVector3
|
||||
operator-(const btVector3 &v) {
|
||||
operator-(const btVector3& v)
|
||||
{
|
||||
return btVector3(-v.m_floats[0], -v.m_floats[1], -v.m_floats[2]);
|
||||
}
|
||||
|
||||
/**@brief Return the vector scaled by s */
|
||||
SIMD_FORCE_INLINE btVector3
|
||||
operator*(const btVector3 &v, const btScalar &s) {
|
||||
operator*(const btVector3& v, const btScalar& s)
|
||||
{
|
||||
return btVector3(v.m_floats[0] * s, v.m_floats[1] * s, v.m_floats[2] * s);
|
||||
}
|
||||
|
||||
/**@brief Return the vector scaled by s */
|
||||
SIMD_FORCE_INLINE btVector3
|
||||
operator*(const btScalar &s, const btVector3 &v) {
|
||||
operator*(const btScalar& s, const btVector3& v)
|
||||
{
|
||||
return v * s;
|
||||
}
|
||||
|
||||
/**@brief Return the vector inversely scaled by s */
|
||||
SIMD_FORCE_INLINE btVector3
|
||||
operator/(const btVector3 &v, const btScalar &s) {
|
||||
operator/(const btVector3& v, const btScalar& s)
|
||||
{
|
||||
btFullAssert(s != btScalar(0.0));
|
||||
return v * (btScalar(1.0) / s);
|
||||
}
|
||||
|
||||
/**@brief Return the vector inversely scaled by s */
|
||||
SIMD_FORCE_INLINE btVector3
|
||||
operator/(const btVector3 &v1, const btVector3 &v2) {
|
||||
operator/(const btVector3& v1, const btVector3& v2)
|
||||
{
|
||||
return btVector3(v1.m_floats[0] / v2.m_floats[0], v1.m_floats[1] / v2.m_floats[1], v1.m_floats[2] / v2.m_floats[2]);
|
||||
}
|
||||
|
||||
/**@brief Return the dot product between two vectors */
|
||||
SIMD_FORCE_INLINE btScalar
|
||||
btDot(const btVector3 &v1, const btVector3 &v2) {
|
||||
btDot(const btVector3& v1, const btVector3& v2)
|
||||
{
|
||||
return v1.dot(v2);
|
||||
}
|
||||
|
||||
/**@brief Return the distance squared between two vectors */
|
||||
SIMD_FORCE_INLINE btScalar
|
||||
btDistance2(const btVector3 &v1, const btVector3 &v2) {
|
||||
btDistance2(const btVector3& v1, const btVector3& v2)
|
||||
{
|
||||
return v1.distance2(v2);
|
||||
}
|
||||
|
||||
/**@brief Return the distance between two vectors */
|
||||
SIMD_FORCE_INLINE btScalar
|
||||
btDistance(const btVector3 &v1, const btVector3 &v2) {
|
||||
btDistance(const btVector3& v1, const btVector3& v2)
|
||||
{
|
||||
return v1.distance(v2);
|
||||
}
|
||||
|
||||
/**@brief Return the angle between two vectors */
|
||||
SIMD_FORCE_INLINE btScalar
|
||||
btAngle(const btVector3 &v1, const btVector3 &v2) {
|
||||
btAngle(const btVector3& v1, const btVector3& v2)
|
||||
{
|
||||
return v1.angle(v2);
|
||||
}
|
||||
|
||||
/**@brief Return the cross product of two vectors */
|
||||
SIMD_FORCE_INLINE btVector3
|
||||
btCross(const btVector3 &v1, const btVector3 &v2) {
|
||||
btCross(const btVector3& v1, const btVector3& v2)
|
||||
{
|
||||
return v1.cross(v2);
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE btScalar
|
||||
btTriple(const btVector3 &v1, const btVector3 &v2, const btVector3 &v3) {
|
||||
btTriple(const btVector3& v1, const btVector3& v2, const btVector3& v3)
|
||||
{
|
||||
return v1.triple(v2, v3);
|
||||
}
|
||||
|
||||
|
@ -418,23 +462,28 @@ btTriple(const btVector3 &v1, const btVector3 &v2, const btVector3 &v3) {
|
|||
* @param v2 The other vector
|
||||
* @param t The ration of this to v (t = 0 => return v1, t=1 => return v2) */
|
||||
SIMD_FORCE_INLINE btVector3
|
||||
lerp(const btVector3 &v1, const btVector3 &v2, const btScalar &t) {
|
||||
lerp(const btVector3& v1, const btVector3& v2, const btScalar& t)
|
||||
{
|
||||
return v1.lerp(v2, t);
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE btScalar btVector3::distance2(const btVector3 &v) const {
|
||||
SIMD_FORCE_INLINE btScalar btVector3::distance2(const btVector3& v) const
|
||||
{
|
||||
return (v - *this).length2();
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE btScalar btVector3::distance(const btVector3 &v) const {
|
||||
SIMD_FORCE_INLINE btScalar btVector3::distance(const btVector3& v) const
|
||||
{
|
||||
return (v - *this).length();
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE btVector3 btVector3::normalized() const {
|
||||
SIMD_FORCE_INLINE btVector3 btVector3::normalized() const
|
||||
{
|
||||
return *this / length();
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE btVector3 btVector3::rotate(const btVector3 &wAxis, const btScalar angle) const {
|
||||
SIMD_FORCE_INLINE btVector3 btVector3::rotate(const btVector3& wAxis, const btScalar angle) const
|
||||
{
|
||||
// wAxis must be a unit lenght vector
|
||||
|
||||
btVector3 o = wAxis * wAxis.dot(*this);
|
||||
|
@ -450,12 +499,14 @@ class btVector4 : public btVector3 {
|
|||
public:
|
||||
SIMD_FORCE_INLINE btVector4() {}
|
||||
|
||||
SIMD_FORCE_INLINE btVector4(const btScalar &x, const btScalar &y, const btScalar &z, const btScalar &w) :
|
||||
btVector3(x, y, z) {
|
||||
    SIMD_FORCE_INLINE btVector4(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w)
        : btVector3(x, y, z)
    {
        m_floats[3] = w;
    }

-   SIMD_FORCE_INLINE btVector4 absolute4() const {
+   SIMD_FORCE_INLINE btVector4 absolute4() const
+   {
        return btVector4(
            btFabs(m_floats[0]),
            btFabs(m_floats[1]),

@@ -465,7 +516,8 @@ public:

    btScalar getW() const { return m_floats[3]; }

-   SIMD_FORCE_INLINE int32_t maxAxis4() const {
+   SIMD_FORCE_INLINE int32_t maxAxis4() const
+   {
        int32_t maxIndex = -1;
        btScalar maxVal = btScalar(-BT_LARGE_FLOAT);
        if (m_floats[0] > maxVal) {

@@ -486,7 +538,8 @@ public:
        return maxIndex;
    }

-   SIMD_FORCE_INLINE int32_t minAxis4() const {
+   SIMD_FORCE_INLINE int32_t minAxis4() const
+   {
        int32_t minIndex = -1;
        btScalar minVal = btScalar(BT_LARGE_FLOAT);
        if (m_floats[0] < minVal) {

@@ -508,7 +561,8 @@ public:
        return minIndex;
    }

-   SIMD_FORCE_INLINE int32_t closestAxis4() const {
+   SIMD_FORCE_INLINE int32_t closestAxis4() const
+   {
        return absolute4().maxAxis4();
    }

@@ -531,7 +585,8 @@ public:
     * @param z Value of z
     * @param w Value of w
     */
-   SIMD_FORCE_INLINE void setValue(const btScalar &x, const btScalar &y, const btScalar &z, const btScalar &w) {
+   SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w)
+   {
        m_floats[0] = x;
        m_floats[1] = y;
        m_floats[2] = z;

@@ -540,7 +595,8 @@ public:
};

///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
-SIMD_FORCE_INLINE void btSwapScalarEndian(const btScalar &sourceVal, btScalar &destVal) {
+SIMD_FORCE_INLINE void btSwapScalarEndian(const btScalar& sourceVal, btScalar& destVal)
+{
#ifdef BT_USE_DOUBLE_PRECISION
    unsigned char* dest = (unsigned char*)&destVal;
    unsigned char* src = (unsigned char*)&sourceVal;

@@ -562,14 +618,16 @@ SIMD_FORCE_INLINE void btSwapScalarEndian(const btScalar &sourceVal, btScalar &d
#endif //BT_USE_DOUBLE_PRECISION
}
///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
-SIMD_FORCE_INLINE void btSwapVector3Endian(const btVector3 &sourceVec, btVector3 &destVec) {
+SIMD_FORCE_INLINE void btSwapVector3Endian(const btVector3& sourceVec, btVector3& destVec)
+{
    for (int32_t i = 0; i < 4; i++) {
        btSwapScalarEndian(sourceVec[i], destVec[i]);
    }
}

///btUnSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
-SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3 &vector) {
+SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3& vector)
+{

    btVector3 swappedVec;
    for (int32_t i = 0; i < 4; i++) {

@@ -579,7 +637,8 @@ SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3 &vector) {
}

template <class T>
-SIMD_FORCE_INLINE void btPlaneSpace1(const T &n, T &p, T &q) {
+SIMD_FORCE_INLINE void btPlaneSpace1(const T& n, T& p, T& q)
+{
    if (btFabs(n[2]) > SIMDSQRT12) {
        // choose p in y-z plane
        btScalar a = n[1] * n[1] + n[2] * n[2];

@@ -591,7 +650,8 @@ SIMD_FORCE_INLINE void btPlaneSpace1(const T &n, T &p, T &q) {
        q[0] = a * k;
        q[1] = -n[0] * p[2];
        q[2] = n[0] * p[1];
-   } else {
+   }
+   else {
        // choose p in x-y plane
        btScalar a = n[0] * n[0] + n[1] * n[1];
        btScalar k = btRecipSqrt(a);

@@ -613,41 +673,43 @@ struct btVector3DoubleData {
    double m_floats[4];
};

-SIMD_FORCE_INLINE void btVector3::serializeFloat(struct btVector3FloatData &dataOut) const {
+SIMD_FORCE_INLINE void btVector3::serializeFloat(struct btVector3FloatData& dataOut) const
+{
    ///could also do a memcpy, check if it is worth it
    for (int32_t i = 0; i < 4; i++)
        dataOut.m_floats[i] = float(m_floats[i]);
}

-SIMD_FORCE_INLINE void btVector3::deSerializeFloat(const struct btVector3FloatData &dataIn) {
+SIMD_FORCE_INLINE void btVector3::deSerializeFloat(const struct btVector3FloatData& dataIn)
+{
    for (int32_t i = 0; i < 4; i++)
        m_floats[i] = btScalar(dataIn.m_floats[i]);
}

-SIMD_FORCE_INLINE void btVector3::serializeDouble(struct btVector3DoubleData &dataOut) const {
+SIMD_FORCE_INLINE void btVector3::serializeDouble(struct btVector3DoubleData& dataOut) const
+{
    ///could also do a memcpy, check if it is worth it
    for (int32_t i = 0; i < 4; i++)
        dataOut.m_floats[i] = double(m_floats[i]);
}

-SIMD_FORCE_INLINE void btVector3::deSerializeDouble(const struct btVector3DoubleData &dataIn) {
+SIMD_FORCE_INLINE void btVector3::deSerializeDouble(const struct btVector3DoubleData& dataIn)
+{
    for (int32_t i = 0; i < 4; i++)
        m_floats[i] = btScalar(dataIn.m_floats[i]);
}

-SIMD_FORCE_INLINE void btVector3::serialize(struct btVector3Data &dataOut) const {
+SIMD_FORCE_INLINE void btVector3::serialize(struct btVector3Data& dataOut) const
+{
    ///could also do a memcpy, check if it is worth it
    for (int32_t i = 0; i < 4; i++)
        dataOut.m_floats[i] = m_floats[i];
}

-SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3Data &dataIn) {
+SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3Data& dataIn)
+{
    for (int32_t i = 0; i < 4; i++)
        m_floats[i] = dataIn.m_floats[i];
}

-//GODOT ADDITION
-}; // namespace VHACD
-//
-
#endif //BT_VECTOR3_H
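The btPlaneSpace1 template in the hunks above builds two vectors p and q orthogonal to a normal n, and it branches on |n[2]| > 1/sqrt(2) so that the 2D sub-vector it normalizes is never close to zero length. A rough standalone sketch of that construction follows; the Vec3 struct and the planeSpace name are invented for this example and are not part of the vendored sources.

// Illustrative sketch only, not the vendored code: a standalone version of
// the plane-space construction visible in btPlaneSpace1 above.
#include <cmath>
#include <cstdio>

struct Vec3 { double x, y, z; };

// Given a roughly unit-length normal n, produce p and q so that {n, p, q}
// is an orthogonal basis. Branching on |n.z| > 1/sqrt(2) guarantees the
// 2D sub-vector being normalized is never near zero.
static void planeSpace(const Vec3& n, Vec3& p, Vec3& q) {
    const double SQRT12 = 0.7071067811865476; // 1/sqrt(2)
    if (std::fabs(n.z) > SQRT12) {
        // choose p in the y-z plane
        double a = n.y * n.y + n.z * n.z;
        double k = 1.0 / std::sqrt(a);
        p = { 0.0, -n.z * k, n.y * k };
        q = { a * k, -n.x * p.z, n.x * p.y }; // q = n x p
    } else {
        // choose p in the x-y plane
        double a = n.x * n.x + n.y * n.y;
        double k = 1.0 / std::sqrt(a);
        p = { -n.y * k, n.x * k, 0.0 };
        q = { -n.z * p.y, n.z * p.x, a * k }; // q = n x p
    }
}

int main() {
    Vec3 n = { 0.0, 0.0, 1.0 }, p, q;
    planeSpace(n, p, q);
    std::printf("p = (%g, %g, %g), q = (%g, %g, %g)\n", p.x, p.y, p.z, q.x, q.y, q.z);
    return 0;
}

With n = (0, 0, 1) this yields p = (0, -1, 0) and q = (1, 0, 0), an orthogonal frame around the normal.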
59 thirdparty/vhacd/src/btAlignedAllocator.cpp (vendored)
@@ -15,10 +15,6 @@ subject to the following restrictions:

#include "btAlignedAllocator.h"

-//GODOT ADDITION
-namespace VHACD {
-//
-
#ifdef _MSC_VER
#pragma warning(disable:4311 4302)
#endif

@@ -27,11 +23,13 @@ int32_t gNumAlignedAllocs = 0;
int32_t gNumAlignedFree = 0;
int32_t gTotalBytesAlignedAllocs = 0; //detect memory leaks

-static void *btAllocDefault(size_t size) {
+static void* btAllocDefault(size_t size)
+{
    return malloc(size);
}

-static void btFreeDefault(void *ptr) {
+static void btFreeDefault(void* ptr)
+{
    free(ptr);
}

@@ -40,25 +38,30 @@ static btFreeFunc *sFreeFunc = btFreeDefault;

#if defined(BT_HAS_ALIGNED_ALLOCATOR)
#include <malloc.h>
-static void *btAlignedAllocDefault(size_t size, int32_t alignment) {
+static void* btAlignedAllocDefault(size_t size, int32_t alignment)
+{
    return _aligned_malloc(size, (size_t)alignment);
}

-static void btAlignedFreeDefault(void *ptr) {
+static void btAlignedFreeDefault(void* ptr)
+{
    _aligned_free(ptr);
}
#elif defined(__CELLOS_LV2__)
#include <stdlib.h>

-static inline void *btAlignedAllocDefault(size_t size, int32_t alignment) {
+static inline void* btAlignedAllocDefault(size_t size, int32_t alignment)
+{
    return memalign(alignment, size);
}

-static inline void btAlignedFreeDefault(void *ptr) {
+static inline void btAlignedFreeDefault(void* ptr)
+{
    free(ptr);
}
#else
-static inline void *btAlignedAllocDefault(size_t size, int32_t alignment) {
+static inline void* btAlignedAllocDefault(size_t size, int32_t alignment)
+{
    void* ret;
    char* real;
    unsigned long offset;

@@ -68,13 +71,15 @@ static inline void *btAlignedAllocDefault(size_t size, int32_t alignment) {
        offset = (alignment - (unsigned long)(real + sizeof(void*))) & (alignment - 1);
        ret = (void*)((real + sizeof(void*)) + offset);
        *((void**)(ret)-1) = (void*)(real);
-   } else {
+   }
+   else {
        ret = (void*)(real);
    }
    return (ret);
}

-static inline void btAlignedFreeDefault(void *ptr) {
+static inline void btAlignedFreeDefault(void* ptr)
+{
    void* real;

    if (ptr) {

@@ -87,12 +92,14 @@ static inline void btAlignedFreeDefault(void *ptr) {
static btAlignedAllocFunc* sAlignedAllocFunc = btAlignedAllocDefault;
static btAlignedFreeFunc* sAlignedFreeFunc = btAlignedFreeDefault;

-void btAlignedAllocSetCustomAligned(btAlignedAllocFunc *allocFunc, btAlignedFreeFunc *freeFunc) {
+void btAlignedAllocSetCustomAligned(btAlignedAllocFunc* allocFunc, btAlignedFreeFunc* freeFunc)
+{
    sAlignedAllocFunc = allocFunc ? allocFunc : btAlignedAllocDefault;
    sAlignedFreeFunc = freeFunc ? freeFunc : btAlignedFreeDefault;
}

-void btAlignedAllocSetCustom(btAllocFunc *allocFunc, btFreeFunc *freeFunc) {
+void btAlignedAllocSetCustom(btAllocFunc* allocFunc, btFreeFunc* freeFunc)
+{
    sAllocFunc = allocFunc ? allocFunc : btAllocDefault;
    sFreeFunc = freeFunc ? freeFunc : btFreeDefault;
}

@@ -101,7 +108,8 @@ void btAlignedAllocSetCustom(btAllocFunc *allocFunc, btFreeFunc *freeFunc) {
//this generic allocator provides the total allocated number of bytes
#include <stdio.h>

-void *btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char *filename) {
+void* btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char* filename)
+{
    void* ret;
    char* real;
    unsigned long offset;

@@ -115,7 +123,8 @@ void *btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char
        ret = (void*)((real + 2 * sizeof(void*)) + offset);
        *((void**)(ret)-1) = (void*)(real);
        *((int32_t*)(ret)-2) = size;
-   } else {
+   }
+   else {
        ret = (void*)(real); //??
    }

@@ -126,7 +135,8 @@ void *btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char
    return (ret);
}

-void btAlignedFreeInternal(void *ptr, int32_t line, char *filename) {
+void btAlignedFreeInternal(void* ptr, int32_t line, char* filename)
+{

    void* real;
    gNumAlignedFree++;

@@ -139,14 +149,16 @@ void btAlignedFreeInternal(void *ptr, int32_t line, char *filename) {
        printf("free #%d at address %x, from %s,line %d, size %d\n", gNumAlignedFree, real, filename, line, size);

        sFreeFunc(real);
-   } else {
+   }
+   else {
        printf("NULL ptr\n");
    }
}

#else //BT_DEBUG_MEMORY_ALLOCATIONS

-void *btAlignedAllocInternal(size_t size, int32_t alignment) {
+void* btAlignedAllocInternal(size_t size, int32_t alignment)
+{
    gNumAlignedAllocs++;
    void* ptr;
    ptr = sAlignedAllocFunc(size, alignment);

@@ -154,7 +166,8 @@ void *btAlignedAllocInternal(size_t size, int32_t alignment) {
    return ptr;
}

-void btAlignedFreeInternal(void *ptr) {
+void btAlignedFreeInternal(void* ptr)
+{
    if (!ptr) {
        return;
    }

@@ -164,8 +177,4 @@ void btAlignedFreeInternal(void *ptr) {
    sAlignedFreeFunc(ptr);
}

-//GODOT ADDITION
-};
-//
-
#endif //BT_DEBUG_MEMORY_ALLOCATIONS
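The fallback btAlignedAllocDefault/btAlignedFreeDefault pair in this diff over-allocates with plain malloc and stashes the pointer malloc returned immediately before the aligned address it hands out, so the matching free can recover it. Below is a minimal standalone sketch of that bookkeeping, assuming a power-of-two alignment; alignedAlloc and alignedFree are invented names for the example, not the vendored API.

// Illustrative sketch only, not the vendored implementation: the
// "store the real pointer just before the aligned block" trick used by the
// fallback allocator above.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

static void* alignedAlloc(std::size_t size, std::size_t alignment) {
    // Over-allocate: payload + one hidden pointer slot + worst-case padding.
    // alignment must be a power of two.
    char* real = static_cast<char*>(std::malloc(size + sizeof(void*) + (alignment - 1)));
    if (!real)
        return nullptr;
    // First usable byte after the hidden slot, rounded up to the alignment.
    std::uintptr_t raw = reinterpret_cast<std::uintptr_t>(real + sizeof(void*));
    std::uintptr_t aligned = (raw + (alignment - 1)) & ~(std::uintptr_t)(alignment - 1);
    void* ret = reinterpret_cast<void*>(aligned);
    // Stash the pointer malloc gave us directly before the block we hand out.
    reinterpret_cast<void**>(ret)[-1] = real;
    return ret;
}

static void alignedFree(void* ptr) {
    if (!ptr)
        return;
    // Recover the original malloc pointer stored just before the aligned block.
    void* real = reinterpret_cast<void**>(ptr)[-1];
    std::free(real);
}

int main() {
    void* p = alignedAlloc(100, 64);
    std::printf("aligned block at %p (64-byte aligned: %s)\n", p,
                (reinterpret_cast<std::uintptr_t>(p) % 64 == 0) ? "yes" : "no");
    alignedFree(p);
    return 0;
}

The BT_DEBUG_MEMORY_ALLOCATIONS path in the diff extends the same trick with a second hidden slot holding the allocation size, which is what the gNumAlignedAllocs/gTotalBytesAlignedAllocs counters track.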
542 thirdparty/vhacd/src/btConvexHullComputer.cpp (vendored)
File diff suppressed because it is too large