From 531b1588973ef0d0631e06353ca65131d97c00ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Verschelde?= Date: Thu, 11 Apr 2019 17:30:12 +0200 Subject: [PATCH 1/3] vhacd: Recommit unmodified upstream code without style changes Godot-specific changes will then be redone without touching upstream formatting. Also documented current state in thirdparty/README.md and added LICENSE. Add vhacd to COPYRIGHT.txt. --- COPYRIGHT.txt | 6 + thirdparty/README.md | 21 +- thirdparty/vhacd/LICENSE | 29 + thirdparty/vhacd/inc/btAlignedAllocator.h | 93 +- thirdparty/vhacd/inc/btAlignedObjectArray.h | 618 +-- thirdparty/vhacd/inc/btConvexHullComputer.h | 93 +- thirdparty/vhacd/inc/btMinMax.h | 55 +- thirdparty/vhacd/inc/btScalar.h | 486 +-- thirdparty/vhacd/inc/btVector3.h | 876 ++-- thirdparty/vhacd/src/btAlignedAllocator.cpp | 197 +- thirdparty/vhacd/src/btConvexHullComputer.cpp | 3878 +++++++++-------- 11 files changed, 3302 insertions(+), 3050 deletions(-) create mode 100644 thirdparty/vhacd/LICENSE diff --git a/COPYRIGHT.txt b/COPYRIGHT.txt index d28aabd0a60..738a0e62b58 100644 --- a/COPYRIGHT.txt +++ b/COPYRIGHT.txt @@ -385,6 +385,12 @@ Copyright: 2014-2018, Syoyo Fujita 2002, Industrial Light & Magic, a division of Lucas Digital Ltd. LLC License: BSD-3-clause +Files: ./thirdparty/vhacd/ +Comment: V-HACD +Copyright: 2011, Khaled Mamou + 2003-2009, Erwin Coumans +License: BSD-3-clause + Files: ./thirdparty/zlib/ Comment: zlib Copyright: 1995-2017, Jean-loup Gailly and Mark Adler diff --git a/thirdparty/README.md b/thirdparty/README.md index 7f9fc28b3e0..6e8be5d5ee0 100644 --- a/thirdparty/README.md +++ b/thirdparty/README.md @@ -1,5 +1,6 @@ # Third party libraries + ## assimp - Upstream: http://github.com/assimp/assimp @@ -294,8 +295,12 @@ Godot build configurations, check them out when updating. File extracted from upstream release tarball `mbedtls-2.16.0-apache.tgz`: - All `*.h` from `include/mbedtls/` to `thirdparty/mbedtls/include/mbedtls/` - All `*.c` from `library/` to `thirdparty/mbedtls/library/` -- Applied the patch in `thirdparty/mbedtls/1453.diff` (PR 1453). Soon to be merged upstream. Check it out at next update. -- Applied the patch in `thirdparty/mbedtls/padlock.diff`. This disables VIA padlock support which defines a symbol `unsupported` which clashses with a symbol in libwebsockets. +- Applied the patch in `thirdparty/mbedtls/1453.diff` (PR 1453). + Soon to be merged upstream. Check it out at next update. +- Applied the patch in `thirdparty/mbedtls/padlock.diff`. This disables VIA + padlock support which defines a symbol `unsupported` which clashes with + a symbol in libwebsockets. + ## miniupnpc @@ -523,6 +528,18 @@ Files extracted from upstream source: - `tinyexr.{cc,h}` +## vhacd + +- Upstream: https://github.com/kmammou/v-hacd +- Version: git (2297aa1, 2018) +- License: BSD-3-Clause + +Files extracted from upstream source: + +- From `src/VHACD_Lib/`: `inc`, `public` and `src` +- `LICENSE` + + ## zlib - Upstream: http://www.zlib.net diff --git a/thirdparty/vhacd/LICENSE b/thirdparty/vhacd/LICENSE new file mode 100644 index 00000000000..efc7f64399c --- /dev/null +++ b/thirdparty/vhacd/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2011, Khaled Mamou (kmamou at gmail dot com) +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. 
Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/thirdparty/vhacd/inc/btAlignedAllocator.h b/thirdparty/vhacd/inc/btAlignedAllocator.h index 4f534e43f07..11f6e12dcaf 100644 --- a/thirdparty/vhacd/inc/btAlignedAllocator.h +++ b/thirdparty/vhacd/inc/btAlignedAllocator.h @@ -21,27 +21,22 @@ subject to the following restrictions: ///that is better portable and more predictable #include "btScalar.h" - -//GODOT ADDITION -namespace VHACD { -// - //#define BT_DEBUG_MEMORY_ALLOCATIONS 1 #ifdef BT_DEBUG_MEMORY_ALLOCATIONS #define btAlignedAlloc(a, b) \ - btAlignedAllocInternal(a, b, __LINE__, __FILE__) + btAlignedAllocInternal(a, b, __LINE__, __FILE__) #define btAlignedFree(ptr) \ - btAlignedFreeInternal(ptr, __LINE__, __FILE__) + btAlignedFreeInternal(ptr, __LINE__, __FILE__) -void *btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char *filename); +void* btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char* filename); -void btAlignedFreeInternal(void *ptr, int32_t line, char *filename); +void btAlignedFreeInternal(void* ptr, int32_t line, char* filename); #else -void *btAlignedAllocInternal(size_t size, int32_t alignment); -void btAlignedFreeInternal(void *ptr); +void* btAlignedAllocInternal(size_t size, int32_t alignment); +void btAlignedFreeInternal(void* ptr); #define btAlignedAlloc(size, alignment) btAlignedAllocInternal(size, alignment) #define btAlignedFree(ptr) btAlignedFreeInternal(ptr) @@ -49,63 +44,61 @@ void btAlignedFreeInternal(void *ptr); #endif typedef int32_t size_type; -typedef void *(btAlignedAllocFunc)(size_t size, int32_t alignment); -typedef void(btAlignedFreeFunc)(void *memblock); -typedef void *(btAllocFunc)(size_t size); -typedef void(btFreeFunc)(void *memblock); +typedef void*(btAlignedAllocFunc)(size_t size, int32_t alignment); +typedef void(btAlignedFreeFunc)(void* memblock); +typedef void*(btAllocFunc)(size_t size); +typedef void(btFreeFunc)(void* memblock); ///The developer can let all Bullet memory allocations go through a custom memory allocator, using btAlignedAllocSetCustom -void btAlignedAllocSetCustom(btAllocFunc *allocFunc, btFreeFunc *freeFunc); +void btAlignedAllocSetCustom(btAllocFunc* allocFunc, btFreeFunc* freeFunc); ///If the developer has already an custom aligned 
allocator, then btAlignedAllocSetCustomAligned can be used. The default aligned allocator pre-allocates extra memory using the non-aligned allocator, and instruments it. -void btAlignedAllocSetCustomAligned(btAlignedAllocFunc *allocFunc, btAlignedFreeFunc *freeFunc); +void btAlignedAllocSetCustomAligned(btAlignedAllocFunc* allocFunc, btAlignedFreeFunc* freeFunc); ///The btAlignedAllocator is a portable class for aligned memory allocations. ///Default implementations for unaligned and aligned allocations can be overridden by a custom allocator using btAlignedAllocSetCustom and btAlignedAllocSetCustomAligned. template class btAlignedAllocator { - typedef btAlignedAllocator self_type; + typedef btAlignedAllocator self_type; public: - //just going down a list: - btAlignedAllocator() {} - /* + //just going down a list: + btAlignedAllocator() {} + /* btAlignedAllocator( const self_type & ) {} */ - template - btAlignedAllocator(const btAlignedAllocator &) {} + template + btAlignedAllocator(const btAlignedAllocator&) {} - typedef const T *const_pointer; - typedef const T &const_reference; - typedef T *pointer; - typedef T &reference; - typedef T value_type; + typedef const T* const_pointer; + typedef const T& const_reference; + typedef T* pointer; + typedef T& reference; + typedef T value_type; - pointer address(reference ref) const { return &ref; } - const_pointer address(const_reference ref) const { return &ref; } - pointer allocate(size_type n, const_pointer *hint = 0) { - (void)hint; - return reinterpret_cast(btAlignedAlloc(sizeof(value_type) * n, Alignment)); - } - void construct(pointer ptr, const value_type &value) { new (ptr) value_type(value); } - void deallocate(pointer ptr) { - btAlignedFree(reinterpret_cast(ptr)); - } - void destroy(pointer ptr) { ptr->~value_type(); } + pointer address(reference ref) const { return &ref; } + const_pointer address(const_reference ref) const { return &ref; } + pointer allocate(size_type n, const_pointer* hint = 0) + { + (void)hint; + return reinterpret_cast(btAlignedAlloc(sizeof(value_type) * n, Alignment)); + } + void construct(pointer ptr, const value_type& value) { new (ptr) value_type(value); } + void deallocate(pointer ptr) + { + btAlignedFree(reinterpret_cast(ptr)); + } + void destroy(pointer ptr) { ptr->~value_type(); } - template - struct rebind { - typedef btAlignedAllocator other; - }; - template - self_type &operator=(const btAlignedAllocator &) { return *this; } + template + struct rebind { + typedef btAlignedAllocator other; + }; + template + self_type& operator=(const btAlignedAllocator&) { return *this; } - friend bool operator==(const self_type &, const self_type &) { return true; } + friend bool operator==(const self_type&, const self_type&) { return true; } }; -//GODOT ADDITION -}; // namespace VHACD -// - #endif //BT_ALIGNED_ALLOCATOR diff --git a/thirdparty/vhacd/inc/btAlignedObjectArray.h b/thirdparty/vhacd/inc/btAlignedObjectArray.h index d82449e8fda..e6620adf6fa 100644 --- a/thirdparty/vhacd/inc/btAlignedObjectArray.h +++ b/thirdparty/vhacd/inc/btAlignedObjectArray.h @@ -38,383 +38,411 @@ subject to the following restrictions: #include //for placement new #endif //BT_USE_PLACEMENT_NEW -//GODOT ADDITION -namespace VHACD { -// - ///The btAlignedObjectArray template class uses a subset of the stl::vector interface for its methods ///It is developed to replace stl::vector to avoid portability issues, including STL alignment issues to add SIMD/SSE data template //template class btAlignedObjectArray { - btAlignedAllocator m_allocator; + 
btAlignedAllocator m_allocator; - int32_t m_size; - int32_t m_capacity; - T *m_data; - //PCK: added this line - bool m_ownsMemory; + int32_t m_size; + int32_t m_capacity; + T* m_data; + //PCK: added this line + bool m_ownsMemory; #ifdef BT_ALLOW_ARRAY_COPY_OPERATOR public: - SIMD_FORCE_INLINE btAlignedObjectArray &operator=(const btAlignedObjectArray &other) { - copyFromArray(other); - return *this; - } + SIMD_FORCE_INLINE btAlignedObjectArray& operator=(const btAlignedObjectArray& other) + { + copyFromArray(other); + return *this; + } #else //BT_ALLOW_ARRAY_COPY_OPERATOR private: - SIMD_FORCE_INLINE btAlignedObjectArray &operator=(const btAlignedObjectArray &other); + SIMD_FORCE_INLINE btAlignedObjectArray& operator=(const btAlignedObjectArray& other); #endif //BT_ALLOW_ARRAY_COPY_OPERATOR protected: - SIMD_FORCE_INLINE int32_t allocSize(int32_t size) { - return (size ? size * 2 : 1); - } - SIMD_FORCE_INLINE void copy(int32_t start, int32_t end, T *dest) const { - int32_t i; - for (i = start; i < end; ++i) + SIMD_FORCE_INLINE int32_t allocSize(int32_t size) + { + return (size ? size * 2 : 1); + } + SIMD_FORCE_INLINE void copy(int32_t start, int32_t end, T* dest) const + { + int32_t i; + for (i = start; i < end; ++i) #ifdef BT_USE_PLACEMENT_NEW - new (&dest[i]) T(m_data[i]); + new (&dest[i]) T(m_data[i]); #else - dest[i] = m_data[i]; + dest[i] = m_data[i]; #endif //BT_USE_PLACEMENT_NEW - } + } - SIMD_FORCE_INLINE void init() { - //PCK: added this line - m_ownsMemory = true; - m_data = 0; - m_size = 0; - m_capacity = 0; - } - SIMD_FORCE_INLINE void destroy(int32_t first, int32_t last) { - int32_t i; - for (i = first; i < last; i++) { - m_data[i].~T(); - } - } + SIMD_FORCE_INLINE void init() + { + //PCK: added this line + m_ownsMemory = true; + m_data = 0; + m_size = 0; + m_capacity = 0; + } + SIMD_FORCE_INLINE void destroy(int32_t first, int32_t last) + { + int32_t i; + for (i = first; i < last; i++) { + m_data[i].~T(); + } + } - SIMD_FORCE_INLINE void *allocate(int32_t size) { - if (size) - return m_allocator.allocate(size); - return 0; - } + SIMD_FORCE_INLINE void* allocate(int32_t size) + { + if (size) + return m_allocator.allocate(size); + return 0; + } - SIMD_FORCE_INLINE void deallocate() { - if (m_data) { - //PCK: enclosed the deallocation in this block - if (m_ownsMemory) { - m_allocator.deallocate(m_data); - } - m_data = 0; - } - } + SIMD_FORCE_INLINE void deallocate() + { + if (m_data) { + //PCK: enclosed the deallocation in this block + if (m_ownsMemory) { + m_allocator.deallocate(m_data); + } + m_data = 0; + } + } public: - btAlignedObjectArray() { - init(); - } + btAlignedObjectArray() + { + init(); + } - ~btAlignedObjectArray() { - clear(); - } + ~btAlignedObjectArray() + { + clear(); + } - ///Generally it is best to avoid using the copy constructor of an btAlignedObjectArray, and use a (const) reference to the array instead. - btAlignedObjectArray(const btAlignedObjectArray &otherArray) { - init(); + ///Generally it is best to avoid using the copy constructor of an btAlignedObjectArray, and use a (const) reference to the array instead. 
+ btAlignedObjectArray(const btAlignedObjectArray& otherArray) + { + init(); - int32_t otherSize = otherArray.size(); - resize(otherSize); - otherArray.copy(0, otherSize, m_data); - } + int32_t otherSize = otherArray.size(); + resize(otherSize); + otherArray.copy(0, otherSize, m_data); + } - /// return the number of elements in the array - SIMD_FORCE_INLINE int32_t size() const { - return m_size; - } + /// return the number of elements in the array + SIMD_FORCE_INLINE int32_t size() const + { + return m_size; + } - SIMD_FORCE_INLINE const T &at(int32_t n) const { - btAssert(n >= 0); - btAssert(n < size()); - return m_data[n]; - } + SIMD_FORCE_INLINE const T& at(int32_t n) const + { + btAssert(n >= 0); + btAssert(n < size()); + return m_data[n]; + } - SIMD_FORCE_INLINE T &at(int32_t n) { - btAssert(n >= 0); - btAssert(n < size()); - return m_data[n]; - } + SIMD_FORCE_INLINE T& at(int32_t n) + { + btAssert(n >= 0); + btAssert(n < size()); + return m_data[n]; + } - SIMD_FORCE_INLINE const T &operator[](int32_t n) const { - btAssert(n >= 0); - btAssert(n < size()); - return m_data[n]; - } + SIMD_FORCE_INLINE const T& operator[](int32_t n) const + { + btAssert(n >= 0); + btAssert(n < size()); + return m_data[n]; + } - SIMD_FORCE_INLINE T &operator[](int32_t n) { - btAssert(n >= 0); - btAssert(n < size()); - return m_data[n]; - } + SIMD_FORCE_INLINE T& operator[](int32_t n) + { + btAssert(n >= 0); + btAssert(n < size()); + return m_data[n]; + } - ///clear the array, deallocated memory. Generally it is better to use array.resize(0), to reduce performance overhead of run-time memory (de)allocations. - SIMD_FORCE_INLINE void clear() { - destroy(0, size()); + ///clear the array, deallocated memory. Generally it is better to use array.resize(0), to reduce performance overhead of run-time memory (de)allocations. + SIMD_FORCE_INLINE void clear() + { + destroy(0, size()); - deallocate(); + deallocate(); - init(); - } + init(); + } - SIMD_FORCE_INLINE void pop_back() { - btAssert(m_size > 0); - m_size--; - m_data[m_size].~T(); - } + SIMD_FORCE_INLINE void pop_back() + { + btAssert(m_size > 0); + m_size--; + m_data[m_size].~T(); + } - ///resize changes the number of elements in the array. If the new size is larger, the new elements will be constructed using the optional second argument. - ///when the new number of elements is smaller, the destructor will be called, but memory will not be freed, to reduce performance overhead of run-time memory (de)allocations. - SIMD_FORCE_INLINE void resize(int32_t newsize, const T &fillData = T()) { - int32_t curSize = size(); + ///resize changes the number of elements in the array. If the new size is larger, the new elements will be constructed using the optional second argument. + ///when the new number of elements is smaller, the destructor will be called, but memory will not be freed, to reduce performance overhead of run-time memory (de)allocations. 
+ SIMD_FORCE_INLINE void resize(int32_t newsize, const T& fillData = T()) + { + int32_t curSize = size(); - if (newsize < curSize) { - for (int32_t i = newsize; i < curSize; i++) { - m_data[i].~T(); - } - } else { - if (newsize > size()) { - reserve(newsize); - } + if (newsize < curSize) { + for (int32_t i = newsize; i < curSize; i++) { + m_data[i].~T(); + } + } + else { + if (newsize > size()) { + reserve(newsize); + } #ifdef BT_USE_PLACEMENT_NEW - for (int32_t i = curSize; i < newsize; i++) { - new (&m_data[i]) T(fillData); - } + for (int32_t i = curSize; i < newsize; i++) { + new (&m_data[i]) T(fillData); + } #endif //BT_USE_PLACEMENT_NEW - } + } - m_size = newsize; - } + m_size = newsize; + } - SIMD_FORCE_INLINE T &expandNonInitializing() { - int32_t sz = size(); - if (sz == capacity()) { - reserve(allocSize(size())); - } - m_size++; + SIMD_FORCE_INLINE T& expandNonInitializing() + { + int32_t sz = size(); + if (sz == capacity()) { + reserve(allocSize(size())); + } + m_size++; - return m_data[sz]; - } + return m_data[sz]; + } - SIMD_FORCE_INLINE T &expand(const T &fillValue = T()) { - int32_t sz = size(); - if (sz == capacity()) { - reserve(allocSize(size())); - } - m_size++; + SIMD_FORCE_INLINE T& expand(const T& fillValue = T()) + { + int32_t sz = size(); + if (sz == capacity()) { + reserve(allocSize(size())); + } + m_size++; #ifdef BT_USE_PLACEMENT_NEW - new (&m_data[sz]) T(fillValue); //use the in-place new (not really allocating heap memory) + new (&m_data[sz]) T(fillValue); //use the in-place new (not really allocating heap memory) #endif - return m_data[sz]; - } + return m_data[sz]; + } - SIMD_FORCE_INLINE void push_back(const T &_Val) { - int32_t sz = size(); - if (sz == capacity()) { - reserve(allocSize(size())); - } + SIMD_FORCE_INLINE void push_back(const T& _Val) + { + int32_t sz = size(); + if (sz == capacity()) { + reserve(allocSize(size())); + } #ifdef BT_USE_PLACEMENT_NEW - new (&m_data[m_size]) T(_Val); + new (&m_data[m_size]) T(_Val); #else - m_data[size()] = _Val; + m_data[size()] = _Val; #endif //BT_USE_PLACEMENT_NEW - m_size++; - } + m_size++; + } - /// return the pre-allocated (reserved) elements, this is at least as large as the total number of elements,see size() and reserve() - SIMD_FORCE_INLINE int32_t capacity() const { - return m_capacity; - } + /// return the pre-allocated (reserved) elements, this is at least as large as the total number of elements,see size() and reserve() + SIMD_FORCE_INLINE int32_t capacity() const + { + return m_capacity; + } - SIMD_FORCE_INLINE void reserve(int32_t _Count) { // determine new minimum length of allocated storage - if (capacity() < _Count) { // not enough room, reallocate - T *s = (T *)allocate(_Count); + SIMD_FORCE_INLINE void reserve(int32_t _Count) + { // determine new minimum length of allocated storage + if (capacity() < _Count) { // not enough room, reallocate + T* s = (T*)allocate(_Count); - copy(0, size(), s); + copy(0, size(), s); - destroy(0, size()); + destroy(0, size()); - deallocate(); + deallocate(); - //PCK: added this line - m_ownsMemory = true; + //PCK: added this line + m_ownsMemory = true; - m_data = s; + m_data = s; - m_capacity = _Count; - } - } + m_capacity = _Count; + } + } - class less { - public: - bool operator()(const T &a, const T &b) { - return (a < b); - } - }; + class less { + public: + bool operator()(const T& a, const T& b) + { + return (a < b); + } + }; - template - void quickSortInternal(const L &CompareFunc, int32_t lo, int32_t hi) { - // lo is the lower index, hi is the upper index - 
// of the region of array a that is to be sorted - int32_t i = lo, j = hi; - T x = m_data[(lo + hi) / 2]; + template + void quickSortInternal(const L& CompareFunc, int32_t lo, int32_t hi) + { + // lo is the lower index, hi is the upper index + // of the region of array a that is to be sorted + int32_t i = lo, j = hi; + T x = m_data[(lo + hi) / 2]; - // partition - do { - while (CompareFunc(m_data[i], x)) - i++; - while (CompareFunc(x, m_data[j])) - j--; - if (i <= j) { - swap(i, j); - i++; - j--; - } - } while (i <= j); + // partition + do { + while (CompareFunc(m_data[i], x)) + i++; + while (CompareFunc(x, m_data[j])) + j--; + if (i <= j) { + swap(i, j); + i++; + j--; + } + } while (i <= j); - // recursion - if (lo < j) - quickSortInternal(CompareFunc, lo, j); - if (i < hi) - quickSortInternal(CompareFunc, i, hi); - } + // recursion + if (lo < j) + quickSortInternal(CompareFunc, lo, j); + if (i < hi) + quickSortInternal(CompareFunc, i, hi); + } - template - void quickSort(const L &CompareFunc) { - //don't sort 0 or 1 elements - if (size() > 1) { - quickSortInternal(CompareFunc, 0, size() - 1); - } - } + template + void quickSort(const L& CompareFunc) + { + //don't sort 0 or 1 elements + if (size() > 1) { + quickSortInternal(CompareFunc, 0, size() - 1); + } + } - ///heap sort from http://www.csse.monash.edu.au/~lloyd/tildeAlgDS/Sort/Heap/ - template - void downHeap(T *pArr, int32_t k, int32_t n, const L &CompareFunc) { - /* PRE: a[k+1..N] is a heap */ - /* POST: a[k..N] is a heap */ + ///heap sort from http://www.csse.monash.edu.au/~lloyd/tildeAlgDS/Sort/Heap/ + template + void downHeap(T* pArr, int32_t k, int32_t n, const L& CompareFunc) + { + /* PRE: a[k+1..N] is a heap */ + /* POST: a[k..N] is a heap */ - T temp = pArr[k - 1]; - /* k has child(s) */ - while (k <= n / 2) { - int32_t child = 2 * k; + T temp = pArr[k - 1]; + /* k has child(s) */ + while (k <= n / 2) { + int32_t child = 2 * k; - if ((child < n) && CompareFunc(pArr[child - 1], pArr[child])) { - child++; - } - /* pick larger child */ - if (CompareFunc(temp, pArr[child - 1])) { - /* move child up */ - pArr[k - 1] = pArr[child - 1]; - k = child; - } else { - break; - } - } - pArr[k - 1] = temp; - } /*downHeap*/ + if ((child < n) && CompareFunc(pArr[child - 1], pArr[child])) { + child++; + } + /* pick larger child */ + if (CompareFunc(temp, pArr[child - 1])) { + /* move child up */ + pArr[k - 1] = pArr[child - 1]; + k = child; + } + else { + break; + } + } + pArr[k - 1] = temp; + } /*downHeap*/ - void swap(int32_t index0, int32_t index1) { + void swap(int32_t index0, int32_t index1) + { #ifdef BT_USE_MEMCPY - char temp[sizeof(T)]; - memcpy(temp, &m_data[index0], sizeof(T)); - memcpy(&m_data[index0], &m_data[index1], sizeof(T)); - memcpy(&m_data[index1], temp, sizeof(T)); + char temp[sizeof(T)]; + memcpy(temp, &m_data[index0], sizeof(T)); + memcpy(&m_data[index0], &m_data[index1], sizeof(T)); + memcpy(&m_data[index1], temp, sizeof(T)); #else - T temp = m_data[index0]; - m_data[index0] = m_data[index1]; - m_data[index1] = temp; + T temp = m_data[index0]; + m_data[index0] = m_data[index1]; + m_data[index1] = temp; #endif //BT_USE_PLACEMENT_NEW - } + } - template - void heapSort(const L &CompareFunc) { - /* sort a[0..N-1], N.B. 0 to N-1 */ - int32_t k; - int32_t n = m_size; - for (k = n / 2; k > 0; k--) { - downHeap(m_data, k, n, CompareFunc); - } + template + void heapSort(const L& CompareFunc) + { + /* sort a[0..N-1], N.B. 
0 to N-1 */ + int32_t k; + int32_t n = m_size; + for (k = n / 2; k > 0; k--) { + downHeap(m_data, k, n, CompareFunc); + } - /* a[1..N] is now a heap */ - while (n >= 1) { - swap(0, n - 1); /* largest of a[0..n-1] */ + /* a[1..N] is now a heap */ + while (n >= 1) { + swap(0, n - 1); /* largest of a[0..n-1] */ - n = n - 1; - /* restore a[1..i-1] heap */ - downHeap(m_data, 1, n, CompareFunc); - } - } + n = n - 1; + /* restore a[1..i-1] heap */ + downHeap(m_data, 1, n, CompareFunc); + } + } - ///non-recursive binary search, assumes sorted array - int32_t findBinarySearch(const T &key) const { - int32_t first = 0; - int32_t last = size() - 1; + ///non-recursive binary search, assumes sorted array + int32_t findBinarySearch(const T& key) const + { + int32_t first = 0; + int32_t last = size() - 1; - //assume sorted array - while (first <= last) { - int32_t mid = (first + last) / 2; // compute mid point. - if (key > m_data[mid]) - first = mid + 1; // repeat search in top half. - else if (key < m_data[mid]) - last = mid - 1; // repeat search in bottom half. - else - return mid; // found it. return position ///// - } - return size(); // failed to find key - } + //assume sorted array + while (first <= last) { + int32_t mid = (first + last) / 2; // compute mid point. + if (key > m_data[mid]) + first = mid + 1; // repeat search in top half. + else if (key < m_data[mid]) + last = mid - 1; // repeat search in bottom half. + else + return mid; // found it. return position ///// + } + return size(); // failed to find key + } - int32_t findLinearSearch(const T &key) const { - int32_t index = size(); - int32_t i; + int32_t findLinearSearch(const T& key) const + { + int32_t index = size(); + int32_t i; - for (i = 0; i < size(); i++) { - if (m_data[i] == key) { - index = i; - break; - } - } - return index; - } + for (i = 0; i < size(); i++) { + if (m_data[i] == key) { + index = i; + break; + } + } + return index; + } - void remove(const T &key) { + void remove(const T& key) + { - int32_t findIndex = findLinearSearch(key); - if (findIndex < size()) { - swap(findIndex, size() - 1); - pop_back(); - } - } + int32_t findIndex = findLinearSearch(key); + if (findIndex < size()) { + swap(findIndex, size() - 1); + pop_back(); + } + } - //PCK: whole function - void initializeFromBuffer(void *buffer, int32_t size, int32_t capacity) { - clear(); - m_ownsMemory = false; - m_data = (T *)buffer; - m_size = size; - m_capacity = capacity; - } + //PCK: whole function + void initializeFromBuffer(void* buffer, int32_t size, int32_t capacity) + { + clear(); + m_ownsMemory = false; + m_data = (T*)buffer; + m_size = size; + m_capacity = capacity; + } - void copyFromArray(const btAlignedObjectArray &otherArray) { - int32_t otherSize = otherArray.size(); - resize(otherSize); - otherArray.copy(0, otherSize, m_data); - } + void copyFromArray(const btAlignedObjectArray& otherArray) + { + int32_t otherSize = otherArray.size(); + resize(otherSize); + otherArray.copy(0, otherSize, m_data); + } }; -//GODOT ADDITION -}; // namespace VHACD -// - #endif //BT_OBJECT_ARRAY__ diff --git a/thirdparty/vhacd/inc/btConvexHullComputer.h b/thirdparty/vhacd/inc/btConvexHullComputer.h index f10a3cf86f3..3c5075c2cbb 100644 --- a/thirdparty/vhacd/inc/btConvexHullComputer.h +++ b/thirdparty/vhacd/inc/btConvexHullComputer.h @@ -18,60 +18,59 @@ subject to the following restrictions: #include "btAlignedObjectArray.h" #include "btVector3.h" -//GODOT ADDITION -namespace VHACD { -// - /// Convex hull implementation based on Preparata and Hong /// See 
http://code.google.com/p/bullet/issues/detail?id=275 /// Ole Kniemeyer, MAXON Computer GmbH class btConvexHullComputer { private: - btScalar compute(const void *coords, bool doubleCoords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp); + btScalar compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp); public: - class Edge { - private: - int32_t next; - int32_t reverse; - int32_t targetVertex; + class Edge { + private: + int32_t next; + int32_t reverse; + int32_t targetVertex; - friend class btConvexHullComputer; + friend class btConvexHullComputer; - public: - int32_t getSourceVertex() const { - return (this + reverse)->targetVertex; - } + public: + int32_t getSourceVertex() const + { + return (this + reverse)->targetVertex; + } - int32_t getTargetVertex() const { - return targetVertex; - } + int32_t getTargetVertex() const + { + return targetVertex; + } - const Edge *getNextEdgeOfVertex() const // clockwise list of all edges of a vertex - { - return this + next; - } + const Edge* getNextEdgeOfVertex() const // clockwise list of all edges of a vertex + { + return this + next; + } - const Edge *getNextEdgeOfFace() const // counter-clockwise list of all edges of a face - { - return (this + reverse)->getNextEdgeOfVertex(); - } + const Edge* getNextEdgeOfFace() const // counter-clockwise list of all edges of a face + { + return (this + reverse)->getNextEdgeOfVertex(); + } - const Edge *getReverseEdge() const { - return this + reverse; - } - }; + const Edge* getReverseEdge() const + { + return this + reverse; + } + }; - // Vertices of the output hull - btAlignedObjectArray vertices; + // Vertices of the output hull + btAlignedObjectArray vertices; - // Edges of the output hull - btAlignedObjectArray edges; + // Edges of the output hull + btAlignedObjectArray edges; - // Faces of the convex hull. Each entry is an index into the "edges" array pointing to an edge of the face. Faces are planar n-gons - btAlignedObjectArray faces; + // Faces of the convex hull. Each entry is an index into the "edges" array pointing to an edge of the face. Faces are planar n-gons + btAlignedObjectArray faces; - /* + /* Compute convex hull of "count" vertices stored in "coords". "stride" is the difference in bytes between the addresses of consecutive vertices. If "shrink" is positive, the convex hull is shrunken by that amount (each face is moved by "shrink" length units towards the center along its normal). @@ -83,18 +82,16 @@ public: The output convex hull can be found in the member variables "vertices", "edges", "faces". 
*/ - btScalar compute(const float *coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) { - return compute(coords, false, stride, count, shrink, shrinkClamp); - } + btScalar compute(const float* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) + { + return compute(coords, false, stride, count, shrink, shrinkClamp); + } - // same as above, but double precision - btScalar compute(const double *coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) { - return compute(coords, true, stride, count, shrink, shrinkClamp); - } + // same as above, but double precision + btScalar compute(const double* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) + { + return compute(coords, true, stride, count, shrink, shrinkClamp); + } }; -//GODOT ADDITION -}; // namespace VHACD -// - #endif //BT_CONVEX_HULL_COMPUTER_H diff --git a/thirdparty/vhacd/inc/btMinMax.h b/thirdparty/vhacd/inc/btMinMax.h index a5be2586676..40b0ceb6eda 100644 --- a/thirdparty/vhacd/inc/btMinMax.h +++ b/thirdparty/vhacd/inc/btMinMax.h @@ -17,50 +17,49 @@ subject to the following restrictions: #include "btScalar.h" -//GODOT ADDITION -namespace VHACD { -// - template -SIMD_FORCE_INLINE const T &btMin(const T &a, const T &b) { - return a < b ? a : b; +SIMD_FORCE_INLINE const T& btMin(const T& a, const T& b) +{ + return a < b ? a : b; } template -SIMD_FORCE_INLINE const T &btMax(const T &a, const T &b) { - return a > b ? a : b; +SIMD_FORCE_INLINE const T& btMax(const T& a, const T& b) +{ + return a > b ? a : b; } template -SIMD_FORCE_INLINE const T &btClamped(const T &a, const T &lb, const T &ub) { - return a < lb ? lb : (ub < a ? ub : a); +SIMD_FORCE_INLINE const T& btClamped(const T& a, const T& lb, const T& ub) +{ + return a < lb ? lb : (ub < a ? 
ub : a); } template -SIMD_FORCE_INLINE void btSetMin(T &a, const T &b) { - if (b < a) { - a = b; - } +SIMD_FORCE_INLINE void btSetMin(T& a, const T& b) +{ + if (b < a) { + a = b; + } } template -SIMD_FORCE_INLINE void btSetMax(T &a, const T &b) { - if (a < b) { - a = b; - } +SIMD_FORCE_INLINE void btSetMax(T& a, const T& b) +{ + if (a < b) { + a = b; + } } template -SIMD_FORCE_INLINE void btClamp(T &a, const T &lb, const T &ub) { - if (a < lb) { - a = lb; - } else if (ub < a) { - a = ub; - } +SIMD_FORCE_INLINE void btClamp(T& a, const T& lb, const T& ub) +{ + if (a < lb) { + a = lb; + } + else if (ub < a) { + a = ub; + } } -//GODOT ADDITION -}; // namespace VHACD -// - #endif //BT_GEN_MINMAX_H diff --git a/thirdparty/vhacd/inc/btScalar.h b/thirdparty/vhacd/inc/btScalar.h index a9f836f9f23..b814474bdff 100644 --- a/thirdparty/vhacd/inc/btScalar.h +++ b/thirdparty/vhacd/inc/btScalar.h @@ -22,24 +22,17 @@ subject to the following restrictions: #include #include -#include #include //size_t for MSVC 6.0 +#include /* SVN $Revision$ on $Date$ from http://bullet.googlecode.com*/ #define BT_BULLET_VERSION 279 -//GODOT ADDITION -namespace VHACD { -// - -inline int32_t btGetVersion() { - return BT_BULLET_VERSION; +inline int32_t btGetVersion() +{ + return BT_BULLET_VERSION; } -//GODOT ADDITION -}; // namespace VHACD -// - #if defined(DEBUG) || defined(_DEBUG) #define BT_DEBUG #endif @@ -107,12 +100,12 @@ inline int32_t btGetVersion() { #include #define printf spu_printf #define btAssert(x) \ - { \ - if (!(x)) { \ - printf("Assert " __FILE__ ":%u (" #x ")\n", __LINE__); \ - spu_hcmpeq(0, 0); \ - } \ - } + { \ + if (!(x)) { \ + printf("Assert " __FILE__ ":%u (" #x ")\n", __LINE__); \ + spu_hcmpeq(0, 0); \ + } \ + } #else #define btAssert assert #endif @@ -206,10 +199,6 @@ inline int32_t btGetVersion() { #endif //__CELLOS_LV2__ #endif -//GODOT ADDITION -namespace VHACD { -// - ///The btScalar type abstracts floating point numbers, to easily switch between double and single floating point precision. 
#if defined(BT_USE_DOUBLE_PRECISION) typedef double btScalar; @@ -222,130 +211,96 @@ typedef float btScalar; #endif #define BT_DECLARE_ALIGNED_ALLOCATOR() \ - SIMD_FORCE_INLINE void *operator new(size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \ - SIMD_FORCE_INLINE void operator delete(void *ptr) { btAlignedFree(ptr); } \ - SIMD_FORCE_INLINE void *operator new(size_t, void *ptr) { return ptr; } \ - SIMD_FORCE_INLINE void operator delete(void *, void *) {} \ - SIMD_FORCE_INLINE void *operator new[](size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \ - SIMD_FORCE_INLINE void operator delete[](void *ptr) { btAlignedFree(ptr); } \ - SIMD_FORCE_INLINE void *operator new[](size_t, void *ptr) { return ptr; } \ - SIMD_FORCE_INLINE void operator delete[](void *, void *) {} + SIMD_FORCE_INLINE void* operator new(size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \ + SIMD_FORCE_INLINE void operator delete(void* ptr) { btAlignedFree(ptr); } \ + SIMD_FORCE_INLINE void* operator new(size_t, void* ptr) { return ptr; } \ + SIMD_FORCE_INLINE void operator delete(void*, void*) {} \ + SIMD_FORCE_INLINE void* operator new[](size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \ + SIMD_FORCE_INLINE void operator delete[](void* ptr) { btAlignedFree(ptr); } \ + SIMD_FORCE_INLINE void* operator new[](size_t, void* ptr) { return ptr; } \ + SIMD_FORCE_INLINE void operator delete[](void*, void*) {} #if defined(BT_USE_DOUBLE_PRECISION) || defined(BT_FORCE_DOUBLE_FUNCTIONS) -SIMD_FORCE_INLINE btScalar btSqrt(btScalar x) { - return sqrt(x); +SIMD_FORCE_INLINE btScalar btSqrt(btScalar x) +{ + return sqrt(x); } -SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { - return fabs(x); +SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabs(x); } +SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cos(x); } +SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sin(x); } +SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tan(x); } +SIMD_FORCE_INLINE btScalar btAcos(btScalar x) +{ + if (x < btScalar(-1)) + x = btScalar(-1); + if (x > btScalar(1)) + x = btScalar(1); + return acos(x); } -SIMD_FORCE_INLINE btScalar btCos(btScalar x) { - return cos(x); -} -SIMD_FORCE_INLINE btScalar btSin(btScalar x) { - return sin(x); -} -SIMD_FORCE_INLINE btScalar btTan(btScalar x) { - return tan(x); -} -SIMD_FORCE_INLINE btScalar btAcos(btScalar x) { - if (x < btScalar(-1)) - x = btScalar(-1); - if (x > btScalar(1)) - x = btScalar(1); - return acos(x); -} -SIMD_FORCE_INLINE btScalar btAsin(btScalar x) { - if (x < btScalar(-1)) - x = btScalar(-1); - if (x > btScalar(1)) - x = btScalar(1); - return asin(x); -} -SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { - return atan(x); -} -SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { - return atan2(x, y); -} -SIMD_FORCE_INLINE btScalar btExp(btScalar x) { - return exp(x); -} -SIMD_FORCE_INLINE btScalar btLog(btScalar x) { - return log(x); -} -SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { - return pow(x, y); -} -SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { - return fmod(x, y); +SIMD_FORCE_INLINE btScalar btAsin(btScalar x) +{ + if (x < btScalar(-1)) + x = btScalar(-1); + if (x > btScalar(1)) + x = btScalar(1); + return asin(x); } +SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atan(x); } +SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2(x, y); } +SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return exp(x); } +SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return log(x); 
} +SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return pow(x, y); } +SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { return fmod(x, y); } #else -SIMD_FORCE_INLINE btScalar btSqrt(btScalar y) { +SIMD_FORCE_INLINE btScalar btSqrt(btScalar y) +{ #ifdef USE_APPROXIMATION - double x, z, tempf; - unsigned long *tfptr = ((unsigned long *)&tempf) + 1; + double x, z, tempf; + unsigned long* tfptr = ((unsigned long*)&tempf) + 1; - tempf = y; - *tfptr = (0xbfcdd90a - *tfptr) >> 1; /* estimate of 1/sqrt(y) */ - x = tempf; - z = y * btScalar(0.5); - x = (btScalar(1.5) * x) - (x * x) * (x * z); /* iteration formula */ - x = (btScalar(1.5) * x) - (x * x) * (x * z); - x = (btScalar(1.5) * x) - (x * x) * (x * z); - x = (btScalar(1.5) * x) - (x * x) * (x * z); - x = (btScalar(1.5) * x) - (x * x) * (x * z); - return x * y; + tempf = y; + *tfptr = (0xbfcdd90a - *tfptr) >> 1; /* estimate of 1/sqrt(y) */ + x = tempf; + z = y * btScalar(0.5); + x = (btScalar(1.5) * x) - (x * x) * (x * z); /* iteration formula */ + x = (btScalar(1.5) * x) - (x * x) * (x * z); + x = (btScalar(1.5) * x) - (x * x) * (x * z); + x = (btScalar(1.5) * x) - (x * x) * (x * z); + x = (btScalar(1.5) * x) - (x * x) * (x * z); + return x * y; #else - return sqrtf(y); + return sqrtf(y); #endif } -SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { - return fabsf(x); +SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabsf(x); } +SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cosf(x); } +SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sinf(x); } +SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tanf(x); } +SIMD_FORCE_INLINE btScalar btAcos(btScalar x) +{ + if (x < btScalar(-1)) + x = btScalar(-1); + if (x > btScalar(1)) + x = btScalar(1); + return acosf(x); } -SIMD_FORCE_INLINE btScalar btCos(btScalar x) { - return cosf(x); -} -SIMD_FORCE_INLINE btScalar btSin(btScalar x) { - return sinf(x); -} -SIMD_FORCE_INLINE btScalar btTan(btScalar x) { - return tanf(x); -} -SIMD_FORCE_INLINE btScalar btAcos(btScalar x) { - if (x < btScalar(-1)) - x = btScalar(-1); - if (x > btScalar(1)) - x = btScalar(1); - return acosf(x); -} -SIMD_FORCE_INLINE btScalar btAsin(btScalar x) { - if (x < btScalar(-1)) - x = btScalar(-1); - if (x > btScalar(1)) - x = btScalar(1); - return asinf(x); -} -SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { - return atanf(x); -} -SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { - return atan2f(x, y); -} -SIMD_FORCE_INLINE btScalar btExp(btScalar x) { - return expf(x); -} -SIMD_FORCE_INLINE btScalar btLog(btScalar x) { - return logf(x); -} -SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { - return powf(x, y); -} -SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { - return fmodf(x, y); +SIMD_FORCE_INLINE btScalar btAsin(btScalar x) +{ + if (x < btScalar(-1)) + x = btScalar(-1); + if (x > btScalar(1)) + x = btScalar(1); + return asinf(x); } +SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atanf(x); } +SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2f(x, y); } +SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return expf(x); } +SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return logf(x); } +SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return powf(x, y); } +SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { return fmodf(x, y); } #endif @@ -366,110 +321,119 @@ SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { #define SIMD_INFINITY FLT_MAX #endif -SIMD_FORCE_INLINE btScalar btAtan2Fast(btScalar y, 
btScalar x) { - btScalar coeff_1 = SIMD_PI / 4.0f; - btScalar coeff_2 = 3.0f * coeff_1; - btScalar abs_y = btFabs(y); - btScalar angle; - if (x >= 0.0f) { - btScalar r = (x - abs_y) / (x + abs_y); - angle = coeff_1 - coeff_1 * r; - } else { - btScalar r = (x + abs_y) / (abs_y - x); - angle = coeff_2 - coeff_1 * r; - } - return (y < 0.0f) ? -angle : angle; +SIMD_FORCE_INLINE btScalar btAtan2Fast(btScalar y, btScalar x) +{ + btScalar coeff_1 = SIMD_PI / 4.0f; + btScalar coeff_2 = 3.0f * coeff_1; + btScalar abs_y = btFabs(y); + btScalar angle; + if (x >= 0.0f) { + btScalar r = (x - abs_y) / (x + abs_y); + angle = coeff_1 - coeff_1 * r; + } + else { + btScalar r = (x + abs_y) / (abs_y - x); + angle = coeff_2 - coeff_1 * r; + } + return (y < 0.0f) ? -angle : angle; } -SIMD_FORCE_INLINE bool btFuzzyZero(btScalar x) { - return btFabs(x) < SIMD_EPSILON; +SIMD_FORCE_INLINE bool btFuzzyZero(btScalar x) { return btFabs(x) < SIMD_EPSILON; } + +SIMD_FORCE_INLINE bool btEqual(btScalar a, btScalar eps) +{ + return (((a) <= eps) && !((a) < -eps)); +} +SIMD_FORCE_INLINE bool btGreaterEqual(btScalar a, btScalar eps) +{ + return (!((a) <= eps)); } -SIMD_FORCE_INLINE bool btEqual(btScalar a, btScalar eps) { - return (((a) <= eps) && !((a) < -eps)); -} -SIMD_FORCE_INLINE bool btGreaterEqual(btScalar a, btScalar eps) { - return (!((a) <= eps)); +SIMD_FORCE_INLINE int32_t btIsNegative(btScalar x) +{ + return x < btScalar(0.0) ? 1 : 0; } -SIMD_FORCE_INLINE int32_t btIsNegative(btScalar x) { - return x < btScalar(0.0) ? 1 : 0; -} - -SIMD_FORCE_INLINE btScalar btRadians(btScalar x) { - return x * SIMD_RADS_PER_DEG; -} -SIMD_FORCE_INLINE btScalar btDegrees(btScalar x) { - return x * SIMD_DEGS_PER_RAD; -} +SIMD_FORCE_INLINE btScalar btRadians(btScalar x) { return x * SIMD_RADS_PER_DEG; } +SIMD_FORCE_INLINE btScalar btDegrees(btScalar x) { return x * SIMD_DEGS_PER_RAD; } #define BT_DECLARE_HANDLE(name) \ - typedef struct name##__ { \ - int32_t unused; \ - } * name + typedef struct name##__ { \ + int32_t unused; \ + } * name #ifndef btFsel -SIMD_FORCE_INLINE btScalar btFsel(btScalar a, btScalar b, btScalar c) { - return a >= 0 ? b : c; +SIMD_FORCE_INLINE btScalar btFsel(btScalar a, btScalar b, btScalar c) +{ + return a >= 0 ? b : c; } #endif #define btFsels(a, b, c) (btScalar) btFsel(a, b, c) -SIMD_FORCE_INLINE bool btMachineIsLittleEndian() { - long int i = 1; - const char *p = (const char *)&i; - if (p[0] == 1) // Lowest address contains the least significant byte - return true; - else - return false; +SIMD_FORCE_INLINE bool btMachineIsLittleEndian() +{ + long int i = 1; + const char* p = (const char*)&i; + if (p[0] == 1) // Lowest address contains the least significant byte + return true; + else + return false; } ///btSelect avoids branches, which makes performance much better for consoles like Playstation 3 and XBox 360 ///Thanks Phil Knight. 
See also http://www.cellperformance.com/articles/2006/04/more_techniques_for_eliminatin_1.html -SIMD_FORCE_INLINE unsigned btSelect(unsigned condition, unsigned valueIfConditionNonZero, unsigned valueIfConditionZero) { - // Set testNz to 0xFFFFFFFF if condition is nonzero, 0x00000000 if condition is zero - // Rely on positive value or'ed with its negative having sign bit on - // and zero value or'ed with its negative (which is still zero) having sign bit off - // Use arithmetic shift right, shifting the sign bit through all 32 bits - unsigned testNz = (unsigned)(((int32_t)condition | -(int32_t)condition) >> 31); - unsigned testEqz = ~testNz; - return ((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz)); +SIMD_FORCE_INLINE unsigned btSelect(unsigned condition, unsigned valueIfConditionNonZero, unsigned valueIfConditionZero) +{ + // Set testNz to 0xFFFFFFFF if condition is nonzero, 0x00000000 if condition is zero + // Rely on positive value or'ed with its negative having sign bit on + // and zero value or'ed with its negative (which is still zero) having sign bit off + // Use arithmetic shift right, shifting the sign bit through all 32 bits + unsigned testNz = (unsigned)(((int32_t)condition | -(int32_t)condition) >> 31); + unsigned testEqz = ~testNz; + return ((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz)); } -SIMD_FORCE_INLINE int32_t btSelect(unsigned condition, int32_t valueIfConditionNonZero, int32_t valueIfConditionZero) { - unsigned testNz = (unsigned)(((int32_t)condition | -(int32_t)condition) >> 31); - unsigned testEqz = ~testNz; - return static_cast((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz)); +SIMD_FORCE_INLINE int32_t btSelect(unsigned condition, int32_t valueIfConditionNonZero, int32_t valueIfConditionZero) +{ + unsigned testNz = (unsigned)(((int32_t)condition | -(int32_t)condition) >> 31); + unsigned testEqz = ~testNz; + return static_cast((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz)); } -SIMD_FORCE_INLINE float btSelect(unsigned condition, float valueIfConditionNonZero, float valueIfConditionZero) { +SIMD_FORCE_INLINE float btSelect(unsigned condition, float valueIfConditionNonZero, float valueIfConditionZero) +{ #ifdef BT_HAVE_NATIVE_FSEL - return (float)btFsel((btScalar)condition - btScalar(1.0f), valueIfConditionNonZero, valueIfConditionZero); + return (float)btFsel((btScalar)condition - btScalar(1.0f), valueIfConditionNonZero, valueIfConditionZero); #else - return (condition != 0) ? valueIfConditionNonZero : valueIfConditionZero; + return (condition != 0) ? 
valueIfConditionNonZero : valueIfConditionZero; #endif } template -SIMD_FORCE_INLINE void btSwap(T &a, T &b) { - T tmp = a; - a = b; - b = tmp; +SIMD_FORCE_INLINE void btSwap(T& a, T& b) +{ + T tmp = a; + a = b; + b = tmp; } //PCK: endian swapping functions -SIMD_FORCE_INLINE unsigned btSwapEndian(unsigned val) { - return (((val & 0xff000000) >> 24) | ((val & 0x00ff0000) >> 8) | ((val & 0x0000ff00) << 8) | ((val & 0x000000ff) << 24)); +SIMD_FORCE_INLINE unsigned btSwapEndian(unsigned val) +{ + return (((val & 0xff000000) >> 24) | ((val & 0x00ff0000) >> 8) | ((val & 0x0000ff00) << 8) | ((val & 0x000000ff) << 24)); } -SIMD_FORCE_INLINE unsigned short btSwapEndian(unsigned short val) { - return static_cast(((val & 0xff00) >> 8) | ((val & 0x00ff) << 8)); +SIMD_FORCE_INLINE unsigned short btSwapEndian(unsigned short val) +{ + return static_cast(((val & 0xff00) >> 8) | ((val & 0x00ff) << 8)); } -SIMD_FORCE_INLINE unsigned btSwapEndian(int32_t val) { - return btSwapEndian((unsigned)val); +SIMD_FORCE_INLINE unsigned btSwapEndian(int32_t val) +{ + return btSwapEndian((unsigned)val); } -SIMD_FORCE_INLINE unsigned short btSwapEndian(short val) { - return btSwapEndian((unsigned short)val); +SIMD_FORCE_INLINE unsigned short btSwapEndian(short val) +{ + return btSwapEndian((unsigned short)val); } ///btSwapFloat uses using char pointers to swap the endianness @@ -478,88 +442,92 @@ SIMD_FORCE_INLINE unsigned short btSwapEndian(short val) { ///When a floating point unit is faced with an invalid value, it may actually change the value, or worse, throw an exception. ///In most systems, running user mode code, you wouldn't get an exception, but instead the hardware/os/runtime will 'fix' the number for you. ///so instead of returning a float/double, we return integer/long long integer -SIMD_FORCE_INLINE uint32_t btSwapEndianFloat(float d) { - uint32_t a = 0; - unsigned char *dst = (unsigned char *)&a; - unsigned char *src = (unsigned char *)&d; +SIMD_FORCE_INLINE uint32_t btSwapEndianFloat(float d) +{ + uint32_t a = 0; + unsigned char* dst = (unsigned char*)&a; + unsigned char* src = (unsigned char*)&d; - dst[0] = src[3]; - dst[1] = src[2]; - dst[2] = src[1]; - dst[3] = src[0]; - return a; + dst[0] = src[3]; + dst[1] = src[2]; + dst[2] = src[1]; + dst[3] = src[0]; + return a; } // unswap using char pointers -SIMD_FORCE_INLINE float btUnswapEndianFloat(uint32_t a) { - float d = 0.0f; - unsigned char *src = (unsigned char *)&a; - unsigned char *dst = (unsigned char *)&d; +SIMD_FORCE_INLINE float btUnswapEndianFloat(uint32_t a) +{ + float d = 0.0f; + unsigned char* src = (unsigned char*)&a; + unsigned char* dst = (unsigned char*)&d; - dst[0] = src[3]; - dst[1] = src[2]; - dst[2] = src[1]; - dst[3] = src[0]; + dst[0] = src[3]; + dst[1] = src[2]; + dst[2] = src[1]; + dst[3] = src[0]; - return d; + return d; } // swap using char pointers -SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char *dst) { - unsigned char *src = (unsigned char *)&d; +SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char* dst) +{ + unsigned char* src = (unsigned char*)&d; - dst[0] = src[7]; - dst[1] = src[6]; - dst[2] = src[5]; - dst[3] = src[4]; - dst[4] = src[3]; - dst[5] = src[2]; - dst[6] = src[1]; - dst[7] = src[0]; + dst[0] = src[7]; + dst[1] = src[6]; + dst[2] = src[5]; + dst[3] = src[4]; + dst[4] = src[3]; + dst[5] = src[2]; + dst[6] = src[1]; + dst[7] = src[0]; } // unswap using char pointers -SIMD_FORCE_INLINE double btUnswapEndianDouble(const unsigned char *src) { - double d = 0.0; - unsigned char *dst = 
(unsigned char *)&d; +SIMD_FORCE_INLINE double btUnswapEndianDouble(const unsigned char* src) +{ + double d = 0.0; + unsigned char* dst = (unsigned char*)&d; - dst[0] = src[7]; - dst[1] = src[6]; - dst[2] = src[5]; - dst[3] = src[4]; - dst[4] = src[3]; - dst[5] = src[2]; - dst[6] = src[1]; - dst[7] = src[0]; + dst[0] = src[7]; + dst[1] = src[6]; + dst[2] = src[5]; + dst[3] = src[4]; + dst[4] = src[3]; + dst[5] = src[2]; + dst[6] = src[1]; + dst[7] = src[0]; - return d; + return d; } // returns normalized value in range [-SIMD_PI, SIMD_PI] -SIMD_FORCE_INLINE btScalar btNormalizeAngle(btScalar angleInRadians) { - angleInRadians = btFmod(angleInRadians, SIMD_2_PI); - if (angleInRadians < -SIMD_PI) { - return angleInRadians + SIMD_2_PI; - } else if (angleInRadians > SIMD_PI) { - return angleInRadians - SIMD_2_PI; - } else { - return angleInRadians; - } +SIMD_FORCE_INLINE btScalar btNormalizeAngle(btScalar angleInRadians) +{ + angleInRadians = btFmod(angleInRadians, SIMD_2_PI); + if (angleInRadians < -SIMD_PI) { + return angleInRadians + SIMD_2_PI; + } + else if (angleInRadians > SIMD_PI) { + return angleInRadians - SIMD_2_PI; + } + else { + return angleInRadians; + } } ///rudimentary class to provide type info struct btTypedObject { - btTypedObject(int32_t objectType) : - m_objectType(objectType) { - } - int32_t m_objectType; - inline int32_t getObjectType() const { - return m_objectType; - } + btTypedObject(int32_t objectType) + : m_objectType(objectType) + { + } + int32_t m_objectType; + inline int32_t getObjectType() const + { + return m_objectType; + } }; - -//GODOT ADDITION -}; // namespace VHACD -// - #endif //BT_SCALAR_H diff --git a/thirdparty/vhacd/inc/btVector3.h b/thirdparty/vhacd/inc/btVector3.h index 664728a419a..0f2fefbbd5a 100644 --- a/thirdparty/vhacd/inc/btVector3.h +++ b/thirdparty/vhacd/inc/btVector3.h @@ -30,387 +30,431 @@ subject to the following restrictions: * It has an un-used w component to suit 16-byte alignment when btVector3 is stored in containers. This extra component can be used by derived classes (Quaternion?) 
or by user * Ideally, this class should be replaced by a platform optimized SIMD version that keeps the data in registers */ -//GODOT ADDITION -namespace VHACD { -// - ATTRIBUTE_ALIGNED16(class) -btVector3 { +btVector3 +{ public: #if defined(__SPU__) && defined(__CELLOS_LV2__) - btScalar m_floats[4]; + btScalar m_floats[4]; public: - SIMD_FORCE_INLINE const vec_float4 &get128() const { - return *((const vec_float4 *)&m_floats[0]); - } + SIMD_FORCE_INLINE const vec_float4& get128() const + { + return *((const vec_float4*)&m_floats[0]); + } public: #else //__CELLOS_LV2__ __SPU__ #ifdef BT_USE_SSE // _WIN32 - union { - __m128 mVec128; - btScalar m_floats[4]; - }; - SIMD_FORCE_INLINE __m128 get128() const { - return mVec128; - } - SIMD_FORCE_INLINE void set128(__m128 v128) { - mVec128 = v128; - } + union { + __m128 mVec128; + btScalar m_floats[4]; + }; + SIMD_FORCE_INLINE __m128 get128() const + { + return mVec128; + } + SIMD_FORCE_INLINE void set128(__m128 v128) + { + mVec128 = v128; + } #else - btScalar m_floats[4]; + btScalar m_floats[4]; #endif #endif //__CELLOS_LV2__ __SPU__ public: - /**@brief No initialization constructor */ - SIMD_FORCE_INLINE btVector3() {} + /**@brief No initialization constructor */ + SIMD_FORCE_INLINE btVector3() {} - /**@brief Constructor from scalars + /**@brief Constructor from scalars * @param x X value * @param y Y value * @param z Z value */ - SIMD_FORCE_INLINE btVector3(const btScalar &x, const btScalar &y, const btScalar &z) { - m_floats[0] = x; - m_floats[1] = y; - m_floats[2] = z; - m_floats[3] = btScalar(0.); - } + SIMD_FORCE_INLINE btVector3(const btScalar& x, const btScalar& y, const btScalar& z) + { + m_floats[0] = x; + m_floats[1] = y; + m_floats[2] = z; + m_floats[3] = btScalar(0.); + } - /**@brief Add a vector to this one + /**@brief Add a vector to this one * @param The vector to add to this one */ - SIMD_FORCE_INLINE btVector3 &operator+=(const btVector3 &v) { + SIMD_FORCE_INLINE btVector3& operator+=(const btVector3& v) + { - m_floats[0] += v.m_floats[0]; - m_floats[1] += v.m_floats[1]; - m_floats[2] += v.m_floats[2]; - return *this; - } + m_floats[0] += v.m_floats[0]; + m_floats[1] += v.m_floats[1]; + m_floats[2] += v.m_floats[2]; + return *this; + } - /**@brief Subtract a vector from this one + /**@brief Subtract a vector from this one * @param The vector to subtract */ - SIMD_FORCE_INLINE btVector3 &operator-=(const btVector3 &v) { - m_floats[0] -= v.m_floats[0]; - m_floats[1] -= v.m_floats[1]; - m_floats[2] -= v.m_floats[2]; - return *this; - } - /**@brief Scale the vector + SIMD_FORCE_INLINE btVector3& operator-=(const btVector3& v) + { + m_floats[0] -= v.m_floats[0]; + m_floats[1] -= v.m_floats[1]; + m_floats[2] -= v.m_floats[2]; + return *this; + } + /**@brief Scale the vector * @param s Scale factor */ - SIMD_FORCE_INLINE btVector3 &operator*=(const btScalar &s) { - m_floats[0] *= s; - m_floats[1] *= s; - m_floats[2] *= s; - return *this; - } + SIMD_FORCE_INLINE btVector3& operator*=(const btScalar& s) + { + m_floats[0] *= s; + m_floats[1] *= s; + m_floats[2] *= s; + return *this; + } - /**@brief Inversely scale the vector + /**@brief Inversely scale the vector * @param s Scale factor to divide by */ - SIMD_FORCE_INLINE btVector3 &operator/=(const btScalar &s) { - btFullAssert(s != btScalar(0.0)); - return *this *= btScalar(1.0) / s; - } + SIMD_FORCE_INLINE btVector3& operator/=(const btScalar& s) + { + btFullAssert(s != btScalar(0.0)); + return * this *= btScalar(1.0) / s; + } - /**@brief Return the dot product + /**@brief Return the 
dot product * @param v The other vector in the dot product */ - SIMD_FORCE_INLINE btScalar dot(const btVector3 &v) const { - return m_floats[0] * v.m_floats[0] + m_floats[1] * v.m_floats[1] + m_floats[2] * v.m_floats[2]; - } + SIMD_FORCE_INLINE btScalar dot(const btVector3& v) const + { + return m_floats[0] * v.m_floats[0] + m_floats[1] * v.m_floats[1] + m_floats[2] * v.m_floats[2]; + } - /**@brief Return the length of the vector squared */ - SIMD_FORCE_INLINE btScalar length2() const { - return dot(*this); - } + /**@brief Return the length of the vector squared */ + SIMD_FORCE_INLINE btScalar length2() const + { + return dot(*this); + } - /**@brief Return the length of the vector */ - SIMD_FORCE_INLINE btScalar length() const { - return btSqrt(length2()); - } + /**@brief Return the length of the vector */ + SIMD_FORCE_INLINE btScalar length() const + { + return btSqrt(length2()); + } - /**@brief Return the distance squared between the ends of this and another vector + /**@brief Return the distance squared between the ends of this and another vector * This is symantically treating the vector like a point */ - SIMD_FORCE_INLINE btScalar distance2(const btVector3 &v) const; + SIMD_FORCE_INLINE btScalar distance2(const btVector3& v) const; - /**@brief Return the distance between the ends of this and another vector + /**@brief Return the distance between the ends of this and another vector * This is symantically treating the vector like a point */ - SIMD_FORCE_INLINE btScalar distance(const btVector3 &v) const; + SIMD_FORCE_INLINE btScalar distance(const btVector3& v) const; - SIMD_FORCE_INLINE btVector3 &safeNormalize() { - btVector3 absVec = this->absolute(); - int32_t maxIndex = absVec.maxAxis(); - if (absVec[maxIndex] > 0) { - *this /= absVec[maxIndex]; - return *this /= length(); - } - setValue(1, 0, 0); - return *this; - } + SIMD_FORCE_INLINE btVector3& safeNormalize() + { + btVector3 absVec = this->absolute(); + int32_t maxIndex = absVec.maxAxis(); + if (absVec[maxIndex] > 0) { + *this /= absVec[maxIndex]; + return * this /= length(); + } + setValue(1, 0, 0); + return *this; + } - /**@brief Normalize this vector + /**@brief Normalize this vector * x^2 + y^2 + z^2 = 1 */ - SIMD_FORCE_INLINE btVector3 &normalize() { - return *this /= length(); - } + SIMD_FORCE_INLINE btVector3& normalize() + { + return * this /= length(); + } - /**@brief Return a normalized version of this vector */ - SIMD_FORCE_INLINE btVector3 normalized() const; + /**@brief Return a normalized version of this vector */ + SIMD_FORCE_INLINE btVector3 normalized() const; - /**@brief Return a rotated version of this vector + /**@brief Return a rotated version of this vector * @param wAxis The axis to rotate about * @param angle The angle to rotate by */ - SIMD_FORCE_INLINE btVector3 rotate(const btVector3 &wAxis, const btScalar angle) const; + SIMD_FORCE_INLINE btVector3 rotate(const btVector3& wAxis, const btScalar angle) const; - /**@brief Return the angle between this and another vector + /**@brief Return the angle between this and another vector * @param v The other vector */ - SIMD_FORCE_INLINE btScalar angle(const btVector3 &v) const { - btScalar s = btSqrt(length2() * v.length2()); - btFullAssert(s != btScalar(0.0)); - return btAcos(dot(v) / s); - } - /**@brief Return a vector will the absolute values of each element */ - SIMD_FORCE_INLINE btVector3 absolute() const { - return btVector3( - btFabs(m_floats[0]), - btFabs(m_floats[1]), - btFabs(m_floats[2])); - } - /**@brief Return the cross product between this and 
another vector + SIMD_FORCE_INLINE btScalar angle(const btVector3& v) const + { + btScalar s = btSqrt(length2() * v.length2()); + btFullAssert(s != btScalar(0.0)); + return btAcos(dot(v) / s); + } + /**@brief Return a vector will the absolute values of each element */ + SIMD_FORCE_INLINE btVector3 absolute() const + { + return btVector3( + btFabs(m_floats[0]), + btFabs(m_floats[1]), + btFabs(m_floats[2])); + } + /**@brief Return the cross product between this and another vector * @param v The other vector */ - SIMD_FORCE_INLINE btVector3 cross(const btVector3 &v) const { - return btVector3( - m_floats[1] * v.m_floats[2] - m_floats[2] * v.m_floats[1], - m_floats[2] * v.m_floats[0] - m_floats[0] * v.m_floats[2], - m_floats[0] * v.m_floats[1] - m_floats[1] * v.m_floats[0]); - } + SIMD_FORCE_INLINE btVector3 cross(const btVector3& v) const + { + return btVector3( + m_floats[1] * v.m_floats[2] - m_floats[2] * v.m_floats[1], + m_floats[2] * v.m_floats[0] - m_floats[0] * v.m_floats[2], + m_floats[0] * v.m_floats[1] - m_floats[1] * v.m_floats[0]); + } - SIMD_FORCE_INLINE btScalar triple(const btVector3 &v1, const btVector3 &v2) const { - return m_floats[0] * (v1.m_floats[1] * v2.m_floats[2] - v1.m_floats[2] * v2.m_floats[1]) + m_floats[1] * (v1.m_floats[2] * v2.m_floats[0] - v1.m_floats[0] * v2.m_floats[2]) + m_floats[2] * (v1.m_floats[0] * v2.m_floats[1] - v1.m_floats[1] * v2.m_floats[0]); - } + SIMD_FORCE_INLINE btScalar triple(const btVector3& v1, const btVector3& v2) const + { + return m_floats[0] * (v1.m_floats[1] * v2.m_floats[2] - v1.m_floats[2] * v2.m_floats[1]) + m_floats[1] * (v1.m_floats[2] * v2.m_floats[0] - v1.m_floats[0] * v2.m_floats[2]) + m_floats[2] * (v1.m_floats[0] * v2.m_floats[1] - v1.m_floats[1] * v2.m_floats[0]); + } - /**@brief Return the axis with the smallest value + /**@brief Return the axis with the smallest value * Note return values are 0,1,2 for x, y, or z */ - SIMD_FORCE_INLINE int32_t minAxis() const { - return m_floats[0] < m_floats[1] ? (m_floats[0] < m_floats[2] ? 0 : 2) : (m_floats[1] < m_floats[2] ? 1 : 2); - } + SIMD_FORCE_INLINE int32_t minAxis() const + { + return m_floats[0] < m_floats[1] ? (m_floats[0] < m_floats[2] ? 0 : 2) : (m_floats[1] < m_floats[2] ? 1 : 2); + } - /**@brief Return the axis with the largest value + /**@brief Return the axis with the largest value * Note return values are 0,1,2 for x, y, or z */ - SIMD_FORCE_INLINE int32_t maxAxis() const { - return m_floats[0] < m_floats[1] ? (m_floats[1] < m_floats[2] ? 2 : 1) : (m_floats[0] < m_floats[2] ? 2 : 0); - } + SIMD_FORCE_INLINE int32_t maxAxis() const + { + return m_floats[0] < m_floats[1] ? (m_floats[1] < m_floats[2] ? 2 : 1) : (m_floats[0] < m_floats[2] ? 
2 : 0); + } - SIMD_FORCE_INLINE int32_t furthestAxis() const { - return absolute().minAxis(); - } + SIMD_FORCE_INLINE int32_t furthestAxis() const + { + return absolute().minAxis(); + } - SIMD_FORCE_INLINE int32_t closestAxis() const { - return absolute().maxAxis(); - } + SIMD_FORCE_INLINE int32_t closestAxis() const + { + return absolute().maxAxis(); + } - SIMD_FORCE_INLINE void setInterpolate3(const btVector3 &v0, const btVector3 &v1, btScalar rt) { - btScalar s = btScalar(1.0) - rt; - m_floats[0] = s * v0.m_floats[0] + rt * v1.m_floats[0]; - m_floats[1] = s * v0.m_floats[1] + rt * v1.m_floats[1]; - m_floats[2] = s * v0.m_floats[2] + rt * v1.m_floats[2]; - //don't do the unused w component - // m_co[3] = s * v0[3] + rt * v1[3]; - } + SIMD_FORCE_INLINE void setInterpolate3(const btVector3& v0, const btVector3& v1, btScalar rt) + { + btScalar s = btScalar(1.0) - rt; + m_floats[0] = s * v0.m_floats[0] + rt * v1.m_floats[0]; + m_floats[1] = s * v0.m_floats[1] + rt * v1.m_floats[1]; + m_floats[2] = s * v0.m_floats[2] + rt * v1.m_floats[2]; + //don't do the unused w component + // m_co[3] = s * v0[3] + rt * v1[3]; + } - /**@brief Return the linear interpolation between this and another vector + /**@brief Return the linear interpolation between this and another vector * @param v The other vector * @param t The ration of this to v (t = 0 => return this, t=1 => return other) */ - SIMD_FORCE_INLINE btVector3 lerp(const btVector3 &v, const btScalar &t) const { - return btVector3(m_floats[0] + (v.m_floats[0] - m_floats[0]) * t, - m_floats[1] + (v.m_floats[1] - m_floats[1]) * t, - m_floats[2] + (v.m_floats[2] - m_floats[2]) * t); - } + SIMD_FORCE_INLINE btVector3 lerp(const btVector3& v, const btScalar& t) const + { + return btVector3(m_floats[0] + (v.m_floats[0] - m_floats[0]) * t, + m_floats[1] + (v.m_floats[1] - m_floats[1]) * t, + m_floats[2] + (v.m_floats[2] - m_floats[2]) * t); + } - /**@brief Elementwise multiply this vector by the other + /**@brief Elementwise multiply this vector by the other * @param v The other vector */ - SIMD_FORCE_INLINE btVector3 &operator*=(const btVector3 &v) { - m_floats[0] *= v.m_floats[0]; - m_floats[1] *= v.m_floats[1]; - m_floats[2] *= v.m_floats[2]; - return *this; - } + SIMD_FORCE_INLINE btVector3& operator*=(const btVector3& v) + { + m_floats[0] *= v.m_floats[0]; + m_floats[1] *= v.m_floats[1]; + m_floats[2] *= v.m_floats[2]; + return *this; + } - /**@brief Return the x value */ - SIMD_FORCE_INLINE const btScalar &getX() const { return m_floats[0]; } - /**@brief Return the y value */ - SIMD_FORCE_INLINE const btScalar &getY() const { return m_floats[1]; } - /**@brief Return the z value */ - SIMD_FORCE_INLINE const btScalar &getZ() const { return m_floats[2]; } - /**@brief Set the x value */ - SIMD_FORCE_INLINE void setX(btScalar x) { m_floats[0] = x; }; - /**@brief Set the y value */ - SIMD_FORCE_INLINE void setY(btScalar y) { m_floats[1] = y; }; - /**@brief Set the z value */ - SIMD_FORCE_INLINE void setZ(btScalar z) { m_floats[2] = z; }; - /**@brief Set the w value */ - SIMD_FORCE_INLINE void setW(btScalar w) { m_floats[3] = w; }; - /**@brief Return the x value */ - SIMD_FORCE_INLINE const btScalar &x() const { return m_floats[0]; } - /**@brief Return the y value */ - SIMD_FORCE_INLINE const btScalar &y() const { return m_floats[1]; } - /**@brief Return the z value */ - SIMD_FORCE_INLINE const btScalar &z() const { return m_floats[2]; } - /**@brief Return the w value */ - SIMD_FORCE_INLINE const btScalar &w() const { return m_floats[3]; } + /**@brief Return 
the x value */ + SIMD_FORCE_INLINE const btScalar& getX() const { return m_floats[0]; } + /**@brief Return the y value */ + SIMD_FORCE_INLINE const btScalar& getY() const { return m_floats[1]; } + /**@brief Return the z value */ + SIMD_FORCE_INLINE const btScalar& getZ() const { return m_floats[2]; } + /**@brief Set the x value */ + SIMD_FORCE_INLINE void setX(btScalar x) { m_floats[0] = x; }; + /**@brief Set the y value */ + SIMD_FORCE_INLINE void setY(btScalar y) { m_floats[1] = y; }; + /**@brief Set the z value */ + SIMD_FORCE_INLINE void setZ(btScalar z) { m_floats[2] = z; }; + /**@brief Set the w value */ + SIMD_FORCE_INLINE void setW(btScalar w) { m_floats[3] = w; }; + /**@brief Return the x value */ + SIMD_FORCE_INLINE const btScalar& x() const { return m_floats[0]; } + /**@brief Return the y value */ + SIMD_FORCE_INLINE const btScalar& y() const { return m_floats[1]; } + /**@brief Return the z value */ + SIMD_FORCE_INLINE const btScalar& z() const { return m_floats[2]; } + /**@brief Return the w value */ + SIMD_FORCE_INLINE const btScalar& w() const { return m_floats[3]; } - //SIMD_FORCE_INLINE btScalar& operator[](int32_t i) { return (&m_floats[0])[i]; } - //SIMD_FORCE_INLINE const btScalar& operator[](int32_t i) const { return (&m_floats[0])[i]; } - ///operator btScalar*() replaces operator[], using implicit conversion. We added operator != and operator == to avoid pointer comparisons. - SIMD_FORCE_INLINE operator btScalar *() { return &m_floats[0]; } - SIMD_FORCE_INLINE operator const btScalar *() const { return &m_floats[0]; } + //SIMD_FORCE_INLINE btScalar& operator[](int32_t i) { return (&m_floats[0])[i]; } + //SIMD_FORCE_INLINE const btScalar& operator[](int32_t i) const { return (&m_floats[0])[i]; } + ///operator btScalar*() replaces operator[], using implicit conversion. We added operator != and operator == to avoid pointer comparisons. 
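// The two conversion operators below let a btVector3 decay to a btScalar pointer,
// so v[i] indexes straight into m_floats without a dedicated operator[]; the
// overloaded operator== / operator!= then compare components rather than the
// decayed pointers. Illustrative sketch (not upstream code):
//   btVector3 v(1, 2, 3);
//   btScalar y = v[1]; // reads m_floats[1] through operator btScalar*()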
+ SIMD_FORCE_INLINE operator btScalar*() { return &m_floats[0]; } + SIMD_FORCE_INLINE operator const btScalar*() const { return &m_floats[0]; } - SIMD_FORCE_INLINE bool operator==(const btVector3 &other) const { - return ((m_floats[3] == other.m_floats[3]) && (m_floats[2] == other.m_floats[2]) && (m_floats[1] == other.m_floats[1]) && (m_floats[0] == other.m_floats[0])); - } + SIMD_FORCE_INLINE bool operator==(const btVector3& other) const + { + return ((m_floats[3] == other.m_floats[3]) && (m_floats[2] == other.m_floats[2]) && (m_floats[1] == other.m_floats[1]) && (m_floats[0] == other.m_floats[0])); + } - SIMD_FORCE_INLINE bool operator!=(const btVector3 &other) const { - return !(*this == other); - } + SIMD_FORCE_INLINE bool operator!=(const btVector3& other) const + { + return !(*this == other); + } - /**@brief Set each element to the max of the current values and the values of another btVector3 + /**@brief Set each element to the max of the current values and the values of another btVector3 * @param other The other btVector3 to compare with */ - SIMD_FORCE_INLINE void setMax(const btVector3 &other) { - btSetMax(m_floats[0], other.m_floats[0]); - btSetMax(m_floats[1], other.m_floats[1]); - btSetMax(m_floats[2], other.m_floats[2]); - btSetMax(m_floats[3], other.w()); - } - /**@brief Set each element to the min of the current values and the values of another btVector3 + SIMD_FORCE_INLINE void setMax(const btVector3& other) + { + btSetMax(m_floats[0], other.m_floats[0]); + btSetMax(m_floats[1], other.m_floats[1]); + btSetMax(m_floats[2], other.m_floats[2]); + btSetMax(m_floats[3], other.w()); + } + /**@brief Set each element to the min of the current values and the values of another btVector3 * @param other The other btVector3 to compare with */ - SIMD_FORCE_INLINE void setMin(const btVector3 &other) { - btSetMin(m_floats[0], other.m_floats[0]); - btSetMin(m_floats[1], other.m_floats[1]); - btSetMin(m_floats[2], other.m_floats[2]); - btSetMin(m_floats[3], other.w()); - } + SIMD_FORCE_INLINE void setMin(const btVector3& other) + { + btSetMin(m_floats[0], other.m_floats[0]); + btSetMin(m_floats[1], other.m_floats[1]); + btSetMin(m_floats[2], other.m_floats[2]); + btSetMin(m_floats[3], other.w()); + } - SIMD_FORCE_INLINE void setValue(const btScalar &x, const btScalar &y, const btScalar &z) { - m_floats[0] = x; - m_floats[1] = y; - m_floats[2] = z; - m_floats[3] = btScalar(0.); - } + SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z) + { + m_floats[0] = x; + m_floats[1] = y; + m_floats[2] = z; + m_floats[3] = btScalar(0.); + } - void getSkewSymmetricMatrix(btVector3 * v0, btVector3 * v1, btVector3 * v2) const { - v0->setValue(0., -z(), y()); - v1->setValue(z(), 0., -x()); - v2->setValue(-y(), x(), 0.); - } + void getSkewSymmetricMatrix(btVector3 * v0, btVector3 * v1, btVector3 * v2) const + { + v0->setValue(0., -z(), y()); + v1->setValue(z(), 0., -x()); + v2->setValue(-y(), x(), 0.); + } - void setZero() { - setValue(btScalar(0.), btScalar(0.), btScalar(0.)); - } + void setZero() + { + setValue(btScalar(0.), btScalar(0.), btScalar(0.)); + } - SIMD_FORCE_INLINE bool isZero() const { - return m_floats[0] == btScalar(0) && m_floats[1] == btScalar(0) && m_floats[2] == btScalar(0); - } + SIMD_FORCE_INLINE bool isZero() const + { + return m_floats[0] == btScalar(0) && m_floats[1] == btScalar(0) && m_floats[2] == btScalar(0); + } - SIMD_FORCE_INLINE bool fuzzyZero() const { - return length2() < SIMD_EPSILON; - } + SIMD_FORCE_INLINE bool fuzzyZero() const + { + 
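// Compares the squared length against SIMD_EPSILON, so near-zero vectors are
// detected without taking a square root; this is a looser test than isZero()
// above, which requires all three components to be exactly 0.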
return length2() < SIMD_EPSILON; + } - SIMD_FORCE_INLINE void serialize(struct btVector3Data & dataOut) const; + SIMD_FORCE_INLINE void serialize(struct btVector3Data & dataOut) const; - SIMD_FORCE_INLINE void deSerialize(const struct btVector3Data &dataIn); + SIMD_FORCE_INLINE void deSerialize(const struct btVector3Data& dataIn); - SIMD_FORCE_INLINE void serializeFloat(struct btVector3FloatData & dataOut) const; + SIMD_FORCE_INLINE void serializeFloat(struct btVector3FloatData & dataOut) const; - SIMD_FORCE_INLINE void deSerializeFloat(const struct btVector3FloatData &dataIn); + SIMD_FORCE_INLINE void deSerializeFloat(const struct btVector3FloatData& dataIn); - SIMD_FORCE_INLINE void serializeDouble(struct btVector3DoubleData & dataOut) const; + SIMD_FORCE_INLINE void serializeDouble(struct btVector3DoubleData & dataOut) const; - SIMD_FORCE_INLINE void deSerializeDouble(const struct btVector3DoubleData &dataIn); + SIMD_FORCE_INLINE void deSerializeDouble(const struct btVector3DoubleData& dataIn); }; /**@brief Return the sum of two vectors (Point symantics)*/ SIMD_FORCE_INLINE btVector3 -operator+(const btVector3 &v1, const btVector3 &v2) { - return btVector3(v1.m_floats[0] + v2.m_floats[0], v1.m_floats[1] + v2.m_floats[1], v1.m_floats[2] + v2.m_floats[2]); +operator+(const btVector3& v1, const btVector3& v2) +{ + return btVector3(v1.m_floats[0] + v2.m_floats[0], v1.m_floats[1] + v2.m_floats[1], v1.m_floats[2] + v2.m_floats[2]); } /**@brief Return the elementwise product of two vectors */ SIMD_FORCE_INLINE btVector3 -operator*(const btVector3 &v1, const btVector3 &v2) { - return btVector3(v1.m_floats[0] * v2.m_floats[0], v1.m_floats[1] * v2.m_floats[1], v1.m_floats[2] * v2.m_floats[2]); +operator*(const btVector3& v1, const btVector3& v2) +{ + return btVector3(v1.m_floats[0] * v2.m_floats[0], v1.m_floats[1] * v2.m_floats[1], v1.m_floats[2] * v2.m_floats[2]); } /**@brief Return the difference between two vectors */ SIMD_FORCE_INLINE btVector3 -operator-(const btVector3 &v1, const btVector3 &v2) { - return btVector3(v1.m_floats[0] - v2.m_floats[0], v1.m_floats[1] - v2.m_floats[1], v1.m_floats[2] - v2.m_floats[2]); +operator-(const btVector3& v1, const btVector3& v2) +{ + return btVector3(v1.m_floats[0] - v2.m_floats[0], v1.m_floats[1] - v2.m_floats[1], v1.m_floats[2] - v2.m_floats[2]); } /**@brief Return the negative of the vector */ SIMD_FORCE_INLINE btVector3 -operator-(const btVector3 &v) { - return btVector3(-v.m_floats[0], -v.m_floats[1], -v.m_floats[2]); +operator-(const btVector3& v) +{ + return btVector3(-v.m_floats[0], -v.m_floats[1], -v.m_floats[2]); } /**@brief Return the vector scaled by s */ SIMD_FORCE_INLINE btVector3 -operator*(const btVector3 &v, const btScalar &s) { - return btVector3(v.m_floats[0] * s, v.m_floats[1] * s, v.m_floats[2] * s); +operator*(const btVector3& v, const btScalar& s) +{ + return btVector3(v.m_floats[0] * s, v.m_floats[1] * s, v.m_floats[2] * s); } /**@brief Return the vector scaled by s */ SIMD_FORCE_INLINE btVector3 -operator*(const btScalar &s, const btVector3 &v) { - return v * s; +operator*(const btScalar& s, const btVector3& v) +{ + return v * s; } /**@brief Return the vector inversely scaled by s */ SIMD_FORCE_INLINE btVector3 -operator/(const btVector3 &v, const btScalar &s) { - btFullAssert(s != btScalar(0.0)); - return v * (btScalar(1.0) / s); +operator/(const btVector3& v, const btScalar& s) +{ + btFullAssert(s != btScalar(0.0)); + return v * (btScalar(1.0) / s); } /**@brief Return the vector inversely scaled by s */ SIMD_FORCE_INLINE 
btVector3 -operator/(const btVector3 &v1, const btVector3 &v2) { - return btVector3(v1.m_floats[0] / v2.m_floats[0], v1.m_floats[1] / v2.m_floats[1], v1.m_floats[2] / v2.m_floats[2]); +operator/(const btVector3& v1, const btVector3& v2) +{ + return btVector3(v1.m_floats[0] / v2.m_floats[0], v1.m_floats[1] / v2.m_floats[1], v1.m_floats[2] / v2.m_floats[2]); } /**@brief Return the dot product between two vectors */ SIMD_FORCE_INLINE btScalar -btDot(const btVector3 &v1, const btVector3 &v2) { - return v1.dot(v2); +btDot(const btVector3& v1, const btVector3& v2) +{ + return v1.dot(v2); } /**@brief Return the distance squared between two vectors */ SIMD_FORCE_INLINE btScalar -btDistance2(const btVector3 &v1, const btVector3 &v2) { - return v1.distance2(v2); +btDistance2(const btVector3& v1, const btVector3& v2) +{ + return v1.distance2(v2); } /**@brief Return the distance between two vectors */ SIMD_FORCE_INLINE btScalar -btDistance(const btVector3 &v1, const btVector3 &v2) { - return v1.distance(v2); +btDistance(const btVector3& v1, const btVector3& v2) +{ + return v1.distance(v2); } /**@brief Return the angle between two vectors */ SIMD_FORCE_INLINE btScalar -btAngle(const btVector3 &v1, const btVector3 &v2) { - return v1.angle(v2); +btAngle(const btVector3& v1, const btVector3& v2) +{ + return v1.angle(v2); } /**@brief Return the cross product of two vectors */ SIMD_FORCE_INLINE btVector3 -btCross(const btVector3 &v1, const btVector3 &v2) { - return v1.cross(v2); +btCross(const btVector3& v1, const btVector3& v2) +{ + return v1.cross(v2); } SIMD_FORCE_INLINE btScalar -btTriple(const btVector3 &v1, const btVector3 &v2, const btVector3 &v3) { - return v1.triple(v2, v3); +btTriple(const btVector3& v1, const btVector3& v2, const btVector3& v3) +{ + return v1.triple(v2, v3); } /**@brief Return the linear interpolation between two vectors @@ -418,236 +462,254 @@ btTriple(const btVector3 &v1, const btVector3 &v2, const btVector3 &v3) { * @param v2 The other vector * @param t The ration of this to v (t = 0 => return v1, t=1 => return v2) */ SIMD_FORCE_INLINE btVector3 -lerp(const btVector3 &v1, const btVector3 &v2, const btScalar &t) { - return v1.lerp(v2, t); +lerp(const btVector3& v1, const btVector3& v2, const btScalar& t) +{ + return v1.lerp(v2, t); } -SIMD_FORCE_INLINE btScalar btVector3::distance2(const btVector3 &v) const { - return (v - *this).length2(); +SIMD_FORCE_INLINE btScalar btVector3::distance2(const btVector3& v) const +{ + return (v - *this).length2(); } -SIMD_FORCE_INLINE btScalar btVector3::distance(const btVector3 &v) const { - return (v - *this).length(); +SIMD_FORCE_INLINE btScalar btVector3::distance(const btVector3& v) const +{ + return (v - *this).length(); } -SIMD_FORCE_INLINE btVector3 btVector3::normalized() const { - return *this / length(); +SIMD_FORCE_INLINE btVector3 btVector3::normalized() const +{ + return *this / length(); } -SIMD_FORCE_INLINE btVector3 btVector3::rotate(const btVector3 &wAxis, const btScalar angle) const { - // wAxis must be a unit lenght vector +SIMD_FORCE_INLINE btVector3 btVector3::rotate(const btVector3& wAxis, const btScalar angle) const +{ + // wAxis must be a unit lenght vector - btVector3 o = wAxis * wAxis.dot(*this); - btVector3 x = *this - o; - btVector3 y; + btVector3 o = wAxis * wAxis.dot(*this); + btVector3 x = *this - o; + btVector3 y; - y = wAxis.cross(*this); + y = wAxis.cross(*this); - return (o + x * btCos(angle) + y * btSin(angle)); + return (o + x * btCos(angle) + y * btSin(angle)); } class btVector4 : public btVector3 { 
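// btVector4 presumably exists so the same four-float storage can carry a full
// (x, y, z, w) tuple: it reuses btVector3's m_floats array and gives the fourth
// element meaning through the w-aware helpers below (getW, absolute4, maxAxis4,
// minAxis4 and the four-argument setValue).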
public: - SIMD_FORCE_INLINE btVector4() {} + SIMD_FORCE_INLINE btVector4() {} - SIMD_FORCE_INLINE btVector4(const btScalar &x, const btScalar &y, const btScalar &z, const btScalar &w) : - btVector3(x, y, z) { - m_floats[3] = w; - } + SIMD_FORCE_INLINE btVector4(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w) + : btVector3(x, y, z) + { + m_floats[3] = w; + } - SIMD_FORCE_INLINE btVector4 absolute4() const { - return btVector4( - btFabs(m_floats[0]), - btFabs(m_floats[1]), - btFabs(m_floats[2]), - btFabs(m_floats[3])); - } + SIMD_FORCE_INLINE btVector4 absolute4() const + { + return btVector4( + btFabs(m_floats[0]), + btFabs(m_floats[1]), + btFabs(m_floats[2]), + btFabs(m_floats[3])); + } - btScalar getW() const { return m_floats[3]; } + btScalar getW() const { return m_floats[3]; } - SIMD_FORCE_INLINE int32_t maxAxis4() const { - int32_t maxIndex = -1; - btScalar maxVal = btScalar(-BT_LARGE_FLOAT); - if (m_floats[0] > maxVal) { - maxIndex = 0; - maxVal = m_floats[0]; - } - if (m_floats[1] > maxVal) { - maxIndex = 1; - maxVal = m_floats[1]; - } - if (m_floats[2] > maxVal) { - maxIndex = 2; - maxVal = m_floats[2]; - } - if (m_floats[3] > maxVal) { - maxIndex = 3; - } - return maxIndex; - } + SIMD_FORCE_INLINE int32_t maxAxis4() const + { + int32_t maxIndex = -1; + btScalar maxVal = btScalar(-BT_LARGE_FLOAT); + if (m_floats[0] > maxVal) { + maxIndex = 0; + maxVal = m_floats[0]; + } + if (m_floats[1] > maxVal) { + maxIndex = 1; + maxVal = m_floats[1]; + } + if (m_floats[2] > maxVal) { + maxIndex = 2; + maxVal = m_floats[2]; + } + if (m_floats[3] > maxVal) { + maxIndex = 3; + } + return maxIndex; + } - SIMD_FORCE_INLINE int32_t minAxis4() const { - int32_t minIndex = -1; - btScalar minVal = btScalar(BT_LARGE_FLOAT); - if (m_floats[0] < minVal) { - minIndex = 0; - minVal = m_floats[0]; - } - if (m_floats[1] < minVal) { - minIndex = 1; - minVal = m_floats[1]; - } - if (m_floats[2] < minVal) { - minIndex = 2; - minVal = m_floats[2]; - } - if (m_floats[3] < minVal) { - minIndex = 3; - } + SIMD_FORCE_INLINE int32_t minAxis4() const + { + int32_t minIndex = -1; + btScalar minVal = btScalar(BT_LARGE_FLOAT); + if (m_floats[0] < minVal) { + minIndex = 0; + minVal = m_floats[0]; + } + if (m_floats[1] < minVal) { + minIndex = 1; + minVal = m_floats[1]; + } + if (m_floats[2] < minVal) { + minIndex = 2; + minVal = m_floats[2]; + } + if (m_floats[3] < minVal) { + minIndex = 3; + } - return minIndex; - } + return minIndex; + } - SIMD_FORCE_INLINE int32_t closestAxis4() const { - return absolute4().maxAxis4(); - } + SIMD_FORCE_INLINE int32_t closestAxis4() const + { + return absolute4().maxAxis4(); + } - /**@brief Set x,y,z and zero w + /**@brief Set x,y,z and zero w * @param x Value of x * @param y Value of y * @param z Value of z */ - /* void getValue(btScalar *m) const + /* void getValue(btScalar *m) const { m[0] = m_floats[0]; m[1] = m_floats[1]; m[2] =m_floats[2]; } */ - /**@brief Set the values + /**@brief Set the values * @param x Value of x * @param y Value of y * @param z Value of z * @param w Value of w */ - SIMD_FORCE_INLINE void setValue(const btScalar &x, const btScalar &y, const btScalar &z, const btScalar &w) { - m_floats[0] = x; - m_floats[1] = y; - m_floats[2] = z; - m_floats[3] = w; - } + SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w) + { + m_floats[0] = x; + m_floats[1] = y; + m_floats[2] = z; + m_floats[3] = w; + } }; ///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform 
serialization -SIMD_FORCE_INLINE void btSwapScalarEndian(const btScalar &sourceVal, btScalar &destVal) { +SIMD_FORCE_INLINE void btSwapScalarEndian(const btScalar& sourceVal, btScalar& destVal) +{ #ifdef BT_USE_DOUBLE_PRECISION - unsigned char *dest = (unsigned char *)&destVal; - unsigned char *src = (unsigned char *)&sourceVal; - dest[0] = src[7]; - dest[1] = src[6]; - dest[2] = src[5]; - dest[3] = src[4]; - dest[4] = src[3]; - dest[5] = src[2]; - dest[6] = src[1]; - dest[7] = src[0]; + unsigned char* dest = (unsigned char*)&destVal; + unsigned char* src = (unsigned char*)&sourceVal; + dest[0] = src[7]; + dest[1] = src[6]; + dest[2] = src[5]; + dest[3] = src[4]; + dest[4] = src[3]; + dest[5] = src[2]; + dest[6] = src[1]; + dest[7] = src[0]; #else - unsigned char *dest = (unsigned char *)&destVal; - unsigned char *src = (unsigned char *)&sourceVal; - dest[0] = src[3]; - dest[1] = src[2]; - dest[2] = src[1]; - dest[3] = src[0]; + unsigned char* dest = (unsigned char*)&destVal; + unsigned char* src = (unsigned char*)&sourceVal; + dest[0] = src[3]; + dest[1] = src[2]; + dest[2] = src[1]; + dest[3] = src[0]; #endif //BT_USE_DOUBLE_PRECISION } ///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization -SIMD_FORCE_INLINE void btSwapVector3Endian(const btVector3 &sourceVec, btVector3 &destVec) { - for (int32_t i = 0; i < 4; i++) { - btSwapScalarEndian(sourceVec[i], destVec[i]); - } +SIMD_FORCE_INLINE void btSwapVector3Endian(const btVector3& sourceVec, btVector3& destVec) +{ + for (int32_t i = 0; i < 4; i++) { + btSwapScalarEndian(sourceVec[i], destVec[i]); + } } ///btUnSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization -SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3 &vector) { +SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3& vector) +{ - btVector3 swappedVec; - for (int32_t i = 0; i < 4; i++) { - btSwapScalarEndian(vector[i], swappedVec[i]); - } - vector = swappedVec; + btVector3 swappedVec; + for (int32_t i = 0; i < 4; i++) { + btSwapScalarEndian(vector[i], swappedVec[i]); + } + vector = swappedVec; } template -SIMD_FORCE_INLINE void btPlaneSpace1(const T &n, T &p, T &q) { - if (btFabs(n[2]) > SIMDSQRT12) { - // choose p in y-z plane - btScalar a = n[1] * n[1] + n[2] * n[2]; - btScalar k = btRecipSqrt(a); - p[0] = 0; - p[1] = -n[2] * k; - p[2] = n[1] * k; - // set q = n x p - q[0] = a * k; - q[1] = -n[0] * p[2]; - q[2] = n[0] * p[1]; - } else { - // choose p in x-y plane - btScalar a = n[0] * n[0] + n[1] * n[1]; - btScalar k = btRecipSqrt(a); - p[0] = -n[1] * k; - p[1] = n[0] * k; - p[2] = 0; - // set q = n x p - q[0] = -n[2] * p[1]; - q[1] = n[2] * p[0]; - q[2] = a * k; - } +SIMD_FORCE_INLINE void btPlaneSpace1(const T& n, T& p, T& q) +{ + if (btFabs(n[2]) > SIMDSQRT12) { + // choose p in y-z plane + btScalar a = n[1] * n[1] + n[2] * n[2]; + btScalar k = btRecipSqrt(a); + p[0] = 0; + p[1] = -n[2] * k; + p[2] = n[1] * k; + // set q = n x p + q[0] = a * k; + q[1] = -n[0] * p[2]; + q[2] = n[0] * p[1]; + } + else { + // choose p in x-y plane + btScalar a = n[0] * n[0] + n[1] * n[1]; + btScalar k = btRecipSqrt(a); + p[0] = -n[1] * k; + p[1] = n[0] * k; + p[2] = 0; + // set q = n x p + q[0] = -n[2] * p[1]; + q[1] = n[2] * p[0]; + q[2] = a * k; + } } struct btVector3FloatData { - float m_floats[4]; + float m_floats[4]; }; struct btVector3DoubleData { - double m_floats[4]; + double m_floats[4]; }; -SIMD_FORCE_INLINE void btVector3::serializeFloat(struct btVector3FloatData &dataOut) const { - ///could 
also do a memcpy, check if it is worth it - for (int32_t i = 0; i < 4; i++) - dataOut.m_floats[i] = float(m_floats[i]); +SIMD_FORCE_INLINE void btVector3::serializeFloat(struct btVector3FloatData& dataOut) const +{ + ///could also do a memcpy, check if it is worth it + for (int32_t i = 0; i < 4; i++) + dataOut.m_floats[i] = float(m_floats[i]); } -SIMD_FORCE_INLINE void btVector3::deSerializeFloat(const struct btVector3FloatData &dataIn) { - for (int32_t i = 0; i < 4; i++) - m_floats[i] = btScalar(dataIn.m_floats[i]); +SIMD_FORCE_INLINE void btVector3::deSerializeFloat(const struct btVector3FloatData& dataIn) +{ + for (int32_t i = 0; i < 4; i++) + m_floats[i] = btScalar(dataIn.m_floats[i]); } -SIMD_FORCE_INLINE void btVector3::serializeDouble(struct btVector3DoubleData &dataOut) const { - ///could also do a memcpy, check if it is worth it - for (int32_t i = 0; i < 4; i++) - dataOut.m_floats[i] = double(m_floats[i]); +SIMD_FORCE_INLINE void btVector3::serializeDouble(struct btVector3DoubleData& dataOut) const +{ + ///could also do a memcpy, check if it is worth it + for (int32_t i = 0; i < 4; i++) + dataOut.m_floats[i] = double(m_floats[i]); } -SIMD_FORCE_INLINE void btVector3::deSerializeDouble(const struct btVector3DoubleData &dataIn) { - for (int32_t i = 0; i < 4; i++) - m_floats[i] = btScalar(dataIn.m_floats[i]); +SIMD_FORCE_INLINE void btVector3::deSerializeDouble(const struct btVector3DoubleData& dataIn) +{ + for (int32_t i = 0; i < 4; i++) + m_floats[i] = btScalar(dataIn.m_floats[i]); } -SIMD_FORCE_INLINE void btVector3::serialize(struct btVector3Data &dataOut) const { - ///could also do a memcpy, check if it is worth it - for (int32_t i = 0; i < 4; i++) - dataOut.m_floats[i] = m_floats[i]; +SIMD_FORCE_INLINE void btVector3::serialize(struct btVector3Data& dataOut) const +{ + ///could also do a memcpy, check if it is worth it + for (int32_t i = 0; i < 4; i++) + dataOut.m_floats[i] = m_floats[i]; } -SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3Data &dataIn) { - for (int32_t i = 0; i < 4; i++) - m_floats[i] = dataIn.m_floats[i]; +SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3Data& dataIn) +{ + for (int32_t i = 0; i < 4; i++) + m_floats[i] = dataIn.m_floats[i]; } -//GODOT ADDITION -}; // namespace VHACD -// - #endif //BT_VECTOR3_H diff --git a/thirdparty/vhacd/src/btAlignedAllocator.cpp b/thirdparty/vhacd/src/btAlignedAllocator.cpp index e137032463b..11d594f6c95 100644 --- a/thirdparty/vhacd/src/btAlignedAllocator.cpp +++ b/thirdparty/vhacd/src/btAlignedAllocator.cpp @@ -15,157 +15,166 @@ subject to the following restrictions: #include "btAlignedAllocator.h" -//GODOT ADDITION -namespace VHACD { -// - #ifdef _MSC_VER -#pragma warning(disable : 4311 4302) +#pragma warning(disable:4311 4302) #endif int32_t gNumAlignedAllocs = 0; int32_t gNumAlignedFree = 0; int32_t gTotalBytesAlignedAllocs = 0; //detect memory leaks -static void *btAllocDefault(size_t size) { - return malloc(size); +static void* btAllocDefault(size_t size) +{ + return malloc(size); } -static void btFreeDefault(void *ptr) { - free(ptr); +static void btFreeDefault(void* ptr) +{ + free(ptr); } -static btAllocFunc *sAllocFunc = btAllocDefault; -static btFreeFunc *sFreeFunc = btFreeDefault; +static btAllocFunc* sAllocFunc = btAllocDefault; +static btFreeFunc* sFreeFunc = btFreeDefault; #if defined(BT_HAS_ALIGNED_ALLOCATOR) #include -static void *btAlignedAllocDefault(size_t size, int32_t alignment) { - return _aligned_malloc(size, (size_t)alignment); +static void* 
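// On toolchains that define BT_HAS_ALIGNED_ALLOCATOR this path simply wraps the
// CRT's _aligned_malloc/_aligned_free; the generic fallback further below
// over-allocates and stashes the original (unaligned) pointer just in front of
// the returned block so btAlignedFreeDefault can recover it.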
btAlignedAllocDefault(size_t size, int32_t alignment) +{ + return _aligned_malloc(size, (size_t)alignment); } -static void btAlignedFreeDefault(void *ptr) { - _aligned_free(ptr); +static void btAlignedFreeDefault(void* ptr) +{ + _aligned_free(ptr); } #elif defined(__CELLOS_LV2__) #include -static inline void *btAlignedAllocDefault(size_t size, int32_t alignment) { - return memalign(alignment, size); +static inline void* btAlignedAllocDefault(size_t size, int32_t alignment) +{ + return memalign(alignment, size); } -static inline void btAlignedFreeDefault(void *ptr) { - free(ptr); +static inline void btAlignedFreeDefault(void* ptr) +{ + free(ptr); } #else -static inline void *btAlignedAllocDefault(size_t size, int32_t alignment) { - void *ret; - char *real; - unsigned long offset; +static inline void* btAlignedAllocDefault(size_t size, int32_t alignment) +{ + void* ret; + char* real; + unsigned long offset; - real = (char *)sAllocFunc(size + sizeof(void *) + (alignment - 1)); - if (real) { - offset = (alignment - (unsigned long)(real + sizeof(void *))) & (alignment - 1); - ret = (void *)((real + sizeof(void *)) + offset); - *((void **)(ret)-1) = (void *)(real); - } else { - ret = (void *)(real); - } - return (ret); + real = (char*)sAllocFunc(size + sizeof(void*) + (alignment - 1)); + if (real) { + offset = (alignment - (unsigned long)(real + sizeof(void*))) & (alignment - 1); + ret = (void*)((real + sizeof(void*)) + offset); + *((void**)(ret)-1) = (void*)(real); + } + else { + ret = (void*)(real); + } + return (ret); } -static inline void btAlignedFreeDefault(void *ptr) { - void *real; +static inline void btAlignedFreeDefault(void* ptr) +{ + void* real; - if (ptr) { - real = *((void **)(ptr)-1); - sFreeFunc(real); - } + if (ptr) { + real = *((void**)(ptr)-1); + sFreeFunc(real); + } } #endif -static btAlignedAllocFunc *sAlignedAllocFunc = btAlignedAllocDefault; -static btAlignedFreeFunc *sAlignedFreeFunc = btAlignedFreeDefault; +static btAlignedAllocFunc* sAlignedAllocFunc = btAlignedAllocDefault; +static btAlignedFreeFunc* sAlignedFreeFunc = btAlignedFreeDefault; -void btAlignedAllocSetCustomAligned(btAlignedAllocFunc *allocFunc, btAlignedFreeFunc *freeFunc) { - sAlignedAllocFunc = allocFunc ? allocFunc : btAlignedAllocDefault; - sAlignedFreeFunc = freeFunc ? freeFunc : btAlignedFreeDefault; +void btAlignedAllocSetCustomAligned(btAlignedAllocFunc* allocFunc, btAlignedFreeFunc* freeFunc) +{ + sAlignedAllocFunc = allocFunc ? allocFunc : btAlignedAllocDefault; + sAlignedFreeFunc = freeFunc ? freeFunc : btAlignedFreeDefault; } -void btAlignedAllocSetCustom(btAllocFunc *allocFunc, btFreeFunc *freeFunc) { - sAllocFunc = allocFunc ? allocFunc : btAllocDefault; - sFreeFunc = freeFunc ? freeFunc : btFreeDefault; +void btAlignedAllocSetCustom(btAllocFunc* allocFunc, btFreeFunc* freeFunc) +{ + sAllocFunc = allocFunc ? allocFunc : btAllocDefault; + sFreeFunc = freeFunc ? 
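// A NULL argument restores the corresponding default malloc/free based hook,
// mirroring btAlignedAllocSetCustomAligned above.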
freeFunc : btFreeDefault; } #ifdef BT_DEBUG_MEMORY_ALLOCATIONS //this generic allocator provides the total allocated number of bytes #include -void *btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char *filename) { - void *ret; - char *real; - unsigned long offset; +void* btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char* filename) +{ + void* ret; + char* real; + unsigned long offset; - gTotalBytesAlignedAllocs += size; - gNumAlignedAllocs++; + gTotalBytesAlignedAllocs += size; + gNumAlignedAllocs++; - real = (char *)sAllocFunc(size + 2 * sizeof(void *) + (alignment - 1)); - if (real) { - offset = (alignment - (unsigned long)(real + 2 * sizeof(void *))) & (alignment - 1); - ret = (void *)((real + 2 * sizeof(void *)) + offset); - *((void **)(ret)-1) = (void *)(real); - *((int32_t *)(ret)-2) = size; - } else { - ret = (void *)(real); //?? - } + real = (char*)sAllocFunc(size + 2 * sizeof(void*) + (alignment - 1)); + if (real) { + offset = (alignment - (unsigned long)(real + 2 * sizeof(void*))) & (alignment - 1); + ret = (void*)((real + 2 * sizeof(void*)) + offset); + *((void**)(ret)-1) = (void*)(real); + *((int32_t*)(ret)-2) = size; + } + else { + ret = (void*)(real); //?? + } - printf("allocation#%d at address %x, from %s,line %d, size %d\n", gNumAlignedAllocs, real, filename, line, size); + printf("allocation#%d at address %x, from %s,line %d, size %d\n", gNumAlignedAllocs, real, filename, line, size); - int32_t *ptr = (int32_t *)ret; - *ptr = 12; - return (ret); + int32_t* ptr = (int32_t*)ret; + *ptr = 12; + return (ret); } -void btAlignedFreeInternal(void *ptr, int32_t line, char *filename) { +void btAlignedFreeInternal(void* ptr, int32_t line, char* filename) +{ - void *real; - gNumAlignedFree++; + void* real; + gNumAlignedFree++; - if (ptr) { - real = *((void **)(ptr)-1); - int32_t size = *((int32_t *)(ptr)-2); - gTotalBytesAlignedAllocs -= size; + if (ptr) { + real = *((void**)(ptr)-1); + int32_t size = *((int32_t*)(ptr)-2); + gTotalBytesAlignedAllocs -= size; - printf("free #%d at address %x, from %s,line %d, size %d\n", gNumAlignedFree, real, filename, line, size); + printf("free #%d at address %x, from %s,line %d, size %d\n", gNumAlignedFree, real, filename, line, size); - sFreeFunc(real); - } else { - printf("NULL ptr\n"); - } + sFreeFunc(real); + } + else { + printf("NULL ptr\n"); + } } #else //BT_DEBUG_MEMORY_ALLOCATIONS -void *btAlignedAllocInternal(size_t size, int32_t alignment) { - gNumAlignedAllocs++; - void *ptr; - ptr = sAlignedAllocFunc(size, alignment); - // printf("btAlignedAllocInternal %d, %x\n",size,ptr); - return ptr; +void* btAlignedAllocInternal(size_t size, int32_t alignment) +{ + gNumAlignedAllocs++; + void* ptr; + ptr = sAlignedAllocFunc(size, alignment); + // printf("btAlignedAllocInternal %d, %x\n",size,ptr); + return ptr; } -void btAlignedFreeInternal(void *ptr) { - if (!ptr) { - return; - } +void btAlignedFreeInternal(void* ptr) +{ + if (!ptr) { + return; + } - gNumAlignedFree++; - // printf("btAlignedFreeInternal %x\n",ptr); - sAlignedFreeFunc(ptr); + gNumAlignedFree++; + // printf("btAlignedFreeInternal %x\n",ptr); + sAlignedFreeFunc(ptr); } -//GODOT ADDITION -}; -// - #endif //BT_DEBUG_MEMORY_ALLOCATIONS diff --git a/thirdparty/vhacd/src/btConvexHullComputer.cpp b/thirdparty/vhacd/src/btConvexHullComputer.cpp index 15956fc0c40..d3d749adbe3 100644 --- a/thirdparty/vhacd/src/btConvexHullComputer.cpp +++ b/thirdparty/vhacd/src/btConvexHullComputer.cpp @@ -3,8 +3,8 @@ Copyright (c) 2011 Ole Kniemeyer, MAXON, 
www.maxon.net This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. -Permission is granted to anyone to use this software for any purpose, -including commercial applications, and to alter it and redistribute it freely, +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. @@ -34,7 +34,7 @@ typedef unsigned long long int32_t uint64_t; #endif #ifdef _MSC_VER -#pragma warning(disable : 4458) +#pragma warning(disable:4458) #endif //The definition of USE_X86_64_ASM is moved into the build system. You can enable it manually by commenting out the following lines @@ -49,2287 +49,2431 @@ typedef unsigned long long int32_t uint64_t; #include #endif -//GODOT ADDITION -namespace VHACD { -// - // Convex hull implementation based on Preparata and Hong // Ole Kniemeyer, MAXON Computer GmbH class btConvexHullInternal { public: - class Point64 { - public: - int64_t x; - int64_t y; - int64_t z; + class Point64 { + public: + int64_t x; + int64_t y; + int64_t z; - Point64(int64_t x, int64_t y, int64_t z) : - x(x), - y(y), - z(z) { - } + Point64(int64_t x, int64_t y, int64_t z) + : x(x) + , y(y) + , z(z) + { + } - bool isZero() { - return (x == 0) && (y == 0) && (z == 0); - } + bool isZero() + { + return (x == 0) && (y == 0) && (z == 0); + } - int64_t dot(const Point64 &b) const { - return x * b.x + y * b.y + z * b.z; - } - }; + int64_t dot(const Point64& b) const + { + return x * b.x + y * b.y + z * b.z; + } + }; - class Point32 { - public: - int32_t x; - int32_t y; - int32_t z; - int32_t index; + class Point32 { + public: + int32_t x; + int32_t y; + int32_t z; + int32_t index; - Point32() { - } + Point32() + { + } - Point32(int32_t x, int32_t y, int32_t z) : - x(x), - y(y), - z(z), - index(-1) { - } + Point32(int32_t x, int32_t y, int32_t z) + : x(x) + , y(y) + , z(z) + , index(-1) + { + } - bool operator==(const Point32 &b) const { - return (x == b.x) && (y == b.y) && (z == b.z); - } + bool operator==(const Point32& b) const + { + return (x == b.x) && (y == b.y) && (z == b.z); + } - bool operator!=(const Point32 &b) const { - return (x != b.x) || (y != b.y) || (z != b.z); - } + bool operator!=(const Point32& b) const + { + return (x != b.x) || (y != b.y) || (z != b.z); + } - bool isZero() { - return (x == 0) && (y == 0) && (z == 0); - } + bool isZero() + { + return (x == 0) && (y == 0) && (z == 0); + } - Point64 cross(const Point32 &b) const { - return Point64(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); - } + Point64 cross(const Point32& b) const + { + return Point64(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); + } - Point64 cross(const Point64 &b) const { - return Point64(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); - } + Point64 cross(const Point64& b) const + { + return Point64(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); + } - int64_t dot(const Point32 &b) const { - return x * b.x + y * b.y + z * b.z; - } + int64_t dot(const Point32& b) const + { + return x * b.x + y * b.y + z * b.z; + } - int64_t dot(const Point64 &b) const { - return x * b.x + y * b.y + z * 
b.z; - } + int64_t dot(const Point64& b) const + { + return x * b.x + y * b.y + z * b.z; + } - Point32 operator+(const Point32 &b) const { - return Point32(x + b.x, y + b.y, z + b.z); - } + Point32 operator+(const Point32& b) const + { + return Point32(x + b.x, y + b.y, z + b.z); + } - Point32 operator-(const Point32 &b) const { - return Point32(x - b.x, y - b.y, z - b.z); - } - }; + Point32 operator-(const Point32& b) const + { + return Point32(x - b.x, y - b.y, z - b.z); + } + }; - class Int128 { - public: - uint64_t low; - uint64_t high; + class Int128 { + public: + uint64_t low; + uint64_t high; - Int128() { - } + Int128() + { + } - Int128(uint64_t low, uint64_t high) : - low(low), - high(high) { - } + Int128(uint64_t low, uint64_t high) + : low(low) + , high(high) + { + } - Int128(uint64_t low) : - low(low), - high(0) { - } + Int128(uint64_t low) + : low(low) + , high(0) + { + } - Int128(int64_t value) : - low(value), - high((value >= 0) ? 0 : (uint64_t)-1LL) { - } + Int128(int64_t value) + : low(value) + , high((value >= 0) ? 0 : (uint64_t)-1LL) + { + } - static Int128 mul(int64_t a, int64_t b); + static Int128 mul(int64_t a, int64_t b); - static Int128 mul(uint64_t a, uint64_t b); + static Int128 mul(uint64_t a, uint64_t b); - Int128 operator-() const { - return Int128((uint64_t) - (int64_t)low, ~high + (low == 0)); - } + Int128 operator-() const + { + return Int128((uint64_t) - (int64_t)low, ~high + (low == 0)); + } - Int128 operator+(const Int128 &b) const { + Int128 operator+(const Int128& b) const + { #ifdef USE_X86_64_ASM - Int128 result; - __asm__("addq %[bl], %[rl]\n\t" - "adcq %[bh], %[rh]\n\t" - : [rl] "=r"(result.low), [rh] "=r"(result.high) - : "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high) - : "cc"); - return result; + Int128 result; + __asm__("addq %[bl], %[rl]\n\t" + "adcq %[bh], %[rh]\n\t" + : [rl] "=r"(result.low), [rh] "=r"(result.high) + : "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high) + : "cc"); + return result; #else - uint64_t lo = low + b.low; - return Int128(lo, high + b.high + (lo < low)); + uint64_t lo = low + b.low; + return Int128(lo, high + b.high + (lo < low)); #endif - } + } - Int128 operator-(const Int128 &b) const { + Int128 operator-(const Int128& b) const + { #ifdef USE_X86_64_ASM - Int128 result; - __asm__("subq %[bl], %[rl]\n\t" - "sbbq %[bh], %[rh]\n\t" - : [rl] "=r"(result.low), [rh] "=r"(result.high) - : "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high) - : "cc"); - return result; + Int128 result; + __asm__("subq %[bl], %[rl]\n\t" + "sbbq %[bh], %[rh]\n\t" + : [rl] "=r"(result.low), [rh] "=r"(result.high) + : "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high) + : "cc"); + return result; #else - return *this + -b; + return *this + -b; #endif - } + } - Int128 &operator+=(const Int128 &b) { + Int128& operator+=(const Int128& b) + { #ifdef USE_X86_64_ASM - __asm__("addq %[bl], %[rl]\n\t" - "adcq %[bh], %[rh]\n\t" - : [rl] "=r"(low), [rh] "=r"(high) - : "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high) - : "cc"); + __asm__("addq %[bl], %[rl]\n\t" + "adcq %[bh], %[rh]\n\t" + : [rl] "=r"(low), [rh] "=r"(high) + : "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high) + : "cc"); #else - uint64_t lo = low + b.low; - if (lo < low) { - ++high; - } - low = lo; - high += b.high; + uint64_t lo = low + b.low; + if (lo < low) { + ++high; + } + low = lo; + high += b.high; #endif - return *this; - } + return *this; + } - Int128 &operator++() { - if (++low == 0) { - ++high; - } - return *this; - } + Int128& operator++() + { + if 
(++low == 0) { + ++high; + } + return *this; + } - Int128 operator*(int64_t b) const; + Int128 operator*(int64_t b) const; - btScalar toScalar() const { - return ((int64_t)high >= 0) ? btScalar(high) * (btScalar(0x100000000LL) * btScalar(0x100000000LL)) + btScalar(low) : -(-*this).toScalar(); - } + btScalar toScalar() const + { + return ((int64_t)high >= 0) ? btScalar(high) * (btScalar(0x100000000LL) * btScalar(0x100000000LL)) + btScalar(low) + : -(-*this).toScalar(); + } - int32_t getSign() const { - return ((int64_t)high < 0) ? -1 : (high || low) ? 1 : 0; - } + int32_t getSign() const + { + return ((int64_t)high < 0) ? -1 : (high || low) ? 1 : 0; + } - bool operator<(const Int128 &b) const { - return (high < b.high) || ((high == b.high) && (low < b.low)); - } + bool operator<(const Int128& b) const + { + return (high < b.high) || ((high == b.high) && (low < b.low)); + } - int32_t ucmp(const Int128 &b) const { - if (high < b.high) { - return -1; - } - if (high > b.high) { - return 1; - } - if (low < b.low) { - return -1; - } - if (low > b.low) { - return 1; - } - return 0; - } - }; + int32_t ucmp(const Int128& b) const + { + if (high < b.high) { + return -1; + } + if (high > b.high) { + return 1; + } + if (low < b.low) { + return -1; + } + if (low > b.low) { + return 1; + } + return 0; + } + }; - class Rational64 { - private: - uint64_t m_numerator; - uint64_t m_denominator; - int32_t sign; + class Rational64 { + private: + uint64_t m_numerator; + uint64_t m_denominator; + int32_t sign; - public: - Rational64(int64_t numerator, int64_t denominator) { - if (numerator > 0) { - sign = 1; - m_numerator = (uint64_t)numerator; - } else if (numerator < 0) { - sign = -1; - m_numerator = (uint64_t)-numerator; - } else { - sign = 0; - m_numerator = 0; - } - if (denominator > 0) { - m_denominator = (uint64_t)denominator; - } else if (denominator < 0) { - sign = -sign; - m_denominator = (uint64_t)-denominator; - } else { - m_denominator = 0; - } - } + public: + Rational64(int64_t numerator, int64_t denominator) + { + if (numerator > 0) { + sign = 1; + m_numerator = (uint64_t)numerator; + } + else if (numerator < 0) { + sign = -1; + m_numerator = (uint64_t)-numerator; + } + else { + sign = 0; + m_numerator = 0; + } + if (denominator > 0) { + m_denominator = (uint64_t)denominator; + } + else if (denominator < 0) { + sign = -sign; + m_denominator = (uint64_t)-denominator; + } + else { + m_denominator = 0; + } + } - bool isNegativeInfinity() const { - return (sign < 0) && (m_denominator == 0); - } + bool isNegativeInfinity() const + { + return (sign < 0) && (m_denominator == 0); + } - bool isNaN() const { - return (sign == 0) && (m_denominator == 0); - } + bool isNaN() const + { + return (sign == 0) && (m_denominator == 0); + } - int32_t compare(const Rational64 &b) const; + int32_t compare(const Rational64& b) const; - btScalar toScalar() const { - return sign * ((m_denominator == 0) ? SIMD_INFINITY : (btScalar)m_numerator / m_denominator); - } - }; + btScalar toScalar() const + { + return sign * ((m_denominator == 0) ? 
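// A zero denominator encodes infinity (or NaN when the sign is also zero, see
// isNegativeInfinity/isNaN above); otherwise the stored sign and magnitude are
// converted back to an ordinary floating point quotient.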
SIMD_INFINITY : (btScalar)m_numerator / m_denominator); + } + }; - class Rational128 { - private: - Int128 numerator; - Int128 denominator; - int32_t sign; - bool isInt64; + class Rational128 { + private: + Int128 numerator; + Int128 denominator; + int32_t sign; + bool isInt64; - public: - Rational128(int64_t value) { - if (value > 0) { - sign = 1; - this->numerator = value; - } else if (value < 0) { - sign = -1; - this->numerator = -value; - } else { - sign = 0; - this->numerator = (uint64_t)0; - } - this->denominator = (uint64_t)1; - isInt64 = true; - } + public: + Rational128(int64_t value) + { + if (value > 0) { + sign = 1; + this->numerator = value; + } + else if (value < 0) { + sign = -1; + this->numerator = -value; + } + else { + sign = 0; + this->numerator = (uint64_t)0; + } + this->denominator = (uint64_t)1; + isInt64 = true; + } - Rational128(const Int128 &numerator, const Int128 &denominator) { - sign = numerator.getSign(); - if (sign >= 0) { - this->numerator = numerator; - } else { - this->numerator = -numerator; - } - int32_t dsign = denominator.getSign(); - if (dsign >= 0) { - this->denominator = denominator; - } else { - sign = -sign; - this->denominator = -denominator; - } - isInt64 = false; - } + Rational128(const Int128& numerator, const Int128& denominator) + { + sign = numerator.getSign(); + if (sign >= 0) { + this->numerator = numerator; + } + else { + this->numerator = -numerator; + } + int32_t dsign = denominator.getSign(); + if (dsign >= 0) { + this->denominator = denominator; + } + else { + sign = -sign; + this->denominator = -denominator; + } + isInt64 = false; + } - int32_t compare(const Rational128 &b) const; + int32_t compare(const Rational128& b) const; - int32_t compare(int64_t b) const; + int32_t compare(int64_t b) const; - btScalar toScalar() const { - return sign * ((denominator.getSign() == 0) ? SIMD_INFINITY : numerator.toScalar() / denominator.toScalar()); - } - }; + btScalar toScalar() const + { + return sign * ((denominator.getSign() == 0) ? 
SIMD_INFINITY : numerator.toScalar() / denominator.toScalar()); + } + }; - class PointR128 { - public: - Int128 x; - Int128 y; - Int128 z; - Int128 denominator; + class PointR128 { + public: + Int128 x; + Int128 y; + Int128 z; + Int128 denominator; - PointR128() { - } + PointR128() + { + } - PointR128(Int128 x, Int128 y, Int128 z, Int128 denominator) : - x(x), - y(y), - z(z), - denominator(denominator) { - } + PointR128(Int128 x, Int128 y, Int128 z, Int128 denominator) + : x(x) + , y(y) + , z(z) + , denominator(denominator) + { + } - btScalar xvalue() const { - return x.toScalar() / denominator.toScalar(); - } + btScalar xvalue() const + { + return x.toScalar() / denominator.toScalar(); + } - btScalar yvalue() const { - return y.toScalar() / denominator.toScalar(); - } + btScalar yvalue() const + { + return y.toScalar() / denominator.toScalar(); + } - btScalar zvalue() const { - return z.toScalar() / denominator.toScalar(); - } - }; + btScalar zvalue() const + { + return z.toScalar() / denominator.toScalar(); + } + }; - class Edge; - class Face; + class Edge; + class Face; - class Vertex { - public: - Vertex *next; - Vertex *prev; - Edge *edges; - Face *firstNearbyFace; - Face *lastNearbyFace; - PointR128 point128; - Point32 point; - int32_t copy; + class Vertex { + public: + Vertex* next; + Vertex* prev; + Edge* edges; + Face* firstNearbyFace; + Face* lastNearbyFace; + PointR128 point128; + Point32 point; + int32_t copy; - Vertex() : - next(NULL), - prev(NULL), - edges(NULL), - firstNearbyFace(NULL), - lastNearbyFace(NULL), - copy(-1) { - } + Vertex() + : next(NULL) + , prev(NULL) + , edges(NULL) + , firstNearbyFace(NULL) + , lastNearbyFace(NULL) + , copy(-1) + { + } #ifdef DEBUG_CONVEX_HULL - void print() { - printf("V%d (%d, %d, %d)", point.index, point.x, point.y, point.z); - } + void print() + { + printf("V%d (%d, %d, %d)", point.index, point.x, point.y, point.z); + } - void printGraph(); + void printGraph(); #endif - Point32 operator-(const Vertex &b) const { - return point - b.point; - } + Point32 operator-(const Vertex& b) const + { + return point - b.point; + } - Rational128 dot(const Point64 &b) const { - return (point.index >= 0) ? Rational128(point.dot(b)) : Rational128(point128.x * b.x + point128.y * b.y + point128.z * b.z, point128.denominator); - } + Rational128 dot(const Point64& b) const + { + return (point.index >= 0) ? Rational128(point.dot(b)) + : Rational128(point128.x * b.x + point128.y * b.y + point128.z * b.z, point128.denominator); + } - btScalar xvalue() const { - return (point.index >= 0) ? btScalar(point.x) : point128.xvalue(); - } + btScalar xvalue() const + { + return (point.index >= 0) ? btScalar(point.x) : point128.xvalue(); + } - btScalar yvalue() const { - return (point.index >= 0) ? btScalar(point.y) : point128.yvalue(); - } + btScalar yvalue() const + { + return (point.index >= 0) ? btScalar(point.y) : point128.yvalue(); + } - btScalar zvalue() const { - return (point.index >= 0) ? btScalar(point.z) : point128.zvalue(); - } + btScalar zvalue() const + { + return (point.index >= 0) ? 
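// point.index >= 0 appears to mark vertices taken directly from the input set,
// whose 32-bit integer coordinates are exact; vertices synthesized during hull
// construction fall back to the 128-bit rational point128 representation.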
btScalar(point.z) : point128.zvalue(); + } - void receiveNearbyFaces(Vertex *src) { - if (lastNearbyFace) { - lastNearbyFace->nextWithSameNearbyVertex = src->firstNearbyFace; - } else { - firstNearbyFace = src->firstNearbyFace; - } - if (src->lastNearbyFace) { - lastNearbyFace = src->lastNearbyFace; - } - for (Face *f = src->firstNearbyFace; f; f = f->nextWithSameNearbyVertex) { - btAssert(f->nearbyVertex == src); - f->nearbyVertex = this; - } - src->firstNearbyFace = NULL; - src->lastNearbyFace = NULL; - } - }; + void receiveNearbyFaces(Vertex* src) + { + if (lastNearbyFace) { + lastNearbyFace->nextWithSameNearbyVertex = src->firstNearbyFace; + } + else { + firstNearbyFace = src->firstNearbyFace; + } + if (src->lastNearbyFace) { + lastNearbyFace = src->lastNearbyFace; + } + for (Face* f = src->firstNearbyFace; f; f = f->nextWithSameNearbyVertex) { + btAssert(f->nearbyVertex == src); + f->nearbyVertex = this; + } + src->firstNearbyFace = NULL; + src->lastNearbyFace = NULL; + } + }; - class Edge { - public: - Edge *next; - Edge *prev; - Edge *reverse; - Vertex *target; - Face *face; - int32_t copy; + class Edge { + public: + Edge* next; + Edge* prev; + Edge* reverse; + Vertex* target; + Face* face; + int32_t copy; - ~Edge() { - next = NULL; - prev = NULL; - reverse = NULL; - target = NULL; - face = NULL; - } + ~Edge() + { + next = NULL; + prev = NULL; + reverse = NULL; + target = NULL; + face = NULL; + } - void link(Edge *n) { - btAssert(reverse->target == n->reverse->target); - next = n; - n->prev = this; - } + void link(Edge* n) + { + btAssert(reverse->target == n->reverse->target); + next = n; + n->prev = this; + } #ifdef DEBUG_CONVEX_HULL - void print() { - printf("E%p : %d -> %d, n=%p p=%p (0 %d\t%d\t%d) -> (%d %d %d)", this, reverse->target->point.index, target->point.index, next, prev, - reverse->target->point.x, reverse->target->point.y, reverse->target->point.z, target->point.x, target->point.y, target->point.z); - } + void print() + { + printf("E%p : %d -> %d, n=%p p=%p (0 %d\t%d\t%d) -> (%d %d %d)", this, reverse->target->point.index, target->point.index, next, prev, + reverse->target->point.x, reverse->target->point.y, reverse->target->point.z, target->point.x, target->point.y, target->point.z); + } #endif - }; + }; - class Face { - public: - Face *next; - Vertex *nearbyVertex; - Face *nextWithSameNearbyVertex; - Point32 origin; - Point32 dir0; - Point32 dir1; + class Face { + public: + Face* next; + Vertex* nearbyVertex; + Face* nextWithSameNearbyVertex; + Point32 origin; + Point32 dir0; + Point32 dir1; - Face() : - next(NULL), - nearbyVertex(NULL), - nextWithSameNearbyVertex(NULL) { - } + Face() + : next(NULL) + , nearbyVertex(NULL) + , nextWithSameNearbyVertex(NULL) + { + } - void init(Vertex *a, Vertex *b, Vertex *c) { - nearbyVertex = a; - origin = a->point; - dir0 = *b - *a; - dir1 = *c - *a; - if (a->lastNearbyFace) { - a->lastNearbyFace->nextWithSameNearbyVertex = this; - } else { - a->firstNearbyFace = this; - } - a->lastNearbyFace = this; - } + void init(Vertex* a, Vertex* b, Vertex* c) + { + nearbyVertex = a; + origin = a->point; + dir0 = *b - *a; + dir1 = *c - *a; + if (a->lastNearbyFace) { + a->lastNearbyFace->nextWithSameNearbyVertex = this; + } + else { + a->firstNearbyFace = this; + } + a->lastNearbyFace = this; + } - Point64 getNormal() { - return dir0.cross(dir1); - } - }; + Point64 getNormal() + { + return dir0.cross(dir1); + } + }; - template - class DMul { - private: - static uint32_t high(uint64_t value) { - return (uint32_t)(value >> 32); - } + template + 
class DMul { + private: + static uint32_t high(uint64_t value) + { + return (uint32_t)(value >> 32); + } - static uint32_t low(uint64_t value) { - return (uint32_t)value; - } + static uint32_t low(uint64_t value) + { + return (uint32_t)value; + } - static uint64_t mul(uint32_t a, uint32_t b) { - return (uint64_t)a * (uint64_t)b; - } + static uint64_t mul(uint32_t a, uint32_t b) + { + return (uint64_t)a * (uint64_t)b; + } - static void shlHalf(uint64_t &value) { - value <<= 32; - } + static void shlHalf(uint64_t& value) + { + value <<= 32; + } - static uint64_t high(Int128 value) { - return value.high; - } + static uint64_t high(Int128 value) + { + return value.high; + } - static uint64_t low(Int128 value) { - return value.low; - } + static uint64_t low(Int128 value) + { + return value.low; + } - static Int128 mul(uint64_t a, uint64_t b) { - return Int128::mul(a, b); - } + static Int128 mul(uint64_t a, uint64_t b) + { + return Int128::mul(a, b); + } - static void shlHalf(Int128 &value) { - value.high = value.low; - value.low = 0; - } + static void shlHalf(Int128& value) + { + value.high = value.low; + value.low = 0; + } - public: - static void mul(UWord a, UWord b, UWord &resLow, UWord &resHigh) { - UWord p00 = mul(low(a), low(b)); - UWord p01 = mul(low(a), high(b)); - UWord p10 = mul(high(a), low(b)); - UWord p11 = mul(high(a), high(b)); - UWord p0110 = UWord(low(p01)) + UWord(low(p10)); - p11 += high(p01); - p11 += high(p10); - p11 += high(p0110); - shlHalf(p0110); - p00 += p0110; - if (p00 < p0110) { - ++p11; - } - resLow = p00; - resHigh = p11; - } - }; + public: + static void mul(UWord a, UWord b, UWord& resLow, UWord& resHigh) + { + UWord p00 = mul(low(a), low(b)); + UWord p01 = mul(low(a), high(b)); + UWord p10 = mul(high(a), low(b)); + UWord p11 = mul(high(a), high(b)); + UWord p0110 = UWord(low(p01)) + UWord(low(p10)); + p11 += high(p01); + p11 += high(p10); + p11 += high(p0110); + shlHalf(p0110); + p00 += p0110; + if (p00 < p0110) { + ++p11; + } + resLow = p00; + resHigh = p11; + } + }; private: - class IntermediateHull { - public: - Vertex *minXy; - Vertex *maxXy; - Vertex *minYx; - Vertex *maxYx; + class IntermediateHull { + public: + Vertex* minXy; + Vertex* maxXy; + Vertex* minYx; + Vertex* maxYx; - IntermediateHull() : - minXy(NULL), - maxXy(NULL), - minYx(NULL), - maxYx(NULL) { - } + IntermediateHull() + : minXy(NULL) + , maxXy(NULL) + , minYx(NULL) + , maxYx(NULL) + { + } - void print(); - }; + void print(); + }; - enum Orientation { NONE, - CLOCKWISE, - COUNTER_CLOCKWISE }; + enum Orientation { NONE, + CLOCKWISE, + COUNTER_CLOCKWISE }; - template - class PoolArray { - private: - T *array; - int32_t size; + template + class PoolArray { + private: + T* array; + int32_t size; - public: - PoolArray *next; + public: + PoolArray* next; - PoolArray(int32_t size) : - size(size), - next(NULL) { - array = (T *)btAlignedAlloc(sizeof(T) * size, 16); - } + PoolArray(int32_t size) + : size(size) + , next(NULL) + { + array = (T*)btAlignedAlloc(sizeof(T) * size, 16); + } - ~PoolArray() { - btAlignedFree(array); - } + ~PoolArray() + { + btAlignedFree(array); + } - T *init() { - T *o = array; - for (int32_t i = 0; i < size; i++, o++) { - o->next = (i + 1 < size) ? o + 1 : NULL; - } - return array; - } - }; + T* init() + { + T* o = array; + for (int32_t i = 0; i < size; i++, o++) { + o->next = (i + 1 < size) ? 
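// init() threads every slot of the freshly allocated array into a singly linked
// free list (each object's next points at the following slot, the last one at
// NULL), which is what Pool::newObject() pops from below.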
o + 1 : NULL; + } + return array; + } + }; - template - class Pool { - private: - PoolArray *arrays; - PoolArray *nextArray; - T *freeObjects; - int32_t arraySize; + template + class Pool { + private: + PoolArray* arrays; + PoolArray* nextArray; + T* freeObjects; + int32_t arraySize; - public: - Pool() : - arrays(NULL), - nextArray(NULL), - freeObjects(NULL), - arraySize(256) { - } + public: + Pool() + : arrays(NULL) + , nextArray(NULL) + , freeObjects(NULL) + , arraySize(256) + { + } - ~Pool() { - while (arrays) { - PoolArray *p = arrays; - arrays = p->next; - p->~PoolArray(); - btAlignedFree(p); - } - } + ~Pool() + { + while (arrays) { + PoolArray* p = arrays; + arrays = p->next; + p->~PoolArray(); + btAlignedFree(p); + } + } - void reset() { - nextArray = arrays; - freeObjects = NULL; - } + void reset() + { + nextArray = arrays; + freeObjects = NULL; + } - void setArraySize(int32_t arraySize) { - this->arraySize = arraySize; - } + void setArraySize(int32_t arraySize) + { + this->arraySize = arraySize; + } - T *newObject() { - T *o = freeObjects; - if (!o) { - PoolArray *p = nextArray; - if (p) { - nextArray = p->next; - } else { - p = new (btAlignedAlloc(sizeof(PoolArray), 16)) PoolArray(arraySize); - p->next = arrays; - arrays = p; - } - o = p->init(); - } - freeObjects = o->next; - return new (o) T(); - }; + T* newObject() + { + T* o = freeObjects; + if (!o) { + PoolArray* p = nextArray; + if (p) { + nextArray = p->next; + } + else { + p = new (btAlignedAlloc(sizeof(PoolArray), 16)) PoolArray(arraySize); + p->next = arrays; + arrays = p; + } + o = p->init(); + } + freeObjects = o->next; + return new (o) T(); + }; - void freeObject(T *object) { - object->~T(); - object->next = freeObjects; - freeObjects = object; - } - }; + void freeObject(T* object) + { + object->~T(); + object->next = freeObjects; + freeObjects = object; + } + }; - btVector3 scaling; - btVector3 center; - Pool vertexPool; - Pool edgePool; - Pool facePool; - btAlignedObjectArray originalVertices; - int32_t mergeStamp; - int32_t minAxis; - int32_t medAxis; - int32_t maxAxis; - int32_t usedEdgePairs; - int32_t maxUsedEdgePairs; + btVector3 scaling; + btVector3 center; + Pool vertexPool; + Pool edgePool; + Pool facePool; + btAlignedObjectArray originalVertices; + int32_t mergeStamp; + int32_t minAxis; + int32_t medAxis; + int32_t maxAxis; + int32_t usedEdgePairs; + int32_t maxUsedEdgePairs; - static Orientation getOrientation(const Edge *prev, const Edge *next, const Point32 &s, const Point32 &t); - Edge *findMaxAngle(bool ccw, const Vertex *start, const Point32 &s, const Point64 &rxs, const Point64 &sxrxs, Rational64 &minCot); - void findEdgeForCoplanarFaces(Vertex *c0, Vertex *c1, Edge *&e0, Edge *&e1, Vertex *stop0, Vertex *stop1); + static Orientation getOrientation(const Edge* prev, const Edge* next, const Point32& s, const Point32& t); + Edge* findMaxAngle(bool ccw, const Vertex* start, const Point32& s, const Point64& rxs, const Point64& sxrxs, Rational64& minCot); + void findEdgeForCoplanarFaces(Vertex* c0, Vertex* c1, Edge*& e0, Edge*& e1, Vertex* stop0, Vertex* stop1); - Edge *newEdgePair(Vertex *from, Vertex *to); + Edge* newEdgePair(Vertex* from, Vertex* to); - void removeEdgePair(Edge *edge) { - Edge *n = edge->next; - Edge *r = edge->reverse; + void removeEdgePair(Edge* edge) + { + Edge* n = edge->next; + Edge* r = edge->reverse; - btAssert(edge->target && r->target); + btAssert(edge->target && r->target); - if (n != edge) { - n->prev = edge->prev; - edge->prev->next = n; - r->target->edges = n; - } else { 
- r->target->edges = NULL; - } + if (n != edge) { + n->prev = edge->prev; + edge->prev->next = n; + r->target->edges = n; + } + else { + r->target->edges = NULL; + } - n = r->next; + n = r->next; - if (n != r) { - n->prev = r->prev; - r->prev->next = n; - edge->target->edges = n; - } else { - edge->target->edges = NULL; - } + if (n != r) { + n->prev = r->prev; + r->prev->next = n; + edge->target->edges = n; + } + else { + edge->target->edges = NULL; + } - edgePool.freeObject(edge); - edgePool.freeObject(r); - usedEdgePairs--; - } + edgePool.freeObject(edge); + edgePool.freeObject(r); + usedEdgePairs--; + } - void computeInternal(int32_t start, int32_t end, IntermediateHull &result); + void computeInternal(int32_t start, int32_t end, IntermediateHull& result); - bool mergeProjection(IntermediateHull &h0, IntermediateHull &h1, Vertex *&c0, Vertex *&c1); + bool mergeProjection(IntermediateHull& h0, IntermediateHull& h1, Vertex*& c0, Vertex*& c1); - void merge(IntermediateHull &h0, IntermediateHull &h1); + void merge(IntermediateHull& h0, IntermediateHull& h1); - btVector3 toBtVector(const Point32 &v); + btVector3 toBtVector(const Point32& v); - btVector3 getBtNormal(Face *face); + btVector3 getBtNormal(Face* face); - bool shiftFace(Face *face, btScalar amount, btAlignedObjectArray stack); + bool shiftFace(Face* face, btScalar amount, btAlignedObjectArray stack); public: - Vertex *vertexList; + Vertex* vertexList; - void compute(const void *coords, bool doubleCoords, int32_t stride, int32_t count); + void compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count); - btVector3 getCoordinates(const Vertex *v); + btVector3 getCoordinates(const Vertex* v); - btScalar shrink(btScalar amount, btScalar clampAmount); + btScalar shrink(btScalar amount, btScalar clampAmount); }; -btConvexHullInternal::Int128 btConvexHullInternal::Int128::operator*(int64_t b) const { - bool negative = (int64_t)high < 0; - Int128 a = negative ? -*this : *this; - if (b < 0) { - negative = !negative; - b = -b; - } - Int128 result = mul(a.low, (uint64_t)b); - result.high += a.high * (uint64_t)b; - return negative ? -result : result; +btConvexHullInternal::Int128 btConvexHullInternal::Int128::operator*(int64_t b) const +{ + bool negative = (int64_t)high < 0; + Int128 a = negative ? -*this : *this; + if (b < 0) { + negative = !negative; + b = -b; + } + Int128 result = mul(a.low, (uint64_t)b); + result.high += a.high * (uint64_t)b; + return negative ? -result : result; } -btConvexHullInternal::Int128 btConvexHullInternal::Int128::mul(int64_t a, int64_t b) { - Int128 result; +btConvexHullInternal::Int128 btConvexHullInternal::Int128::mul(int64_t a, int64_t b) +{ + Int128 result; #ifdef USE_X86_64_ASM - __asm__("imulq %[b]" - : "=a"(result.low), "=d"(result.high) - : "0"(a), [b] "r"(b) - : "cc"); - return result; + __asm__("imulq %[b]" + : "=a"(result.low), "=d"(result.high) + : "0"(a), [b] "r"(b) + : "cc"); + return result; #else - bool negative = a < 0; - if (negative) { - a = -a; - } - if (b < 0) { - negative = !negative; - b = -b; - } - DMul::mul((uint64_t)a, (uint64_t)b, result.low, result.high); - return negative ? -result : result; + bool negative = a < 0; + if (negative) { + a = -a; + } + if (b < 0) { + negative = !negative; + b = -b; + } + DMul::mul((uint64_t)a, (uint64_t)b, result.low, result.high); + return negative ? 
-result : result; #endif } -btConvexHullInternal::Int128 btConvexHullInternal::Int128::mul(uint64_t a, uint64_t b) { - Int128 result; +btConvexHullInternal::Int128 btConvexHullInternal::Int128::mul(uint64_t a, uint64_t b) +{ + Int128 result; #ifdef USE_X86_64_ASM - __asm__("mulq %[b]" - : "=a"(result.low), "=d"(result.high) - : "0"(a), [b] "r"(b) - : "cc"); + __asm__("mulq %[b]" + : "=a"(result.low), "=d"(result.high) + : "0"(a), [b] "r"(b) + : "cc"); #else - DMul::mul(a, b, result.low, result.high); + DMul::mul(a, b, result.low, result.high); #endif - return result; + return result; } -int32_t btConvexHullInternal::Rational64::compare(const Rational64 &b) const { - if (sign != b.sign) { - return sign - b.sign; - } else if (sign == 0) { - return 0; - } +int32_t btConvexHullInternal::Rational64::compare(const Rational64& b) const +{ + if (sign != b.sign) { + return sign - b.sign; + } + else if (sign == 0) { + return 0; + } - // return (numerator * b.denominator > b.numerator * denominator) ? sign : (numerator * b.denominator < b.numerator * denominator) ? -sign : 0; +// return (numerator * b.denominator > b.numerator * denominator) ? sign : (numerator * b.denominator < b.numerator * denominator) ? -sign : 0; #ifdef USE_X86_64_ASM - int32_t result; - int64_t tmp; - int64_t dummy; - __asm__("mulq %[bn]\n\t" - "movq %%rax, %[tmp]\n\t" - "movq %%rdx, %%rbx\n\t" - "movq %[tn], %%rax\n\t" - "mulq %[bd]\n\t" - "subq %[tmp], %%rax\n\t" - "sbbq %%rbx, %%rdx\n\t" // rdx:rax contains 128-bit-difference "numerator*b.denominator - b.numerator*denominator" - "setnsb %%bh\n\t" // bh=1 if difference is non-negative, bh=0 otherwise - "orq %%rdx, %%rax\n\t" - "setnzb %%bl\n\t" // bl=1 if difference if non-zero, bl=0 if it is zero - "decb %%bh\n\t" // now bx=0x0000 if difference is zero, 0xff01 if it is negative, 0x0001 if it is positive (i.e., same sign as difference) - "shll $16, %%ebx\n\t" // ebx has same sign as difference - : "=&b"(result), [tmp] "=&r"(tmp), "=a"(dummy) - : "a"(denominator), [bn] "g"(b.numerator), [tn] "g"(numerator), [bd] "g"(b.denominator) - : "%rdx", "cc"); - return result ? result ^ sign // if sign is +1, only bit 0 of result is inverted, which does not change the sign of result (and cannot result in zero) - // if sign is -1, all bits of result are inverted, which changes the sign of result (and again cannot result in zero) - : - 0; + int32_t result; + int64_t tmp; + int64_t dummy; + __asm__("mulq %[bn]\n\t" + "movq %%rax, %[tmp]\n\t" + "movq %%rdx, %%rbx\n\t" + "movq %[tn], %%rax\n\t" + "mulq %[bd]\n\t" + "subq %[tmp], %%rax\n\t" + "sbbq %%rbx, %%rdx\n\t" // rdx:rax contains 128-bit-difference "numerator*b.denominator - b.numerator*denominator" + "setnsb %%bh\n\t" // bh=1 if difference is non-negative, bh=0 otherwise + "orq %%rdx, %%rax\n\t" + "setnzb %%bl\n\t" // bl=1 if difference if non-zero, bl=0 if it is zero + "decb %%bh\n\t" // now bx=0x0000 if difference is zero, 0xff01 if it is negative, 0x0001 if it is positive (i.e., same sign as difference) + "shll $16, %%ebx\n\t" // ebx has same sign as difference + : "=&b"(result), [tmp] "=&r"(tmp), "=a"(dummy) + : "a"(denominator), [bn] "g"(b.numerator), [tn] "g"(numerator), [bd] "g"(b.denominator) + : "%rdx", "cc"); + return result ? 
result ^ sign // if sign is +1, only bit 0 of result is inverted, which does not change the sign of result (and cannot result in zero) + // if sign is -1, all bits of result are inverted, which changes the sign of result (and again cannot result in zero) + : 0; #else - return sign * Int128::mul(m_numerator, b.m_denominator).ucmp(Int128::mul(m_denominator, b.m_numerator)); + return sign * Int128::mul(m_numerator, b.m_denominator).ucmp(Int128::mul(m_denominator, b.m_numerator)); #endif } -int32_t btConvexHullInternal::Rational128::compare(const Rational128 &b) const { - if (sign != b.sign) { - return sign - b.sign; - } else if (sign == 0) { - return 0; - } - if (isInt64) { - return -b.compare(sign * (int64_t)numerator.low); - } +int32_t btConvexHullInternal::Rational128::compare(const Rational128& b) const +{ + if (sign != b.sign) { + return sign - b.sign; + } + else if (sign == 0) { + return 0; + } + if (isInt64) { + return -b.compare(sign * (int64_t)numerator.low); + } - Int128 nbdLow, nbdHigh, dbnLow, dbnHigh; - DMul::mul(numerator, b.denominator, nbdLow, nbdHigh); - DMul::mul(denominator, b.numerator, dbnLow, dbnHigh); + Int128 nbdLow, nbdHigh, dbnLow, dbnHigh; + DMul::mul(numerator, b.denominator, nbdLow, nbdHigh); + DMul::mul(denominator, b.numerator, dbnLow, dbnHigh); - int32_t cmp = nbdHigh.ucmp(dbnHigh); - if (cmp) { - return cmp * sign; - } - return nbdLow.ucmp(dbnLow) * sign; + int32_t cmp = nbdHigh.ucmp(dbnHigh); + if (cmp) { + return cmp * sign; + } + return nbdLow.ucmp(dbnLow) * sign; } -int32_t btConvexHullInternal::Rational128::compare(int64_t b) const { - if (isInt64) { - int64_t a = sign * (int64_t)numerator.low; - return (a > b) ? 1 : (a < b) ? -1 : 0; - } - if (b > 0) { - if (sign <= 0) { - return -1; - } - } else if (b < 0) { - if (sign >= 0) { - return 1; - } - b = -b; - } else { - return sign; - } +int32_t btConvexHullInternal::Rational128::compare(int64_t b) const +{ + if (isInt64) { + int64_t a = sign * (int64_t)numerator.low; + return (a > b) ? 1 : (a < b) ? 
-1 : 0; + } + if (b > 0) { + if (sign <= 0) { + return -1; + } + } + else if (b < 0) { + if (sign >= 0) { + return 1; + } + b = -b; + } + else { + return sign; + } - return numerator.ucmp(denominator * b) * sign; + return numerator.ucmp(denominator * b) * sign; } -btConvexHullInternal::Edge *btConvexHullInternal::newEdgePair(Vertex *from, Vertex *to) { - btAssert(from && to); - Edge *e = edgePool.newObject(); - Edge *r = edgePool.newObject(); - e->reverse = r; - r->reverse = e; - e->copy = mergeStamp; - r->copy = mergeStamp; - e->target = to; - r->target = from; - e->face = NULL; - r->face = NULL; - usedEdgePairs++; - if (usedEdgePairs > maxUsedEdgePairs) { - maxUsedEdgePairs = usedEdgePairs; - } - return e; +btConvexHullInternal::Edge* btConvexHullInternal::newEdgePair(Vertex* from, Vertex* to) +{ + btAssert(from && to); + Edge* e = edgePool.newObject(); + Edge* r = edgePool.newObject(); + e->reverse = r; + r->reverse = e; + e->copy = mergeStamp; + r->copy = mergeStamp; + e->target = to; + r->target = from; + e->face = NULL; + r->face = NULL; + usedEdgePairs++; + if (usedEdgePairs > maxUsedEdgePairs) { + maxUsedEdgePairs = usedEdgePairs; + } + return e; } -bool btConvexHullInternal::mergeProjection(IntermediateHull &h0, IntermediateHull &h1, Vertex *&c0, Vertex *&c1) { - Vertex *v0 = h0.maxYx; - Vertex *v1 = h1.minYx; - if ((v0->point.x == v1->point.x) && (v0->point.y == v1->point.y)) { - btAssert(v0->point.z < v1->point.z); - Vertex *v1p = v1->prev; - if (v1p == v1) { - c0 = v0; - if (v1->edges) { - btAssert(v1->edges->next == v1->edges); - v1 = v1->edges->target; - btAssert(v1->edges->next == v1->edges); - } - c1 = v1; - return false; - } - Vertex *v1n = v1->next; - v1p->next = v1n; - v1n->prev = v1p; - if (v1 == h1.minXy) { - if ((v1n->point.x < v1p->point.x) || ((v1n->point.x == v1p->point.x) && (v1n->point.y < v1p->point.y))) { - h1.minXy = v1n; - } else { - h1.minXy = v1p; - } - } - if (v1 == h1.maxXy) { - if ((v1n->point.x > v1p->point.x) || ((v1n->point.x == v1p->point.x) && (v1n->point.y > v1p->point.y))) { - h1.maxXy = v1n; - } else { - h1.maxXy = v1p; - } - } - } +bool btConvexHullInternal::mergeProjection(IntermediateHull& h0, IntermediateHull& h1, Vertex*& c0, Vertex*& c1) +{ + Vertex* v0 = h0.maxYx; + Vertex* v1 = h1.minYx; + if ((v0->point.x == v1->point.x) && (v0->point.y == v1->point.y)) { + btAssert(v0->point.z < v1->point.z); + Vertex* v1p = v1->prev; + if (v1p == v1) { + c0 = v0; + if (v1->edges) { + btAssert(v1->edges->next == v1->edges); + v1 = v1->edges->target; + btAssert(v1->edges->next == v1->edges); + } + c1 = v1; + return false; + } + Vertex* v1n = v1->next; + v1p->next = v1n; + v1n->prev = v1p; + if (v1 == h1.minXy) { + if ((v1n->point.x < v1p->point.x) || ((v1n->point.x == v1p->point.x) && (v1n->point.y < v1p->point.y))) { + h1.minXy = v1n; + } + else { + h1.minXy = v1p; + } + } + if (v1 == h1.maxXy) { + if ((v1n->point.x > v1p->point.x) || ((v1n->point.x == v1p->point.x) && (v1n->point.y > v1p->point.y))) { + h1.maxXy = v1n; + } + else { + h1.maxXy = v1p; + } + } + } - v0 = h0.maxXy; - v1 = h1.maxXy; - Vertex *v00 = NULL; - Vertex *v10 = NULL; - int32_t sign = 1; + v0 = h0.maxXy; + v1 = h1.maxXy; + Vertex* v00 = NULL; + Vertex* v10 = NULL; + int32_t sign = 1; - for (int32_t side = 0; side <= 1; side++) { - int32_t dx = (v1->point.x - v0->point.x) * sign; - if (dx > 0) { - while (true) { - int32_t dy = v1->point.y - v0->point.y; + for (int32_t side = 0; side <= 1; side++) { + int32_t dx = (v1->point.x - v0->point.x) * sign; + if (dx > 0) { + while (true) { + 
int32_t dy = v1->point.y - v0->point.y; - Vertex *w0 = side ? v0->next : v0->prev; - if (w0 != v0) { - int32_t dx0 = (w0->point.x - v0->point.x) * sign; - int32_t dy0 = w0->point.y - v0->point.y; - if ((dy0 <= 0) && ((dx0 == 0) || ((dx0 < 0) && (dy0 * dx <= dy * dx0)))) { - v0 = w0; - dx = (v1->point.x - v0->point.x) * sign; - continue; - } - } + Vertex* w0 = side ? v0->next : v0->prev; + if (w0 != v0) { + int32_t dx0 = (w0->point.x - v0->point.x) * sign; + int32_t dy0 = w0->point.y - v0->point.y; + if ((dy0 <= 0) && ((dx0 == 0) || ((dx0 < 0) && (dy0 * dx <= dy * dx0)))) { + v0 = w0; + dx = (v1->point.x - v0->point.x) * sign; + continue; + } + } - Vertex *w1 = side ? v1->next : v1->prev; - if (w1 != v1) { - int32_t dx1 = (w1->point.x - v1->point.x) * sign; - int32_t dy1 = w1->point.y - v1->point.y; - int32_t dxn = (w1->point.x - v0->point.x) * sign; - if ((dxn > 0) && (dy1 < 0) && ((dx1 == 0) || ((dx1 < 0) && (dy1 * dx < dy * dx1)))) { - v1 = w1; - dx = dxn; - continue; - } - } + Vertex* w1 = side ? v1->next : v1->prev; + if (w1 != v1) { + int32_t dx1 = (w1->point.x - v1->point.x) * sign; + int32_t dy1 = w1->point.y - v1->point.y; + int32_t dxn = (w1->point.x - v0->point.x) * sign; + if ((dxn > 0) && (dy1 < 0) && ((dx1 == 0) || ((dx1 < 0) && (dy1 * dx < dy * dx1)))) { + v1 = w1; + dx = dxn; + continue; + } + } - break; - } - } else if (dx < 0) { - while (true) { - int32_t dy = v1->point.y - v0->point.y; + break; + } + } + else if (dx < 0) { + while (true) { + int32_t dy = v1->point.y - v0->point.y; - Vertex *w1 = side ? v1->prev : v1->next; - if (w1 != v1) { - int32_t dx1 = (w1->point.x - v1->point.x) * sign; - int32_t dy1 = w1->point.y - v1->point.y; - if ((dy1 >= 0) && ((dx1 == 0) || ((dx1 < 0) && (dy1 * dx <= dy * dx1)))) { - v1 = w1; - dx = (v1->point.x - v0->point.x) * sign; - continue; - } - } + Vertex* w1 = side ? v1->prev : v1->next; + if (w1 != v1) { + int32_t dx1 = (w1->point.x - v1->point.x) * sign; + int32_t dy1 = w1->point.y - v1->point.y; + if ((dy1 >= 0) && ((dx1 == 0) || ((dx1 < 0) && (dy1 * dx <= dy * dx1)))) { + v1 = w1; + dx = (v1->point.x - v0->point.x) * sign; + continue; + } + } - Vertex *w0 = side ? v0->prev : v0->next; - if (w0 != v0) { - int32_t dx0 = (w0->point.x - v0->point.x) * sign; - int32_t dy0 = w0->point.y - v0->point.y; - int32_t dxn = (v1->point.x - w0->point.x) * sign; - if ((dxn < 0) && (dy0 > 0) && ((dx0 == 0) || ((dx0 < 0) && (dy0 * dx < dy * dx0)))) { - v0 = w0; - dx = dxn; - continue; - } - } + Vertex* w0 = side ? v0->prev : v0->next; + if (w0 != v0) { + int32_t dx0 = (w0->point.x - v0->point.x) * sign; + int32_t dy0 = w0->point.y - v0->point.y; + int32_t dxn = (v1->point.x - w0->point.x) * sign; + if ((dxn < 0) && (dy0 > 0) && ((dx0 == 0) || ((dx0 < 0) && (dy0 * dx < dy * dx0)))) { + v0 = w0; + dx = dxn; + continue; + } + } - break; - } - } else { - int32_t x = v0->point.x; - int32_t y0 = v0->point.y; - Vertex *w0 = v0; - Vertex *t; - while (((t = side ? w0->next : w0->prev) != v0) && (t->point.x == x) && (t->point.y <= y0)) { - w0 = t; - y0 = t->point.y; - } - v0 = w0; + break; + } + } + else { + int32_t x = v0->point.x; + int32_t y0 = v0->point.y; + Vertex* w0 = v0; + Vertex* t; + while (((t = side ? w0->next : w0->prev) != v0) && (t->point.x == x) && (t->point.y <= y0)) { + w0 = t; + y0 = t->point.y; + } + v0 = w0; - int32_t y1 = v1->point.y; - Vertex *w1 = v1; - while (((t = side ? 
w1->prev : w1->next) != v1) && (t->point.x == x) && (t->point.y >= y1)) { - w1 = t; - y1 = t->point.y; - } - v1 = w1; - } + int32_t y1 = v1->point.y; + Vertex* w1 = v1; + while (((t = side ? w1->prev : w1->next) != v1) && (t->point.x == x) && (t->point.y >= y1)) { + w1 = t; + y1 = t->point.y; + } + v1 = w1; + } - if (side == 0) { - v00 = v0; - v10 = v1; + if (side == 0) { + v00 = v0; + v10 = v1; - v0 = h0.minXy; - v1 = h1.minXy; - sign = -1; - } - } + v0 = h0.minXy; + v1 = h1.minXy; + sign = -1; + } + } - v0->prev = v1; - v1->next = v0; + v0->prev = v1; + v1->next = v0; - v00->next = v10; - v10->prev = v00; + v00->next = v10; + v10->prev = v00; - if (h1.minXy->point.x < h0.minXy->point.x) { - h0.minXy = h1.minXy; - } - if (h1.maxXy->point.x >= h0.maxXy->point.x) { - h0.maxXy = h1.maxXy; - } + if (h1.minXy->point.x < h0.minXy->point.x) { + h0.minXy = h1.minXy; + } + if (h1.maxXy->point.x >= h0.maxXy->point.x) { + h0.maxXy = h1.maxXy; + } - h0.maxYx = h1.maxYx; + h0.maxYx = h1.maxYx; - c0 = v00; - c1 = v10; + c0 = v00; + c1 = v10; - return true; + return true; } -void btConvexHullInternal::computeInternal(int32_t start, int32_t end, IntermediateHull &result) { - int32_t n = end - start; - switch (n) { - case 0: - result.minXy = NULL; - result.maxXy = NULL; - result.minYx = NULL; - result.maxYx = NULL; - return; - case 2: { - Vertex *v = originalVertices[start]; - Vertex *w = v + 1; - if (v->point != w->point) { - int32_t dx = v->point.x - w->point.x; - int32_t dy = v->point.y - w->point.y; +void btConvexHullInternal::computeInternal(int32_t start, int32_t end, IntermediateHull& result) +{ + int32_t n = end - start; + switch (n) { + case 0: + result.minXy = NULL; + result.maxXy = NULL; + result.minYx = NULL; + result.maxYx = NULL; + return; + case 2: { + Vertex* v = originalVertices[start]; + Vertex* w = v + 1; + if (v->point != w->point) { + int32_t dx = v->point.x - w->point.x; + int32_t dy = v->point.y - w->point.y; - if ((dx == 0) && (dy == 0)) { - if (v->point.z > w->point.z) { - Vertex *t = w; - w = v; - v = t; - } - btAssert(v->point.z < w->point.z); - v->next = v; - v->prev = v; - result.minXy = v; - result.maxXy = v; - result.minYx = v; - result.maxYx = v; - } else { - v->next = w; - v->prev = w; - w->next = v; - w->prev = v; + if ((dx == 0) && (dy == 0)) { + if (v->point.z > w->point.z) { + Vertex* t = w; + w = v; + v = t; + } + btAssert(v->point.z < w->point.z); + v->next = v; + v->prev = v; + result.minXy = v; + result.maxXy = v; + result.minYx = v; + result.maxYx = v; + } + else { + v->next = w; + v->prev = w; + w->next = v; + w->prev = v; - if ((dx < 0) || ((dx == 0) && (dy < 0))) { - result.minXy = v; - result.maxXy = w; - } else { - result.minXy = w; - result.maxXy = v; - } + if ((dx < 0) || ((dx == 0) && (dy < 0))) { + result.minXy = v; + result.maxXy = w; + } + else { + result.minXy = w; + result.maxXy = v; + } - if ((dy < 0) || ((dy == 0) && (dx < 0))) { - result.minYx = v; - result.maxYx = w; - } else { - result.minYx = w; - result.maxYx = v; - } - } + if ((dy < 0) || ((dy == 0) && (dx < 0))) { + result.minYx = v; + result.maxYx = w; + } + else { + result.minYx = w; + result.maxYx = v; + } + } - Edge *e = newEdgePair(v, w); - e->link(e); - v->edges = e; + Edge* e = newEdgePair(v, w); + e->link(e); + v->edges = e; - e = e->reverse; - e->link(e); - w->edges = e; + e = e->reverse; + e->link(e); + w->edges = e; - return; - } - } - // lint -fallthrough - case 1: { - Vertex *v = originalVertices[start]; - v->edges = NULL; - v->next = v; - v->prev = v; + return; + } + } + // lint 
-fallthrough + case 1: { + Vertex* v = originalVertices[start]; + v->edges = NULL; + v->next = v; + v->prev = v; - result.minXy = v; - result.maxXy = v; - result.minYx = v; - result.maxYx = v; + result.minXy = v; + result.maxXy = v; + result.minYx = v; + result.maxYx = v; - return; - } - } + return; + } + } - int32_t split0 = start + n / 2; - Point32 p = originalVertices[split0 - 1]->point; - int32_t split1 = split0; - while ((split1 < end) && (originalVertices[split1]->point == p)) { - split1++; - } - computeInternal(start, split0, result); - IntermediateHull hull1; - computeInternal(split1, end, hull1); + int32_t split0 = start + n / 2; + Point32 p = originalVertices[split0 - 1]->point; + int32_t split1 = split0; + while ((split1 < end) && (originalVertices[split1]->point == p)) { + split1++; + } + computeInternal(start, split0, result); + IntermediateHull hull1; + computeInternal(split1, end, hull1); #ifdef DEBUG_CONVEX_HULL - printf("\n\nMerge\n"); - result.print(); - hull1.print(); + printf("\n\nMerge\n"); + result.print(); + hull1.print(); #endif - merge(result, hull1); + merge(result, hull1); #ifdef DEBUG_CONVEX_HULL - printf("\n Result\n"); - result.print(); + printf("\n Result\n"); + result.print(); #endif } #ifdef DEBUG_CONVEX_HULL -void btConvexHullInternal::IntermediateHull::print() { - printf(" Hull\n"); - for (Vertex *v = minXy; v;) { - printf(" "); - v->print(); - if (v == maxXy) { - printf(" maxXy"); - } - if (v == minYx) { - printf(" minYx"); - } - if (v == maxYx) { - printf(" maxYx"); - } - if (v->next->prev != v) { - printf(" Inconsistency"); - } - printf("\n"); - v = v->next; - if (v == minXy) { - break; - } - } - if (minXy) { - minXy->copy = (minXy->copy == -1) ? -2 : -1; - minXy->printGraph(); - } +void btConvexHullInternal::IntermediateHull::print() +{ + printf(" Hull\n"); + for (Vertex* v = minXy; v;) { + printf(" "); + v->print(); + if (v == maxXy) { + printf(" maxXy"); + } + if (v == minYx) { + printf(" minYx"); + } + if (v == maxYx) { + printf(" maxYx"); + } + if (v->next->prev != v) { + printf(" Inconsistency"); + } + printf("\n"); + v = v->next; + if (v == minXy) { + break; + } + } + if (minXy) { + minXy->copy = (minXy->copy == -1) ? -2 : -1; + minXy->printGraph(); + } } -void btConvexHullInternal::Vertex::printGraph() { - print(); - printf("\nEdges\n"); - Edge *e = edges; - if (e) { - do { - e->print(); - printf("\n"); - e = e->next; - } while (e != edges); - do { - Vertex *v = e->target; - if (v->copy != copy) { - v->copy = copy; - v->printGraph(); - } - e = e->next; - } while (e != edges); - } +void btConvexHullInternal::Vertex::printGraph() +{ + print(); + printf("\nEdges\n"); + Edge* e = edges; + if (e) { + do { + e->print(); + printf("\n"); + e = e->next; + } while (e != edges); + do { + Vertex* v = e->target; + if (v->copy != copy) { + v->copy = copy; + v->printGraph(); + } + e = e->next; + } while (e != edges); + } } #endif -btConvexHullInternal::Orientation btConvexHullInternal::getOrientation(const Edge *prev, const Edge *next, const Point32 &s, const Point32 &t) { - btAssert(prev->reverse->target == next->reverse->target); - if (prev->next == next) { - if (prev->prev == next) { - Point64 n = t.cross(s); - Point64 m = (*prev->target - *next->reverse->target).cross(*next->target - *next->reverse->target); - btAssert(!m.isZero()); - int64_t dot = n.dot(m); - btAssert(dot != 0); - return (dot > 0) ? 
COUNTER_CLOCKWISE : CLOCKWISE; - } - return COUNTER_CLOCKWISE; - } else if (prev->prev == next) { - return CLOCKWISE; - } else { - return NONE; - } +btConvexHullInternal::Orientation btConvexHullInternal::getOrientation(const Edge* prev, const Edge* next, const Point32& s, const Point32& t) +{ + btAssert(prev->reverse->target == next->reverse->target); + if (prev->next == next) { + if (prev->prev == next) { + Point64 n = t.cross(s); + Point64 m = (*prev->target - *next->reverse->target).cross(*next->target - *next->reverse->target); + btAssert(!m.isZero()); + int64_t dot = n.dot(m); + btAssert(dot != 0); + return (dot > 0) ? COUNTER_CLOCKWISE : CLOCKWISE; + } + return COUNTER_CLOCKWISE; + } + else if (prev->prev == next) { + return CLOCKWISE; + } + else { + return NONE; + } } -btConvexHullInternal::Edge *btConvexHullInternal::findMaxAngle(bool ccw, const Vertex *start, const Point32 &s, const Point64 &rxs, const Point64 &sxrxs, Rational64 &minCot) { - Edge *minEdge = NULL; +btConvexHullInternal::Edge* btConvexHullInternal::findMaxAngle(bool ccw, const Vertex* start, const Point32& s, const Point64& rxs, const Point64& sxrxs, Rational64& minCot) +{ + Edge* minEdge = NULL; #ifdef DEBUG_CONVEX_HULL - printf("find max edge for %d\n", start->point.index); + printf("find max edge for %d\n", start->point.index); #endif - Edge *e = start->edges; - if (e) { - do { - if (e->copy > mergeStamp) { - Point32 t = *e->target - *start; - Rational64 cot(t.dot(sxrxs), t.dot(rxs)); + Edge* e = start->edges; + if (e) { + do { + if (e->copy > mergeStamp) { + Point32 t = *e->target - *start; + Rational64 cot(t.dot(sxrxs), t.dot(rxs)); #ifdef DEBUG_CONVEX_HULL - printf(" Angle is %f (%d) for ", (float)btAtan(cot.toScalar()), (int32_t)cot.isNaN()); - e->print(); + printf(" Angle is %f (%d) for ", (float)btAtan(cot.toScalar()), (int32_t)cot.isNaN()); + e->print(); #endif - if (cot.isNaN()) { - btAssert(ccw ? (t.dot(s) < 0) : (t.dot(s) > 0)); - } else { - int32_t cmp; - if (minEdge == NULL) { - minCot = cot; - minEdge = e; - } else if ((cmp = cot.compare(minCot)) < 0) { - minCot = cot; - minEdge = e; - } else if ((cmp == 0) && (ccw == (getOrientation(minEdge, e, s, t) == COUNTER_CLOCKWISE))) { - minEdge = e; - } - } + if (cot.isNaN()) { + btAssert(ccw ? (t.dot(s) < 0) : (t.dot(s) > 0)); + } + else { + int32_t cmp; + if (minEdge == NULL) { + minCot = cot; + minEdge = e; + } + else if ((cmp = cot.compare(minCot)) < 0) { + minCot = cot; + minEdge = e; + } + else if ((cmp == 0) && (ccw == (getOrientation(minEdge, e, s, t) == COUNTER_CLOCKWISE))) { + minEdge = e; + } + } #ifdef DEBUG_CONVEX_HULL - printf("\n"); + printf("\n"); #endif - } - e = e->next; - } while (e != start->edges); - } - return minEdge; + } + e = e->next; + } while (e != start->edges); + } + return minEdge; } -void btConvexHullInternal::findEdgeForCoplanarFaces(Vertex *c0, Vertex *c1, Edge *&e0, Edge *&e1, Vertex *stop0, Vertex *stop1) { - Edge *start0 = e0; - Edge *start1 = e1; - Point32 et0 = start0 ? start0->target->point : c0->point; - Point32 et1 = start1 ? start1->target->point : c1->point; - Point32 s = c1->point - c0->point; - Point64 normal = ((start0 ? 
start0 : start1)->target->point - c0->point).cross(s); - int64_t dist = c0->point.dot(normal); - btAssert(!start1 || (start1->target->point.dot(normal) == dist)); - Point64 perp = s.cross(normal); - btAssert(!perp.isZero()); +void btConvexHullInternal::findEdgeForCoplanarFaces(Vertex* c0, Vertex* c1, Edge*& e0, Edge*& e1, Vertex* stop0, Vertex* stop1) +{ + Edge* start0 = e0; + Edge* start1 = e1; + Point32 et0 = start0 ? start0->target->point : c0->point; + Point32 et1 = start1 ? start1->target->point : c1->point; + Point32 s = c1->point - c0->point; + Point64 normal = ((start0 ? start0 : start1)->target->point - c0->point).cross(s); + int64_t dist = c0->point.dot(normal); + btAssert(!start1 || (start1->target->point.dot(normal) == dist)); + Point64 perp = s.cross(normal); + btAssert(!perp.isZero()); #ifdef DEBUG_CONVEX_HULL - printf(" Advancing %d %d (%p %p, %d %d)\n", c0->point.index, c1->point.index, start0, start1, start0 ? start0->target->point.index : -1, start1 ? start1->target->point.index : -1); + printf(" Advancing %d %d (%p %p, %d %d)\n", c0->point.index, c1->point.index, start0, start1, start0 ? start0->target->point.index : -1, start1 ? start1->target->point.index : -1); #endif - int64_t maxDot0 = et0.dot(perp); - if (e0) { - while (e0->target != stop0) { - Edge *e = e0->reverse->prev; - if (e->target->point.dot(normal) < dist) { - break; - } - btAssert(e->target->point.dot(normal) == dist); - if (e->copy == mergeStamp) { - break; - } - int64_t dot = e->target->point.dot(perp); - if (dot <= maxDot0) { - break; - } - maxDot0 = dot; - e0 = e; - et0 = e->target->point; - } - } + int64_t maxDot0 = et0.dot(perp); + if (e0) { + while (e0->target != stop0) { + Edge* e = e0->reverse->prev; + if (e->target->point.dot(normal) < dist) { + break; + } + btAssert(e->target->point.dot(normal) == dist); + if (e->copy == mergeStamp) { + break; + } + int64_t dot = e->target->point.dot(perp); + if (dot <= maxDot0) { + break; + } + maxDot0 = dot; + e0 = e; + et0 = e->target->point; + } + } - int64_t maxDot1 = et1.dot(perp); - if (e1) { - while (e1->target != stop1) { - Edge *e = e1->reverse->next; - if (e->target->point.dot(normal) < dist) { - break; - } - btAssert(e->target->point.dot(normal) == dist); - if (e->copy == mergeStamp) { - break; - } - int64_t dot = e->target->point.dot(perp); - if (dot <= maxDot1) { - break; - } - maxDot1 = dot; - e1 = e; - et1 = e->target->point; - } - } + int64_t maxDot1 = et1.dot(perp); + if (e1) { + while (e1->target != stop1) { + Edge* e = e1->reverse->next; + if (e->target->point.dot(normal) < dist) { + break; + } + btAssert(e->target->point.dot(normal) == dist); + if (e->copy == mergeStamp) { + break; + } + int64_t dot = e->target->point.dot(perp); + if (dot <= maxDot1) { + break; + } + maxDot1 = dot; + e1 = e; + et1 = e->target->point; + } + } #ifdef DEBUG_CONVEX_HULL - printf(" Starting at %d %d\n", et0.index, et1.index); + printf(" Starting at %d %d\n", et0.index, et1.index); #endif - int64_t dx = maxDot1 - maxDot0; - if (dx > 0) { - while (true) { - int64_t dy = (et1 - et0).dot(s); + int64_t dx = maxDot1 - maxDot0; + if (dx > 0) { + while (true) { + int64_t dy = (et1 - et0).dot(s); - if (e0 && (e0->target != stop0)) { - Edge *f0 = e0->next->reverse; - if (f0->copy > mergeStamp) { - int64_t dx0 = (f0->target->point - et0).dot(perp); - int64_t dy0 = (f0->target->point - et0).dot(s); - if ((dx0 == 0) ? 
(dy0 < 0) : ((dx0 < 0) && (Rational64(dy0, dx0).compare(Rational64(dy, dx)) >= 0))) { - et0 = f0->target->point; - dx = (et1 - et0).dot(perp); - e0 = (e0 == start0) ? NULL : f0; - continue; - } - } - } + if (e0 && (e0->target != stop0)) { + Edge* f0 = e0->next->reverse; + if (f0->copy > mergeStamp) { + int64_t dx0 = (f0->target->point - et0).dot(perp); + int64_t dy0 = (f0->target->point - et0).dot(s); + if ((dx0 == 0) ? (dy0 < 0) : ((dx0 < 0) && (Rational64(dy0, dx0).compare(Rational64(dy, dx)) >= 0))) { + et0 = f0->target->point; + dx = (et1 - et0).dot(perp); + e0 = (e0 == start0) ? NULL : f0; + continue; + } + } + } - if (e1 && (e1->target != stop1)) { - Edge *f1 = e1->reverse->next; - if (f1->copy > mergeStamp) { - Point32 d1 = f1->target->point - et1; - if (d1.dot(normal) == 0) { - int64_t dx1 = d1.dot(perp); - int64_t dy1 = d1.dot(s); - int64_t dxn = (f1->target->point - et0).dot(perp); - if ((dxn > 0) && ((dx1 == 0) ? (dy1 < 0) : ((dx1 < 0) && (Rational64(dy1, dx1).compare(Rational64(dy, dx)) > 0)))) { - e1 = f1; - et1 = e1->target->point; - dx = dxn; - continue; - } - } else { - btAssert((e1 == start1) && (d1.dot(normal) < 0)); - } - } - } + if (e1 && (e1->target != stop1)) { + Edge* f1 = e1->reverse->next; + if (f1->copy > mergeStamp) { + Point32 d1 = f1->target->point - et1; + if (d1.dot(normal) == 0) { + int64_t dx1 = d1.dot(perp); + int64_t dy1 = d1.dot(s); + int64_t dxn = (f1->target->point - et0).dot(perp); + if ((dxn > 0) && ((dx1 == 0) ? (dy1 < 0) : ((dx1 < 0) && (Rational64(dy1, dx1).compare(Rational64(dy, dx)) > 0)))) { + e1 = f1; + et1 = e1->target->point; + dx = dxn; + continue; + } + } + else { + btAssert((e1 == start1) && (d1.dot(normal) < 0)); + } + } + } - break; - } - } else if (dx < 0) { - while (true) { - int64_t dy = (et1 - et0).dot(s); + break; + } + } + else if (dx < 0) { + while (true) { + int64_t dy = (et1 - et0).dot(s); - if (e1 && (e1->target != stop1)) { - Edge *f1 = e1->prev->reverse; - if (f1->copy > mergeStamp) { - int64_t dx1 = (f1->target->point - et1).dot(perp); - int64_t dy1 = (f1->target->point - et1).dot(s); - if ((dx1 == 0) ? (dy1 > 0) : ((dx1 < 0) && (Rational64(dy1, dx1).compare(Rational64(dy, dx)) <= 0))) { - et1 = f1->target->point; - dx = (et1 - et0).dot(perp); - e1 = (e1 == start1) ? NULL : f1; - continue; - } - } - } + if (e1 && (e1->target != stop1)) { + Edge* f1 = e1->prev->reverse; + if (f1->copy > mergeStamp) { + int64_t dx1 = (f1->target->point - et1).dot(perp); + int64_t dy1 = (f1->target->point - et1).dot(s); + if ((dx1 == 0) ? (dy1 > 0) : ((dx1 < 0) && (Rational64(dy1, dx1).compare(Rational64(dy, dx)) <= 0))) { + et1 = f1->target->point; + dx = (et1 - et0).dot(perp); + e1 = (e1 == start1) ? NULL : f1; + continue; + } + } + } - if (e0 && (e0->target != stop0)) { - Edge *f0 = e0->reverse->prev; - if (f0->copy > mergeStamp) { - Point32 d0 = f0->target->point - et0; - if (d0.dot(normal) == 0) { - int64_t dx0 = d0.dot(perp); - int64_t dy0 = d0.dot(s); - int64_t dxn = (et1 - f0->target->point).dot(perp); - if ((dxn < 0) && ((dx0 == 0) ? 
(dy0 > 0) : ((dx0 < 0) && (Rational64(dy0, dx0).compare(Rational64(dy, dx)) < 0)))) { - e0 = f0; - et0 = e0->target->point; - dx = dxn; - continue; - } - } else { - btAssert((e0 == start0) && (d0.dot(normal) < 0)); - } - } - } + if (e0 && (e0->target != stop0)) { + Edge* f0 = e0->reverse->prev; + if (f0->copy > mergeStamp) { + Point32 d0 = f0->target->point - et0; + if (d0.dot(normal) == 0) { + int64_t dx0 = d0.dot(perp); + int64_t dy0 = d0.dot(s); + int64_t dxn = (et1 - f0->target->point).dot(perp); + if ((dxn < 0) && ((dx0 == 0) ? (dy0 > 0) : ((dx0 < 0) && (Rational64(dy0, dx0).compare(Rational64(dy, dx)) < 0)))) { + e0 = f0; + et0 = e0->target->point; + dx = dxn; + continue; + } + } + else { + btAssert((e0 == start0) && (d0.dot(normal) < 0)); + } + } + } - break; - } - } + break; + } + } #ifdef DEBUG_CONVEX_HULL - printf(" Advanced edges to %d %d\n", et0.index, et1.index); + printf(" Advanced edges to %d %d\n", et0.index, et1.index); #endif } -void btConvexHullInternal::merge(IntermediateHull &h0, IntermediateHull &h1) { - if (!h1.maxXy) { - return; - } - if (!h0.maxXy) { - h0 = h1; - return; - } +void btConvexHullInternal::merge(IntermediateHull& h0, IntermediateHull& h1) +{ + if (!h1.maxXy) { + return; + } + if (!h0.maxXy) { + h0 = h1; + return; + } - mergeStamp--; + mergeStamp--; - Vertex *c0 = NULL; - Edge *toPrev0 = NULL; - Edge *firstNew0 = NULL; - Edge *pendingHead0 = NULL; - Edge *pendingTail0 = NULL; - Vertex *c1 = NULL; - Edge *toPrev1 = NULL; - Edge *firstNew1 = NULL; - Edge *pendingHead1 = NULL; - Edge *pendingTail1 = NULL; - Point32 prevPoint; + Vertex* c0 = NULL; + Edge* toPrev0 = NULL; + Edge* firstNew0 = NULL; + Edge* pendingHead0 = NULL; + Edge* pendingTail0 = NULL; + Vertex* c1 = NULL; + Edge* toPrev1 = NULL; + Edge* firstNew1 = NULL; + Edge* pendingHead1 = NULL; + Edge* pendingTail1 = NULL; + Point32 prevPoint; - if (mergeProjection(h0, h1, c0, c1)) { - Point32 s = *c1 - *c0; - Point64 normal = Point32(0, 0, -1).cross(s); - Point64 t = s.cross(normal); - btAssert(!t.isZero()); + if (mergeProjection(h0, h1, c0, c1)) { + Point32 s = *c1 - *c0; + Point64 normal = Point32(0, 0, -1).cross(s); + Point64 t = s.cross(normal); + btAssert(!t.isZero()); - Edge *e = c0->edges; - Edge *start0 = NULL; - if (e) { - do { - int64_t dot = (*e->target - *c0).dot(normal); - btAssert(dot <= 0); - if ((dot == 0) && ((*e->target - *c0).dot(t) > 0)) { - if (!start0 || (getOrientation(start0, e, s, Point32(0, 0, -1)) == CLOCKWISE)) { - start0 = e; - } - } - e = e->next; - } while (e != c0->edges); - } + Edge* e = c0->edges; + Edge* start0 = NULL; + if (e) { + do { + int64_t dot = (*e->target - *c0).dot(normal); + btAssert(dot <= 0); + if ((dot == 0) && ((*e->target - *c0).dot(t) > 0)) { + if (!start0 || (getOrientation(start0, e, s, Point32(0, 0, -1)) == CLOCKWISE)) { + start0 = e; + } + } + e = e->next; + } while (e != c0->edges); + } - e = c1->edges; - Edge *start1 = NULL; - if (e) { - do { - int64_t dot = (*e->target - *c1).dot(normal); - btAssert(dot <= 0); - if ((dot == 0) && ((*e->target - *c1).dot(t) > 0)) { - if (!start1 || (getOrientation(start1, e, s, Point32(0, 0, -1)) == COUNTER_CLOCKWISE)) { - start1 = e; - } - } - e = e->next; - } while (e != c1->edges); - } + e = c1->edges; + Edge* start1 = NULL; + if (e) { + do { + int64_t dot = (*e->target - *c1).dot(normal); + btAssert(dot <= 0); + if ((dot == 0) && ((*e->target - *c1).dot(t) > 0)) { + if (!start1 || (getOrientation(start1, e, s, Point32(0, 0, -1)) == COUNTER_CLOCKWISE)) { + start1 = e; + } + } + e = e->next; + } while (e != 
c1->edges); + } - if (start0 || start1) { - findEdgeForCoplanarFaces(c0, c1, start0, start1, NULL, NULL); - if (start0) { - c0 = start0->target; - } - if (start1) { - c1 = start1->target; - } - } + if (start0 || start1) { + findEdgeForCoplanarFaces(c0, c1, start0, start1, NULL, NULL); + if (start0) { + c0 = start0->target; + } + if (start1) { + c1 = start1->target; + } + } - prevPoint = c1->point; - prevPoint.z++; - } else { - prevPoint = c1->point; - prevPoint.x++; - } + prevPoint = c1->point; + prevPoint.z++; + } + else { + prevPoint = c1->point; + prevPoint.x++; + } - Vertex *first0 = c0; - Vertex *first1 = c1; - bool firstRun = true; + Vertex* first0 = c0; + Vertex* first1 = c1; + bool firstRun = true; - while (true) { - Point32 s = *c1 - *c0; - Point32 r = prevPoint - c0->point; - Point64 rxs = r.cross(s); - Point64 sxrxs = s.cross(rxs); + while (true) { + Point32 s = *c1 - *c0; + Point32 r = prevPoint - c0->point; + Point64 rxs = r.cross(s); + Point64 sxrxs = s.cross(rxs); #ifdef DEBUG_CONVEX_HULL - printf("\n Checking %d %d\n", c0->point.index, c1->point.index); + printf("\n Checking %d %d\n", c0->point.index, c1->point.index); #endif - Rational64 minCot0(0, 0); - Edge *min0 = findMaxAngle(false, c0, s, rxs, sxrxs, minCot0); - Rational64 minCot1(0, 0); - Edge *min1 = findMaxAngle(true, c1, s, rxs, sxrxs, minCot1); - if (!min0 && !min1) { - Edge *e = newEdgePair(c0, c1); - e->link(e); - c0->edges = e; + Rational64 minCot0(0, 0); + Edge* min0 = findMaxAngle(false, c0, s, rxs, sxrxs, minCot0); + Rational64 minCot1(0, 0); + Edge* min1 = findMaxAngle(true, c1, s, rxs, sxrxs, minCot1); + if (!min0 && !min1) { + Edge* e = newEdgePair(c0, c1); + e->link(e); + c0->edges = e; - e = e->reverse; - e->link(e); - c1->edges = e; - return; - } else { - int32_t cmp = !min0 ? 1 : !min1 ? -1 : minCot0.compare(minCot1); + e = e->reverse; + e->link(e); + c1->edges = e; + return; + } + else { + int32_t cmp = !min0 ? 1 : !min1 ? -1 : minCot0.compare(minCot1); #ifdef DEBUG_CONVEX_HULL - printf(" -> Result %d\n", cmp); + printf(" -> Result %d\n", cmp); #endif - if (firstRun || ((cmp >= 0) ? !minCot1.isNegativeInfinity() : !minCot0.isNegativeInfinity())) { - Edge *e = newEdgePair(c0, c1); - if (pendingTail0) { - pendingTail0->prev = e; - } else { - pendingHead0 = e; - } - e->next = pendingTail0; - pendingTail0 = e; + if (firstRun || ((cmp >= 0) ? !minCot1.isNegativeInfinity() : !minCot0.isNegativeInfinity())) { + Edge* e = newEdgePair(c0, c1); + if (pendingTail0) { + pendingTail0->prev = e; + } + else { + pendingHead0 = e; + } + e->next = pendingTail0; + pendingTail0 = e; - e = e->reverse; - if (pendingTail1) { - pendingTail1->next = e; - } else { - pendingHead1 = e; - } - e->prev = pendingTail1; - pendingTail1 = e; - } + e = e->reverse; + if (pendingTail1) { + pendingTail1->next = e; + } + else { + pendingHead1 = e; + } + e->prev = pendingTail1; + pendingTail1 = e; + } - Edge *e0 = min0; - Edge *e1 = min1; + Edge* e0 = min0; + Edge* e1 = min1; #ifdef DEBUG_CONVEX_HULL - printf(" Found min edges to %d %d\n", e0 ? e0->target->point.index : -1, e1 ? e1->target->point.index : -1); + printf(" Found min edges to %d %d\n", e0 ? e0->target->point.index : -1, e1 ? 
e1->target->point.index : -1); #endif - if (cmp == 0) { - findEdgeForCoplanarFaces(c0, c1, e0, e1, NULL, NULL); - } + if (cmp == 0) { + findEdgeForCoplanarFaces(c0, c1, e0, e1, NULL, NULL); + } - if ((cmp >= 0) && e1) { - if (toPrev1) { - for (Edge *e = toPrev1->next, *n = NULL; e != min1; e = n) { - n = e->next; - removeEdgePair(e); - } - } + if ((cmp >= 0) && e1) { + if (toPrev1) { + for (Edge *e = toPrev1->next, *n = NULL; e != min1; e = n) { + n = e->next; + removeEdgePair(e); + } + } - if (pendingTail1) { - if (toPrev1) { - toPrev1->link(pendingHead1); - } else { - min1->prev->link(pendingHead1); - firstNew1 = pendingHead1; - } - pendingTail1->link(min1); - pendingHead1 = NULL; - pendingTail1 = NULL; - } else if (!toPrev1) { - firstNew1 = min1; - } + if (pendingTail1) { + if (toPrev1) { + toPrev1->link(pendingHead1); + } + else { + min1->prev->link(pendingHead1); + firstNew1 = pendingHead1; + } + pendingTail1->link(min1); + pendingHead1 = NULL; + pendingTail1 = NULL; + } + else if (!toPrev1) { + firstNew1 = min1; + } - prevPoint = c1->point; - c1 = e1->target; - toPrev1 = e1->reverse; - } + prevPoint = c1->point; + c1 = e1->target; + toPrev1 = e1->reverse; + } - if ((cmp <= 0) && e0) { - if (toPrev0) { - for (Edge *e = toPrev0->prev, *n = NULL; e != min0; e = n) { - n = e->prev; - removeEdgePair(e); - } - } + if ((cmp <= 0) && e0) { + if (toPrev0) { + for (Edge *e = toPrev0->prev, *n = NULL; e != min0; e = n) { + n = e->prev; + removeEdgePair(e); + } + } - if (pendingTail0) { - if (toPrev0) { - pendingHead0->link(toPrev0); - } else { - pendingHead0->link(min0->next); - firstNew0 = pendingHead0; - } - min0->link(pendingTail0); - pendingHead0 = NULL; - pendingTail0 = NULL; - } else if (!toPrev0) { - firstNew0 = min0; - } + if (pendingTail0) { + if (toPrev0) { + pendingHead0->link(toPrev0); + } + else { + pendingHead0->link(min0->next); + firstNew0 = pendingHead0; + } + min0->link(pendingTail0); + pendingHead0 = NULL; + pendingTail0 = NULL; + } + else if (!toPrev0) { + firstNew0 = min0; + } - prevPoint = c0->point; - c0 = e0->target; - toPrev0 = e0->reverse; - } - } + prevPoint = c0->point; + c0 = e0->target; + toPrev0 = e0->reverse; + } + } - if ((c0 == first0) && (c1 == first1)) { - if (toPrev0 == NULL) { - pendingHead0->link(pendingTail0); - c0->edges = pendingTail0; - } else { - for (Edge *e = toPrev0->prev, *n = NULL; e != firstNew0; e = n) { - n = e->prev; - removeEdgePair(e); - } - if (pendingTail0) { - pendingHead0->link(toPrev0); - firstNew0->link(pendingTail0); - } - } + if ((c0 == first0) && (c1 == first1)) { + if (toPrev0 == NULL) { + pendingHead0->link(pendingTail0); + c0->edges = pendingTail0; + } + else { + for (Edge *e = toPrev0->prev, *n = NULL; e != firstNew0; e = n) { + n = e->prev; + removeEdgePair(e); + } + if (pendingTail0) { + pendingHead0->link(toPrev0); + firstNew0->link(pendingTail0); + } + } - if (toPrev1 == NULL) { - pendingTail1->link(pendingHead1); - c1->edges = pendingTail1; - } else { - for (Edge *e = toPrev1->next, *n = NULL; e != firstNew1; e = n) { - n = e->next; - removeEdgePair(e); - } - if (pendingTail1) { - toPrev1->link(pendingHead1); - pendingTail1->link(firstNew1); - } - } + if (toPrev1 == NULL) { + pendingTail1->link(pendingHead1); + c1->edges = pendingTail1; + } + else { + for (Edge *e = toPrev1->next, *n = NULL; e != firstNew1; e = n) { + n = e->next; + removeEdgePair(e); + } + if (pendingTail1) { + toPrev1->link(pendingHead1); + pendingTail1->link(firstNew1); + } + } - return; - } + return; + } - firstRun = false; - } + firstRun = false; + } 
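The cotangent comparisons driving the merge loop above never divide: Rational64 compares p/q against r/s by cross-multiplying into a 128-bit product (via the inline asm path or Int128) and comparing the products, so the choice of the next hull edge stays exact. A minimal sketch of that idea, assuming a hypothetical Frac type with positive denominators and the GCC/Clang __int128 extension in place of the Int128 class used here:

#include <cstdint>

// Hypothetical fraction type; the real Rational64 also tracks an explicit
// sign and the degenerate case reported by isNaN() in findMaxAngle.
struct Frac {
    int64_t num;
    int64_t den; // assumed > 0 in this sketch
};

// Returns -1, 0 or +1 for a < b, a == b, a > b, without ever dividing:
// a.num/a.den <=> b.num/b.den  iff  a.num*b.den <=> b.num*a.den.
static int compareFrac(const Frac& a, const Frac& b)
{
    __int128 lhs = (__int128)a.num * b.den;
    __int128 rhs = (__int128)b.num * a.den;
    return (lhs < rhs) ? -1 : (lhs > rhs) ? 1 : 0;
}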
} -static bool pointCmp(const btConvexHullInternal::Point32 &p, const btConvexHullInternal::Point32 &q) { - return (p.y < q.y) || ((p.y == q.y) && ((p.x < q.x) || ((p.x == q.x) && (p.z < q.z)))); +static bool pointCmp(const btConvexHullInternal::Point32& p, const btConvexHullInternal::Point32& q) +{ + return (p.y < q.y) || ((p.y == q.y) && ((p.x < q.x) || ((p.x == q.x) && (p.z < q.z)))); } -void btConvexHullInternal::compute(const void *coords, bool doubleCoords, int32_t stride, int32_t count) { - btVector3 min(btScalar(1e30), btScalar(1e30), btScalar(1e30)), max(btScalar(-1e30), btScalar(-1e30), btScalar(-1e30)); - const char *ptr = (const char *)coords; - if (doubleCoords) { - for (int32_t i = 0; i < count; i++) { - const double *v = (const double *)ptr; - btVector3 p((btScalar)v[0], (btScalar)v[1], (btScalar)v[2]); - ptr += stride; - min.setMin(p); - max.setMax(p); - } - } else { - for (int32_t i = 0; i < count; i++) { - const float *v = (const float *)ptr; - btVector3 p(v[0], v[1], v[2]); - ptr += stride; - min.setMin(p); - max.setMax(p); - } - } +void btConvexHullInternal::compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count) +{ + btVector3 min(btScalar(1e30), btScalar(1e30), btScalar(1e30)), max(btScalar(-1e30), btScalar(-1e30), btScalar(-1e30)); + const char* ptr = (const char*)coords; + if (doubleCoords) { + for (int32_t i = 0; i < count; i++) { + const double* v = (const double*)ptr; + btVector3 p((btScalar)v[0], (btScalar)v[1], (btScalar)v[2]); + ptr += stride; + min.setMin(p); + max.setMax(p); + } + } + else { + for (int32_t i = 0; i < count; i++) { + const float* v = (const float*)ptr; + btVector3 p(v[0], v[1], v[2]); + ptr += stride; + min.setMin(p); + max.setMax(p); + } + } - btVector3 s = max - min; - maxAxis = s.maxAxis(); - minAxis = s.minAxis(); - if (minAxis == maxAxis) { - minAxis = (maxAxis + 1) % 3; - } - medAxis = 3 - maxAxis - minAxis; + btVector3 s = max - min; + maxAxis = s.maxAxis(); + minAxis = s.minAxis(); + if (minAxis == maxAxis) { + minAxis = (maxAxis + 1) % 3; + } + medAxis = 3 - maxAxis - minAxis; - s /= btScalar(10216); - if (((medAxis + 1) % 3) != maxAxis) { - s *= -1; - } - scaling = s; + s /= btScalar(10216); + if (((medAxis + 1) % 3) != maxAxis) { + s *= -1; + } + scaling = s; - if (s[0] != 0) { - s[0] = btScalar(1) / s[0]; - } - if (s[1] != 0) { - s[1] = btScalar(1) / s[1]; - } - if (s[2] != 0) { - s[2] = btScalar(1) / s[2]; - } + if (s[0] != 0) { + s[0] = btScalar(1) / s[0]; + } + if (s[1] != 0) { + s[1] = btScalar(1) / s[1]; + } + if (s[2] != 0) { + s[2] = btScalar(1) / s[2]; + } - center = (min + max) * btScalar(0.5); + center = (min + max) * btScalar(0.5); - btAlignedObjectArray points; - points.resize(count); - ptr = (const char *)coords; - if (doubleCoords) { - for (int32_t i = 0; i < count; i++) { - const double *v = (const double *)ptr; - btVector3 p((btScalar)v[0], (btScalar)v[1], (btScalar)v[2]); - ptr += stride; - p = (p - center) * s; - points[i].x = (int32_t)p[medAxis]; - points[i].y = (int32_t)p[maxAxis]; - points[i].z = (int32_t)p[minAxis]; - points[i].index = i; - } - } else { - for (int32_t i = 0; i < count; i++) { - const float *v = (const float *)ptr; - btVector3 p(v[0], v[1], v[2]); - ptr += stride; - p = (p - center) * s; - points[i].x = (int32_t)p[medAxis]; - points[i].y = (int32_t)p[maxAxis]; - points[i].z = (int32_t)p[minAxis]; - points[i].index = i; - } - } - points.quickSort(pointCmp); + btAlignedObjectArray points; + points.resize(count); + ptr = (const char*)coords; + if (doubleCoords) { + for 
(int32_t i = 0; i < count; i++) { + const double* v = (const double*)ptr; + btVector3 p((btScalar)v[0], (btScalar)v[1], (btScalar)v[2]); + ptr += stride; + p = (p - center) * s; + points[i].x = (int32_t)p[medAxis]; + points[i].y = (int32_t)p[maxAxis]; + points[i].z = (int32_t)p[minAxis]; + points[i].index = i; + } + } + else { + for (int32_t i = 0; i < count; i++) { + const float* v = (const float*)ptr; + btVector3 p(v[0], v[1], v[2]); + ptr += stride; + p = (p - center) * s; + points[i].x = (int32_t)p[medAxis]; + points[i].y = (int32_t)p[maxAxis]; + points[i].z = (int32_t)p[minAxis]; + points[i].index = i; + } + } + points.quickSort(pointCmp); - vertexPool.reset(); - vertexPool.setArraySize(count); - originalVertices.resize(count); - for (int32_t i = 0; i < count; i++) { - Vertex *v = vertexPool.newObject(); - v->edges = NULL; - v->point = points[i]; - v->copy = -1; - originalVertices[i] = v; - } + vertexPool.reset(); + vertexPool.setArraySize(count); + originalVertices.resize(count); + for (int32_t i = 0; i < count; i++) { + Vertex* v = vertexPool.newObject(); + v->edges = NULL; + v->point = points[i]; + v->copy = -1; + originalVertices[i] = v; + } - points.clear(); + points.clear(); - edgePool.reset(); - edgePool.setArraySize(6 * count); + edgePool.reset(); + edgePool.setArraySize(6 * count); - usedEdgePairs = 0; - maxUsedEdgePairs = 0; + usedEdgePairs = 0; + maxUsedEdgePairs = 0; - mergeStamp = -3; + mergeStamp = -3; - IntermediateHull hull; - computeInternal(0, count, hull); - vertexList = hull.minXy; + IntermediateHull hull; + computeInternal(0, count, hull); + vertexList = hull.minXy; #ifdef DEBUG_CONVEX_HULL - printf("max. edges %d (3v = %d)", maxUsedEdgePairs, 3 * count); + printf("max. edges %d (3v = %d)", maxUsedEdgePairs, 3 * count); #endif } -btVector3 btConvexHullInternal::toBtVector(const Point32 &v) { - btVector3 p; - p[medAxis] = btScalar(v.x); - p[maxAxis] = btScalar(v.y); - p[minAxis] = btScalar(v.z); - return p * scaling; +btVector3 btConvexHullInternal::toBtVector(const Point32& v) +{ + btVector3 p; + p[medAxis] = btScalar(v.x); + p[maxAxis] = btScalar(v.y); + p[minAxis] = btScalar(v.z); + return p * scaling; } -btVector3 btConvexHullInternal::getBtNormal(Face *face) { - return toBtVector(face->dir0).cross(toBtVector(face->dir1)).normalized(); +btVector3 btConvexHullInternal::getBtNormal(Face* face) +{ + return toBtVector(face->dir0).cross(toBtVector(face->dir1)).normalized(); } -btVector3 btConvexHullInternal::getCoordinates(const Vertex *v) { - btVector3 p; - p[medAxis] = v->xvalue(); - p[maxAxis] = v->yvalue(); - p[minAxis] = v->zvalue(); - return p * scaling + center; +btVector3 btConvexHullInternal::getCoordinates(const Vertex* v) +{ + btVector3 p; + p[medAxis] = v->xvalue(); + p[maxAxis] = v->yvalue(); + p[minAxis] = v->zvalue(); + return p * scaling + center; } -btScalar btConvexHullInternal::shrink(btScalar amount, btScalar clampAmount) { - if (!vertexList) { - return 0; - } - int32_t stamp = --mergeStamp; - btAlignedObjectArray stack; - vertexList->copy = stamp; - stack.push_back(vertexList); - btAlignedObjectArray faces; +btScalar btConvexHullInternal::shrink(btScalar amount, btScalar clampAmount) +{ + if (!vertexList) { + return 0; + } + int32_t stamp = --mergeStamp; + btAlignedObjectArray stack; + vertexList->copy = stamp; + stack.push_back(vertexList); + btAlignedObjectArray faces; - Point32 ref = vertexList->point; - Int128 hullCenterX(0, 0); - Int128 hullCenterY(0, 0); - Int128 hullCenterZ(0, 0); - Int128 volume(0, 0); + Point32 ref = vertexList->point; 
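The traversal below integrates the hull volume and centre by fanning every face into tetrahedra against the reference vertex ref: each scalar triple product is six times a signed tetrahedron volume, the sum v + a + b + ref is four times that tetrahedron's centroid, and dividing the weighted sum by 4 * volume at the end therefore yields the hull centre. A minimal floating-point sketch of the same accumulation, assuming a hypothetical Vec3 type in place of the exact integer points and Int128 accumulators used here:

// Hypothetical aggregate vector type with the usual operations.
struct Vec3 {
    double x, y, z;
    Vec3 operator-(const Vec3& o) const { return { x - o.x, y - o.y, z - o.z }; }
    Vec3 operator+(const Vec3& o) const { return { x + o.x, y + o.y, z + o.z }; }
    Vec3 cross(const Vec3& o) const
    {
        return { y * o.z - z * o.y, z * o.x - x * o.z, x * o.y - y * o.x };
    }
    double dot(const Vec3& o) const { return x * o.x + y * o.y + z * o.z; }
};

// Accumulate one fan triangle (v, a, b) of a face against the reference point.
// volume6 collects 6x the signed volume; centroidSum collects 24x the
// volume-weighted tetrahedron centroids.
static void accumulateTetra(const Vec3& ref, const Vec3& v, const Vec3& a, const Vec3& b,
        double& volume6, Vec3& centroidSum)
{
    double vol = (v - ref).dot((a - ref).cross(b - ref)); // 6 * signed tetra volume
    Vec3 c = v + a + b + ref;                             // 4 * tetra centroid
    centroidSum = centroidSum + Vec3{ vol * c.x, vol * c.y, vol * c.z };
    volume6 += vol;
}

// After visiting every face triangle, the hull centre is centroidSum / (4 * volume6).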
+ Int128 hullCenterX(0, 0); + Int128 hullCenterY(0, 0); + Int128 hullCenterZ(0, 0); + Int128 volume(0, 0); - while (stack.size() > 0) { - Vertex *v = stack[stack.size() - 1]; - stack.pop_back(); - Edge *e = v->edges; - if (e) { - do { - if (e->target->copy != stamp) { - e->target->copy = stamp; - stack.push_back(e->target); - } - if (e->copy != stamp) { - Face *face = facePool.newObject(); - face->init(e->target, e->reverse->prev->target, v); - faces.push_back(face); - Edge *f = e; + while (stack.size() > 0) { + Vertex* v = stack[stack.size() - 1]; + stack.pop_back(); + Edge* e = v->edges; + if (e) { + do { + if (e->target->copy != stamp) { + e->target->copy = stamp; + stack.push_back(e->target); + } + if (e->copy != stamp) { + Face* face = facePool.newObject(); + face->init(e->target, e->reverse->prev->target, v); + faces.push_back(face); + Edge* f = e; - Vertex *a = NULL; - Vertex *b = NULL; - do { - if (a && b) { - int64_t vol = (v->point - ref).dot((a->point - ref).cross(b->point - ref)); - btAssert(vol >= 0); - Point32 c = v->point + a->point + b->point + ref; - hullCenterX += vol * c.x; - hullCenterY += vol * c.y; - hullCenterZ += vol * c.z; - volume += vol; - } + Vertex* a = NULL; + Vertex* b = NULL; + do { + if (a && b) { + int64_t vol = (v->point - ref).dot((a->point - ref).cross(b->point - ref)); + btAssert(vol >= 0); + Point32 c = v->point + a->point + b->point + ref; + hullCenterX += vol * c.x; + hullCenterY += vol * c.y; + hullCenterZ += vol * c.z; + volume += vol; + } - btAssert(f->copy != stamp); - f->copy = stamp; - f->face = face; + btAssert(f->copy != stamp); + f->copy = stamp; + f->face = face; - a = b; - b = f->target; + a = b; + b = f->target; - f = f->reverse->prev; - } while (f != e); - } - e = e->next; - } while (e != v->edges); - } - } + f = f->reverse->prev; + } while (f != e); + } + e = e->next; + } while (e != v->edges); + } + } - if (volume.getSign() <= 0) { - return 0; - } + if (volume.getSign() <= 0) { + return 0; + } - btVector3 hullCenter; - hullCenter[medAxis] = hullCenterX.toScalar(); - hullCenter[maxAxis] = hullCenterY.toScalar(); - hullCenter[minAxis] = hullCenterZ.toScalar(); - hullCenter /= 4 * volume.toScalar(); - hullCenter *= scaling; + btVector3 hullCenter; + hullCenter[medAxis] = hullCenterX.toScalar(); + hullCenter[maxAxis] = hullCenterY.toScalar(); + hullCenter[minAxis] = hullCenterZ.toScalar(); + hullCenter /= 4 * volume.toScalar(); + hullCenter *= scaling; - int32_t faceCount = faces.size(); + int32_t faceCount = faces.size(); - if (clampAmount > 0) { - btScalar minDist = SIMD_INFINITY; - for (int32_t i = 0; i < faceCount; i++) { - btVector3 normal = getBtNormal(faces[i]); - btScalar dist = normal.dot(toBtVector(faces[i]->origin) - hullCenter); - if (dist < minDist) { - minDist = dist; - } - } + if (clampAmount > 0) { + btScalar minDist = SIMD_INFINITY; + for (int32_t i = 0; i < faceCount; i++) { + btVector3 normal = getBtNormal(faces[i]); + btScalar dist = normal.dot(toBtVector(faces[i]->origin) - hullCenter); + if (dist < minDist) { + minDist = dist; + } + } - if (minDist <= 0) { - return 0; - } + if (minDist <= 0) { + return 0; + } - amount = btMin(amount, minDist * clampAmount); - } + amount = btMin(amount, minDist * clampAmount); + } - uint32_t seed = 243703; - for (int32_t i = 0; i < faceCount; i++, seed = 1664525 * seed + 1013904223) { - btSwap(faces[i], faces[seed % faceCount]); - } + uint32_t seed = 243703; + for (int32_t i = 0; i < faceCount; i++, seed = 1664525 * seed + 1013904223) { + btSwap(faces[i], faces[seed % faceCount]); + } 
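The loop just above reorders the faces with a tiny linear congruential generator before each one is shifted inwards, presumably so that shiftFace is not applied in a systematic construction order. A minimal sketch of the same swap pattern, assuming std::vector in place of btAlignedObjectArray:

#include <cstdint>
#include <utility>
#include <vector>

// Swap each slot with an LCG-chosen slot, as in the loop above. This is a
// biased shuffle (not Fisher-Yates), which is presumably acceptable since
// only a rough reordering of the faces is needed.
template <typename T>
void lcgShuffle(std::vector<T>& items, uint32_t seed = 243703)
{
    const int32_t n = (int32_t)items.size();
    for (int32_t i = 0; i < n; i++, seed = 1664525u * seed + 1013904223u) {
        std::swap(items[i], items[seed % n]);
    }
}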
- for (int32_t i = 0; i < faceCount; i++) { - if (!shiftFace(faces[i], amount, stack)) { - return -amount; - } - } + for (int32_t i = 0; i < faceCount; i++) { + if (!shiftFace(faces[i], amount, stack)) { + return -amount; + } + } - return amount; + return amount; } -bool btConvexHullInternal::shiftFace(Face *face, btScalar amount, btAlignedObjectArray stack) { - btVector3 origShift = getBtNormal(face) * -amount; - if (scaling[0] != 0) { - origShift[0] /= scaling[0]; - } - if (scaling[1] != 0) { - origShift[1] /= scaling[1]; - } - if (scaling[2] != 0) { - origShift[2] /= scaling[2]; - } - Point32 shift((int32_t)origShift[medAxis], (int32_t)origShift[maxAxis], (int32_t)origShift[minAxis]); - if (shift.isZero()) { - return true; - } - Point64 normal = face->getNormal(); +bool btConvexHullInternal::shiftFace(Face* face, btScalar amount, btAlignedObjectArray stack) +{ + btVector3 origShift = getBtNormal(face) * -amount; + if (scaling[0] != 0) { + origShift[0] /= scaling[0]; + } + if (scaling[1] != 0) { + origShift[1] /= scaling[1]; + } + if (scaling[2] != 0) { + origShift[2] /= scaling[2]; + } + Point32 shift((int32_t)origShift[medAxis], (int32_t)origShift[maxAxis], (int32_t)origShift[minAxis]); + if (shift.isZero()) { + return true; + } + Point64 normal = face->getNormal(); #ifdef DEBUG_CONVEX_HULL - printf("\nShrinking face (%d %d %d) (%d %d %d) (%d %d %d) by (%d %d %d)\n", - face->origin.x, face->origin.y, face->origin.z, face->dir0.x, face->dir0.y, face->dir0.z, face->dir1.x, face->dir1.y, face->dir1.z, shift.x, shift.y, shift.z); + printf("\nShrinking face (%d %d %d) (%d %d %d) (%d %d %d) by (%d %d %d)\n", + face->origin.x, face->origin.y, face->origin.z, face->dir0.x, face->dir0.y, face->dir0.z, face->dir1.x, face->dir1.y, face->dir1.z, shift.x, shift.y, shift.z); #endif - int64_t origDot = face->origin.dot(normal); - Point32 shiftedOrigin = face->origin + shift; - int64_t shiftedDot = shiftedOrigin.dot(normal); - btAssert(shiftedDot <= origDot); - if (shiftedDot >= origDot) { - return false; - } + int64_t origDot = face->origin.dot(normal); + Point32 shiftedOrigin = face->origin + shift; + int64_t shiftedDot = shiftedOrigin.dot(normal); + btAssert(shiftedDot <= origDot); + if (shiftedDot >= origDot) { + return false; + } - Edge *intersection = NULL; + Edge* intersection = NULL; - Edge *startEdge = face->nearbyVertex->edges; + Edge* startEdge = face->nearbyVertex->edges; #ifdef DEBUG_CONVEX_HULL - printf("Start edge is "); - startEdge->print(); - printf(", normal is (%lld %lld %lld), shifted dot is %lld\n", normal.x, normal.y, normal.z, shiftedDot); + printf("Start edge is "); + startEdge->print(); + printf(", normal is (%lld %lld %lld), shifted dot is %lld\n", normal.x, normal.y, normal.z, shiftedDot); #endif - Rational128 optDot = face->nearbyVertex->dot(normal); - int32_t cmp = optDot.compare(shiftedDot); + Rational128 optDot = face->nearbyVertex->dot(normal); + int32_t cmp = optDot.compare(shiftedDot); #ifdef SHOW_ITERATIONS - int32_t n = 0; + int32_t n = 0; #endif - if (cmp >= 0) { - Edge *e = startEdge; - do { + if (cmp >= 0) { + Edge* e = startEdge; + do { #ifdef SHOW_ITERATIONS - n++; + n++; #endif - Rational128 dot = e->target->dot(normal); - btAssert(dot.compare(origDot) <= 0); + Rational128 dot = e->target->dot(normal); + btAssert(dot.compare(origDot) <= 0); #ifdef DEBUG_CONVEX_HULL - printf("Moving downwards, edge is "); - e->print(); - printf(", dot is %f (%f %lld)\n", (float)dot.toScalar(), (float)optDot.toScalar(), shiftedDot); + printf("Moving downwards, edge is "); + 
e->print(); + printf(", dot is %f (%f %lld)\n", (float)dot.toScalar(), (float)optDot.toScalar(), shiftedDot); #endif - if (dot.compare(optDot) < 0) { - int32_t c = dot.compare(shiftedDot); - optDot = dot; - e = e->reverse; - startEdge = e; - if (c < 0) { - intersection = e; - break; - } - cmp = c; - } - e = e->prev; - } while (e != startEdge); + if (dot.compare(optDot) < 0) { + int32_t c = dot.compare(shiftedDot); + optDot = dot; + e = e->reverse; + startEdge = e; + if (c < 0) { + intersection = e; + break; + } + cmp = c; + } + e = e->prev; + } while (e != startEdge); - if (!intersection) { - return false; - } - } else { - Edge *e = startEdge; - do { + if (!intersection) { + return false; + } + } + else { + Edge* e = startEdge; + do { #ifdef SHOW_ITERATIONS - n++; + n++; #endif - Rational128 dot = e->target->dot(normal); - btAssert(dot.compare(origDot) <= 0); + Rational128 dot = e->target->dot(normal); + btAssert(dot.compare(origDot) <= 0); #ifdef DEBUG_CONVEX_HULL - printf("Moving upwards, edge is "); - e->print(); - printf(", dot is %f (%f %lld)\n", (float)dot.toScalar(), (float)optDot.toScalar(), shiftedDot); + printf("Moving upwards, edge is "); + e->print(); + printf(", dot is %f (%f %lld)\n", (float)dot.toScalar(), (float)optDot.toScalar(), shiftedDot); #endif - if (dot.compare(optDot) > 0) { - cmp = dot.compare(shiftedDot); - if (cmp >= 0) { - intersection = e; - break; - } - optDot = dot; - e = e->reverse; - startEdge = e; - } - e = e->prev; - } while (e != startEdge); + if (dot.compare(optDot) > 0) { + cmp = dot.compare(shiftedDot); + if (cmp >= 0) { + intersection = e; + break; + } + optDot = dot; + e = e->reverse; + startEdge = e; + } + e = e->prev; + } while (e != startEdge); - if (!intersection) { - return true; - } - } + if (!intersection) { + return true; + } + } #ifdef SHOW_ITERATIONS - printf("Needed %d iterations to find initial intersection\n", n); + printf("Needed %d iterations to find initial intersection\n", n); #endif - if (cmp == 0) { - Edge *e = intersection->reverse->next; + if (cmp == 0) { + Edge* e = intersection->reverse->next; #ifdef SHOW_ITERATIONS - n = 0; + n = 0; #endif - while (e->target->dot(normal).compare(shiftedDot) <= 0) { + while (e->target->dot(normal).compare(shiftedDot) <= 0) { #ifdef SHOW_ITERATIONS - n++; + n++; #endif - e = e->next; - if (e == intersection->reverse) { - return true; - } + e = e->next; + if (e == intersection->reverse) { + return true; + } #ifdef DEBUG_CONVEX_HULL - printf("Checking for outwards edge, current edge is "); - e->print(); - printf("\n"); + printf("Checking for outwards edge, current edge is "); + e->print(); + printf("\n"); #endif - } + } #ifdef SHOW_ITERATIONS - printf("Needed %d iterations to check for complete containment\n", n); + printf("Needed %d iterations to check for complete containment\n", n); #endif - } + } - Edge *firstIntersection = NULL; - Edge *faceEdge = NULL; - Edge *firstFaceEdge = NULL; + Edge* firstIntersection = NULL; + Edge* faceEdge = NULL; + Edge* firstFaceEdge = NULL; #ifdef SHOW_ITERATIONS - int32_t m = 0; + int32_t m = 0; #endif - while (true) { + while (true) { #ifdef SHOW_ITERATIONS - m++; + m++; #endif #ifdef DEBUG_CONVEX_HULL - printf("Intersecting edge is "); - intersection->print(); - printf("\n"); + printf("Intersecting edge is "); + intersection->print(); + printf("\n"); #endif - if (cmp == 0) { - Edge *e = intersection->reverse->next; - startEdge = e; + if (cmp == 0) { + Edge* e = intersection->reverse->next; + startEdge = e; #ifdef SHOW_ITERATIONS - n = 0; + n = 0; #endif - 
while (true) { + while (true) { #ifdef SHOW_ITERATIONS - n++; + n++; #endif - if (e->target->dot(normal).compare(shiftedDot) >= 0) { - break; - } - intersection = e->reverse; - e = e->next; - if (e == startEdge) { - return true; - } - } + if (e->target->dot(normal).compare(shiftedDot) >= 0) { + break; + } + intersection = e->reverse; + e = e->next; + if (e == startEdge) { + return true; + } + } #ifdef SHOW_ITERATIONS - printf("Needed %d iterations to advance intersection\n", n); + printf("Needed %d iterations to advance intersection\n", n); #endif - } + } #ifdef DEBUG_CONVEX_HULL - printf("Advanced intersecting edge to "); - intersection->print(); - printf(", cmp = %d\n", cmp); + printf("Advanced intersecting edge to "); + intersection->print(); + printf(", cmp = %d\n", cmp); #endif - if (!firstIntersection) { - firstIntersection = intersection; - } else if (intersection == firstIntersection) { - break; - } + if (!firstIntersection) { + firstIntersection = intersection; + } + else if (intersection == firstIntersection) { + break; + } - int32_t prevCmp = cmp; - Edge *prevIntersection = intersection; - Edge *prevFaceEdge = faceEdge; + int32_t prevCmp = cmp; + Edge* prevIntersection = intersection; + Edge* prevFaceEdge = faceEdge; - Edge *e = intersection->reverse; + Edge* e = intersection->reverse; #ifdef SHOW_ITERATIONS - n = 0; + n = 0; #endif - while (true) { + while (true) { #ifdef SHOW_ITERATIONS - n++; + n++; #endif - e = e->reverse->prev; - btAssert(e != intersection->reverse); - cmp = e->target->dot(normal).compare(shiftedDot); + e = e->reverse->prev; + btAssert(e != intersection->reverse); + cmp = e->target->dot(normal).compare(shiftedDot); #ifdef DEBUG_CONVEX_HULL - printf("Testing edge "); - e->print(); - printf(" -> cmp = %d\n", cmp); + printf("Testing edge "); + e->print(); + printf(" -> cmp = %d\n", cmp); #endif - if (cmp >= 0) { - intersection = e; - break; - } - } + if (cmp >= 0) { + intersection = e; + break; + } + } #ifdef SHOW_ITERATIONS - printf("Needed %d iterations to find other intersection of face\n", n); + printf("Needed %d iterations to find other intersection of face\n", n); #endif - if (cmp > 0) { - Vertex *removed = intersection->target; - e = intersection->reverse; - if (e->prev == e) { - removed->edges = NULL; - } else { - removed->edges = e->prev; - e->prev->link(e->next); - e->link(e); - } + if (cmp > 0) { + Vertex* removed = intersection->target; + e = intersection->reverse; + if (e->prev == e) { + removed->edges = NULL; + } + else { + removed->edges = e->prev; + e->prev->link(e->next); + e->link(e); + } #ifdef DEBUG_CONVEX_HULL - printf("1: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z); + printf("1: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z); #endif - Point64 n0 = intersection->face->getNormal(); - Point64 n1 = intersection->reverse->face->getNormal(); - int64_t m00 = face->dir0.dot(n0); - int64_t m01 = face->dir1.dot(n0); - int64_t m10 = face->dir0.dot(n1); - int64_t m11 = face->dir1.dot(n1); - int64_t r0 = (intersection->face->origin - shiftedOrigin).dot(n0); - int64_t r1 = (intersection->reverse->face->origin - shiftedOrigin).dot(n1); - Int128 det = Int128::mul(m00, m11) - Int128::mul(m01, m10); - btAssert(det.getSign() != 0); - Vertex *v = vertexPool.newObject(); - v->point.index = -1; - v->copy = -1; - v->point128 = PointR128(Int128::mul(face->dir0.x * r0, m11) - Int128::mul(face->dir0.x * r1, m01) + Int128::mul(face->dir1.x * r1, m00) - 
Int128::mul(face->dir1.x * r0, m10) + det * shiftedOrigin.x, - Int128::mul(face->dir0.y * r0, m11) - Int128::mul(face->dir0.y * r1, m01) + Int128::mul(face->dir1.y * r1, m00) - Int128::mul(face->dir1.y * r0, m10) + det * shiftedOrigin.y, - Int128::mul(face->dir0.z * r0, m11) - Int128::mul(face->dir0.z * r1, m01) + Int128::mul(face->dir1.z * r1, m00) - Int128::mul(face->dir1.z * r0, m10) + det * shiftedOrigin.z, - det); - v->point.x = (int32_t)v->point128.xvalue(); - v->point.y = (int32_t)v->point128.yvalue(); - v->point.z = (int32_t)v->point128.zvalue(); - intersection->target = v; - v->edges = e; + Point64 n0 = intersection->face->getNormal(); + Point64 n1 = intersection->reverse->face->getNormal(); + int64_t m00 = face->dir0.dot(n0); + int64_t m01 = face->dir1.dot(n0); + int64_t m10 = face->dir0.dot(n1); + int64_t m11 = face->dir1.dot(n1); + int64_t r0 = (intersection->face->origin - shiftedOrigin).dot(n0); + int64_t r1 = (intersection->reverse->face->origin - shiftedOrigin).dot(n1); + Int128 det = Int128::mul(m00, m11) - Int128::mul(m01, m10); + btAssert(det.getSign() != 0); + Vertex* v = vertexPool.newObject(); + v->point.index = -1; + v->copy = -1; + v->point128 = PointR128(Int128::mul(face->dir0.x * r0, m11) - Int128::mul(face->dir0.x * r1, m01) + + Int128::mul(face->dir1.x * r1, m00) - Int128::mul(face->dir1.x * r0, m10) + det * shiftedOrigin.x, + Int128::mul(face->dir0.y * r0, m11) - Int128::mul(face->dir0.y * r1, m01) + + Int128::mul(face->dir1.y * r1, m00) - Int128::mul(face->dir1.y * r0, m10) + det * shiftedOrigin.y, + Int128::mul(face->dir0.z * r0, m11) - Int128::mul(face->dir0.z * r1, m01) + + Int128::mul(face->dir1.z * r1, m00) - Int128::mul(face->dir1.z * r0, m10) + det * shiftedOrigin.z, + det); + v->point.x = (int32_t)v->point128.xvalue(); + v->point.y = (int32_t)v->point128.yvalue(); + v->point.z = (int32_t)v->point128.zvalue(); + intersection->target = v; + v->edges = e; - stack.push_back(v); - stack.push_back(removed); - stack.push_back(NULL); - } + stack.push_back(v); + stack.push_back(removed); + stack.push_back(NULL); + } - if (cmp || prevCmp || (prevIntersection->reverse->next->target != intersection->target)) { - faceEdge = newEdgePair(prevIntersection->target, intersection->target); - if (prevCmp == 0) { - faceEdge->link(prevIntersection->reverse->next); - } - if ((prevCmp == 0) || prevFaceEdge) { - prevIntersection->reverse->link(faceEdge); - } - if (cmp == 0) { - intersection->reverse->prev->link(faceEdge->reverse); - } - faceEdge->reverse->link(intersection->reverse); - } else { - faceEdge = prevIntersection->reverse->next; - } + if (cmp || prevCmp || (prevIntersection->reverse->next->target != intersection->target)) { + faceEdge = newEdgePair(prevIntersection->target, intersection->target); + if (prevCmp == 0) { + faceEdge->link(prevIntersection->reverse->next); + } + if ((prevCmp == 0) || prevFaceEdge) { + prevIntersection->reverse->link(faceEdge); + } + if (cmp == 0) { + intersection->reverse->prev->link(faceEdge->reverse); + } + faceEdge->reverse->link(intersection->reverse); + } + else { + faceEdge = prevIntersection->reverse->next; + } - if (prevFaceEdge) { - if (prevCmp > 0) { - faceEdge->link(prevFaceEdge->reverse); - } else if (faceEdge != prevFaceEdge->reverse) { - stack.push_back(prevFaceEdge->target); - while (faceEdge->next != prevFaceEdge->reverse) { - Vertex *removed = faceEdge->next->target; - removeEdgePair(faceEdge->next); - stack.push_back(removed); + if (prevFaceEdge) { + if (prevCmp > 0) { + faceEdge->link(prevFaceEdge->reverse); + } + else 
if (faceEdge != prevFaceEdge->reverse) { + stack.push_back(prevFaceEdge->target); + while (faceEdge->next != prevFaceEdge->reverse) { + Vertex* removed = faceEdge->next->target; + removeEdgePair(faceEdge->next); + stack.push_back(removed); #ifdef DEBUG_CONVEX_HULL - printf("2: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z); + printf("2: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z); #endif - } - stack.push_back(NULL); - } - } - faceEdge->face = face; - faceEdge->reverse->face = intersection->face; + } + stack.push_back(NULL); + } + } + faceEdge->face = face; + faceEdge->reverse->face = intersection->face; - if (!firstFaceEdge) { - firstFaceEdge = faceEdge; - } - } + if (!firstFaceEdge) { + firstFaceEdge = faceEdge; + } + } #ifdef SHOW_ITERATIONS - printf("Needed %d iterations to process all intersections\n", m); + printf("Needed %d iterations to process all intersections\n", m); #endif - if (cmp > 0) { - firstFaceEdge->reverse->target = faceEdge->target; - firstIntersection->reverse->link(firstFaceEdge); - firstFaceEdge->link(faceEdge->reverse); - } else if (firstFaceEdge != faceEdge->reverse) { - stack.push_back(faceEdge->target); - while (firstFaceEdge->next != faceEdge->reverse) { - Vertex *removed = firstFaceEdge->next->target; - removeEdgePair(firstFaceEdge->next); - stack.push_back(removed); + if (cmp > 0) { + firstFaceEdge->reverse->target = faceEdge->target; + firstIntersection->reverse->link(firstFaceEdge); + firstFaceEdge->link(faceEdge->reverse); + } + else if (firstFaceEdge != faceEdge->reverse) { + stack.push_back(faceEdge->target); + while (firstFaceEdge->next != faceEdge->reverse) { + Vertex* removed = firstFaceEdge->next->target; + removeEdgePair(firstFaceEdge->next); + stack.push_back(removed); #ifdef DEBUG_CONVEX_HULL - printf("3: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z); + printf("3: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z); #endif - } - stack.push_back(NULL); - } + } + stack.push_back(NULL); + } - btAssert(stack.size() > 0); - vertexList = stack[0]; + btAssert(stack.size() > 0); + vertexList = stack[0]; #ifdef DEBUG_CONVEX_HULL - printf("Removing part\n"); + printf("Removing part\n"); #endif #ifdef SHOW_ITERATIONS - n = 0; + n = 0; #endif - int32_t pos = 0; - while (pos < stack.size()) { - int32_t end = stack.size(); - while (pos < end) { - Vertex *kept = stack[pos++]; + int32_t pos = 0; + while (pos < stack.size()) { + int32_t end = stack.size(); + while (pos < end) { + Vertex* kept = stack[pos++]; #ifdef DEBUG_CONVEX_HULL - kept->print(); + kept->print(); #endif - bool deeper = false; - Vertex *removed; - while ((removed = stack[pos++]) != NULL) { + bool deeper = false; + Vertex* removed; + while ((removed = stack[pos++]) != NULL) { #ifdef SHOW_ITERATIONS - n++; + n++; #endif - kept->receiveNearbyFaces(removed); - while (removed->edges) { - if (!deeper) { - deeper = true; - stack.push_back(kept); - } - stack.push_back(removed->edges->target); - removeEdgePair(removed->edges); - } - } - if (deeper) { - stack.push_back(NULL); - } - } - } + kept->receiveNearbyFaces(removed); + while (removed->edges) { + if (!deeper) { + deeper = true; + stack.push_back(kept); + } + stack.push_back(removed->edges->target); + removeEdgePair(removed->edges); + } + } + if (deeper) { + stack.push_back(NULL); + } + } + } #ifdef SHOW_ITERATIONS - printf("Needed %d iterations to remove part\n", n); + 
printf("Needed %d iterations to remove part\n", n); #endif - stack.resize(0); - face->origin = shiftedOrigin; + stack.resize(0); + face->origin = shiftedOrigin; - return true; + return true; } -static int32_t getVertexCopy(btConvexHullInternal::Vertex *vertex, btAlignedObjectArray &vertices) { - int32_t index = vertex->copy; - if (index < 0) { - index = vertices.size(); - vertex->copy = index; - vertices.push_back(vertex); +static int32_t getVertexCopy(btConvexHullInternal::Vertex* vertex, btAlignedObjectArray& vertices) +{ + int32_t index = vertex->copy; + if (index < 0) { + index = vertices.size(); + vertex->copy = index; + vertices.push_back(vertex); #ifdef DEBUG_CONVEX_HULL - printf("Vertex %d gets index *%d\n", vertex->point.index, index); + printf("Vertex %d gets index *%d\n", vertex->point.index, index); #endif - } - return index; + } + return index; } -btScalar btConvexHullComputer::compute(const void *coords, bool doubleCoords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) { - if (count <= 0) { - vertices.clear(); - edges.clear(); - faces.clear(); - return 0; - } +btScalar btConvexHullComputer::compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) +{ + if (count <= 0) { + vertices.clear(); + edges.clear(); + faces.clear(); + return 0; + } - btConvexHullInternal hull; - hull.compute(coords, doubleCoords, stride, count); + btConvexHullInternal hull; + hull.compute(coords, doubleCoords, stride, count); - btScalar shift = 0; - if ((shrink > 0) && ((shift = hull.shrink(shrink, shrinkClamp)) < 0)) { - vertices.clear(); - edges.clear(); - faces.clear(); - return shift; - } + btScalar shift = 0; + if ((shrink > 0) && ((shift = hull.shrink(shrink, shrinkClamp)) < 0)) { + vertices.clear(); + edges.clear(); + faces.clear(); + return shift; + } - vertices.resize(0); - edges.resize(0); - faces.resize(0); + vertices.resize(0); + edges.resize(0); + faces.resize(0); - btAlignedObjectArray oldVertices; - getVertexCopy(hull.vertexList, oldVertices); - int32_t copied = 0; - while (copied < oldVertices.size()) { - btConvexHullInternal::Vertex *v = oldVertices[copied]; - vertices.push_back(hull.getCoordinates(v)); - btConvexHullInternal::Edge *firstEdge = v->edges; - if (firstEdge) { - int32_t firstCopy = -1; - int32_t prevCopy = -1; - btConvexHullInternal::Edge *e = firstEdge; - do { - if (e->copy < 0) { - int32_t s = edges.size(); - edges.push_back(Edge()); - edges.push_back(Edge()); - Edge *c = &edges[s]; - Edge *r = &edges[s + 1]; - e->copy = s; - e->reverse->copy = s + 1; - c->reverse = 1; - r->reverse = -1; - c->targetVertex = getVertexCopy(e->target, oldVertices); - r->targetVertex = copied; + btAlignedObjectArray oldVertices; + getVertexCopy(hull.vertexList, oldVertices); + int32_t copied = 0; + while (copied < oldVertices.size()) { + btConvexHullInternal::Vertex* v = oldVertices[copied]; + vertices.push_back(hull.getCoordinates(v)); + btConvexHullInternal::Edge* firstEdge = v->edges; + if (firstEdge) { + int32_t firstCopy = -1; + int32_t prevCopy = -1; + btConvexHullInternal::Edge* e = firstEdge; + do { + if (e->copy < 0) { + int32_t s = edges.size(); + edges.push_back(Edge()); + edges.push_back(Edge()); + Edge* c = &edges[s]; + Edge* r = &edges[s + 1]; + e->copy = s; + e->reverse->copy = s + 1; + c->reverse = 1; + r->reverse = -1; + c->targetVertex = getVertexCopy(e->target, oldVertices); + r->targetVertex = copied; #ifdef DEBUG_CONVEX_HULL - printf(" CREATE: Vertex *%d has edge to *%d\n", copied, 
c->getTargetVertex()); + printf(" CREATE: Vertex *%d has edge to *%d\n", copied, c->getTargetVertex()); #endif - } - if (prevCopy >= 0) { - edges[e->copy].next = prevCopy - e->copy; - } else { - firstCopy = e->copy; - } - prevCopy = e->copy; - e = e->next; - } while (e != firstEdge); - edges[firstCopy].next = prevCopy - firstCopy; - } - copied++; - } + } + if (prevCopy >= 0) { + edges[e->copy].next = prevCopy - e->copy; + } + else { + firstCopy = e->copy; + } + prevCopy = e->copy; + e = e->next; + } while (e != firstEdge); + edges[firstCopy].next = prevCopy - firstCopy; + } + copied++; + } - for (int32_t i = 0; i < copied; i++) { - btConvexHullInternal::Vertex *v = oldVertices[i]; - btConvexHullInternal::Edge *firstEdge = v->edges; - if (firstEdge) { - btConvexHullInternal::Edge *e = firstEdge; - do { - if (e->copy >= 0) { + for (int32_t i = 0; i < copied; i++) { + btConvexHullInternal::Vertex* v = oldVertices[i]; + btConvexHullInternal::Edge* firstEdge = v->edges; + if (firstEdge) { + btConvexHullInternal::Edge* e = firstEdge; + do { + if (e->copy >= 0) { #ifdef DEBUG_CONVEX_HULL - printf("Vertex *%d has edge to *%d\n", i, edges[e->copy].getTargetVertex()); + printf("Vertex *%d has edge to *%d\n", i, edges[e->copy].getTargetVertex()); #endif - faces.push_back(e->copy); - btConvexHullInternal::Edge *f = e; - do { + faces.push_back(e->copy); + btConvexHullInternal::Edge* f = e; + do { #ifdef DEBUG_CONVEX_HULL - printf(" Face *%d\n", edges[f->copy].getTargetVertex()); + printf(" Face *%d\n", edges[f->copy].getTargetVertex()); #endif - f->copy = -1; - f = f->reverse->prev; - } while (f != e); - } - e = e->next; - } while (e != firstEdge); - } - } + f->copy = -1; + f = f->reverse->prev; + } while (f != e); + } + e = e->next; + } while (e != firstEdge); + } + } - return shift; + return shift; } - -//GODOT ADDITION -}; -// From 668439d16a5c30587ab77461fb3211306ad62587 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Verschelde?= Date: Thu, 11 Apr 2019 17:48:49 +0200 Subject: [PATCH 2/3] vhacd: Reapply downstream changes to namespace conflicting bullet code Also adding a patch to easily identify and reapply them. --- thirdparty/README.md | 5 + thirdparty/vhacd/0001-bullet-namespace.patch | 213 ++++++++++++++++++ thirdparty/vhacd/inc/btAlignedAllocator.h | 9 + thirdparty/vhacd/inc/btAlignedObjectArray.h | 8 + thirdparty/vhacd/inc/btConvexHullComputer.h | 8 + thirdparty/vhacd/inc/btMinMax.h | 8 + thirdparty/vhacd/inc/btScalar.h | 17 ++ thirdparty/vhacd/inc/btVector3.h | 8 + thirdparty/vhacd/src/btAlignedAllocator.cpp | 8 + thirdparty/vhacd/src/btConvexHullComputer.cpp | 8 + 10 files changed, 292 insertions(+) create mode 100644 thirdparty/vhacd/0001-bullet-namespace.patch diff --git a/thirdparty/README.md b/thirdparty/README.md index 6e8be5d5ee0..6997a9ba96d 100644 --- a/thirdparty/README.md +++ b/thirdparty/README.md @@ -539,6 +539,11 @@ Files extracted from upstream source: - From `src/VHACD_Lib/`: `inc`, `public` and `src` - `LICENSE` +Some downstream changes have been made and are identified by +`// -- GODOT start --` and `// -- GODOT end --` comments. +They can be reapplied using the patches included in the `vhacd` +folder. 
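
For readers wondering why these namespace markers are needed at all: Godot also bundles Bullet itself, so the bt* math classes copied into V-HACD would otherwise collide with the real Bullet symbols when both libraries end up in the same binary. The snippet below is a minimal illustrative sketch (hypothetical code, not taken from either library; the version numbers are placeholders) of how wrapping one copy in `namespace VHACD` keeps the two definitions apart, which is exactly what the 0001-bullet-namespace.patch shown next does:

    // Two copies of the same bt* symbol can coexist once one of them is
    // namespaced; this mirrors the effect of the GODOT start/end wrappers.
    #include <cstdio>

    // Stand-in for Godot's own bundled Bullet, which keeps the global namespace.
    inline int btGetVersion() { return 288; }

    namespace VHACD {
    // Stand-in for the V-HACD copy after 0001-bullet-namespace.patch.
    inline int btGetVersion() { return 279; }
    } // namespace VHACD

    int main() {
        std::printf("bullet: %d, vhacd: %d\n", btGetVersion(), VHACD::btGetVersion());
        return 0;
    }
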
+ ## zlib diff --git a/thirdparty/vhacd/0001-bullet-namespace.patch b/thirdparty/vhacd/0001-bullet-namespace.patch new file mode 100644 index 00000000000..cfb1ffeff07 --- /dev/null +++ b/thirdparty/vhacd/0001-bullet-namespace.patch @@ -0,0 +1,213 @@ +diff --git a/thirdparty/vhacd/inc/btAlignedAllocator.h b/thirdparty/vhacd/inc/btAlignedAllocator.h +index 11f6e12dc..94e71d512 100644 +--- a/thirdparty/vhacd/inc/btAlignedAllocator.h ++++ b/thirdparty/vhacd/inc/btAlignedAllocator.h +@@ -21,6 +21,11 @@ subject to the following restrictions: + ///that is better portable and more predictable + + #include "btScalar.h" ++ ++// -- GODOT start -- ++namespace VHACD { ++// -- GODOT end -- ++ + //#define BT_DEBUG_MEMORY_ALLOCATIONS 1 + #ifdef BT_DEBUG_MEMORY_ALLOCATIONS + +@@ -101,4 +106,8 @@ public: + friend bool operator==(const self_type&, const self_type&) { return true; } + }; + ++// -- GODOT start -- ++}; // namespace VHACD ++// -- GODOT end -- ++ + #endif //BT_ALIGNED_ALLOCATOR +diff --git a/thirdparty/vhacd/inc/btAlignedObjectArray.h b/thirdparty/vhacd/inc/btAlignedObjectArray.h +index e6620adf6..1ce03d21b 100644 +--- a/thirdparty/vhacd/inc/btAlignedObjectArray.h ++++ b/thirdparty/vhacd/inc/btAlignedObjectArray.h +@@ -38,6 +38,10 @@ subject to the following restrictions: + #include //for placement new + #endif //BT_USE_PLACEMENT_NEW + ++// -- GODOT start -- ++namespace VHACD { ++// -- GODOT end -- ++ + ///The btAlignedObjectArray template class uses a subset of the stl::vector interface for its methods + ///It is developed to replace stl::vector to avoid portability issues, including STL alignment issues to add SIMD/SSE data + template +@@ -445,4 +449,8 @@ public: + } + }; + ++// -- GODOT start -- ++}; // namespace VHACD ++// -- GODOT end -- ++ + #endif //BT_OBJECT_ARRAY__ +diff --git a/thirdparty/vhacd/inc/btConvexHullComputer.h b/thirdparty/vhacd/inc/btConvexHullComputer.h +index 3c5075c2c..04bb96f64 100644 +--- a/thirdparty/vhacd/inc/btConvexHullComputer.h ++++ b/thirdparty/vhacd/inc/btConvexHullComputer.h +@@ -18,6 +18,10 @@ subject to the following restrictions: + #include "btAlignedObjectArray.h" + #include "btVector3.h" + ++// -- GODOT start -- ++namespace VHACD { ++// -- GODOT end -- ++ + /// Convex hull implementation based on Preparata and Hong + /// See http://code.google.com/p/bullet/issues/detail?id=275 + /// Ole Kniemeyer, MAXON Computer GmbH +@@ -94,4 +98,8 @@ public: + } + }; + ++// -- GODOT start -- ++}; // namespace VHACD ++// -- GODOT end -- ++ + #endif //BT_CONVEX_HULL_COMPUTER_H +diff --git a/thirdparty/vhacd/inc/btMinMax.h b/thirdparty/vhacd/inc/btMinMax.h +index 40b0ceb6e..9bc1e1c77 100644 +--- a/thirdparty/vhacd/inc/btMinMax.h ++++ b/thirdparty/vhacd/inc/btMinMax.h +@@ -17,6 +17,10 @@ subject to the following restrictions: + + #include "btScalar.h" + ++// -- GODOT start -- ++namespace VHACD { ++// -- GODOT end -- ++ + template + SIMD_FORCE_INLINE const T& btMin(const T& a, const T& b) + { +@@ -62,4 +66,8 @@ SIMD_FORCE_INLINE void btClamp(T& a, const T& lb, const T& ub) + } + } + ++// -- GODOT start -- ++}; // namespace VHACD ++// -- GODOT end -- ++ + #endif //BT_GEN_MINMAX_H +diff --git a/thirdparty/vhacd/inc/btScalar.h b/thirdparty/vhacd/inc/btScalar.h +index b814474bd..617fd7c44 100644 +--- a/thirdparty/vhacd/inc/btScalar.h ++++ b/thirdparty/vhacd/inc/btScalar.h +@@ -28,11 +28,19 @@ subject to the following restrictions: + /* SVN $Revision$ on $Date$ from http://bullet.googlecode.com*/ + #define BT_BULLET_VERSION 279 + ++// -- GODOT start -- ++namespace VHACD { ++// -- 
GODOT end -- ++ + inline int32_t btGetVersion() + { + return BT_BULLET_VERSION; + } + ++// -- GODOT start -- ++}; // namespace VHACD ++// -- GODOT end -- ++ + #if defined(DEBUG) || defined(_DEBUG) + #define BT_DEBUG + #endif +@@ -199,6 +207,10 @@ inline int32_t btGetVersion() + #endif //__CELLOS_LV2__ + #endif + ++// -- GODOT start -- ++namespace VHACD { ++// -- GODOT end -- ++ + ///The btScalar type abstracts floating point numbers, to easily switch between double and single floating point precision. + #if defined(BT_USE_DOUBLE_PRECISION) + typedef double btScalar; +@@ -530,4 +542,9 @@ struct btTypedObject { + return m_objectType; + } + }; ++ ++// -- GODOT start -- ++}; // namespace VHACD ++// -- GODOT end -- ++ + #endif //BT_SCALAR_H +diff --git a/thirdparty/vhacd/inc/btVector3.h b/thirdparty/vhacd/inc/btVector3.h +index 0f2fefbbd..4ed971673 100644 +--- a/thirdparty/vhacd/inc/btVector3.h ++++ b/thirdparty/vhacd/inc/btVector3.h +@@ -26,6 +26,10 @@ subject to the following restrictions: + #define btVector3DataName "btVector3FloatData" + #endif //BT_USE_DOUBLE_PRECISION + ++// -- GODOT start -- ++namespace VHACD { ++// -- GODOT end -- ++ + /**@brief btVector3 can be used to represent 3D points and vectors. + * It has an un-used w component to suit 16-byte alignment when btVector3 is stored in containers. This extra component can be used by derived classes (Quaternion?) or by user + * Ideally, this class should be replaced by a platform optimized SIMD version that keeps the data in registers +@@ -712,4 +716,8 @@ SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3Data& dataIn + m_floats[i] = dataIn.m_floats[i]; + } + ++// -- GODOT start -- ++}; // namespace VHACD ++// -- GODOT end -- ++ + #endif //BT_VECTOR3_H +diff --git a/thirdparty/vhacd/src/btAlignedAllocator.cpp b/thirdparty/vhacd/src/btAlignedAllocator.cpp +index 11d594f6c..ce0e7f26f 100644 +--- a/thirdparty/vhacd/src/btAlignedAllocator.cpp ++++ b/thirdparty/vhacd/src/btAlignedAllocator.cpp +@@ -15,6 +15,10 @@ subject to the following restrictions: + + #include "btAlignedAllocator.h" + ++// -- GODOT start -- ++namespace VHACD { ++// -- GODOT end -- ++ + #ifdef _MSC_VER + #pragma warning(disable:4311 4302) + #endif +@@ -177,4 +181,8 @@ void btAlignedFreeInternal(void* ptr) + sAlignedFreeFunc(ptr); + } + ++// -- GODOT start -- ++}; // namespace VHACD ++// -- GODOT end -- ++ + #endif //BT_DEBUG_MEMORY_ALLOCATIONS +diff --git a/thirdparty/vhacd/src/btConvexHullComputer.cpp b/thirdparty/vhacd/src/btConvexHullComputer.cpp +index d3d749adb..8ab34af2a 100644 +--- a/thirdparty/vhacd/src/btConvexHullComputer.cpp ++++ b/thirdparty/vhacd/src/btConvexHullComputer.cpp +@@ -49,6 +49,10 @@ typedef unsigned long long int32_t uint64_t; + #include + #endif + ++// -- GODOT start -- ++namespace VHACD { ++// -- GODOT end -- ++ + // Convex hull implementation based on Preparata and Hong + // Ole Kniemeyer, MAXON Computer GmbH + class btConvexHullInternal { +@@ -2477,3 +2481,7 @@ btScalar btConvexHullComputer::compute(const void* coords, bool doubleCoords, in + + return shift; + } ++ ++// -- GODOT start -- ++}; // namespace VHACD ++// -- GODOT end -- diff --git a/thirdparty/vhacd/inc/btAlignedAllocator.h b/thirdparty/vhacd/inc/btAlignedAllocator.h index 11f6e12dcaf..94e71d51250 100644 --- a/thirdparty/vhacd/inc/btAlignedAllocator.h +++ b/thirdparty/vhacd/inc/btAlignedAllocator.h @@ -21,6 +21,11 @@ subject to the following restrictions: ///that is better portable and more predictable #include "btScalar.h" + +// -- GODOT start -- +namespace 
VHACD { +// -- GODOT end -- + //#define BT_DEBUG_MEMORY_ALLOCATIONS 1 #ifdef BT_DEBUG_MEMORY_ALLOCATIONS @@ -101,4 +106,8 @@ public: friend bool operator==(const self_type&, const self_type&) { return true; } }; +// -- GODOT start -- +}; // namespace VHACD +// -- GODOT end -- + #endif //BT_ALIGNED_ALLOCATOR diff --git a/thirdparty/vhacd/inc/btAlignedObjectArray.h b/thirdparty/vhacd/inc/btAlignedObjectArray.h index e6620adf6fa..1ce03d21bce 100644 --- a/thirdparty/vhacd/inc/btAlignedObjectArray.h +++ b/thirdparty/vhacd/inc/btAlignedObjectArray.h @@ -38,6 +38,10 @@ subject to the following restrictions: #include //for placement new #endif //BT_USE_PLACEMENT_NEW +// -- GODOT start -- +namespace VHACD { +// -- GODOT end -- + ///The btAlignedObjectArray template class uses a subset of the stl::vector interface for its methods ///It is developed to replace stl::vector to avoid portability issues, including STL alignment issues to add SIMD/SSE data template @@ -445,4 +449,8 @@ public: } }; +// -- GODOT start -- +}; // namespace VHACD +// -- GODOT end -- + #endif //BT_OBJECT_ARRAY__ diff --git a/thirdparty/vhacd/inc/btConvexHullComputer.h b/thirdparty/vhacd/inc/btConvexHullComputer.h index 3c5075c2cbb..04bb96f64aa 100644 --- a/thirdparty/vhacd/inc/btConvexHullComputer.h +++ b/thirdparty/vhacd/inc/btConvexHullComputer.h @@ -18,6 +18,10 @@ subject to the following restrictions: #include "btAlignedObjectArray.h" #include "btVector3.h" +// -- GODOT start -- +namespace VHACD { +// -- GODOT end -- + /// Convex hull implementation based on Preparata and Hong /// See http://code.google.com/p/bullet/issues/detail?id=275 /// Ole Kniemeyer, MAXON Computer GmbH @@ -94,4 +98,8 @@ public: } }; +// -- GODOT start -- +}; // namespace VHACD +// -- GODOT end -- + #endif //BT_CONVEX_HULL_COMPUTER_H diff --git a/thirdparty/vhacd/inc/btMinMax.h b/thirdparty/vhacd/inc/btMinMax.h index 40b0ceb6eda..9bc1e1c7707 100644 --- a/thirdparty/vhacd/inc/btMinMax.h +++ b/thirdparty/vhacd/inc/btMinMax.h @@ -17,6 +17,10 @@ subject to the following restrictions: #include "btScalar.h" +// -- GODOT start -- +namespace VHACD { +// -- GODOT end -- + template SIMD_FORCE_INLINE const T& btMin(const T& a, const T& b) { @@ -62,4 +66,8 @@ SIMD_FORCE_INLINE void btClamp(T& a, const T& lb, const T& ub) } } +// -- GODOT start -- +}; // namespace VHACD +// -- GODOT end -- + #endif //BT_GEN_MINMAX_H diff --git a/thirdparty/vhacd/inc/btScalar.h b/thirdparty/vhacd/inc/btScalar.h index b814474bdff..617fd7c44f5 100644 --- a/thirdparty/vhacd/inc/btScalar.h +++ b/thirdparty/vhacd/inc/btScalar.h @@ -28,11 +28,19 @@ subject to the following restrictions: /* SVN $Revision$ on $Date$ from http://bullet.googlecode.com*/ #define BT_BULLET_VERSION 279 +// -- GODOT start -- +namespace VHACD { +// -- GODOT end -- + inline int32_t btGetVersion() { return BT_BULLET_VERSION; } +// -- GODOT start -- +}; // namespace VHACD +// -- GODOT end -- + #if defined(DEBUG) || defined(_DEBUG) #define BT_DEBUG #endif @@ -199,6 +207,10 @@ inline int32_t btGetVersion() #endif //__CELLOS_LV2__ #endif +// -- GODOT start -- +namespace VHACD { +// -- GODOT end -- + ///The btScalar type abstracts floating point numbers, to easily switch between double and single floating point precision. 
#if defined(BT_USE_DOUBLE_PRECISION) typedef double btScalar; @@ -530,4 +542,9 @@ struct btTypedObject { return m_objectType; } }; + +// -- GODOT start -- +}; // namespace VHACD +// -- GODOT end -- + #endif //BT_SCALAR_H diff --git a/thirdparty/vhacd/inc/btVector3.h b/thirdparty/vhacd/inc/btVector3.h index 0f2fefbbd5a..4ed97167341 100644 --- a/thirdparty/vhacd/inc/btVector3.h +++ b/thirdparty/vhacd/inc/btVector3.h @@ -26,6 +26,10 @@ subject to the following restrictions: #define btVector3DataName "btVector3FloatData" #endif //BT_USE_DOUBLE_PRECISION +// -- GODOT start -- +namespace VHACD { +// -- GODOT end -- + /**@brief btVector3 can be used to represent 3D points and vectors. * It has an un-used w component to suit 16-byte alignment when btVector3 is stored in containers. This extra component can be used by derived classes (Quaternion?) or by user * Ideally, this class should be replaced by a platform optimized SIMD version that keeps the data in registers @@ -712,4 +716,8 @@ SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3Data& dataIn m_floats[i] = dataIn.m_floats[i]; } +// -- GODOT start -- +}; // namespace VHACD +// -- GODOT end -- + #endif //BT_VECTOR3_H diff --git a/thirdparty/vhacd/src/btAlignedAllocator.cpp b/thirdparty/vhacd/src/btAlignedAllocator.cpp index 11d594f6c95..ce0e7f26f95 100644 --- a/thirdparty/vhacd/src/btAlignedAllocator.cpp +++ b/thirdparty/vhacd/src/btAlignedAllocator.cpp @@ -15,6 +15,10 @@ subject to the following restrictions: #include "btAlignedAllocator.h" +// -- GODOT start -- +namespace VHACD { +// -- GODOT end -- + #ifdef _MSC_VER #pragma warning(disable:4311 4302) #endif @@ -177,4 +181,8 @@ void btAlignedFreeInternal(void* ptr) sAlignedFreeFunc(ptr); } +// -- GODOT start -- +}; // namespace VHACD +// -- GODOT end -- + #endif //BT_DEBUG_MEMORY_ALLOCATIONS diff --git a/thirdparty/vhacd/src/btConvexHullComputer.cpp b/thirdparty/vhacd/src/btConvexHullComputer.cpp index d3d749adbe3..8ab34af2abd 100644 --- a/thirdparty/vhacd/src/btConvexHullComputer.cpp +++ b/thirdparty/vhacd/src/btConvexHullComputer.cpp @@ -49,6 +49,10 @@ typedef unsigned long long int32_t uint64_t; #include #endif +// -- GODOT start -- +namespace VHACD { +// -- GODOT end -- + // Convex hull implementation based on Preparata and Hong // Ole Kniemeyer, MAXON Computer GmbH class btConvexHullInternal { @@ -2477,3 +2481,7 @@ btScalar btConvexHullComputer::compute(const void* coords, bool doubleCoords, in return shift; } + +// -- GODOT start -- +}; // namespace VHACD +// -- GODOT end -- From 74ab31b7f70bc34be5e5fb00a378166e72cd29bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Verschelde?= Date: Thu, 11 Apr 2019 18:01:04 +0200 Subject: [PATCH 3/3] vhacd: Fix -fpermissive build issue with MinGW Fixes #27926. 
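
The likely trigger for the -fpermissive error (inferred from the cast being replaced below, not spelled out in the patch itself) is that 64-bit MinGW is an LLP64 target: `unsigned long` is only 32 bits there, so casting a pointer to it loses precision and GCC refuses to compile it without -fpermissive. The cherry-picked btAlignPointer() avoids the cast by punning the address through a pointer/size_t union, since size_t always matches the pointer width. A simplified stand-in, with a hypothetical name, just to show the idiom (it assumes a power-of-two alignment and mirrors the union trick of the upstream helper):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical, simplified stand-in for btAlignPointer(): round an
    // address up to a power-of-two alignment via a pointer/size_t union
    // instead of an unsigned long cast.
    template <typename T>
    T *alignPointerUp(T *unaligned, std::size_t alignment) {
        union {
            T *ptr;
            std::size_t integer;
        } conv;
        conv.ptr = unaligned;
        conv.integer = (conv.integer + alignment - 1) & ~(alignment - 1);
        return conv.ptr;
    }

    int main() {
        char buffer[64];
        char *aligned = alignPointerUp(buffer + 1, 16);
        assert(reinterpret_cast<std::uintptr_t>(aligned) % 16 == 0);
        (void)aligned;
        return 0;
    }
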
--- thirdparty/vhacd/0002-fpermissive-fix.patch | 53 +++++++++++++++++++++ thirdparty/vhacd/inc/btScalar.h | 23 +++++++++ thirdparty/vhacd/src/btAlignedAllocator.cpp | 8 +++- 3 files changed, 82 insertions(+), 2 deletions(-) create mode 100644 thirdparty/vhacd/0002-fpermissive-fix.patch diff --git a/thirdparty/vhacd/0002-fpermissive-fix.patch b/thirdparty/vhacd/0002-fpermissive-fix.patch new file mode 100644 index 00000000000..965df2944ab --- /dev/null +++ b/thirdparty/vhacd/0002-fpermissive-fix.patch @@ -0,0 +1,53 @@ +diff --git a/thirdparty/vhacd/inc/btScalar.h b/thirdparty/vhacd/inc/btScalar.h +index 487205062..52297cd78 100644 +--- a/thirdparty/vhacd/inc/btScalar.h ++++ b/thirdparty/vhacd/inc/btScalar.h +@@ -535,6 +535,29 @@ struct btTypedObject { + } + }; + ++// -- GODOT start -- ++// Cherry-picked from Bullet 2.88 to fix GH-27926 ++///align a pointer to the provided alignment, upwards ++template ++T *btAlignPointer(T *unalignedPtr, size_t alignment) ++{ ++ struct btConvertPointerSizeT ++ { ++ union { ++ T *ptr; ++ size_t integer; ++ }; ++ }; ++ btConvertPointerSizeT converter; ++ ++ const size_t bit_mask = ~(alignment - 1); ++ converter.ptr = unalignedPtr; ++ converter.integer += alignment - 1; ++ converter.integer &= bit_mask; ++ return converter.ptr; ++} ++// -- GODOT end -- ++ + // -- GODOT start -- + }; // namespace VHACD + // -- GODOT end -- +diff --git a/thirdparty/vhacd/src/btAlignedAllocator.cpp b/thirdparty/vhacd/src/btAlignedAllocator.cpp +index ce0e7f26f..8dee31e7e 100644 +--- a/thirdparty/vhacd/src/btAlignedAllocator.cpp ++++ b/thirdparty/vhacd/src/btAlignedAllocator.cpp +@@ -72,8 +72,12 @@ static inline void* btAlignedAllocDefault(size_t size, int32_t alignment) + + real = (char*)sAllocFunc(size + sizeof(void*) + (alignment - 1)); + if (real) { +- offset = (alignment - (unsigned long)(real + sizeof(void*))) & (alignment - 1); +- ret = (void*)((real + sizeof(void*)) + offset); ++ // -- GODOT start -- ++ // Synced with Bullet 2.88 to fix GH-27926 ++ //offset = (alignment - (unsigned long)(real + sizeof(void*))) & (alignment - 1); ++ //ret = (void*)((real + sizeof(void*)) + offset); ++ ret = btAlignPointer(real + sizeof(void *), alignment); ++ // -- GODOT end -- + *((void**)(ret)-1) = (void*)(real); + } + else { diff --git a/thirdparty/vhacd/inc/btScalar.h b/thirdparty/vhacd/inc/btScalar.h index 617fd7c44f5..3999a71521e 100644 --- a/thirdparty/vhacd/inc/btScalar.h +++ b/thirdparty/vhacd/inc/btScalar.h @@ -543,6 +543,29 @@ struct btTypedObject { } }; +// -- GODOT start -- +// Cherry-picked from Bullet 2.88 to fix GH-27926 +///align a pointer to the provided alignment, upwards +template +T *btAlignPointer(T *unalignedPtr, size_t alignment) +{ + struct btConvertPointerSizeT + { + union { + T *ptr; + size_t integer; + }; + }; + btConvertPointerSizeT converter; + + const size_t bit_mask = ~(alignment - 1); + converter.ptr = unalignedPtr; + converter.integer += alignment - 1; + converter.integer &= bit_mask; + return converter.ptr; +} +// -- GODOT end -- + // -- GODOT start -- }; // namespace VHACD // -- GODOT end -- diff --git a/thirdparty/vhacd/src/btAlignedAllocator.cpp b/thirdparty/vhacd/src/btAlignedAllocator.cpp index ce0e7f26f95..8dee31e7e36 100644 --- a/thirdparty/vhacd/src/btAlignedAllocator.cpp +++ b/thirdparty/vhacd/src/btAlignedAllocator.cpp @@ -72,8 +72,12 @@ static inline void* btAlignedAllocDefault(size_t size, int32_t alignment) real = (char*)sAllocFunc(size + sizeof(void*) + (alignment - 1)); if (real) { - offset = (alignment - (unsigned long)(real + 
sizeof(void*))) & (alignment - 1); - ret = (void*)((real + sizeof(void*)) + offset); + // -- GODOT start -- + // Synced with Bullet 2.88 to fix GH-27926 + //offset = (alignment - (unsigned long)(real + sizeof(void*))) & (alignment - 1); + //ret = (void*)((real + sizeof(void*)) + offset); + ret = btAlignPointer(real + sizeof(void *), alignment); + // -- GODOT end -- *((void**)(ret)-1) = (void*)(real); } else {
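
For context on what the restored code path above is doing: Bullet's default aligned allocator over-allocates by sizeof(void*) plus (alignment - 1), rounds the returned address up to the requested alignment, and stores the pointer malloc() actually returned in the slot just before the aligned block so the matching free can recover it. The sketch below illustrates that pattern under stated assumptions (hypothetical helper names, power-of-two alignment; the free side is the conventional counterpart and is not part of the hunk shown here):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    void *alignedAllocSketch(std::size_t size, std::size_t alignment) {
        // Over-allocate: room for the payload, one bookkeeping pointer, and alignment slack.
        char *real = static_cast<char *>(std::malloc(size + sizeof(void *) + (alignment - 1)));
        if (!real) {
            return nullptr;
        }
        // Skip past the bookkeeping slot, then round up to the requested alignment.
        std::uintptr_t raw = reinterpret_cast<std::uintptr_t>(real + sizeof(void *));
        void *ret = reinterpret_cast<void *>((raw + alignment - 1) & ~(static_cast<std::uintptr_t>(alignment) - 1));
        // Stash the pointer malloc() returned right before the aligned block.
        static_cast<void **>(ret)[-1] = real;
        return ret;
    }

    void alignedFreeSketch(void *ptr) {
        if (ptr) {
            // Read back the original pointer stored just before the aligned block.
            std::free(static_cast<void **>(ptr)[-1]);
        }
    }

    int main() {
        void *p = alignedAllocSketch(100, 32);
        alignedFreeSketch(p);
        return 0;
    }
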