vhacd: Recommit unmodified upstream code without style changes

Godot-specific changes will then be redone without touching upstream formatting.
Also document the current state in thirdparty/README.md and add the upstream LICENSE.

Add vhacd to COPYRIGHT.txt.
Rémi Verschelde 2019-04-11 17:30:12 +02:00
parent 7f2ad8bd3f
commit 531b158897
11 changed files with 3302 additions and 3050 deletions

COPYRIGHT.txt

@@ -385,6 +385,12 @@ Copyright: 2014-2018, Syoyo Fujita
   2002, Industrial Light & Magic, a division of Lucas Digital Ltd. LLC
 License: BSD-3-clause
 
+Files: ./thirdparty/vhacd/
+Comment: V-HACD
+Copyright: 2011, Khaled Mamou
+  2003-2009, Erwin Coumans
+License: BSD-3-clause
+
 Files: ./thirdparty/zlib/
 Comment: zlib
 Copyright: 1995-2017, Jean-loup Gailly and Mark Adler

thirdparty/README.md

@@ -1,5 +1,6 @@
 # Third party libraries
 
 ## assimp
 
 - Upstream: http://github.com/assimp/assimp
@@ -294,8 +295,12 @@ Godot build configurations, check them out when updating.
 
 File extracted from upstream release tarball `mbedtls-2.16.0-apache.tgz`:
 - All `*.h` from `include/mbedtls/` to `thirdparty/mbedtls/include/mbedtls/`
 - All `*.c` from `library/` to `thirdparty/mbedtls/library/`
-- Applied the patch in `thirdparty/mbedtls/1453.diff` (PR 1453). Soon to be merged upstream. Check it out at next update.
-- Applied the patch in `thirdparty/mbedtls/padlock.diff`. This disables VIA padlock support which defines a symbol `unsupported` which clashses with a symbol in libwebsockets. Soon to be merged upstream. Check it out at next update.
+- Applied the patch in `thirdparty/mbedtls/1453.diff` (PR 1453).
+- Applied the patch in `thirdparty/mbedtls/padlock.diff`. This disables VIA
+  padlock support which defines a symbol `unsupported` which clashes with
+  a symbol in libwebsockets.
 
 ## miniupnpc
@@ -523,6 +528,18 @@ Files extracted from upstream source:
 
 - `tinyexr.{cc,h}`
 
+## vhacd
+
+- Upstream: https://github.com/kmammou/v-hacd
+- Version: git (2297aa1, 2018)
+- License: BSD-3-Clause
+
+Files extracted from upstream source:
+
+- From `src/VHACD_Lib/`: `inc`, `public` and `src`
+- `LICENSE`
+
 ## zlib
 
 - Upstream: http://www.zlib.net
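
For reference, the `public` folder extracted above exposes the `VHACD.h` interface that consumers of the library drive. Below is a minimal, hedged sketch of typical usage; the `decompose` helper name is illustrative, and the exact `Compute()` overload and parameter order should be checked against the vendored header at this snapshot.

    #include <cstdint>
    #include "VHACD.h" // from thirdparty/vhacd/public

    // Decompose a triangle mesh into convex hulls (illustrative sketch).
    void decompose(const float* points, uint32_t point_count,
            const uint32_t* triangles, uint32_t triangle_count) {
        VHACD::IVHACD::Parameters params; // default decomposition settings
        VHACD::IVHACD* iface = VHACD::CreateVHACD();

        // Assumed overload: (points, countPoints, triangles, countTriangles, params).
        if (iface->Compute(points, point_count, triangles, triangle_count, params)) {
            for (uint32_t i = 0; i < iface->GetNConvexHulls(); i++) {
                VHACD::IVHACD::ConvexHull hull;
                iface->GetConvexHull(i, hull); // hull.m_points / hull.m_triangles
                // ... convert the hull data to the engine's own mesh/shape types ...
            }
        }
        iface->Clean();
        iface->Release();
    }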

thirdparty/vhacd/LICENSE (new file)

@@ -0,0 +1,29 @@
BSD 3-Clause License

Copyright (c) 2011, Khaled Mamou (kmamou at gmail dot com)
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

thirdparty/vhacd/inc/btAlignedAllocator.h

@@ -21,27 +21,22 @@ subject to the following restrictions:
///that is better portable and more predictable
#include "btScalar.h"

-//GODOT ADDITION
-namespace VHACD {
-//

//#define BT_DEBUG_MEMORY_ALLOCATIONS 1
#ifdef BT_DEBUG_MEMORY_ALLOCATIONS
#define btAlignedAlloc(a, b) \
    btAlignedAllocInternal(a, b, __LINE__, __FILE__)
#define btAlignedFree(ptr) \
    btAlignedFreeInternal(ptr, __LINE__, __FILE__)
void* btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char* filename);
void btAlignedFreeInternal(void* ptr, int32_t line, char* filename);
#else
void* btAlignedAllocInternal(size_t size, int32_t alignment);
void btAlignedFreeInternal(void* ptr);
#define btAlignedAlloc(size, alignment) btAlignedAllocInternal(size, alignment)
#define btAlignedFree(ptr) btAlignedFreeInternal(ptr)

@@ -49,63 +44,61 @@ void btAlignedFreeInternal(void *ptr);
#endif

typedef int32_t size_type;

typedef void*(btAlignedAllocFunc)(size_t size, int32_t alignment);
typedef void(btAlignedFreeFunc)(void* memblock);
typedef void*(btAllocFunc)(size_t size);
typedef void(btFreeFunc)(void* memblock);

///The developer can let all Bullet memory allocations go through a custom memory allocator, using btAlignedAllocSetCustom
void btAlignedAllocSetCustom(btAllocFunc* allocFunc, btFreeFunc* freeFunc);
///If the developer has already an custom aligned allocator, then btAlignedAllocSetCustomAligned can be used. The default aligned allocator pre-allocates extra memory using the non-aligned allocator, and instruments it.
void btAlignedAllocSetCustomAligned(btAlignedAllocFunc* allocFunc, btAlignedFreeFunc* freeFunc);

///The btAlignedAllocator is a portable class for aligned memory allocations.
///Default implementations for unaligned and aligned allocations can be overridden by a custom allocator using btAlignedAllocSetCustom and btAlignedAllocSetCustomAligned.
template <typename T, unsigned Alignment>
class btAlignedAllocator {
    typedef btAlignedAllocator<T, Alignment> self_type;

public:
    [default constructor, converting constructor, the pointer/reference typedefs, address(),
     allocate(), construct(), deallocate(), destroy(), the rebind helper, operator= and
     operator==: identical code, reformatted from Godot style (`T *p`, braces on the same
     line) back to upstream style (`T* p`, braces on their own line)]
};

-//GODOT ADDITION
-}; // namespace VHACD
-//

#endif //BT_ALIGNED_ALLOCATOR
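
As a quick illustration of the allocation hooks declared above, here is a small hedged sketch; the `SimdData` type and `example()` function are illustrative and not part of the library.

    #include "btAlignedAllocator.h"

    struct SimdData {
        float v[4];
    };

    void example() {
        // The block returned below is 16-byte aligned, suitable for SIMD loads.
        SimdData* d = static_cast<SimdData*>(btAlignedAlloc(sizeof(SimdData), 16));
        d->v[0] = 1.0f;
        // Return it through the matching free macro.
        btAlignedFree(d);
    }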

thirdparty/vhacd/inc/btAlignedObjectArray.h

@@ -38,383 +38,411 @@ subject to the following restrictions:
#include <new> //for placement new
#endif //BT_USE_PLACEMENT_NEW

-//GODOT ADDITION
-namespace VHACD {
-//

///The btAlignedObjectArray template class uses a subset of the stl::vector interface for its methods
///It is developed to replace stl::vector to avoid portability issues, including STL alignment issues to add SIMD/SSE data
template <typename T>
//template <class T>
class btAlignedObjectArray {
    btAlignedAllocator<T, 16> m_allocator;

    int32_t m_size;
    int32_t m_capacity;
    T* m_data;
    //PCK: added this line
    bool m_ownsMemory;

    [the copy-assignment operator (public under BT_ALLOW_ARRAY_COPY_OPERATOR, otherwise
     private and undefined), allocSize(), copy(), init(), destroy(), allocate(),
     deallocate(), the default/copy constructors and destructor, size(), at() and
     operator[] (const and non-const), clear(), pop_back(), resize(),
     expandNonInitializing(), expand(), push_back(), capacity(), reserve(), the nested
     `less` comparator, quickSortInternal()/quickSort(), downHeap()/heapSort(), swap(),
     findBinarySearch(), findLinearSearch(), remove(), initializeFromBuffer() and
     copyFromArray(): identical code, reformatted from Godot style (`T *p`, braces on
     the same line) back to upstream style (`T* p`, braces on their own line)]
};

-//GODOT ADDITION
-}; // namespace VHACD
-//

#endif //BT_OBJECT_ARRAY__
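
For reference, a short hedged sketch of how the container above is used, relying only on members listed in this diff; the `example()` function is an illustrative name.

    #include "btAlignedObjectArray.h"

    void example() {
        btAlignedObjectArray<int32_t> values;
        values.push_back(3);
        values.push_back(1);
        values.push_back(2);

        // Sort with the nested comparator, then binary-search the sorted data.
        values.quickSort(btAlignedObjectArray<int32_t>::less());
        int32_t where = values.findBinarySearch(2); // index of 2, or size() if missing

        values.resize(0); // destroys the elements but keeps the capacity
        (void)where;
    }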

thirdparty/vhacd/inc/btConvexHullComputer.h

@@ -18,60 +18,59 @@ subject to the following restrictions:
#include "btAlignedObjectArray.h"
#include "btVector3.h"

-//GODOT ADDITION
-namespace VHACD {
-//

/// Convex hull implementation based on Preparata and Hong
/// See http://code.google.com/p/bullet/issues/detail?id=275
/// Ole Kniemeyer, MAXON Computer GmbH
class btConvexHullComputer {
private:
    btScalar compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp);

public:
    class Edge {
    private:
        int32_t next;
        int32_t reverse;
        int32_t targetVertex;

        friend class btConvexHullComputer;

    public:
        [getSourceVertex(), getTargetVertex(), getNextEdgeOfVertex(), getNextEdgeOfFace()
         and getReverseEdge(): unchanged accessors returning the linked edge/vertex
         indices, reformatted to upstream brace style where needed]
    };

    // Vertices of the output hull
    btAlignedObjectArray<btVector3> vertices;

    // Edges of the output hull
    btAlignedObjectArray<Edge> edges;

    // Faces of the convex hull. Each entry is an index into the "edges" array pointing to an edge of the face. Faces are planar n-gons
    btAlignedObjectArray<int32_t> faces;

    /*
    Compute convex hull of "count" vertices stored in "coords". "stride" is the difference in bytes
    between the addresses of consecutive vertices. If "shrink" is positive, the convex hull is shrunken
    by that amount (each face is moved by "shrink" length units towards the center along its normal).
@@ -83,18 +82,16 @@ public:
    The output convex hull can be found in the member variables "vertices", "edges", "faces".
    */
    btScalar compute(const float* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp)
    {
        return compute(coords, false, stride, count, shrink, shrinkClamp);
    }

    // same as above, but double precision
    btScalar compute(const double* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp)
    {
        return compute(coords, true, stride, count, shrink, shrinkClamp);
    }
};

-//GODOT ADDITION
-}; // namespace VHACD
-//

#endif //BT_CONVEX_HULL_COMPUTER_H
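
A short hedged sketch of driving this class through the single-precision `compute()` overload shown above; the unit-tetrahedron input and the `example()` function are illustrative.

    #include "btConvexHullComputer.h"

    void example() {
        // Four points, tightly packed: the stride is sizeof(float) * 3 bytes.
        const float coords[4 * 3] = {
            0, 0, 0,
            1, 0, 0,
            0, 1, 0,
            0, 0, 1
        };

        btConvexHullComputer hull;
        // shrink = 0 means the hull is not shrunken, so shrinkClamp is unused.
        hull.compute(coords, sizeof(float) * 3, 4, 0.0f, 0.0f);

        int32_t vertex_count = hull.vertices.size(); // btAlignedObjectArray<btVector3>
        int32_t face_count = hull.faces.size();      // one edge index per planar face
        (void)vertex_count;
        (void)face_count;
    }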

thirdparty/vhacd/inc/btMinMax.h

@@ -17,50 +17,49 @@ subject to the following restrictions:
#include "btScalar.h"

-//GODOT ADDITION
-namespace VHACD {
-//

template <class T>
SIMD_FORCE_INLINE const T& btMin(const T& a, const T& b)
{
    return a < b ? a : b;
}

template <class T>
SIMD_FORCE_INLINE const T& btMax(const T& a, const T& b)
{
    return a > b ? a : b;
}

template <class T>
SIMD_FORCE_INLINE const T& btClamped(const T& a, const T& lb, const T& ub)
{
    return a < lb ? lb : (ub < a ? ub : a);
}

template <class T>
SIMD_FORCE_INLINE void btSetMin(T& a, const T& b)
{
    if (b < a) {
        a = b;
    }
}

template <class T>
SIMD_FORCE_INLINE void btSetMax(T& a, const T& b)
{
    if (a < b) {
        a = b;
    }
}

template <class T>
SIMD_FORCE_INLINE void btClamp(T& a, const T& lb, const T& ub)
{
    if (a < lb) {
        a = lb;
    }
    else if (ub < a) {
        a = ub;
    }
}

-//GODOT ADDITION
-}; // namespace VHACD
-//

#endif //BT_GEN_MINMAX_H
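
These templates mirror `std::min`/`std::max`/clamping; a trivial hedged illustration follows (the `example()` function is not part of the library).

    #include "btMinMax.h"

    void example() {
        btScalar a = 0.25f;
        btScalar b = 0.75f;

        btScalar lo = btMin(a, b); // 0.25
        btScalar hi = btMax(a, b); // 0.75
        btScalar c = btClamped(btScalar(1.5f), a, b); // clamped into [0.25, 0.75] -> 0.75

        btSetMax(a, c); // a becomes 0.75, since c > a
        (void)lo;
        (void)hi;
    }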

thirdparty/vhacd/inc/btScalar.h

@@ -22,24 +22,17 @@ subject to the following restrictions:
#include <float.h>
#include <math.h>
-#include <stdint.h>
#include <stdlib.h> //size_t for MSVC 6.0
+#include <stdint.h>

/* SVN $Revision$ on $Date$ from http://bullet.googlecode.com*/
#define BT_BULLET_VERSION 279

-//GODOT ADDITION
-namespace VHACD {
-//
inline int32_t btGetVersion()
{
    return BT_BULLET_VERSION;
}
-//GODOT ADDITION
-}; // namespace VHACD
-//

#if defined(DEBUG) || defined(_DEBUG)
#define BT_DEBUG
#endif
@@ -107,12 +100,12 @@ inline int32_t btGetVersion() {
#include <spu_printf.h>
#define printf spu_printf
#define btAssert(x) \
    { \
        if (!(x)) { \
            printf("Assert " __FILE__ ":%u (" #x ")\n", __LINE__); \
            spu_hcmpeq(0, 0); \
        } \
    }
#else
#define btAssert assert
#endif
@@ -206,10 +199,6 @@ inline int32_t btGetVersion() {
#endif //__CELLOS_LV2__
#endif

-//GODOT ADDITION
-namespace VHACD {
-//

///The btScalar type abstracts floating point numbers, to easily switch between double and single floating point precision.
#if defined(BT_USE_DOUBLE_PRECISION)
typedef double btScalar;
@@ -222,130 +211,96 @@ typedef float btScalar;
#endif

#define BT_DECLARE_ALIGNED_ALLOCATOR() \
    SIMD_FORCE_INLINE void* operator new(size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \
    SIMD_FORCE_INLINE void operator delete(void* ptr) { btAlignedFree(ptr); } \
    SIMD_FORCE_INLINE void* operator new(size_t, void* ptr) { return ptr; } \
    SIMD_FORCE_INLINE void operator delete(void*, void*) {} \
    SIMD_FORCE_INLINE void* operator new[](size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \
    SIMD_FORCE_INLINE void operator delete[](void* ptr) { btAlignedFree(ptr); } \
    SIMD_FORCE_INLINE void* operator new[](size_t, void* ptr) { return ptr; } \
    SIMD_FORCE_INLINE void operator delete[](void*, void*) {}

#if defined(BT_USE_DOUBLE_PRECISION) || defined(BT_FORCE_DOUBLE_FUNCTIONS)
SIMD_FORCE_INLINE btScalar btSqrt(btScalar x)
{
    return sqrt(x);
}
[btFabs, btCos, btSin, btTan, btAtan, btAtan2, btExp, btLog, btPow and btFmod: restored
 to upstream one-line wrappers around the corresponding double-precision C math
 functions; btAcos and btAsin keep their multi-line form that clamps the input to
 [-1, 1] before calling acos/asin]
#else
SIMD_FORCE_INLINE btScalar btSqrt(btScalar y)
{
#ifdef USE_APPROXIMATION
    [five Newton-Raphson refinement steps of an initial 1/sqrt(y) estimate, unchanged
     apart from upstream formatting]
#else
    return sqrtf(y);
#endif
}
[btFabs, btCos, btSin, btTan, btAtan, btAtan2, btExp, btLog, btPow and btFmod: restored
 to upstream one-line wrappers around fabsf/cosf/sinf/tanf/atanf/atan2f/expf/logf/powf/
 fmodf; btAcos and btAsin keep their multi-line form that clamps the input to [-1, 1]
 before calling acosf/asinf]
#endif
@@ -366,110 +321,119 @@ SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) {
#define SIMD_INFINITY FLT_MAX
#endif

[btAtan2Fast(), btFuzzyZero(), btEqual(), btGreaterEqual(), btIsNegative(), btRadians(),
 btDegrees(), the BT_DECLARE_HANDLE macro, btFsel(), btMachineIsLittleEndian(), the three
 btSelect() overloads, btSwap() and the four btSwapEndian() overloads: identical code,
 reformatted from Godot style (`T *p`, braces on the same line) back to upstream style
 (`T* p`, braces on their own line); the short one-expression helpers become one-liners]
@@ -478,88 +442,92 @@ SIMD_FORCE_INLINE unsigned short btSwapEndian(short val) {
///When a floating point unit is faced with an invalid value, it may actually change the value, or worse, throw an exception.
///In most systems, running user mode code, you wouldn't get an exception, but instead the hardware/os/runtime will 'fix' the number for you.
///so instead of returning a float/double, we return integer/long long integer
[btSwapEndianFloat(), btUnswapEndianFloat(), btSwapEndianDouble(), btUnswapEndianDouble(),
 btNormalizeAngle() and the btTypedObject helper struct: identical code, reformatted from
 Godot style back to upstream style]

-//GODOT ADDITION
-}; // namespace VHACD
-//

#endif //BT_SCALAR_H
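
A small hedged sketch exercising a few of the helpers declared in this header; the `example()` function is illustrative, and every called function appears in the diff above.

    #include "btScalar.h"

    void example() {
        // Angles are wrapped into [-SIMD_PI, SIMD_PI].
        btScalar wrapped = btNormalizeAngle(btRadians(btScalar(540.0f))); // 540 deg -> PI

        // Branchless select: picks 10 when the condition is non-zero, 20 otherwise.
        int32_t picked = btSelect(1u, 10, 20);

        // Endian helper pair used by the serialization code.
        uint32_t swapped = btSwapEndianFloat(1.0f);
        float restored = btUnswapEndianFloat(swapped);

        (void)wrapped;
        (void)picked;
        (void)restored;
    }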

thirdparty/vhacd/inc/btVector3.h: file diff suppressed because it is too large.

thirdparty/vhacd/src/btAlignedAllocator.cpp

@@ -15,157 +15,166 @@ subject to the following restrictions:
#include "btAlignedAllocator.h"

-//GODOT ADDITION
-namespace VHACD {
-//

#ifdef _MSC_VER
-#pragma warning(disable : 4311 4302)
+#pragma warning(disable:4311 4302)
#endif

int32_t gNumAlignedAllocs = 0;
int32_t gNumAlignedFree = 0;
int32_t gTotalBytesAlignedAllocs = 0; //detect memory leaks

static void* btAllocDefault(size_t size)
{
    return malloc(size);
}
static void btFreeDefault(void* ptr)
{
    free(ptr);
}

static btAllocFunc* sAllocFunc = btAllocDefault;
static btFreeFunc* sFreeFunc = btFreeDefault;

#if defined(BT_HAS_ALIGNED_ALLOCATOR)
#include <malloc.h>
[btAlignedAllocDefault()/btAlignedFreeDefault() implemented with _aligned_malloc()/
 _aligned_free(), reformatted to upstream style]
#elif defined(__CELLOS_LV2__)
#include <stdlib.h>
[btAlignedAllocDefault()/btAlignedFreeDefault() implemented with memalign()/free(),
 reformatted to upstream style]
#else
[portable btAlignedAllocDefault()/btAlignedFreeDefault() that over-allocate through
 sAllocFunc and store the real pointer just before the aligned block, reformatted to
 upstream style]
#endif

static btAlignedAllocFunc* sAlignedAllocFunc = btAlignedAllocDefault;
static btAlignedFreeFunc* sAlignedFreeFunc = btAlignedFreeDefault;

void btAlignedAllocSetCustomAligned(btAlignedAllocFunc* allocFunc, btAlignedFreeFunc* freeFunc)
{
    sAlignedAllocFunc = allocFunc ? allocFunc : btAlignedAllocDefault;
    sAlignedFreeFunc = freeFunc ? freeFunc : btAlignedFreeDefault;
}

void btAlignedAllocSetCustom(btAllocFunc* allocFunc, btFreeFunc* freeFunc)
{
    sAllocFunc = allocFunc ? allocFunc : btAllocDefault;
    sFreeFunc = freeFunc ? freeFunc : btFreeDefault;
}

#ifdef BT_DEBUG_MEMORY_ALLOCATIONS
//this generic allocator provides the total allocated number of bytes
#include <stdio.h>
[debug btAlignedAllocInternal()/btAlignedFreeInternal() that update gNumAlignedAllocs,
 gNumAlignedFree and gTotalBytesAlignedAllocs and printf each allocation and free,
 reformatted to upstream style]
#else //BT_DEBUG_MEMORY_ALLOCATIONS
void* btAlignedAllocInternal(size_t size, int32_t alignment)
{
    gNumAlignedAllocs++;
    void* ptr;
    ptr = sAlignedAllocFunc(size, alignment);
    // printf("btAlignedAllocInternal %d, %x\n",size,ptr);
    return ptr;
}

void btAlignedFreeInternal(void* ptr)
{
    if (!ptr) {
        return;
    }

    gNumAlignedFree++;
    // printf("btAlignedFreeInternal %x\n",ptr);
    sAlignedFreeFunc(ptr);
}

-//GODOT ADDITION
-};
-//

#endif //BT_DEBUG_MEMORY_ALLOCATIONS
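
The `btAlignedAllocSetCustom*` hooks defined above let an embedder route these allocations through its own memory functions. A hedged sketch follows, with illustrative `counting_alloc`/`counting_free` helpers.

    #include "btAlignedAllocator.h"
    #include <cstdlib>

    // Hypothetical counter, just to show the shape of the hook.
    static size_t g_alloc_calls = 0;

    static void* counting_alloc(size_t size) {
        g_alloc_calls++;
        return malloc(size);
    }

    static void counting_free(void* memblock) {
        free(memblock);
    }

    void install_custom_allocator() {
        // Passing null pointers would restore the malloc/free defaults.
        btAlignedAllocSetCustom(counting_alloc, counting_free);
    }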

thirdparty/vhacd/src/btConvexHullComputer.cpp: file diff suppressed because it is too large.