Merge pull request #27929 from akien-mga/vhacd-cleanup
vhacd: Properly identify downstream changes, fix MinGW build issue
commit abb8e97122
@@ -385,6 +385,12 @@ Copyright: 2014-2018, Syoyo Fujita
 2002, Industrial Light & Magic, a division of Lucas Digital Ltd. LLC
License: BSD-3-clause

Files: ./thirdparty/vhacd/
Comment: V-HACD
Copyright: 2011, Khaled Mamou
 2003-2009, Erwin Coumans
License: BSD-3-clause

Files: ./thirdparty/zlib/
Comment: zlib
Copyright: 1995-2017, Jean-loup Gailly and Mark Adler
@@ -1,5 +1,6 @@
# Third party libraries


## assimp

- Upstream: http://github.com/assimp/assimp
@@ -294,8 +295,12 @@ Godot build configurations, check them out when updating.
File extracted from upstream release tarball `mbedtls-2.16.0-apache.tgz`:
- All `*.h` from `include/mbedtls/` to `thirdparty/mbedtls/include/mbedtls/`
- All `*.c` from `library/` to `thirdparty/mbedtls/library/`
- Applied the patch in `thirdparty/mbedtls/1453.diff` (PR 1453). Soon to be merged upstream. Check it out at next update.
- Applied the patch in `thirdparty/mbedtls/padlock.diff`. This disables VIA padlock support which defines a symbol `unsupported` which clashses with a symbol in libwebsockets.
- Applied the patch in `thirdparty/mbedtls/1453.diff` (PR 1453).
  Soon to be merged upstream. Check it out at next update.
- Applied the patch in `thirdparty/mbedtls/padlock.diff`. This disables VIA
  padlock support which defines a symbol `unsupported` which clashes with
  a symbol in libwebsockets.


## miniupnpc

@@ -523,6 +528,23 @@ Files extracted from upstream source:
- `tinyexr.{cc,h}`


## vhacd

- Upstream: https://github.com/kmammou/v-hacd
- Version: git (2297aa1, 2018)
- License: BSD-3-Clause

Files extracted from upstream source:

- From `src/VHACD_Lib/`: `inc`, `public` and `src`
- `LICENSE`

Some downstream changes have been made and are identified by
`// -- GODOT start --` and `// -- GODOT end --` comments.
They can be reapplied using the patches included in the `vhacd`
folder.


## zlib

- Upstream: http://www.zlib.net
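A minimal sketch of the marker convention described in the vhacd entry above: upstream code is kept verbatim, and every Godot-specific edit is fenced so it can be located and re-exported as a patch at the next library update. The `btMin` helper matches the vendored `btMinMax.h`; the `main` harness is illustrative only.

```cpp
#include <cstdio>

// -- GODOT start --
// (downstream change: wrap the vendored Bullet helpers in their own namespace)
namespace VHACD {
// -- GODOT end --

template <class T>
const T& btMin(const T& a, const T& b)
{
    return a < b ? a : b;
}

// -- GODOT start --
} // namespace VHACD
// -- GODOT end --

int main()
{
    std::printf("%d\n", VHACD::btMin(2, 3)); // prints 2
    return 0;
}
```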
@@ -0,0 +1,213 @@
diff --git a/thirdparty/vhacd/inc/btAlignedAllocator.h b/thirdparty/vhacd/inc/btAlignedAllocator.h
index 11f6e12dc..94e71d512 100644
--- a/thirdparty/vhacd/inc/btAlignedAllocator.h
+++ b/thirdparty/vhacd/inc/btAlignedAllocator.h
@@ -21,6 +21,11 @@ subject to the following restrictions:
///that is better portable and more predictable

#include "btScalar.h"
+
+// -- GODOT start --
+namespace VHACD {
+// -- GODOT end --
+
//#define BT_DEBUG_MEMORY_ALLOCATIONS 1
#ifdef BT_DEBUG_MEMORY_ALLOCATIONS

@@ -101,4 +106,8 @@ public:
friend bool operator==(const self_type&, const self_type&) { return true; }
};

+// -- GODOT start --
+}; // namespace VHACD
+// -- GODOT end --
+
#endif //BT_ALIGNED_ALLOCATOR
diff --git a/thirdparty/vhacd/inc/btAlignedObjectArray.h b/thirdparty/vhacd/inc/btAlignedObjectArray.h
index e6620adf6..1ce03d21b 100644
--- a/thirdparty/vhacd/inc/btAlignedObjectArray.h
+++ b/thirdparty/vhacd/inc/btAlignedObjectArray.h
@@ -38,6 +38,10 @@ subject to the following restrictions:
#include <new> //for placement new
#endif //BT_USE_PLACEMENT_NEW

+// -- GODOT start --
+namespace VHACD {
+// -- GODOT end --
+
///The btAlignedObjectArray template class uses a subset of the stl::vector interface for its methods
///It is developed to replace stl::vector to avoid portability issues, including STL alignment issues to add SIMD/SSE data
template <typename T>
@@ -445,4 +449,8 @@ public:
}
};

+// -- GODOT start --
+}; // namespace VHACD
+// -- GODOT end --
+
#endif //BT_OBJECT_ARRAY__
diff --git a/thirdparty/vhacd/inc/btConvexHullComputer.h b/thirdparty/vhacd/inc/btConvexHullComputer.h
index 3c5075c2c..04bb96f64 100644
--- a/thirdparty/vhacd/inc/btConvexHullComputer.h
+++ b/thirdparty/vhacd/inc/btConvexHullComputer.h
@@ -18,6 +18,10 @@ subject to the following restrictions:
#include "btAlignedObjectArray.h"
#include "btVector3.h"

+// -- GODOT start --
+namespace VHACD {
+// -- GODOT end --
+
/// Convex hull implementation based on Preparata and Hong
/// See http://code.google.com/p/bullet/issues/detail?id=275
/// Ole Kniemeyer, MAXON Computer GmbH
@@ -94,4 +98,8 @@ public:
}
};

+// -- GODOT start --
+}; // namespace VHACD
+// -- GODOT end --
+
#endif //BT_CONVEX_HULL_COMPUTER_H
diff --git a/thirdparty/vhacd/inc/btMinMax.h b/thirdparty/vhacd/inc/btMinMax.h
index 40b0ceb6e..9bc1e1c77 100644
--- a/thirdparty/vhacd/inc/btMinMax.h
+++ b/thirdparty/vhacd/inc/btMinMax.h
@@ -17,6 +17,10 @@ subject to the following restrictions:

#include "btScalar.h"

+// -- GODOT start --
+namespace VHACD {
+// -- GODOT end --
+
template <class T>
SIMD_FORCE_INLINE const T& btMin(const T& a, const T& b)
{
@@ -62,4 +66,8 @@ SIMD_FORCE_INLINE void btClamp(T& a, const T& lb, const T& ub)
}
}

+// -- GODOT start --
+}; // namespace VHACD
+// -- GODOT end --
+
#endif //BT_GEN_MINMAX_H
diff --git a/thirdparty/vhacd/inc/btScalar.h b/thirdparty/vhacd/inc/btScalar.h
index b814474bd..617fd7c44 100644
--- a/thirdparty/vhacd/inc/btScalar.h
+++ b/thirdparty/vhacd/inc/btScalar.h
@@ -28,11 +28,19 @@ subject to the following restrictions:
/* SVN $Revision$ on $Date$ from http://bullet.googlecode.com*/
#define BT_BULLET_VERSION 279

+// -- GODOT start --
+namespace VHACD {
+// -- GODOT end --
+
inline int32_t btGetVersion()
{
return BT_BULLET_VERSION;
}

+// -- GODOT start --
+}; // namespace VHACD
+// -- GODOT end --
+
#if defined(DEBUG) || defined(_DEBUG)
#define BT_DEBUG
#endif
@@ -199,6 +207,10 @@ inline int32_t btGetVersion()
#endif //__CELLOS_LV2__
#endif

+// -- GODOT start --
+namespace VHACD {
+// -- GODOT end --
+
///The btScalar type abstracts floating point numbers, to easily switch between double and single floating point precision.
#if defined(BT_USE_DOUBLE_PRECISION)
typedef double btScalar;
@@ -530,4 +542,9 @@ struct btTypedObject {
return m_objectType;
}
};
+
+// -- GODOT start --
+}; // namespace VHACD
+// -- GODOT end --
+
#endif //BT_SCALAR_H
diff --git a/thirdparty/vhacd/inc/btVector3.h b/thirdparty/vhacd/inc/btVector3.h
index 0f2fefbbd..4ed971673 100644
--- a/thirdparty/vhacd/inc/btVector3.h
+++ b/thirdparty/vhacd/inc/btVector3.h
@@ -26,6 +26,10 @@ subject to the following restrictions:
#define btVector3DataName "btVector3FloatData"
#endif //BT_USE_DOUBLE_PRECISION

+// -- GODOT start --
+namespace VHACD {
+// -- GODOT end --
+
/**@brief btVector3 can be used to represent 3D points and vectors.
* It has an un-used w component to suit 16-byte alignment when btVector3 is stored in containers. This extra component can be used by derived classes (Quaternion?) or by user
* Ideally, this class should be replaced by a platform optimized SIMD version that keeps the data in registers
@@ -712,4 +716,8 @@ SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3Data& dataIn
m_floats[i] = dataIn.m_floats[i];
}

+// -- GODOT start --
+}; // namespace VHACD
+// -- GODOT end --
+
#endif //BT_VECTOR3_H
diff --git a/thirdparty/vhacd/src/btAlignedAllocator.cpp b/thirdparty/vhacd/src/btAlignedAllocator.cpp
index 11d594f6c..ce0e7f26f 100644
--- a/thirdparty/vhacd/src/btAlignedAllocator.cpp
+++ b/thirdparty/vhacd/src/btAlignedAllocator.cpp
@@ -15,6 +15,10 @@ subject to the following restrictions:

#include "btAlignedAllocator.h"

+// -- GODOT start --
+namespace VHACD {
+// -- GODOT end --
+
#ifdef _MSC_VER
#pragma warning(disable:4311 4302)
#endif
@@ -177,4 +181,8 @@ void btAlignedFreeInternal(void* ptr)
sAlignedFreeFunc(ptr);
}

+// -- GODOT start --
+}; // namespace VHACD
+// -- GODOT end --
+
#endif //BT_DEBUG_MEMORY_ALLOCATIONS
diff --git a/thirdparty/vhacd/src/btConvexHullComputer.cpp b/thirdparty/vhacd/src/btConvexHullComputer.cpp
index d3d749adb..8ab34af2a 100644
--- a/thirdparty/vhacd/src/btConvexHullComputer.cpp
+++ b/thirdparty/vhacd/src/btConvexHullComputer.cpp
@@ -49,6 +49,10 @@ typedef unsigned long long int32_t uint64_t;
#include <stdio.h>
#endif

+// -- GODOT start --
+namespace VHACD {
+// -- GODOT end --
+
// Convex hull implementation based on Preparata and Hong
// Ole Kniemeyer, MAXON Computer GmbH
class btConvexHullInternal {
@@ -2477,3 +2481,7 @@ btScalar btConvexHullComputer::compute(const void* coords, bool doubleCoords, in

return shift;
}
+
+// -- GODOT start --
+}; // namespace VHACD
+// -- GODOT end --
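For context on why the vendored sources are wrapped this way: Godot also ships its own copy of Bullet, so the V-HACD duplicates of `btVector3`, `btAlignedObjectArray` and friends would otherwise define the same symbols a second time when both are linked into the engine. Putting the copies in `namespace VHACD` gives them distinct mangled names. A minimal sketch of the clash that is avoided (the consumer function is illustrative, not part of the Godot source):

```cpp
// Bullet proper keeps its class in the global namespace ...
class btVector3 { /* Bullet's layout */ };

// ... while the copy bundled with V-HACD now lives in its own namespace, so
// linking both into the same binary no longer risks duplicate symbols or
// one-definition-rule violations between two different btVector3 layouts.
namespace VHACD {
class btVector3 { /* V-HACD's private copy */ };
} // namespace VHACD

// Code inside thirdparty/vhacd is itself wrapped in `namespace VHACD {}`, so its
// unqualified uses of btVector3 keep resolving to VHACD::btVector3 unchanged.
void illustrative_consumer(const ::btVector3 &from_bullet, const VHACD::btVector3 &from_vhacd);
```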
@@ -0,0 +1,53 @@
diff --git a/thirdparty/vhacd/inc/btScalar.h b/thirdparty/vhacd/inc/btScalar.h
index 487205062..52297cd78 100644
--- a/thirdparty/vhacd/inc/btScalar.h
+++ b/thirdparty/vhacd/inc/btScalar.h
@@ -535,6 +535,29 @@ struct btTypedObject {
}
};

+// -- GODOT start --
+// Cherry-picked from Bullet 2.88 to fix GH-27926
+///align a pointer to the provided alignment, upwards
+template <typename T>
+T *btAlignPointer(T *unalignedPtr, size_t alignment)
+{
+ struct btConvertPointerSizeT
+ {
+ union {
+ T *ptr;
+ size_t integer;
+ };
+ };
+ btConvertPointerSizeT converter;
+
+ const size_t bit_mask = ~(alignment - 1);
+ converter.ptr = unalignedPtr;
+ converter.integer += alignment - 1;
+ converter.integer &= bit_mask;
+ return converter.ptr;
+}
+// -- GODOT end --
+
// -- GODOT start --
}; // namespace VHACD
// -- GODOT end --
diff --git a/thirdparty/vhacd/src/btAlignedAllocator.cpp b/thirdparty/vhacd/src/btAlignedAllocator.cpp
index ce0e7f26f..8dee31e7e 100644
--- a/thirdparty/vhacd/src/btAlignedAllocator.cpp
+++ b/thirdparty/vhacd/src/btAlignedAllocator.cpp
@@ -72,8 +72,12 @@ static inline void* btAlignedAllocDefault(size_t size, int32_t alignment)

real = (char*)sAllocFunc(size + sizeof(void*) + (alignment - 1));
if (real) {
- offset = (alignment - (unsigned long)(real + sizeof(void*))) & (alignment - 1);
- ret = (void*)((real + sizeof(void*)) + offset);
+ // -- GODOT start --
+ // Synced with Bullet 2.88 to fix GH-27926
+ //offset = (alignment - (unsigned long)(real + sizeof(void*))) & (alignment - 1);
+ //ret = (void*)((real + sizeof(void*)) + offset);
+ ret = btAlignPointer(real + sizeof(void *), alignment);
+ // -- GODOT end --
*((void**)(ret)-1) = (void*)(real);
}
else {
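Why this cherry-pick fixes the MinGW build (GH-27926): the replaced line casts a pointer to `unsigned long`, which is only 32 bits wide on LLP64 targets such as 64-bit Windows, so GCC/MinGW rejects the lossy pointer-to-integer cast that MSVC merely warns about (hence the `#pragma warning(disable:4311 4302)` seen in the header diff above). `btAlignPointer` instead rounds the address up through a `size_t`, which always matches the pointer width. A standalone sketch of the same technique, assuming a power-of-two alignment (the helper mirrors the patched code; the `main` harness and names outside the patch are illustrative):

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Same technique as the btAlignPointer cherry-picked above: round a pointer up
// to `alignment` (a power of two) using size_t, never a 32-bit integer type.
template <typename T>
T *align_pointer_up(T *unaligned_ptr, size_t alignment) {
    union {
        T *ptr;
        size_t integer; // size_t is pointer-sized on LP64 and LLP64 alike
    } converter;        // same union-based punning the Bullet code relies on

    const size_t bit_mask = ~(alignment - 1);
    converter.ptr = unaligned_ptr;
    converter.integer += alignment - 1;
    converter.integer &= bit_mask;
    return converter.ptr;
}

int main() {
    // Over-allocate, then align the start of the usable block to 16 bytes,
    // mirroring what btAlignedAllocDefault does with the returned pointer.
    char *raw = static_cast<char *>(std::malloc(64 + sizeof(void *) + 15));
    char *aligned = align_pointer_up(raw + sizeof(void *), 16);
    std::printf("raw=%p aligned=%p\n", static_cast<void *>(raw), static_cast<void *>(aligned));
    std::free(raw);
    return 0;
}
```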
@@ -0,0 +1,29 @@
BSD 3-Clause License

Copyright (c) 2011, Khaled Mamou (kmamou at gmail dot com)
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ -22,26 +22,26 @@ subject to the following restrictions:
|
|||
|
||||
#include "btScalar.h"
|
||||
|
||||
//GODOT ADDITION
|
||||
// -- GODOT start --
|
||||
namespace VHACD {
|
||||
//
|
||||
// -- GODOT end --
|
||||
|
||||
//#define BT_DEBUG_MEMORY_ALLOCATIONS 1
|
||||
#ifdef BT_DEBUG_MEMORY_ALLOCATIONS
|
||||
|
||||
#define btAlignedAlloc(a, b) \
|
||||
btAlignedAllocInternal(a, b, __LINE__, __FILE__)
|
||||
btAlignedAllocInternal(a, b, __LINE__, __FILE__)
|
||||
|
||||
#define btAlignedFree(ptr) \
|
||||
btAlignedFreeInternal(ptr, __LINE__, __FILE__)
|
||||
btAlignedFreeInternal(ptr, __LINE__, __FILE__)
|
||||
|
||||
void *btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char *filename);
|
||||
void* btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char* filename);
|
||||
|
||||
void btAlignedFreeInternal(void *ptr, int32_t line, char *filename);
|
||||
void btAlignedFreeInternal(void* ptr, int32_t line, char* filename);
|
||||
|
||||
#else
|
||||
void *btAlignedAllocInternal(size_t size, int32_t alignment);
|
||||
void btAlignedFreeInternal(void *ptr);
|
||||
void* btAlignedAllocInternal(size_t size, int32_t alignment);
|
||||
void btAlignedFreeInternal(void* ptr);
|
||||
|
||||
#define btAlignedAlloc(size, alignment) btAlignedAllocInternal(size, alignment)
|
||||
#define btAlignedFree(ptr) btAlignedFreeInternal(ptr)
|
||||
|
@ -49,63 +49,65 @@ void btAlignedFreeInternal(void *ptr);
|
|||
#endif
|
||||
typedef int32_t size_type;
|
||||
|
||||
typedef void *(btAlignedAllocFunc)(size_t size, int32_t alignment);
|
||||
typedef void(btAlignedFreeFunc)(void *memblock);
|
||||
typedef void *(btAllocFunc)(size_t size);
|
||||
typedef void(btFreeFunc)(void *memblock);
|
||||
typedef void*(btAlignedAllocFunc)(size_t size, int32_t alignment);
|
||||
typedef void(btAlignedFreeFunc)(void* memblock);
|
||||
typedef void*(btAllocFunc)(size_t size);
|
||||
typedef void(btFreeFunc)(void* memblock);
|
||||
|
||||
///The developer can let all Bullet memory allocations go through a custom memory allocator, using btAlignedAllocSetCustom
|
||||
void btAlignedAllocSetCustom(btAllocFunc *allocFunc, btFreeFunc *freeFunc);
|
||||
void btAlignedAllocSetCustom(btAllocFunc* allocFunc, btFreeFunc* freeFunc);
|
||||
///If the developer has already an custom aligned allocator, then btAlignedAllocSetCustomAligned can be used. The default aligned allocator pre-allocates extra memory using the non-aligned allocator, and instruments it.
|
||||
void btAlignedAllocSetCustomAligned(btAlignedAllocFunc *allocFunc, btAlignedFreeFunc *freeFunc);
|
||||
void btAlignedAllocSetCustomAligned(btAlignedAllocFunc* allocFunc, btAlignedFreeFunc* freeFunc);
|
||||
|
||||
///The btAlignedAllocator is a portable class for aligned memory allocations.
|
||||
///Default implementations for unaligned and aligned allocations can be overridden by a custom allocator using btAlignedAllocSetCustom and btAlignedAllocSetCustomAligned.
|
||||
template <typename T, unsigned Alignment>
|
||||
class btAlignedAllocator {
|
||||
|
||||
typedef btAlignedAllocator<T, Alignment> self_type;
|
||||
typedef btAlignedAllocator<T, Alignment> self_type;
|
||||
|
||||
public:
|
||||
//just going down a list:
|
||||
btAlignedAllocator() {}
|
||||
/*
|
||||
//just going down a list:
|
||||
btAlignedAllocator() {}
|
||||
/*
|
||||
btAlignedAllocator( const self_type & ) {}
|
||||
*/
|
||||
|
||||
template <typename Other>
|
||||
btAlignedAllocator(const btAlignedAllocator<Other, Alignment> &) {}
|
||||
template <typename Other>
|
||||
btAlignedAllocator(const btAlignedAllocator<Other, Alignment>&) {}
|
||||
|
||||
typedef const T *const_pointer;
|
||||
typedef const T &const_reference;
|
||||
typedef T *pointer;
|
||||
typedef T &reference;
|
||||
typedef T value_type;
|
||||
typedef const T* const_pointer;
|
||||
typedef const T& const_reference;
|
||||
typedef T* pointer;
|
||||
typedef T& reference;
|
||||
typedef T value_type;
|
||||
|
||||
pointer address(reference ref) const { return &ref; }
|
||||
const_pointer address(const_reference ref) const { return &ref; }
|
||||
pointer allocate(size_type n, const_pointer *hint = 0) {
|
||||
(void)hint;
|
||||
return reinterpret_cast<pointer>(btAlignedAlloc(sizeof(value_type) * n, Alignment));
|
||||
}
|
||||
void construct(pointer ptr, const value_type &value) { new (ptr) value_type(value); }
|
||||
void deallocate(pointer ptr) {
|
||||
btAlignedFree(reinterpret_cast<void *>(ptr));
|
||||
}
|
||||
void destroy(pointer ptr) { ptr->~value_type(); }
|
||||
pointer address(reference ref) const { return &ref; }
|
||||
const_pointer address(const_reference ref) const { return &ref; }
|
||||
pointer allocate(size_type n, const_pointer* hint = 0)
|
||||
{
|
||||
(void)hint;
|
||||
return reinterpret_cast<pointer>(btAlignedAlloc(sizeof(value_type) * n, Alignment));
|
||||
}
|
||||
void construct(pointer ptr, const value_type& value) { new (ptr) value_type(value); }
|
||||
void deallocate(pointer ptr)
|
||||
{
|
||||
btAlignedFree(reinterpret_cast<void*>(ptr));
|
||||
}
|
||||
void destroy(pointer ptr) { ptr->~value_type(); }
|
||||
|
||||
template <typename O>
|
||||
struct rebind {
|
||||
typedef btAlignedAllocator<O, Alignment> other;
|
||||
};
|
||||
template <typename O>
|
||||
self_type &operator=(const btAlignedAllocator<O, Alignment> &) { return *this; }
|
||||
template <typename O>
|
||||
struct rebind {
|
||||
typedef btAlignedAllocator<O, Alignment> other;
|
||||
};
|
||||
template <typename O>
|
||||
self_type& operator=(const btAlignedAllocator<O, Alignment>&) { return *this; }
|
||||
|
||||
friend bool operator==(const self_type &, const self_type &) { return true; }
|
||||
friend bool operator==(const self_type&, const self_type&) { return true; }
|
||||
};
|
||||
|
||||
//GODOT ADDITION
|
||||
// -- GODOT start --
|
||||
}; // namespace VHACD
|
||||
//
|
||||
// -- GODOT end --
|
||||
|
||||
#endif //BT_ALIGNED_ALLOCATOR
|
||||
|
|
|
@ -38,383 +38,419 @@ subject to the following restrictions:
|
|||
#include <new> //for placement new
|
||||
#endif //BT_USE_PLACEMENT_NEW
|
||||
|
||||
//GODOT ADDITION
|
||||
// -- GODOT start --
|
||||
namespace VHACD {
|
||||
//
|
||||
// -- GODOT end --
|
||||
|
||||
///The btAlignedObjectArray template class uses a subset of the stl::vector interface for its methods
|
||||
///It is developed to replace stl::vector to avoid portability issues, including STL alignment issues to add SIMD/SSE data
|
||||
template <typename T>
|
||||
//template <class T>
|
||||
class btAlignedObjectArray {
|
||||
btAlignedAllocator<T, 16> m_allocator;
|
||||
btAlignedAllocator<T, 16> m_allocator;
|
||||
|
||||
int32_t m_size;
|
||||
int32_t m_capacity;
|
||||
T *m_data;
|
||||
//PCK: added this line
|
||||
bool m_ownsMemory;
|
||||
int32_t m_size;
|
||||
int32_t m_capacity;
|
||||
T* m_data;
|
||||
//PCK: added this line
|
||||
bool m_ownsMemory;
|
||||
|
||||
#ifdef BT_ALLOW_ARRAY_COPY_OPERATOR
|
||||
public:
|
||||
SIMD_FORCE_INLINE btAlignedObjectArray<T> &operator=(const btAlignedObjectArray<T> &other) {
|
||||
copyFromArray(other);
|
||||
return *this;
|
||||
}
|
||||
SIMD_FORCE_INLINE btAlignedObjectArray<T>& operator=(const btAlignedObjectArray<T>& other)
|
||||
{
|
||||
copyFromArray(other);
|
||||
return *this;
|
||||
}
|
||||
#else //BT_ALLOW_ARRAY_COPY_OPERATOR
|
||||
private:
|
||||
SIMD_FORCE_INLINE btAlignedObjectArray<T> &operator=(const btAlignedObjectArray<T> &other);
|
||||
SIMD_FORCE_INLINE btAlignedObjectArray<T>& operator=(const btAlignedObjectArray<T>& other);
|
||||
#endif //BT_ALLOW_ARRAY_COPY_OPERATOR
|
||||
|
||||
protected:
|
||||
SIMD_FORCE_INLINE int32_t allocSize(int32_t size) {
|
||||
return (size ? size * 2 : 1);
|
||||
}
|
||||
SIMD_FORCE_INLINE void copy(int32_t start, int32_t end, T *dest) const {
|
||||
int32_t i;
|
||||
for (i = start; i < end; ++i)
|
||||
SIMD_FORCE_INLINE int32_t allocSize(int32_t size)
|
||||
{
|
||||
return (size ? size * 2 : 1);
|
||||
}
|
||||
SIMD_FORCE_INLINE void copy(int32_t start, int32_t end, T* dest) const
|
||||
{
|
||||
int32_t i;
|
||||
for (i = start; i < end; ++i)
|
||||
#ifdef BT_USE_PLACEMENT_NEW
|
||||
new (&dest[i]) T(m_data[i]);
|
||||
new (&dest[i]) T(m_data[i]);
|
||||
#else
|
||||
dest[i] = m_data[i];
|
||||
dest[i] = m_data[i];
|
||||
#endif //BT_USE_PLACEMENT_NEW
|
||||
}
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void init() {
|
||||
//PCK: added this line
|
||||
m_ownsMemory = true;
|
||||
m_data = 0;
|
||||
m_size = 0;
|
||||
m_capacity = 0;
|
||||
}
|
||||
SIMD_FORCE_INLINE void destroy(int32_t first, int32_t last) {
|
||||
int32_t i;
|
||||
for (i = first; i < last; i++) {
|
||||
m_data[i].~T();
|
||||
}
|
||||
}
|
||||
SIMD_FORCE_INLINE void init()
|
||||
{
|
||||
//PCK: added this line
|
||||
m_ownsMemory = true;
|
||||
m_data = 0;
|
||||
m_size = 0;
|
||||
m_capacity = 0;
|
||||
}
|
||||
SIMD_FORCE_INLINE void destroy(int32_t first, int32_t last)
|
||||
{
|
||||
int32_t i;
|
||||
for (i = first; i < last; i++) {
|
||||
m_data[i].~T();
|
||||
}
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void *allocate(int32_t size) {
|
||||
if (size)
|
||||
return m_allocator.allocate(size);
|
||||
return 0;
|
||||
}
|
||||
SIMD_FORCE_INLINE void* allocate(int32_t size)
|
||||
{
|
||||
if (size)
|
||||
return m_allocator.allocate(size);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void deallocate() {
|
||||
if (m_data) {
|
||||
//PCK: enclosed the deallocation in this block
|
||||
if (m_ownsMemory) {
|
||||
m_allocator.deallocate(m_data);
|
||||
}
|
||||
m_data = 0;
|
||||
}
|
||||
}
|
||||
SIMD_FORCE_INLINE void deallocate()
|
||||
{
|
||||
if (m_data) {
|
||||
//PCK: enclosed the deallocation in this block
|
||||
if (m_ownsMemory) {
|
||||
m_allocator.deallocate(m_data);
|
||||
}
|
||||
m_data = 0;
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
btAlignedObjectArray() {
|
||||
init();
|
||||
}
|
||||
btAlignedObjectArray()
|
||||
{
|
||||
init();
|
||||
}
|
||||
|
||||
~btAlignedObjectArray() {
|
||||
clear();
|
||||
}
|
||||
~btAlignedObjectArray()
|
||||
{
|
||||
clear();
|
||||
}
|
||||
|
||||
///Generally it is best to avoid using the copy constructor of an btAlignedObjectArray, and use a (const) reference to the array instead.
|
||||
btAlignedObjectArray(const btAlignedObjectArray &otherArray) {
|
||||
init();
|
||||
///Generally it is best to avoid using the copy constructor of an btAlignedObjectArray, and use a (const) reference to the array instead.
|
||||
btAlignedObjectArray(const btAlignedObjectArray& otherArray)
|
||||
{
|
||||
init();
|
||||
|
||||
int32_t otherSize = otherArray.size();
|
||||
resize(otherSize);
|
||||
otherArray.copy(0, otherSize, m_data);
|
||||
}
|
||||
int32_t otherSize = otherArray.size();
|
||||
resize(otherSize);
|
||||
otherArray.copy(0, otherSize, m_data);
|
||||
}
|
||||
|
||||
/// return the number of elements in the array
|
||||
SIMD_FORCE_INLINE int32_t size() const {
|
||||
return m_size;
|
||||
}
|
||||
/// return the number of elements in the array
|
||||
SIMD_FORCE_INLINE int32_t size() const
|
||||
{
|
||||
return m_size;
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE const T &at(int32_t n) const {
|
||||
btAssert(n >= 0);
|
||||
btAssert(n < size());
|
||||
return m_data[n];
|
||||
}
|
||||
SIMD_FORCE_INLINE const T& at(int32_t n) const
|
||||
{
|
||||
btAssert(n >= 0);
|
||||
btAssert(n < size());
|
||||
return m_data[n];
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE T &at(int32_t n) {
|
||||
btAssert(n >= 0);
|
||||
btAssert(n < size());
|
||||
return m_data[n];
|
||||
}
|
||||
SIMD_FORCE_INLINE T& at(int32_t n)
|
||||
{
|
||||
btAssert(n >= 0);
|
||||
btAssert(n < size());
|
||||
return m_data[n];
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE const T &operator[](int32_t n) const {
|
||||
btAssert(n >= 0);
|
||||
btAssert(n < size());
|
||||
return m_data[n];
|
||||
}
|
||||
SIMD_FORCE_INLINE const T& operator[](int32_t n) const
|
||||
{
|
||||
btAssert(n >= 0);
|
||||
btAssert(n < size());
|
||||
return m_data[n];
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE T &operator[](int32_t n) {
|
||||
btAssert(n >= 0);
|
||||
btAssert(n < size());
|
||||
return m_data[n];
|
||||
}
|
||||
SIMD_FORCE_INLINE T& operator[](int32_t n)
|
||||
{
|
||||
btAssert(n >= 0);
|
||||
btAssert(n < size());
|
||||
return m_data[n];
|
||||
}
|
||||
|
||||
///clear the array, deallocated memory. Generally it is better to use array.resize(0), to reduce performance overhead of run-time memory (de)allocations.
|
||||
SIMD_FORCE_INLINE void clear() {
|
||||
destroy(0, size());
|
||||
///clear the array, deallocated memory. Generally it is better to use array.resize(0), to reduce performance overhead of run-time memory (de)allocations.
|
||||
SIMD_FORCE_INLINE void clear()
|
||||
{
|
||||
destroy(0, size());
|
||||
|
||||
deallocate();
|
||||
deallocate();
|
||||
|
||||
init();
|
||||
}
|
||||
init();
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void pop_back() {
|
||||
btAssert(m_size > 0);
|
||||
m_size--;
|
||||
m_data[m_size].~T();
|
||||
}
|
||||
SIMD_FORCE_INLINE void pop_back()
|
||||
{
|
||||
btAssert(m_size > 0);
|
||||
m_size--;
|
||||
m_data[m_size].~T();
|
||||
}
|
||||
|
||||
///resize changes the number of elements in the array. If the new size is larger, the new elements will be constructed using the optional second argument.
|
||||
///when the new number of elements is smaller, the destructor will be called, but memory will not be freed, to reduce performance overhead of run-time memory (de)allocations.
|
||||
SIMD_FORCE_INLINE void resize(int32_t newsize, const T &fillData = T()) {
|
||||
int32_t curSize = size();
|
||||
///resize changes the number of elements in the array. If the new size is larger, the new elements will be constructed using the optional second argument.
|
||||
///when the new number of elements is smaller, the destructor will be called, but memory will not be freed, to reduce performance overhead of run-time memory (de)allocations.
|
||||
SIMD_FORCE_INLINE void resize(int32_t newsize, const T& fillData = T())
|
||||
{
|
||||
int32_t curSize = size();
|
||||
|
||||
if (newsize < curSize) {
|
||||
for (int32_t i = newsize; i < curSize; i++) {
|
||||
m_data[i].~T();
|
||||
}
|
||||
} else {
|
||||
if (newsize > size()) {
|
||||
reserve(newsize);
|
||||
}
|
||||
if (newsize < curSize) {
|
||||
for (int32_t i = newsize; i < curSize; i++) {
|
||||
m_data[i].~T();
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (newsize > size()) {
|
||||
reserve(newsize);
|
||||
}
|
||||
#ifdef BT_USE_PLACEMENT_NEW
|
||||
for (int32_t i = curSize; i < newsize; i++) {
|
||||
new (&m_data[i]) T(fillData);
|
||||
}
|
||||
for (int32_t i = curSize; i < newsize; i++) {
|
||||
new (&m_data[i]) T(fillData);
|
||||
}
|
||||
#endif //BT_USE_PLACEMENT_NEW
|
||||
}
|
||||
}
|
||||
|
||||
m_size = newsize;
|
||||
}
|
||||
m_size = newsize;
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE T &expandNonInitializing() {
|
||||
int32_t sz = size();
|
||||
if (sz == capacity()) {
|
||||
reserve(allocSize(size()));
|
||||
}
|
||||
m_size++;
|
||||
SIMD_FORCE_INLINE T& expandNonInitializing()
|
||||
{
|
||||
int32_t sz = size();
|
||||
if (sz == capacity()) {
|
||||
reserve(allocSize(size()));
|
||||
}
|
||||
m_size++;
|
||||
|
||||
return m_data[sz];
|
||||
}
|
||||
return m_data[sz];
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE T &expand(const T &fillValue = T()) {
|
||||
int32_t sz = size();
|
||||
if (sz == capacity()) {
|
||||
reserve(allocSize(size()));
|
||||
}
|
||||
m_size++;
|
||||
SIMD_FORCE_INLINE T& expand(const T& fillValue = T())
|
||||
{
|
||||
int32_t sz = size();
|
||||
if (sz == capacity()) {
|
||||
reserve(allocSize(size()));
|
||||
}
|
||||
m_size++;
|
||||
#ifdef BT_USE_PLACEMENT_NEW
|
||||
new (&m_data[sz]) T(fillValue); //use the in-place new (not really allocating heap memory)
|
||||
new (&m_data[sz]) T(fillValue); //use the in-place new (not really allocating heap memory)
|
||||
#endif
|
||||
|
||||
return m_data[sz];
|
||||
}
|
||||
return m_data[sz];
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void push_back(const T &_Val) {
|
||||
int32_t sz = size();
|
||||
if (sz == capacity()) {
|
||||
reserve(allocSize(size()));
|
||||
}
|
||||
SIMD_FORCE_INLINE void push_back(const T& _Val)
|
||||
{
|
||||
int32_t sz = size();
|
||||
if (sz == capacity()) {
|
||||
reserve(allocSize(size()));
|
||||
}
|
||||
|
||||
#ifdef BT_USE_PLACEMENT_NEW
|
||||
new (&m_data[m_size]) T(_Val);
|
||||
new (&m_data[m_size]) T(_Val);
|
||||
#else
|
||||
m_data[size()] = _Val;
|
||||
m_data[size()] = _Val;
|
||||
#endif //BT_USE_PLACEMENT_NEW
|
||||
|
||||
m_size++;
|
||||
}
|
||||
m_size++;
|
||||
}
|
||||
|
||||
/// return the pre-allocated (reserved) elements, this is at least as large as the total number of elements,see size() and reserve()
|
||||
SIMD_FORCE_INLINE int32_t capacity() const {
|
||||
return m_capacity;
|
||||
}
|
||||
/// return the pre-allocated (reserved) elements, this is at least as large as the total number of elements,see size() and reserve()
|
||||
SIMD_FORCE_INLINE int32_t capacity() const
|
||||
{
|
||||
return m_capacity;
|
||||
}
|
||||
|
||||
SIMD_FORCE_INLINE void reserve(int32_t _Count) { // determine new minimum length of allocated storage
|
||||
if (capacity() < _Count) { // not enough room, reallocate
|
||||
T *s = (T *)allocate(_Count);
|
||||
SIMD_FORCE_INLINE void reserve(int32_t _Count)
|
||||
{ // determine new minimum length of allocated storage
|
||||
if (capacity() < _Count) { // not enough room, reallocate
|
||||
T* s = (T*)allocate(_Count);
|
||||
|
||||
copy(0, size(), s);
|
||||
copy(0, size(), s);
|
||||
|
||||
destroy(0, size());
|
||||
destroy(0, size());
|
||||
|
||||
deallocate();
|
||||
deallocate();
|
||||
|
||||
//PCK: added this line
|
||||
m_ownsMemory = true;
|
||||
//PCK: added this line
|
||||
m_ownsMemory = true;
|
||||
|
||||
m_data = s;
|
||||
m_data = s;
|
||||
|
||||
m_capacity = _Count;
|
||||
}
|
||||
}
|
||||
m_capacity = _Count;
|
||||
}
|
||||
}
|
||||
|
||||
class less {
|
||||
public:
|
||||
bool operator()(const T &a, const T &b) {
|
||||
return (a < b);
|
||||
}
|
||||
};
|
||||
class less {
|
||||
public:
|
||||
bool operator()(const T& a, const T& b)
|
||||
{
|
||||
return (a < b);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename L>
|
||||
void quickSortInternal(const L &CompareFunc, int32_t lo, int32_t hi) {
|
||||
// lo is the lower index, hi is the upper index
|
||||
// of the region of array a that is to be sorted
|
||||
int32_t i = lo, j = hi;
|
||||
T x = m_data[(lo + hi) / 2];
|
||||
template <typename L>
|
||||
void quickSortInternal(const L& CompareFunc, int32_t lo, int32_t hi)
|
||||
{
|
||||
// lo is the lower index, hi is the upper index
|
||||
// of the region of array a that is to be sorted
|
||||
int32_t i = lo, j = hi;
|
||||
T x = m_data[(lo + hi) / 2];
|
||||
|
||||
// partition
|
||||
do {
|
||||
while (CompareFunc(m_data[i], x))
|
||||
i++;
|
||||
while (CompareFunc(x, m_data[j]))
|
||||
j--;
|
||||
if (i <= j) {
|
||||
swap(i, j);
|
||||
i++;
|
||||
j--;
|
||||
}
|
||||
} while (i <= j);
|
||||
// partition
|
||||
do {
|
||||
while (CompareFunc(m_data[i], x))
|
||||
i++;
|
||||
while (CompareFunc(x, m_data[j]))
|
||||
j--;
|
||||
if (i <= j) {
|
||||
swap(i, j);
|
||||
i++;
|
||||
j--;
|
||||
}
|
||||
} while (i <= j);
|
||||
|
||||
// recursion
|
||||
if (lo < j)
|
||||
quickSortInternal(CompareFunc, lo, j);
|
||||
if (i < hi)
|
||||
quickSortInternal(CompareFunc, i, hi);
|
||||
}
|
||||
// recursion
|
||||
if (lo < j)
|
||||
quickSortInternal(CompareFunc, lo, j);
|
||||
if (i < hi)
|
||||
quickSortInternal(CompareFunc, i, hi);
|
||||
}
|
||||
|
||||
template <typename L>
|
||||
void quickSort(const L &CompareFunc) {
|
||||
//don't sort 0 or 1 elements
|
||||
if (size() > 1) {
|
||||
quickSortInternal(CompareFunc, 0, size() - 1);
|
||||
}
|
||||
}
|
||||
template <typename L>
|
||||
void quickSort(const L& CompareFunc)
|
||||
{
|
||||
//don't sort 0 or 1 elements
|
||||
if (size() > 1) {
|
||||
quickSortInternal(CompareFunc, 0, size() - 1);
|
||||
}
|
||||
}
|
||||
|
||||
///heap sort from http://www.csse.monash.edu.au/~lloyd/tildeAlgDS/Sort/Heap/
|
||||
template <typename L>
|
||||
void downHeap(T *pArr, int32_t k, int32_t n, const L &CompareFunc) {
|
||||
/* PRE: a[k+1..N] is a heap */
|
||||
/* POST: a[k..N] is a heap */
|
||||
///heap sort from http://www.csse.monash.edu.au/~lloyd/tildeAlgDS/Sort/Heap/
|
||||
template <typename L>
|
||||
void downHeap(T* pArr, int32_t k, int32_t n, const L& CompareFunc)
|
||||
{
|
||||
/* PRE: a[k+1..N] is a heap */
|
||||
/* POST: a[k..N] is a heap */
|
||||
|
||||
T temp = pArr[k - 1];
|
||||
/* k has child(s) */
|
||||
while (k <= n / 2) {
|
||||
int32_t child = 2 * k;
|
||||
T temp = pArr[k - 1];
|
||||
/* k has child(s) */
|
||||
while (k <= n / 2) {
|
||||
int32_t child = 2 * k;
|
||||
|
||||
if ((child < n) && CompareFunc(pArr[child - 1], pArr[child])) {
|
||||
child++;
|
||||
}
|
||||
/* pick larger child */
|
||||
if (CompareFunc(temp, pArr[child - 1])) {
|
||||
/* move child up */
|
||||
pArr[k - 1] = pArr[child - 1];
|
||||
k = child;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
pArr[k - 1] = temp;
|
||||
} /*downHeap*/
|
||||
if ((child < n) && CompareFunc(pArr[child - 1], pArr[child])) {
|
||||
child++;
|
||||
}
|
||||
/* pick larger child */
|
||||
if (CompareFunc(temp, pArr[child - 1])) {
|
||||
/* move child up */
|
||||
pArr[k - 1] = pArr[child - 1];
|
||||
k = child;
|
||||
}
|
||||
else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
pArr[k - 1] = temp;
|
||||
} /*downHeap*/
|
||||
|
||||
void swap(int32_t index0, int32_t index1) {
|
||||
void swap(int32_t index0, int32_t index1)
|
||||
{
|
||||
#ifdef BT_USE_MEMCPY
|
||||
char temp[sizeof(T)];
|
||||
memcpy(temp, &m_data[index0], sizeof(T));
|
||||
memcpy(&m_data[index0], &m_data[index1], sizeof(T));
|
||||
memcpy(&m_data[index1], temp, sizeof(T));
|
||||
char temp[sizeof(T)];
|
||||
memcpy(temp, &m_data[index0], sizeof(T));
|
||||
memcpy(&m_data[index0], &m_data[index1], sizeof(T));
|
||||
memcpy(&m_data[index1], temp, sizeof(T));
|
||||
#else
|
||||
T temp = m_data[index0];
|
||||
m_data[index0] = m_data[index1];
|
||||
m_data[index1] = temp;
|
||||
T temp = m_data[index0];
|
||||
m_data[index0] = m_data[index1];
|
||||
m_data[index1] = temp;
|
||||
#endif //BT_USE_PLACEMENT_NEW
|
||||
}
|
||||
}
|
||||
|
||||
template <typename L>
|
||||
void heapSort(const L &CompareFunc) {
|
||||
/* sort a[0..N-1], N.B. 0 to N-1 */
|
||||
int32_t k;
|
||||
int32_t n = m_size;
|
||||
for (k = n / 2; k > 0; k--) {
|
||||
downHeap(m_data, k, n, CompareFunc);
|
||||
}
|
||||
template <typename L>
|
||||
void heapSort(const L& CompareFunc)
|
||||
{
|
||||
/* sort a[0..N-1], N.B. 0 to N-1 */
|
||||
int32_t k;
|
||||
int32_t n = m_size;
|
||||
for (k = n / 2; k > 0; k--) {
|
||||
downHeap(m_data, k, n, CompareFunc);
|
||||
}
|
||||
|
||||
/* a[1..N] is now a heap */
|
||||
while (n >= 1) {
|
||||
swap(0, n - 1); /* largest of a[0..n-1] */
|
||||
/* a[1..N] is now a heap */
|
||||
while (n >= 1) {
|
||||
swap(0, n - 1); /* largest of a[0..n-1] */
|
||||
|
||||
n = n - 1;
|
||||
/* restore a[1..i-1] heap */
|
||||
downHeap(m_data, 1, n, CompareFunc);
|
||||
}
|
||||
}
|
||||
n = n - 1;
|
||||
/* restore a[1..i-1] heap */
|
||||
downHeap(m_data, 1, n, CompareFunc);
|
||||
}
|
||||
}
|
||||
|
||||
///non-recursive binary search, assumes sorted array
|
||||
int32_t findBinarySearch(const T &key) const {
|
||||
int32_t first = 0;
|
||||
int32_t last = size() - 1;
|
||||
///non-recursive binary search, assumes sorted array
|
||||
int32_t findBinarySearch(const T& key) const
|
||||
{
|
||||
int32_t first = 0;
|
||||
int32_t last = size() - 1;
|
||||
|
||||
//assume sorted array
|
||||
while (first <= last) {
|
||||
int32_t mid = (first + last) / 2; // compute mid point.
|
||||
if (key > m_data[mid])
|
||||
first = mid + 1; // repeat search in top half.
|
||||
else if (key < m_data[mid])
|
||||
last = mid - 1; // repeat search in bottom half.
|
||||
else
|
||||
return mid; // found it. return position /////
|
||||
}
|
||||
return size(); // failed to find key
|
||||
}
|
||||
//assume sorted array
|
||||
while (first <= last) {
|
||||
int32_t mid = (first + last) / 2; // compute mid point.
|
||||
if (key > m_data[mid])
|
||||
first = mid + 1; // repeat search in top half.
|
||||
else if (key < m_data[mid])
|
||||
last = mid - 1; // repeat search in bottom half.
|
||||
else
|
||||
return mid; // found it. return position /////
|
||||
}
|
||||
return size(); // failed to find key
|
||||
}
|
||||
|
||||
int32_t findLinearSearch(const T &key) const {
|
||||
int32_t index = size();
|
||||
int32_t i;
|
||||
int32_t findLinearSearch(const T& key) const
|
||||
{
|
||||
int32_t index = size();
|
||||
int32_t i;
|
||||
|
||||
for (i = 0; i < size(); i++) {
|
||||
if (m_data[i] == key) {
|
||||
index = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return index;
|
||||
}
|
||||
for (i = 0; i < size(); i++) {
|
||||
if (m_data[i] == key) {
|
||||
index = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return index;
|
||||
}
|
||||
|
||||
void remove(const T &key) {
|
||||
void remove(const T& key)
|
||||
{
|
||||
|
||||
int32_t findIndex = findLinearSearch(key);
|
||||
if (findIndex < size()) {
|
||||
swap(findIndex, size() - 1);
|
||||
pop_back();
|
||||
}
|
||||
}
|
||||
int32_t findIndex = findLinearSearch(key);
|
||||
if (findIndex < size()) {
|
||||
swap(findIndex, size() - 1);
|
||||
pop_back();
|
||||
}
|
||||
}
|
||||
|
||||
//PCK: whole function
|
||||
void initializeFromBuffer(void *buffer, int32_t size, int32_t capacity) {
|
||||
clear();
|
||||
m_ownsMemory = false;
|
||||
m_data = (T *)buffer;
|
||||
m_size = size;
|
||||
m_capacity = capacity;
|
||||
}
|
||||
//PCK: whole function
|
||||
void initializeFromBuffer(void* buffer, int32_t size, int32_t capacity)
|
||||
{
|
||||
clear();
|
||||
m_ownsMemory = false;
|
||||
m_data = (T*)buffer;
|
||||
m_size = size;
|
||||
m_capacity = capacity;
|
||||
}
|
||||
|
||||
void copyFromArray(const btAlignedObjectArray &otherArray) {
|
||||
int32_t otherSize = otherArray.size();
|
||||
resize(otherSize);
|
||||
otherArray.copy(0, otherSize, m_data);
|
||||
}
|
||||
void copyFromArray(const btAlignedObjectArray& otherArray)
|
||||
{
|
||||
int32_t otherSize = otherArray.size();
|
||||
resize(otherSize);
|
||||
otherArray.copy(0, otherSize, m_data);
|
||||
}
|
||||
};
|
||||
|
||||
//GODOT ADDITION
|
||||
// -- GODOT start --
|
||||
}; // namespace VHACD
|
||||
//
|
||||
// -- GODOT end --
|
||||
|
||||
#endif //BT_OBJECT_ARRAY__
|
||||
|
|
|
@ -18,60 +18,63 @@ subject to the following restrictions:
|
|||
#include "btAlignedObjectArray.h"
|
||||
#include "btVector3.h"
|
||||
|
||||
//GODOT ADDITION
|
||||
// -- GODOT start --
|
||||
namespace VHACD {
|
||||
//
|
||||
// -- GODOT end --
|
||||
|
||||
/// Convex hull implementation based on Preparata and Hong
|
||||
/// See http://code.google.com/p/bullet/issues/detail?id=275
|
||||
/// Ole Kniemeyer, MAXON Computer GmbH
|
||||
class btConvexHullComputer {
|
||||
private:
|
||||
btScalar compute(const void *coords, bool doubleCoords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp);
|
||||
btScalar compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp);
|
||||
|
||||
public:
|
||||
class Edge {
|
||||
private:
|
||||
int32_t next;
|
||||
int32_t reverse;
|
||||
int32_t targetVertex;
|
||||
class Edge {
|
||||
private:
|
||||
int32_t next;
|
||||
int32_t reverse;
|
||||
int32_t targetVertex;
|
||||
|
||||
friend class btConvexHullComputer;
|
||||
friend class btConvexHullComputer;
|
||||
|
||||
public:
|
||||
int32_t getSourceVertex() const {
|
||||
return (this + reverse)->targetVertex;
|
||||
}
|
||||
public:
|
||||
int32_t getSourceVertex() const
|
||||
{
|
||||
return (this + reverse)->targetVertex;
|
||||
}
|
||||
|
||||
int32_t getTargetVertex() const {
|
||||
return targetVertex;
|
||||
}
|
||||
int32_t getTargetVertex() const
|
||||
{
|
||||
return targetVertex;
|
||||
}
|
||||
|
||||
const Edge *getNextEdgeOfVertex() const // clockwise list of all edges of a vertex
|
||||
{
|
||||
return this + next;
|
||||
}
|
||||
const Edge* getNextEdgeOfVertex() const // clockwise list of all edges of a vertex
|
||||
{
|
||||
return this + next;
|
||||
}
|
||||
|
||||
const Edge *getNextEdgeOfFace() const // counter-clockwise list of all edges of a face
|
||||
{
|
||||
return (this + reverse)->getNextEdgeOfVertex();
|
||||
}
|
||||
const Edge* getNextEdgeOfFace() const // counter-clockwise list of all edges of a face
|
||||
{
|
||||
return (this + reverse)->getNextEdgeOfVertex();
|
||||
}
|
||||
|
||||
const Edge *getReverseEdge() const {
|
||||
return this + reverse;
|
||||
}
|
||||
};
|
||||
const Edge* getReverseEdge() const
|
||||
{
|
||||
return this + reverse;
|
||||
}
|
||||
};
|
||||
|
||||
// Vertices of the output hull
|
||||
btAlignedObjectArray<btVector3> vertices;
|
||||
// Vertices of the output hull
|
||||
btAlignedObjectArray<btVector3> vertices;
|
||||
|
||||
// Edges of the output hull
|
||||
btAlignedObjectArray<Edge> edges;
|
||||
// Edges of the output hull
|
||||
btAlignedObjectArray<Edge> edges;
|
||||
|
||||
// Faces of the convex hull. Each entry is an index into the "edges" array pointing to an edge of the face. Faces are planar n-gons
|
||||
btAlignedObjectArray<int32_t> faces;
|
||||
// Faces of the convex hull. Each entry is an index into the "edges" array pointing to an edge of the face. Faces are planar n-gons
|
||||
btAlignedObjectArray<int32_t> faces;
|
||||
|
||||
/*
|
||||
/*
|
||||
Compute convex hull of "count" vertices stored in "coords". "stride" is the difference in bytes
|
||||
between the addresses of consecutive vertices. If "shrink" is positive, the convex hull is shrunken
|
||||
by that amount (each face is moved by "shrink" length units towards the center along its normal).
|
||||
|
@ -83,18 +86,20 @@ public:
|
|||
|
||||
The output convex hull can be found in the member variables "vertices", "edges", "faces".
|
||||
*/
|
||||
btScalar compute(const float *coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) {
|
||||
return compute(coords, false, stride, count, shrink, shrinkClamp);
|
||||
}
|
||||
btScalar compute(const float* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp)
|
||||
{
|
||||
return compute(coords, false, stride, count, shrink, shrinkClamp);
|
||||
}
|
||||
|
||||
// same as above, but double precision
|
||||
btScalar compute(const double *coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) {
|
||||
return compute(coords, true, stride, count, shrink, shrinkClamp);
|
||||
}
|
||||
// same as above, but double precision
|
||||
btScalar compute(const double* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp)
|
||||
{
|
||||
return compute(coords, true, stride, count, shrink, shrinkClamp);
|
||||
}
|
||||
};
|
||||
|
||||
//GODOT ADDITION
|
||||
// -- GODOT start --
|
||||
}; // namespace VHACD
|
||||
//
|
||||
// -- GODOT end --
|
||||
|
||||
#endif //BT_CONVEX_HULL_COMPUTER_H
|
||||
|
|
|
@ -17,50 +17,57 @@ subject to the following restrictions:
|
|||
|
||||
#include "btScalar.h"
|
||||
|
||||
//GODOT ADDITION
|
||||
// -- GODOT start --
|
||||
namespace VHACD {
|
||||
//
|
||||
// -- GODOT end --
|
||||
|
||||
template <class T>
|
||||
SIMD_FORCE_INLINE const T &btMin(const T &a, const T &b) {
|
||||
return a < b ? a : b;
|
||||
SIMD_FORCE_INLINE const T& btMin(const T& a, const T& b)
|
||||
{
|
||||
return a < b ? a : b;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SIMD_FORCE_INLINE const T &btMax(const T &a, const T &b) {
|
||||
return a > b ? a : b;
|
||||
SIMD_FORCE_INLINE const T& btMax(const T& a, const T& b)
|
||||
{
|
||||
return a > b ? a : b;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SIMD_FORCE_INLINE const T &btClamped(const T &a, const T &lb, const T &ub) {
|
||||
return a < lb ? lb : (ub < a ? ub : a);
|
||||
SIMD_FORCE_INLINE const T& btClamped(const T& a, const T& lb, const T& ub)
|
||||
{
|
||||
return a < lb ? lb : (ub < a ? ub : a);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SIMD_FORCE_INLINE void btSetMin(T &a, const T &b) {
|
||||
if (b < a) {
|
||||
a = b;
|
||||
}
|
||||
SIMD_FORCE_INLINE void btSetMin(T& a, const T& b)
|
||||
{
|
||||
if (b < a) {
|
||||
a = b;
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SIMD_FORCE_INLINE void btSetMax(T &a, const T &b) {
|
||||
if (a < b) {
|
||||
a = b;
|
||||
}
|
||||
SIMD_FORCE_INLINE void btSetMax(T& a, const T& b)
|
||||
{
|
||||
if (a < b) {
|
||||
a = b;
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
SIMD_FORCE_INLINE void btClamp(T &a, const T &lb, const T &ub) {
|
||||
if (a < lb) {
|
||||
a = lb;
|
||||
} else if (ub < a) {
|
||||
a = ub;
|
||||
}
|
||||
SIMD_FORCE_INLINE void btClamp(T& a, const T& lb, const T& ub)
|
||||
{
|
||||
if (a < lb) {
|
||||
a = lb;
|
||||
}
|
||||
else if (ub < a) {
|
||||
a = ub;
|
||||
}
|
||||
}
|
||||
|
||||
//GODOT ADDITION
|
||||
// -- GODOT start --
|
||||
}; // namespace VHACD
|
||||
//
|
||||
// -- GODOT end --
|
||||
|
||||
#endif //BT_GEN_MINMAX_H
|
||||
|
|
|
@ -22,23 +22,24 @@ subject to the following restrictions:
|
|||
|
||||
#include <float.h>
|
||||
#include <math.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h> //size_t for MSVC 6.0
|
||||
#include <stdint.h>
|
||||
|
||||
/* SVN $Revision$ on $Date$ from http://bullet.googlecode.com*/
|
||||
#define BT_BULLET_VERSION 279
|
||||
|
||||
//GODOT ADDITION
|
||||
// -- GODOT start --
|
||||
namespace VHACD {
|
||||
//
|
||||
// -- GODOT end --
|
||||
|
||||
inline int32_t btGetVersion() {
|
||||
return BT_BULLET_VERSION;
|
||||
inline int32_t btGetVersion()
|
||||
{
|
||||
return BT_BULLET_VERSION;
|
||||
}
|
||||
|
||||
//GODOT ADDITION
|
||||
// -- GODOT start --
|
||||
}; // namespace VHACD
|
||||
//
|
||||
// -- GODOT end --
|
||||
|
||||
#if defined(DEBUG) || defined(_DEBUG)
|
||||
#define BT_DEBUG
|
||||
|
@ -107,12 +108,12 @@ inline int32_t btGetVersion() {
|
|||
#include <spu_printf.h>
|
||||
#define printf spu_printf
|
||||
#define btAssert(x) \
|
||||
{ \
|
||||
if (!(x)) { \
|
||||
printf("Assert " __FILE__ ":%u (" #x ")\n", __LINE__); \
|
||||
spu_hcmpeq(0, 0); \
|
||||
} \
|
||||
}
|
||||
{ \
|
||||
if (!(x)) { \
|
||||
printf("Assert " __FILE__ ":%u (" #x ")\n", __LINE__); \
|
||||
spu_hcmpeq(0, 0); \
|
||||
} \
|
||||
}
|
||||
#else
|
||||
#define btAssert assert
|
||||
#endif
|
||||
|
@ -206,9 +207,9 @@ inline int32_t btGetVersion() {
|
|||
#endif //__CELLOS_LV2__
|
||||
#endif
|
||||
|
||||
//GODOT ADDITION
|
||||
// -- GODOT start --
|
||||
namespace VHACD {
|
||||
//
|
||||
// -- GODOT end --
|
||||
|
||||
///The btScalar type abstracts floating point numbers, to easily switch between double and single floating point precision.
|
||||
#if defined(BT_USE_DOUBLE_PRECISION)
|
||||
|
@ -222,130 +223,96 @@ typedef float btScalar;
|
|||
#endif
|
||||
|
||||
#define BT_DECLARE_ALIGNED_ALLOCATOR() \
|
||||
SIMD_FORCE_INLINE void *operator new(size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \
|
||||
SIMD_FORCE_INLINE void operator delete(void *ptr) { btAlignedFree(ptr); } \
|
||||
SIMD_FORCE_INLINE void *operator new(size_t, void *ptr) { return ptr; } \
|
||||
SIMD_FORCE_INLINE void operator delete(void *, void *) {} \
|
||||
SIMD_FORCE_INLINE void *operator new[](size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \
|
||||
SIMD_FORCE_INLINE void operator delete[](void *ptr) { btAlignedFree(ptr); } \
|
||||
SIMD_FORCE_INLINE void *operator new[](size_t, void *ptr) { return ptr; } \
|
||||
SIMD_FORCE_INLINE void operator delete[](void *, void *) {}
|
||||
SIMD_FORCE_INLINE void* operator new(size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \
|
||||
SIMD_FORCE_INLINE void operator delete(void* ptr) { btAlignedFree(ptr); } \
|
||||
SIMD_FORCE_INLINE void* operator new(size_t, void* ptr) { return ptr; } \
|
||||
SIMD_FORCE_INLINE void operator delete(void*, void*) {} \
|
||||
SIMD_FORCE_INLINE void* operator new[](size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \
|
||||
SIMD_FORCE_INLINE void operator delete[](void* ptr) { btAlignedFree(ptr); } \
|
||||
SIMD_FORCE_INLINE void* operator new[](size_t, void* ptr) { return ptr; } \
|
||||
SIMD_FORCE_INLINE void operator delete[](void*, void*) {}
|
||||
|
||||
#if defined(BT_USE_DOUBLE_PRECISION) || defined(BT_FORCE_DOUBLE_FUNCTIONS)
|
||||
|
||||
SIMD_FORCE_INLINE btScalar btSqrt(btScalar x) {
|
||||
return sqrt(x);
|
||||
SIMD_FORCE_INLINE btScalar btSqrt(btScalar x)
|
||||
{
|
||||
return sqrt(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btFabs(btScalar x) {
|
||||
return fabs(x);
|
||||
SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabs(x); }
|
||||
SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cos(x); }
|
||||
SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sin(x); }
|
||||
SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tan(x); }
|
||||
SIMD_FORCE_INLINE btScalar btAcos(btScalar x)
|
||||
{
|
||||
if (x < btScalar(-1))
|
||||
x = btScalar(-1);
|
||||
if (x > btScalar(1))
|
||||
x = btScalar(1);
|
||||
return acos(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btCos(btScalar x) {
|
||||
return cos(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btSin(btScalar x) {
|
||||
return sin(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btTan(btScalar x) {
|
||||
return tan(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAcos(btScalar x) {
|
||||
if (x < btScalar(-1))
|
||||
x = btScalar(-1);
|
||||
if (x > btScalar(1))
|
||||
x = btScalar(1);
|
||||
return acos(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAsin(btScalar x) {
|
||||
if (x < btScalar(-1))
|
||||
x = btScalar(-1);
|
||||
if (x > btScalar(1))
|
||||
x = btScalar(1);
|
||||
return asin(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAtan(btScalar x) {
|
||||
return atan(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) {
|
||||
return atan2(x, y);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btExp(btScalar x) {
|
||||
return exp(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btLog(btScalar x) {
|
||||
return log(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) {
|
||||
return pow(x, y);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) {
|
||||
return fmod(x, y);
|
||||
SIMD_FORCE_INLINE btScalar btAsin(btScalar x)
|
||||
{
|
||||
if (x < btScalar(-1))
|
||||
x = btScalar(-1);
|
||||
if (x > btScalar(1))
|
||||
x = btScalar(1);
|
||||
return asin(x);
|
||||
}
|
||||
SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atan(x); }
|
||||
SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2(x, y); }
|
||||
SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return exp(x); }
|
||||
SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return log(x); }
|
||||
SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return pow(x, y); }
|
||||
SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { return fmod(x, y); }
|
||||
|
||||
#else
|
||||
|
||||
SIMD_FORCE_INLINE btScalar btSqrt(btScalar y) {
|
||||
SIMD_FORCE_INLINE btScalar btSqrt(btScalar y)
|
||||
{
|
||||
#ifdef USE_APPROXIMATION
|
||||
double x, z, tempf;
|
||||
     double x, z, tempf;
-    unsigned long *tfptr = ((unsigned long *)&tempf) + 1;
+    unsigned long* tfptr = ((unsigned long*)&tempf) + 1;
 
     tempf = y;
     *tfptr = (0xbfcdd90a - *tfptr) >> 1; /* estimate of 1/sqrt(y) */
     x = tempf;
     z = y * btScalar(0.5);
     x = (btScalar(1.5) * x) - (x * x) * (x * z); /* iteration formula */
     x = (btScalar(1.5) * x) - (x * x) * (x * z);
     x = (btScalar(1.5) * x) - (x * x) * (x * z);
     x = (btScalar(1.5) * x) - (x * x) * (x * z);
     x = (btScalar(1.5) * x) - (x * x) * (x * z);
     return x * y;
 #else
     return sqrtf(y);
 #endif
 }
-SIMD_FORCE_INLINE btScalar btFabs(btScalar x) {
-    return fabsf(x);
-}
-SIMD_FORCE_INLINE btScalar btCos(btScalar x) {
-    return cosf(x);
-}
-SIMD_FORCE_INLINE btScalar btSin(btScalar x) {
-    return sinf(x);
-}
-SIMD_FORCE_INLINE btScalar btTan(btScalar x) {
-    return tanf(x);
-}
-SIMD_FORCE_INLINE btScalar btAcos(btScalar x) {
+SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabsf(x); }
+SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cosf(x); }
+SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sinf(x); }
+SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tanf(x); }
+SIMD_FORCE_INLINE btScalar btAcos(btScalar x)
+{
     if (x < btScalar(-1))
         x = btScalar(-1);
     if (x > btScalar(1))
         x = btScalar(1);
     return acosf(x);
 }
-SIMD_FORCE_INLINE btScalar btAsin(btScalar x) {
+SIMD_FORCE_INLINE btScalar btAsin(btScalar x)
+{
     if (x < btScalar(-1))
         x = btScalar(-1);
     if (x > btScalar(1))
         x = btScalar(1);
     return asinf(x);
 }
-SIMD_FORCE_INLINE btScalar btAtan(btScalar x) {
-    return atanf(x);
-}
-SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) {
-    return atan2f(x, y);
-}
-SIMD_FORCE_INLINE btScalar btExp(btScalar x) {
-    return expf(x);
-}
-SIMD_FORCE_INLINE btScalar btLog(btScalar x) {
-    return logf(x);
-}
-SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) {
-    return powf(x, y);
-}
-SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) {
-    return fmodf(x, y);
-}
+SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atanf(x); }
+SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2f(x, y); }
+SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return expf(x); }
+SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return logf(x); }
+SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return powf(x, y); }
+SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { return fmodf(x, y); }
 
 #endif
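The `USE_APPROXIMATION` branch of `btSqrt` above computes sqrt(y) as y * (1/sqrt(y)): it seeds a reciprocal square root estimate from the bit pattern of the operand, then refines it with Newton iterations of the form `x = (1.5 * x) - (x * x) * (x * z)` with `z = 0.5 * y`. Below is a minimal standalone sketch of that refinement loop using an ordinary arithmetic seed instead of the bit trick; the helper names are illustrative and not part of V-HACD.

```cpp
#include <cmath>
#include <cstdio>

// One Newton step for r ~ 1/sqrt(y):  r' = r * (1.5 - 0.5 * y * r * r).
// This is the same update as "x = (1.5 * x) - (x * x) * (x * z)" with z = 0.5 * y.
static double refine_rsqrt(double y, double r, int steps) {
    const double half_y = 0.5 * y;
    for (int i = 0; i < steps; ++i) {
        r = r * (1.5 - half_y * r * r);
    }
    return r;
}

int main() {
    const double y = 2.0;
    // Any positive seed below sqrt(3/y) converges; the header instead derives
    // its seed from the bit pattern of the operand.
    double r = 0.5;
    const double approx = refine_rsqrt(y, r, 5) * y; // sqrt(y) = y * (1/sqrt(y))
    std::printf("approx = %.12f, std::sqrt = %.12f\n", approx, std::sqrt(y));
    return 0;
}
```

Each iteration roughly doubles the number of correct digits, which is why a handful of steps is enough in the header.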
@@ -366,110 +333,119 @@ SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) {
 #define SIMD_INFINITY FLT_MAX
 #endif
 
-SIMD_FORCE_INLINE btScalar btAtan2Fast(btScalar y, btScalar x) {
+SIMD_FORCE_INLINE btScalar btAtan2Fast(btScalar y, btScalar x)
+{
     btScalar coeff_1 = SIMD_PI / 4.0f;
     btScalar coeff_2 = 3.0f * coeff_1;
     btScalar abs_y = btFabs(y);
     btScalar angle;
     if (x >= 0.0f) {
         btScalar r = (x - abs_y) / (x + abs_y);
         angle = coeff_1 - coeff_1 * r;
-    } else {
+    }
+    else {
         btScalar r = (x + abs_y) / (abs_y - x);
         angle = coeff_2 - coeff_1 * r;
     }
     return (y < 0.0f) ? -angle : angle;
 }
 
-SIMD_FORCE_INLINE bool btFuzzyZero(btScalar x) {
-    return btFabs(x) < SIMD_EPSILON;
-}
+SIMD_FORCE_INLINE bool btFuzzyZero(btScalar x) { return btFabs(x) < SIMD_EPSILON; }
 
-SIMD_FORCE_INLINE bool btEqual(btScalar a, btScalar eps) {
+SIMD_FORCE_INLINE bool btEqual(btScalar a, btScalar eps)
+{
     return (((a) <= eps) && !((a) < -eps));
 }
-SIMD_FORCE_INLINE bool btGreaterEqual(btScalar a, btScalar eps) {
+SIMD_FORCE_INLINE bool btGreaterEqual(btScalar a, btScalar eps)
+{
     return (!((a) <= eps));
 }
 
-SIMD_FORCE_INLINE int32_t btIsNegative(btScalar x) {
+SIMD_FORCE_INLINE int32_t btIsNegative(btScalar x)
+{
     return x < btScalar(0.0) ? 1 : 0;
 }
 
-SIMD_FORCE_INLINE btScalar btRadians(btScalar x) {
-    return x * SIMD_RADS_PER_DEG;
-}
-SIMD_FORCE_INLINE btScalar btDegrees(btScalar x) {
-    return x * SIMD_DEGS_PER_RAD;
-}
+SIMD_FORCE_INLINE btScalar btRadians(btScalar x) { return x * SIMD_RADS_PER_DEG; }
+SIMD_FORCE_INLINE btScalar btDegrees(btScalar x) { return x * SIMD_DEGS_PER_RAD; }
 
 #define BT_DECLARE_HANDLE(name) \
     typedef struct name##__ {  \
         int32_t unused;        \
     } * name
 
 #ifndef btFsel
-SIMD_FORCE_INLINE btScalar btFsel(btScalar a, btScalar b, btScalar c) {
+SIMD_FORCE_INLINE btScalar btFsel(btScalar a, btScalar b, btScalar c)
+{
     return a >= 0 ? b : c;
 }
 #endif
 #define btFsels(a, b, c) (btScalar) btFsel(a, b, c)
 
-SIMD_FORCE_INLINE bool btMachineIsLittleEndian() {
+SIMD_FORCE_INLINE bool btMachineIsLittleEndian()
+{
     long int i = 1;
-    const char *p = (const char *)&i;
+    const char* p = (const char*)&i;
     if (p[0] == 1) // Lowest address contains the least significant byte
         return true;
     else
         return false;
 }
 
 ///btSelect avoids branches, which makes performance much better for consoles like Playstation 3 and XBox 360
 ///Thanks Phil Knight. See also http://www.cellperformance.com/articles/2006/04/more_techniques_for_eliminatin_1.html
-SIMD_FORCE_INLINE unsigned btSelect(unsigned condition, unsigned valueIfConditionNonZero, unsigned valueIfConditionZero) {
+SIMD_FORCE_INLINE unsigned btSelect(unsigned condition, unsigned valueIfConditionNonZero, unsigned valueIfConditionZero)
+{
     // Set testNz to 0xFFFFFFFF if condition is nonzero, 0x00000000 if condition is zero
     // Rely on positive value or'ed with its negative having sign bit on
     // and zero value or'ed with its negative (which is still zero) having sign bit off
     // Use arithmetic shift right, shifting the sign bit through all 32 bits
     unsigned testNz = (unsigned)(((int32_t)condition | -(int32_t)condition) >> 31);
     unsigned testEqz = ~testNz;
     return ((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz));
 }
-SIMD_FORCE_INLINE int32_t btSelect(unsigned condition, int32_t valueIfConditionNonZero, int32_t valueIfConditionZero) {
+SIMD_FORCE_INLINE int32_t btSelect(unsigned condition, int32_t valueIfConditionNonZero, int32_t valueIfConditionZero)
+{
     unsigned testNz = (unsigned)(((int32_t)condition | -(int32_t)condition) >> 31);
     unsigned testEqz = ~testNz;
     return static_cast<int32_t>((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz));
 }
-SIMD_FORCE_INLINE float btSelect(unsigned condition, float valueIfConditionNonZero, float valueIfConditionZero) {
+SIMD_FORCE_INLINE float btSelect(unsigned condition, float valueIfConditionNonZero, float valueIfConditionZero)
+{
 #ifdef BT_HAVE_NATIVE_FSEL
     return (float)btFsel((btScalar)condition - btScalar(1.0f), valueIfConditionNonZero, valueIfConditionZero);
 #else
     return (condition != 0) ? valueIfConditionNonZero : valueIfConditionZero;
 #endif
 }
 
 template <typename T>
-SIMD_FORCE_INLINE void btSwap(T &a, T &b) {
+SIMD_FORCE_INLINE void btSwap(T& a, T& b)
+{
     T tmp = a;
     a = b;
     b = tmp;
 }
 
 //PCK: endian swapping functions
-SIMD_FORCE_INLINE unsigned btSwapEndian(unsigned val) {
+SIMD_FORCE_INLINE unsigned btSwapEndian(unsigned val)
+{
     return (((val & 0xff000000) >> 24) | ((val & 0x00ff0000) >> 8) | ((val & 0x0000ff00) << 8) | ((val & 0x000000ff) << 24));
 }
 
-SIMD_FORCE_INLINE unsigned short btSwapEndian(unsigned short val) {
+SIMD_FORCE_INLINE unsigned short btSwapEndian(unsigned short val)
+{
     return static_cast<unsigned short>(((val & 0xff00) >> 8) | ((val & 0x00ff) << 8));
 }
 
-SIMD_FORCE_INLINE unsigned btSwapEndian(int32_t val) {
+SIMD_FORCE_INLINE unsigned btSwapEndian(int32_t val)
+{
     return btSwapEndian((unsigned)val);
 }
 
-SIMD_FORCE_INLINE unsigned short btSwapEndian(short val) {
+SIMD_FORCE_INLINE unsigned short btSwapEndian(short val)
+{
     return btSwapEndian((unsigned short)val);
 }
 
 ///btSwapFloat uses using char pointers to swap the endianness
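The `btSelect` overloads above implement the branch-free `condition ? a : b` described in their comments: `(c | -c)` has its sign bit set exactly when `c` is nonzero, an arithmetic right shift smears that bit into an all-ones/all-zeros mask, and the two candidate values are blended with the mask. A self-contained sketch of the same trick follows; the helper names are hypothetical and not part of the library, and the shift assumes the usual two's-complement arithmetic right shift, as the header itself does.

```cpp
#include <cassert>
#include <cstdint>

// Build the blend mask the way the header does: (c | -c) has its sign bit set
// exactly when c != 0; an arithmetic right shift smears that bit across all 32 bits.
static uint32_t mask_if_nonzero(uint32_t c) {
    return (uint32_t)(((int32_t)c | -(int32_t)c) >> 31);
}

// Branch-free equivalent of: condition ? a : b
static uint32_t select_u32(uint32_t condition, uint32_t a, uint32_t b) {
    const uint32_t keep_a = mask_if_nonzero(condition);
    return (a & keep_a) | (b & ~keep_a);
}

int main() {
    assert(select_u32(1u, 10u, 20u) == 10u);
    assert(select_u32(0u, 10u, 20u) == 20u);
    assert(select_u32(12345u, 7u, 9u) == 7u);
    return 0;
}
```

On the in-order console CPUs mentioned in the comment this avoids a hard-to-predict branch; on current desktop CPUs a compiler will often emit a conditional move for the plain ternary anyway.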
@@ -478,88 +454,120 @@ SIMD_FORCE_INLINE unsigned short btSwapEndian(short val) {
 ///When a floating point unit is faced with an invalid value, it may actually change the value, or worse, throw an exception.
 ///In most systems, running user mode code, you wouldn't get an exception, but instead the hardware/os/runtime will 'fix' the number for you.
 ///so instead of returning a float/double, we return integer/long long integer
-SIMD_FORCE_INLINE uint32_t btSwapEndianFloat(float d) {
+SIMD_FORCE_INLINE uint32_t btSwapEndianFloat(float d)
+{
     uint32_t a = 0;
-    unsigned char *dst = (unsigned char *)&a;
-    unsigned char *src = (unsigned char *)&d;
+    unsigned char* dst = (unsigned char*)&a;
+    unsigned char* src = (unsigned char*)&d;
 
     dst[0] = src[3];
     dst[1] = src[2];
     dst[2] = src[1];
     dst[3] = src[0];
     return a;
 }
 
 // unswap using char pointers
-SIMD_FORCE_INLINE float btUnswapEndianFloat(uint32_t a) {
+SIMD_FORCE_INLINE float btUnswapEndianFloat(uint32_t a)
+{
     float d = 0.0f;
-    unsigned char *src = (unsigned char *)&a;
-    unsigned char *dst = (unsigned char *)&d;
+    unsigned char* src = (unsigned char*)&a;
+    unsigned char* dst = (unsigned char*)&d;
 
     dst[0] = src[3];
     dst[1] = src[2];
     dst[2] = src[1];
     dst[3] = src[0];
 
     return d;
 }
 
 // swap using char pointers
-SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char *dst) {
-    unsigned char *src = (unsigned char *)&d;
+SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char* dst)
+{
+    unsigned char* src = (unsigned char*)&d;
 
     dst[0] = src[7];
     dst[1] = src[6];
     dst[2] = src[5];
     dst[3] = src[4];
     dst[4] = src[3];
     dst[5] = src[2];
     dst[6] = src[1];
     dst[7] = src[0];
 }
 
 // unswap using char pointers
-SIMD_FORCE_INLINE double btUnswapEndianDouble(const unsigned char *src) {
+SIMD_FORCE_INLINE double btUnswapEndianDouble(const unsigned char* src)
+{
     double d = 0.0;
-    unsigned char *dst = (unsigned char *)&d;
+    unsigned char* dst = (unsigned char*)&d;
 
     dst[0] = src[7];
     dst[1] = src[6];
     dst[2] = src[5];
     dst[3] = src[4];
     dst[4] = src[3];
     dst[5] = src[2];
     dst[6] = src[1];
     dst[7] = src[0];
 
     return d;
 }
 
 // returns normalized value in range [-SIMD_PI, SIMD_PI]
-SIMD_FORCE_INLINE btScalar btNormalizeAngle(btScalar angleInRadians) {
+SIMD_FORCE_INLINE btScalar btNormalizeAngle(btScalar angleInRadians)
+{
     angleInRadians = btFmod(angleInRadians, SIMD_2_PI);
     if (angleInRadians < -SIMD_PI) {
         return angleInRadians + SIMD_2_PI;
-    } else if (angleInRadians > SIMD_PI) {
+    }
+    else if (angleInRadians > SIMD_PI) {
         return angleInRadians - SIMD_2_PI;
-    } else {
+    }
+    else {
         return angleInRadians;
     }
 }
 
 ///rudimentary class to provide type info
 struct btTypedObject {
-    btTypedObject(int32_t objectType) :
-            m_objectType(objectType) {
-    }
+    btTypedObject(int32_t objectType)
+        : m_objectType(objectType)
+    {
+    }
     int32_t m_objectType;
-    inline int32_t getObjectType() const {
+    inline int32_t getObjectType() const
+    {
         return m_objectType;
     }
 };
 
-//GODOT ADDITION
-}; // namespace VHACD
-//
+// -- GODOT start --
+// Cherry-picked from Bullet 2.88 to fix GH-27926
+///align a pointer to the provided alignment, upwards
+template <typename T>
+T *btAlignPointer(T *unalignedPtr, size_t alignment)
+{
+    struct btConvertPointerSizeT
+    {
+        union {
+            T *ptr;
+            size_t integer;
+        };
+    };
+    btConvertPointerSizeT converter;
+
+    const size_t bit_mask = ~(alignment - 1);
+    converter.ptr = unalignedPtr;
+    converter.integer += alignment - 1;
+    converter.integer &= bit_mask;
+    return converter.ptr;
+}
+// -- GODOT end --
+
+// -- GODOT start --
+}; // namespace VHACD
+// -- GODOT end --
 
 #endif //BT_SCALAR_H
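The GODOT-added `btAlignPointer` above rounds an address up to the next multiple of `alignment` by adding `alignment - 1` and masking off the low bits, using a pointer/`size_t` union to move between the two representations. A standalone sketch of the same round-up arithmetic, using `uintptr_t` instead of the union (illustrative names; assumes `alignment` is a power of two):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Round ptr up to the next multiple of alignment (alignment must be a power of two).
template <typename T>
T* align_pointer_up(T* ptr, std::size_t alignment) {
    const std::uintptr_t mask = ~(std::uintptr_t)(alignment - 1);
    std::uintptr_t value = (std::uintptr_t)ptr;
    value = (value + alignment - 1) & mask; // add then clear low bits
    return (T*)value;
}

int main() {
    alignas(64) unsigned char buffer[128];
    unsigned char* unaligned = buffer + 3;
    unsigned char* aligned = align_pointer_up(unaligned, 16);
    assert(((std::uintptr_t)aligned % 16) == 0);
    assert(aligned >= unaligned && aligned < unaligned + 16);
    return 0;
}
```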
@@ -15,157 +15,178 @@ subject to the following restrictions:
 
 #include "btAlignedAllocator.h"
 
-//GODOT ADDITION
+// -- GODOT start --
 namespace VHACD {
-//
+// -- GODOT end --
 
 #ifdef _MSC_VER
-#pragma warning(disable : 4311 4302)
+#pragma warning(disable:4311 4302)
 #endif
 
 int32_t gNumAlignedAllocs = 0;
 int32_t gNumAlignedFree = 0;
 int32_t gTotalBytesAlignedAllocs = 0; //detect memory leaks
 
-static void *btAllocDefault(size_t size) {
+static void* btAllocDefault(size_t size)
+{
     return malloc(size);
 }
 
-static void btFreeDefault(void *ptr) {
+static void btFreeDefault(void* ptr)
+{
     free(ptr);
 }
 
-static btAllocFunc *sAllocFunc = btAllocDefault;
-static btFreeFunc *sFreeFunc = btFreeDefault;
+static btAllocFunc* sAllocFunc = btAllocDefault;
+static btFreeFunc* sFreeFunc = btFreeDefault;
 
 #if defined(BT_HAS_ALIGNED_ALLOCATOR)
 #include <malloc.h>
-static void *btAlignedAllocDefault(size_t size, int32_t alignment) {
+static void* btAlignedAllocDefault(size_t size, int32_t alignment)
+{
     return _aligned_malloc(size, (size_t)alignment);
 }
 
-static void btAlignedFreeDefault(void *ptr) {
+static void btAlignedFreeDefault(void* ptr)
+{
     _aligned_free(ptr);
 }
 #elif defined(__CELLOS_LV2__)
 #include <stdlib.h>
 
-static inline void *btAlignedAllocDefault(size_t size, int32_t alignment) {
+static inline void* btAlignedAllocDefault(size_t size, int32_t alignment)
+{
     return memalign(alignment, size);
 }
 
-static inline void btAlignedFreeDefault(void *ptr) {
+static inline void btAlignedFreeDefault(void* ptr)
+{
     free(ptr);
 }
 #else
-static inline void *btAlignedAllocDefault(size_t size, int32_t alignment) {
-    void *ret;
-    char *real;
+static inline void* btAlignedAllocDefault(size_t size, int32_t alignment)
+{
+    void* ret;
+    char* real;
     unsigned long offset;
 
-    real = (char *)sAllocFunc(size + sizeof(void *) + (alignment - 1));
+    real = (char*)sAllocFunc(size + sizeof(void*) + (alignment - 1));
     if (real) {
-        offset = (alignment - (unsigned long)(real + sizeof(void *))) & (alignment - 1);
-        ret = (void *)((real + sizeof(void *)) + offset);
-        *((void **)(ret)-1) = (void *)(real);
-    } else {
-        ret = (void *)(real);
+        // -- GODOT start --
+        // Synced with Bullet 2.88 to fix GH-27926
+        //offset = (alignment - (unsigned long)(real + sizeof(void*))) & (alignment - 1);
+        //ret = (void*)((real + sizeof(void*)) + offset);
+        ret = btAlignPointer(real + sizeof(void *), alignment);
+        // -- GODOT end --
+        *((void**)(ret)-1) = (void*)(real);
+    }
+    else {
+        ret = (void*)(real);
     }
     return (ret);
 }
 
-static inline void btAlignedFreeDefault(void *ptr) {
-    void *real;
+static inline void btAlignedFreeDefault(void* ptr)
+{
+    void* real;
 
     if (ptr) {
-        real = *((void **)(ptr)-1);
+        real = *((void**)(ptr)-1);
         sFreeFunc(real);
     }
 }
 #endif
 
-static btAlignedAllocFunc *sAlignedAllocFunc = btAlignedAllocDefault;
-static btAlignedFreeFunc *sAlignedFreeFunc = btAlignedFreeDefault;
+static btAlignedAllocFunc* sAlignedAllocFunc = btAlignedAllocDefault;
+static btAlignedFreeFunc* sAlignedFreeFunc = btAlignedFreeDefault;
 
-void btAlignedAllocSetCustomAligned(btAlignedAllocFunc *allocFunc, btAlignedFreeFunc *freeFunc) {
+void btAlignedAllocSetCustomAligned(btAlignedAllocFunc* allocFunc, btAlignedFreeFunc* freeFunc)
+{
     sAlignedAllocFunc = allocFunc ? allocFunc : btAlignedAllocDefault;
     sAlignedFreeFunc = freeFunc ? freeFunc : btAlignedFreeDefault;
 }
 
-void btAlignedAllocSetCustom(btAllocFunc *allocFunc, btFreeFunc *freeFunc) {
+void btAlignedAllocSetCustom(btAllocFunc* allocFunc, btFreeFunc* freeFunc)
+{
     sAllocFunc = allocFunc ? allocFunc : btAllocDefault;
     sFreeFunc = freeFunc ? freeFunc : btFreeDefault;
 }
 
 #ifdef BT_DEBUG_MEMORY_ALLOCATIONS
 //this generic allocator provides the total allocated number of bytes
 #include <stdio.h>
 
-void *btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char *filename) {
-    void *ret;
-    char *real;
+void* btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char* filename)
+{
+    void* ret;
+    char* real;
     unsigned long offset;
 
     gTotalBytesAlignedAllocs += size;
     gNumAlignedAllocs++;
 
-    real = (char *)sAllocFunc(size + 2 * sizeof(void *) + (alignment - 1));
+    real = (char*)sAllocFunc(size + 2 * sizeof(void*) + (alignment - 1));
     if (real) {
-        offset = (alignment - (unsigned long)(real + 2 * sizeof(void *))) & (alignment - 1);
-        ret = (void *)((real + 2 * sizeof(void *)) + offset);
-        *((void **)(ret)-1) = (void *)(real);
-        *((int32_t *)(ret)-2) = size;
-    } else {
-        ret = (void *)(real); //??
+        offset = (alignment - (unsigned long)(real + 2 * sizeof(void*))) & (alignment - 1);
+        ret = (void*)((real + 2 * sizeof(void*)) + offset);
+        *((void**)(ret)-1) = (void*)(real);
+        *((int32_t*)(ret)-2) = size;
+    }
+    else {
+        ret = (void*)(real); //??
     }
 
     printf("allocation#%d at address %x, from %s,line %d, size %d\n", gNumAlignedAllocs, real, filename, line, size);
 
-    int32_t *ptr = (int32_t *)ret;
+    int32_t* ptr = (int32_t*)ret;
     *ptr = 12;
     return (ret);
 }
 
-void btAlignedFreeInternal(void *ptr, int32_t line, char *filename) {
-    void *real;
+void btAlignedFreeInternal(void* ptr, int32_t line, char* filename)
+{
+    void* real;
     gNumAlignedFree++;
 
     if (ptr) {
-        real = *((void **)(ptr)-1);
-        int32_t size = *((int32_t *)(ptr)-2);
+        real = *((void**)(ptr)-1);
+        int32_t size = *((int32_t*)(ptr)-2);
         gTotalBytesAlignedAllocs -= size;
 
         printf("free #%d at address %x, from %s,line %d, size %d\n", gNumAlignedFree, real, filename, line, size);
 
         sFreeFunc(real);
-    } else {
+    }
+    else {
         printf("NULL ptr\n");
     }
 }
 
 #else //BT_DEBUG_MEMORY_ALLOCATIONS
 
-void *btAlignedAllocInternal(size_t size, int32_t alignment) {
+void* btAlignedAllocInternal(size_t size, int32_t alignment)
+{
     gNumAlignedAllocs++;
-    void *ptr;
+    void* ptr;
     ptr = sAlignedAllocFunc(size, alignment);
     // printf("btAlignedAllocInternal %d, %x\n",size,ptr);
     return ptr;
 }
 
-void btAlignedFreeInternal(void *ptr) {
+void btAlignedFreeInternal(void* ptr)
+{
     if (!ptr) {
         return;
     }
 
     gNumAlignedFree++;
     // printf("btAlignedFreeInternal %x\n",ptr);
     sAlignedFreeFunc(ptr);
 }
 
-//GODOT ADDITION
-};
-//
+// -- GODOT start --
+}; // namespace VHACD
+// -- GODOT end --
 
 #endif //BT_DEBUG_MEMORY_ALLOCATIONS
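The fallback `btAlignedAllocDefault` / `btAlignedFreeDefault` pair above over-allocates with the plain allocator, stores the raw pointer in the word immediately before the aligned block it hands out, and reads that word back on free; the GODOT-marked change replaces the manual offset computation, which cast the address to `unsigned long`, with `btAlignPointer`, which does the arithmetic in `size_t` (the change referenced as GH-27926 in the comment). A minimal self-contained sketch of that layout, with hypothetical names and a power-of-two alignment assumed:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>

// Allocate size bytes aligned to 'alignment', keeping the raw malloc pointer
// in the slot immediately before the returned block so it can be freed later.
static void* aligned_alloc_with_header(std::size_t size, std::size_t alignment) {
    unsigned char* raw = (unsigned char*)std::malloc(size + sizeof(void*) + alignment - 1);
    if (!raw)
        return nullptr;
    std::uintptr_t user = (std::uintptr_t)(raw + sizeof(void*));
    user = (user + alignment - 1) & ~(std::uintptr_t)(alignment - 1); // round up
    ((void**)user)[-1] = raw; // remember where malloc's block really starts
    return (void*)user;
}

static void aligned_free_with_header(void* ptr) {
    if (ptr)
        std::free(((void**)ptr)[-1]); // recover and free the original pointer
}

int main() {
    void* p = aligned_alloc_with_header(100, 64);
    assert(p && ((std::uintptr_t)p % 64) == 0);
    aligned_free_with_header(p);
    return 0;
}
```

Storing the base pointer inline is what lets the free path work without any side table, at the cost of `sizeof(void*) + alignment - 1` extra bytes per allocation.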