typedefs: Cleanup unused macros and unnecessary checks

We now require a compiler with C++17 support, so we don't need to
check for features added to GCC 5 or Clang 3.2.

Clang builtin availability checks were unused anyway, since Clang
defines `__GNUC__` because it is also a GNU C implementation.

Fixes #36986.
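
As an aside on the reasoning above: because Clang defines `__GNUC__`, a plain
`#if defined(__GNUC__)` guard already selects the builtin path on both GCC and
Clang. A minimal standalone sketch, not part of the diff (`swap32` and the test
value are made up for the example):

#include <cstdint>
#include <cstdio>

// Both GCC and Clang define __GNUC__, so one guard selects the builtin path
// on either compiler; anything else falls back to portable shifts and masks.
#if defined(__GNUC__)
static inline uint32_t swap32(uint32_t x) { return __builtin_bswap32(x); }
#else
static inline uint32_t swap32(uint32_t x) {
	return (x << 24) | ((x << 8) & 0x00FF0000u) | ((x >> 8) & 0x0000FF00u) | (x >> 24);
}
#endif

int main() {
	std::printf("%08x\n", swap32(0x11223344u)); // Prints 44332211 on either path.
	return 0;
}
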
Rémi Verschelde 2020-03-11 21:01:02 +01:00
parent 5b97db325a
commit 1c2f2a805d
6 changed files with 79 additions and 153 deletions

View File

@@ -82,24 +82,25 @@ private:
}
_FORCE_INLINE_ size_t _get_alloc_size(size_t p_elements) const {
//return nearest_power_of_2_templated(p_elements*sizeof(T)+sizeof(SafeRefCount)+sizeof(int));
return next_power_of_2(p_elements * sizeof(T));
}
_FORCE_INLINE_ bool _get_alloc_size_checked(size_t p_elements, size_t *out) const {
#if defined(_add_overflow) && defined(_mul_overflow)
#if defined(__GNUC__)
size_t o;
size_t p;
if (_mul_overflow(p_elements, sizeof(T), &o)) {
if (__builtin_mul_overflow(p_elements, sizeof(T), &o)) {
*out = 0;
return false;
}
*out = next_power_of_2(o);
if (_add_overflow(o, static_cast<size_t>(32), &p)) return false; //no longer allocated here
if (__builtin_add_overflow(o, static_cast<size_t>(32), &p)) {
return false; // No longer allocated here.
}
return true;
#else
// Speed is more important than correctness here, do the operations unchecked
// and hope the best
// and hope for the best.
*out = _get_alloc_size(p_elements);
return true;
#endif
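
For reference, a tiny self-contained sketch (illustrative only, not from the
source tree) of how the `__builtin_mul_overflow`/`__builtin_add_overflow` calls
used above behave: they return true when the operation overflows, and otherwise
store the exact result through the last argument.

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
	size_t out = 0;
	// SIZE_MAX * 2 can never fit in size_t: returns true, out gets the wrapped value.
	bool mul_oflow = __builtin_mul_overflow(SIZE_MAX, static_cast<size_t>(2), &out);
	// 40 + 32 fits easily: returns false and stores 72 in out.
	bool add_oflow = __builtin_add_overflow(static_cast<size_t>(40), static_cast<size_t>(32), &out);
	std::printf("%d %d %zu\n", mul_oflow, add_oflow, out); // Prints "1 0 72".
	return 0;
}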

View File

@@ -504,7 +504,7 @@ void _err_print_index_error(const char *p_function, const char *p_file, int p_li
*/
#define ERR_FAIL_V(m_retval) \
if (1) { \
_err_print_error(FUNCTION_STR, __FILE__, __LINE__, "Method/Function Failed, returning: " __STR(m_retval)); \
_err_print_error(FUNCTION_STR, __FILE__, __LINE__, "Method/Function Failed, returning: " _STR(m_retval)); \
return m_retval; \
} else \
((void)0)
@@ -517,7 +517,7 @@ void _err_print_index_error(const char *p_function, const char *p_file, int p_li
*/
#define ERR_FAIL_V_MSG(m_retval, m_msg) \
if (1) { \
_err_print_error(FUNCTION_STR, __FILE__, __LINE__, "Method/Function Failed, returning: " __STR(m_retval), DEBUG_STR(m_msg)); \
_err_print_error(FUNCTION_STR, __FILE__, __LINE__, "Method/Function Failed, returning: " _STR(m_retval), DEBUG_STR(m_msg)); \
return m_retval; \
} else \
((void)0)
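
As a side note on the `_STR` fix above, a short sketch of how the stringizing
pair defined further down in this diff behaves (`VERSION_EXAMPLE` is invented
for the demo):

#include <cstdio>

// Same pattern as _STR/_MKSTR below: # stringizes the raw argument, while the
// extra level of indirection in _MKSTR lets the argument expand as a macro first.
#define _STR(m_x) #m_x
#define _MKSTR(m_x) _STR(m_x)
#define VERSION_EXAMPLE 42

int main() {
	std::printf("%s\n", _STR(VERSION_EXAMPLE));   // Prints "VERSION_EXAMPLE".
	std::printf("%s\n", _MKSTR(VERSION_EXAMPLE)); // Prints "42".
	return 0;
}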

View File

@@ -37,10 +37,10 @@
#include "thirdparty/misc/pcg.h"
#if defined(__GNUC__) || (_llvm_has_builtin(__builtin_clz))
#if defined(__GNUC__)
#define CLZ32(x) __builtin_clz(x)
#elif defined(_MSC_VER)
#include "intrin.h"
#include <intrin.h>
static int __bsr_clz32(uint32_t x) {
unsigned long index;
_BitScanReverse(&index, x);
@@ -50,11 +50,11 @@ static int __bsr_clz32(uint32_t x) {
#else
#endif
#if defined(__GNUC__) || (_llvm_has_builtin(__builtin_ldexp) && _llvm_has_builtin(__builtin_ldexpf))
#if defined(__GNUC__)
#define LDEXP(s, e) __builtin_ldexp(s, e)
#define LDEXPF(s, e) __builtin_ldexpf(s, e)
#else
#include "math.h"
#include <math.h>
#define LDEXP(s, e) ldexp(s, e)
#define LDEXPF(s, e) ldexp(s, e)
#endif
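
A small usage sketch of the two builtin families guarded above, assuming GCC or
Clang (values picked just for the example):

#include <cstdio>

int main() {
	// CLZ32 maps to __builtin_clz on GCC/Clang: 0x00010000 has bit 16 as its
	// highest set bit, so the 15 bits above it are zero. The argument must be
	// non-zero, otherwise the result is undefined.
	std::printf("%d\n", __builtin_clz(0x00010000u)); // Prints 15.
	// LDEXP maps to __builtin_ldexp: ldexp(x, e) computes x * 2^e.
	std::printf("%f\n", __builtin_ldexp(1.5, 4)); // Prints 24.000000.
	return 0;
}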

View File

@@ -37,60 +37,41 @@
* Basic definitions and simple functions to be used everywhere.
*/
// Include first in case the platform needs to pre-define/include some things.
#include "platform_config.h"
// Should be available everywhere.
#include "core/error_list.h"
#include "core/int_types.h"
// Turn argument to string constant:
// https://gcc.gnu.org/onlinedocs/cpp/Stringizing.html#Stringizing
#ifndef _STR
#define _STR(m_x) #m_x
#define _MKSTR(m_x) _STR(m_x)
#endif
//should always inline no matter what
// Should always inline no matter what.
#ifndef _ALWAYS_INLINE_
#if defined(__GNUC__) && (__GNUC__ >= 4)
#define _ALWAYS_INLINE_ __attribute__((always_inline)) inline
#elif defined(__llvm__)
#if defined(__GNUC__)
#define _ALWAYS_INLINE_ __attribute__((always_inline)) inline
#elif defined(_MSC_VER)
#define _ALWAYS_INLINE_ __forceinline
#else
#define _ALWAYS_INLINE_ inline
#endif
#endif
//should always inline, except in some cases because it makes debugging harder
// Should always inline, except in debug builds because it makes debugging harder.
#ifndef _FORCE_INLINE_
#ifdef DISABLE_FORCED_INLINE
#define _FORCE_INLINE_ inline
#else
#define _FORCE_INLINE_ _ALWAYS_INLINE_
#endif
#endif
//custom, gcc-safe offsetof, because gcc complains a lot.
template <class T>
T *_nullptr() {
T *t = NULL;
return t;
}
#define OFFSET_OF(st, m) \
((size_t)((char *)&(_nullptr<st>()->m) - (char *)0))
/**
* Some platforms (devices) don't define NULL
*/
#ifndef NULL
#define NULL 0
#endif
/**
* Windows badly defines a lot of stuff we'll never use. Undefine it.
*/
// Windows badly defines a lot of stuff we'll never use. Undefine it.
#ifdef _WIN32
#undef min // override standard definition
#undef max // override standard definition
@@ -105,18 +86,11 @@ T *_nullptr() {
#undef CONNECT_DEFERRED // override from Windows SDK, clashes with Object enum
#endif
#include "core/int_types.h"
#include "core/error_list.h"
/** Generic ABS function, for math uses please use Math::abs */
// Generic ABS function, for math uses please use Math::abs.
#ifndef ABS
#define ABS(m_v) (((m_v) < 0) ? (-(m_v)) : (m_v))
#endif
#define ABSDIFF(x, y) (((x) < (y)) ? ((y) - (x)) : ((x) - (y)))
#ifndef SGN
#define SGN(m_v) (((m_v) < 0) ? (-1.0) : (+1.0))
#endif
@@ -133,49 +107,24 @@ T *_nullptr() {
#define CLAMP(m_a, m_min, m_max) (((m_a) < (m_min)) ? (m_min) : (((m_a) > (m_max)) ? m_max : m_a))
#endif
/** Generic swap template */
// Generic swap template.
#ifndef SWAP
#define SWAP(m_x, m_y) __swap_tmpl((m_x), (m_y))
template <class T>
inline void __swap_tmpl(T &x, T &y) {
T aux = x;
x = y;
y = aux;
}
#endif // SWAP
#endif //swap
/* clang-format off */
#define HEX2CHR(m_hex) \
((m_hex >= '0' && m_hex <= '9') ? (m_hex - '0') : \
((m_hex >= 'A' && m_hex <= 'F') ? (10 + m_hex - 'A') : \
((m_hex >= 'a' && m_hex <= 'f') ? (10 + m_hex - 'a') : 0)))
/* clang-format on */
// Macro to check whether we are compiled by clang
// and we have a specific builtin
#if defined(__llvm__) && defined(__has_builtin)
#define _llvm_has_builtin(x) __has_builtin(x)
#else
#define _llvm_has_builtin(x) 0
#endif
#if (defined(__GNUC__) && (__GNUC__ >= 5)) || _llvm_has_builtin(__builtin_mul_overflow)
#define _mul_overflow __builtin_mul_overflow
#endif
#if (defined(__GNUC__) && (__GNUC__ >= 5)) || _llvm_has_builtin(__builtin_add_overflow)
#define _add_overflow __builtin_add_overflow
#endif
/** Function to find the next power of 2 to an integer */
/* Functions to handle powers of 2 and shifting. */
// Function to find the next power of 2 to an integer.
static _FORCE_INLINE_ unsigned int next_power_of_2(unsigned int x) {
if (x == 0)
if (x == 0) {
return 0;
}
--x;
x |= x >> 1;
@@ -187,8 +136,8 @@ static _FORCE_INLINE_ unsigned int next_power_of_2(unsigned int x) {
return ++x;
}
// Function to find the previous power of 2 to an integer.
static _FORCE_INLINE_ unsigned int previous_power_of_2(unsigned int x) {
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
@@ -197,40 +146,45 @@ static _FORCE_INLINE_ unsigned int previous_power_of_2(unsigned int x) {
return x - (x >> 1);
}
// Function to find the closest power of 2 to an integer.
static _FORCE_INLINE_ unsigned int closest_power_of_2(unsigned int x) {
unsigned int nx = next_power_of_2(x);
unsigned int px = previous_power_of_2(x);
return (nx - x) > (x - px) ? px : nx;
}
// We need this definition inside the function below.
static inline int get_shift_from_power_of_2(unsigned int p_pixel);
// Get a shift value from a power of 2.
static inline int get_shift_from_power_of_2(unsigned int p_bits) {
for (unsigned int i = 0; i < 32; i++) {
if (p_bits == (unsigned int)(1 << i)) {
return i;
}
}
return -1;
}
template <class T>
static _FORCE_INLINE_ T nearest_power_of_2_templated(T x) {
--x;
// The number of operations on x is the base two logarithm
// of the p_number of bits in the type. Add three to account
// of the number of bits in the type. Add three to account
// for sizeof(T) being in bytes.
size_t num = get_shift_from_power_of_2(sizeof(T)) + 3;
// If the compiler is smart, it unrolls this loop
// If its dumb, this is a bit slow.
for (size_t i = 0; i < num; i++)
// If the compiler is smart, it unrolls this loop.
// If it's dumb, this is a bit slow.
for (size_t i = 0; i < num; i++) {
x |= x >> (1 << i);
}
return ++x;
}
/** Function to find the nearest (bigger) power of 2 to an integer */
// Function to find the nearest (bigger) power of 2 to an integer.
static inline unsigned int nearest_shift(unsigned int p_number) {
for (int i = 30; i >= 0; i--) {
if (p_number & (1 << i))
return i + 1;
}
@@ -238,41 +192,20 @@ static inline unsigned int nearest_shift(unsigned int p_number) {
return 0;
}
/** get a shift value from a power of 2 */
static inline int get_shift_from_power_of_2(unsigned int p_pixel) {
// return a GL_TEXTURE_SIZE_ENUM
for (unsigned int i = 0; i < 32; i++) {
if (p_pixel == (unsigned int)(1 << i))
return i;
}
return -1;
}
/** Swap 16 bits value for endianness */
#if defined(__GNUC__) || _llvm_has_builtin(__builtin_bswap16)
// Swap 16, 32 and 64 bits value for endianness.
#if defined(__GNUC__)
#define BSWAP16(x) __builtin_bswap16(x)
#define BSWAP32(x) __builtin_bswap32(x)
#define BSWAP64(x) __builtin_bswap64(x)
#else
static inline uint16_t BSWAP16(uint16_t x) {
return (x >> 8) | (x << 8);
}
#endif
/** Swap 32 bits value for endianness */
#if defined(__GNUC__) || _llvm_has_builtin(__builtin_bswap32)
#define BSWAP32(x) __builtin_bswap32(x)
#else
static inline uint32_t BSWAP32(uint32_t x) {
return ((x << 24) | ((x << 8) & 0x00FF0000) | ((x >> 8) & 0x0000FF00) | (x >> 24));
}
#endif
/** Swap 64 bits value for endianness */
#if defined(__GNUC__) || _llvm_has_builtin(__builtin_bswap64)
#define BSWAP64(x) __builtin_bswap64(x)
#else
static inline uint64_t BSWAP64(uint64_t x) {
x = (x & 0x00000000FFFFFFFF) << 32 | (x & 0xFFFFFFFF00000000) >> 32;
x = (x & 0x0000FFFF0000FFFF) << 16 | (x & 0xFFFF0000FFFF0000) >> 16;
@@ -281,40 +214,24 @@ static inline uint64_t BSWAP64(uint64_t x) {
}
#endif
/** When compiling with RTTI, we can add an "extra"
* layer of safeness in many operations, so dynamic_cast
* is used besides casting by enum.
*/
// Generic comparator used in Map, List, etc.
template <class T>
struct Comparator {
_ALWAYS_INLINE_ bool operator()(const T &p_a, const T &p_b) const { return (p_a < p_b); }
};
// Global lock macro, relies on the static Mutex::_global_mutex.
void _global_lock();
void _global_unlock();
struct _GlobalLock {
_GlobalLock() { _global_lock(); }
~_GlobalLock() { _global_unlock(); }
};
#define GLOBAL_LOCK_FUNCTION _GlobalLock _global_lock_;
#ifdef NO_SAFE_CAST
#define SAFE_CAST static_cast
#else
#define SAFE_CAST dynamic_cast
#endif
#define MT_SAFE
#define __STRX(m_index) #m_index
#define __STR(m_index) __STRX(m_index)
#ifdef __GNUC__
#if defined(__GNUC__)
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
@@ -330,14 +247,12 @@ struct _GlobalLock {
#define _PRINTF_FORMAT_ATTRIBUTE_2_3
#endif
/** This is needed due to a strange OpenGL API that expects a pointer
* type for an argument that is actually an offset.
*/
// This is needed due to a strange OpenGL API that expects a pointer
// type for an argument that is actually an offset.
#define CAST_INT_TO_UCHAR_PTR(ptr) ((uint8_t *)(uintptr_t)(ptr))
// Home-made index sequence trick, so it can be used everywhere without the costly include of std::tuple.
// https://stackoverflow.com/questions/15014096/c-index-of-type-during-variadic-template-expansion
template <size_t... Is>
struct IndexSequence {};
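
To make the power-of-two helpers in this header concrete, a small usage sketch
(assuming the header above is Godot's core/typedefs.h and is on the include
path; the expected results are worked out by hand):

#include "core/typedefs.h"

#include <cstdio>

int main() {
	std::printf("%u\n", next_power_of_2(17));            // 32: rounds up.
	std::printf("%u\n", previous_power_of_2(17));        // 16: rounds down.
	std::printf("%u\n", closest_power_of_2(17));         // 16: nearer than 32.
	std::printf("%d\n", get_shift_from_power_of_2(32));  // 5: 1 << 5 == 32.
	std::printf("%d\n", get_shift_from_power_of_2(33));  // -1: not a power of 2.
	return 0;
}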

View File

@@ -2194,7 +2194,11 @@ void Collada::_merge_skeletons(VisualScene *p_vscene, Node *p_node) {
ERR_CONTINUE(!state.scene_map.has(nodeid)); //weird, it should have it...
NodeJoint *nj = SAFE_CAST<NodeJoint *>(state.scene_map[nodeid]);
#ifdef NO_SAFE_CAST
NodeJoint *nj = static_cast<NodeJoint *>(state.scene_map[nodeid]);
#else
NodeJoint *nj = dynamic_cast<NodeJoint *>(state.scene_map[nodeid]);
#endif
ERR_CONTINUE(!nj); //broken collada
ERR_CONTINUE(!nj->owner); //weird, node should have a skeleton owner
@@ -2366,7 +2370,11 @@ bool Collada::_move_geometry_to_skeletons(VisualScene *p_vscene, Node *p_node, L
String nodeid = ng->skeletons[0];
ERR_FAIL_COND_V(!state.scene_map.has(nodeid), false); //weird, it should have it...
NodeJoint *nj = SAFE_CAST<NodeJoint *>(state.scene_map[nodeid]);
#ifdef NO_SAFE_CAST
NodeJoint *nj = static_cast<NodeJoint *>(state.scene_map[nodeid]);
#else
NodeJoint *nj = dynamic_cast<NodeJoint *>(state.scene_map[nodeid]);
#endif
ERR_FAIL_COND_V(!nj, false);
ERR_FAIL_COND_V(!nj->owner, false); //weird, node should have a skeleton owner

View File

@@ -2012,8 +2012,10 @@ void OS_X11::handle_key_event(XKeyEvent *p_event, bool p_echo) {
// is correct, but the xorg developers are
// not very helpful today.
::Time tresh = ABSDIFF(peek_event.xkey.time, xkeyevent->time);
if (peek_event.type == KeyPress && tresh < 5) {
#define ABSDIFF(x, y) (((x) < (y)) ? ((y) - (x)) : ((x) - (y)))
::Time threshold = ABSDIFF(peek_event.xkey.time, xkeyevent->time);
#undef ABSDIFF
if (peek_event.type == KeyPress && threshold < 5) {
KeySym rk;
XLookupString((XKeyEvent *)&peek_event, str, 256, &rk, NULL);
if (rk == keysym_keycode) {