reduce clang warning spam

commit 7211fd604c
parent 6b363c612e
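Clang warns on every use of the deprecated register storage class specifier (-Wdeprecated-register), and these atomic helpers used it on each parameter. The diff below drops register in favor of volatile throughout: on the pointer parameters this matches the volatile-qualified pointers that the Interlocked* and __sync_* primitives already operate on, and it silences the warning spam the commit title refers to.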
@@ -57,113 +57,113 @@
 	return m_val; \
 }

-_ALWAYS_INLINE_ uint32_t _atomic_conditional_increment_impl(register uint32_t *pw){
+_ALWAYS_INLINE_ uint32_t _atomic_conditional_increment_impl(volatile uint32_t *pw){

 	ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONG, InterlockedCompareExchange, uint32_t)
 }

-_ALWAYS_INLINE_ uint32_t _atomic_decrement_impl(register uint32_t *pw) {
+_ALWAYS_INLINE_ uint32_t _atomic_decrement_impl(volatile uint32_t *pw) {

 	return InterlockedDecrement((LONG volatile *)pw);
 }

-_ALWAYS_INLINE_ uint32_t _atomic_increment_impl(register uint32_t *pw) {
+_ALWAYS_INLINE_ uint32_t _atomic_increment_impl(volatile uint32_t *pw) {

 	return InterlockedIncrement((LONG volatile *)pw);
 }

-_ALWAYS_INLINE_ uint32_t _atomic_sub_impl(register uint32_t *pw, register uint32_t val) {
+_ALWAYS_INLINE_ uint32_t _atomic_sub_impl(volatile uint32_t *pw, volatile uint32_t val) {

 	return InterlockedExchangeAdd((LONG volatile *)pw, -(int32_t)val) - val;
 }

-_ALWAYS_INLINE_ uint32_t _atomic_add_impl(register uint32_t *pw, register uint32_t val) {
+_ALWAYS_INLINE_ uint32_t _atomic_add_impl(volatile uint32_t *pw, volatile uint32_t val) {

 	return InterlockedAdd((LONG volatile *)pw, val);
 }

-_ALWAYS_INLINE_ uint32_t _atomic_exchange_if_greater_impl(register uint32_t *pw, register uint32_t val){
+_ALWAYS_INLINE_ uint32_t _atomic_exchange_if_greater_impl(volatile uint32_t *pw, volatile uint32_t val){

 	ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONG, InterlockedCompareExchange, uint32_t)
 }

-_ALWAYS_INLINE_ uint64_t _atomic_conditional_increment_impl(register uint64_t *pw){
+_ALWAYS_INLINE_ uint64_t _atomic_conditional_increment_impl(volatile uint64_t *pw){

 	ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONGLONG, InterlockedCompareExchange64, uint64_t)
 }

-_ALWAYS_INLINE_ uint64_t _atomic_decrement_impl(register uint64_t *pw) {
+_ALWAYS_INLINE_ uint64_t _atomic_decrement_impl(volatile uint64_t *pw) {

 	return InterlockedDecrement64((LONGLONG volatile *)pw);
 }

-_ALWAYS_INLINE_ uint64_t _atomic_increment_impl(register uint64_t *pw) {
+_ALWAYS_INLINE_ uint64_t _atomic_increment_impl(volatile uint64_t *pw) {

 	return InterlockedIncrement64((LONGLONG volatile *)pw);
 }

-_ALWAYS_INLINE_ uint64_t _atomic_sub_impl(register uint64_t *pw, register uint64_t val) {
+_ALWAYS_INLINE_ uint64_t _atomic_sub_impl(volatile uint64_t *pw, volatile uint64_t val) {

 	return InterlockedExchangeAdd64((LONGLONG volatile *)pw, -(int64_t)val) - val;
 }

-_ALWAYS_INLINE_ uint64_t _atomic_add_impl(register uint64_t *pw, register uint64_t val) {
+_ALWAYS_INLINE_ uint64_t _atomic_add_impl(volatile uint64_t *pw, volatile uint64_t val) {

 	return InterlockedAdd64((LONGLONG volatile *)pw, val);
 }

-_ALWAYS_INLINE_ uint64_t _atomic_exchange_if_greater_impl(register uint64_t *pw, register uint64_t val){
+_ALWAYS_INLINE_ uint64_t _atomic_exchange_if_greater_impl(volatile uint64_t *pw, volatile uint64_t val){

 	ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONGLONG, InterlockedCompareExchange64, uint64_t)
 }

 // The actual advertised functions; they'll call the right implementation

-uint32_t atomic_conditional_increment(register uint32_t *pw) {
+uint32_t atomic_conditional_increment(volatile uint32_t *pw) {
 	return _atomic_conditional_increment_impl(pw);
 }

-uint32_t atomic_decrement(register uint32_t *pw) {
+uint32_t atomic_decrement(volatile uint32_t *pw) {
 	return _atomic_decrement_impl(pw);
 }

-uint32_t atomic_increment(register uint32_t *pw) {
+uint32_t atomic_increment(volatile uint32_t *pw) {
 	return _atomic_increment_impl(pw);
 }

-uint32_t atomic_sub(register uint32_t *pw, register uint32_t val) {
+uint32_t atomic_sub(volatile uint32_t *pw, volatile uint32_t val) {
 	return _atomic_sub_impl(pw, val);
 }

-uint32_t atomic_add(register uint32_t *pw, register uint32_t val) {
+uint32_t atomic_add(volatile uint32_t *pw, volatile uint32_t val) {
 	return _atomic_add_impl(pw, val);
 }

-uint32_t atomic_exchange_if_greater(register uint32_t *pw, register uint32_t val) {
+uint32_t atomic_exchange_if_greater(volatile uint32_t *pw, volatile uint32_t val) {
 	return _atomic_exchange_if_greater_impl(pw, val);
 }

-uint64_t atomic_conditional_increment(register uint64_t *pw) {
+uint64_t atomic_conditional_increment(volatile uint64_t *pw) {
 	return _atomic_conditional_increment_impl(pw);
 }

-uint64_t atomic_decrement(register uint64_t *pw) {
+uint64_t atomic_decrement(volatile uint64_t *pw) {
 	return _atomic_decrement_impl(pw);
 }

-uint64_t atomic_increment(register uint64_t *pw) {
+uint64_t atomic_increment(volatile uint64_t *pw) {
 	return _atomic_increment_impl(pw);
 }

-uint64_t atomic_sub(register uint64_t *pw, register uint64_t val) {
+uint64_t atomic_sub(volatile uint64_t *pw, volatile uint64_t val) {
 	return _atomic_sub_impl(pw, val);
 }

-uint64_t atomic_add(register uint64_t *pw, register uint64_t val) {
+uint64_t atomic_add(volatile uint64_t *pw, volatile uint64_t val) {
 	return _atomic_add_impl(pw, val);
 }

-uint64_t atomic_exchange_if_greater(register uint64_t *pw, register uint64_t val) {
+uint64_t atomic_exchange_if_greater(volatile uint64_t *pw, volatile uint64_t val) {
 	return _atomic_exchange_if_greater_impl(pw, val);
 }
 #endif
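The hunk above uses ATOMIC_CONDITIONAL_INCREMENT_BODY and ATOMIC_EXCHANGE_IF_GREATER_BODY without showing their definitions; they sit outside the changed range. For orientation only, a compare-and-swap retry loop of the kind such a macro would expand to might look like this (an assumed sketch, not code from this commit):

    // Assumed sketch: retry the Interlocked compare-exchange until the
    // increment lands, or bail out once the counter is already zero.
    #define ATOMIC_CONDITIONAL_INCREMENT_BODY(m_pw, m_win_type, m_win_cmpxchg, m_cpp_type) \
        while (true) {                                                                     \
            m_cpp_type tmp = static_cast<m_cpp_type const volatile &>(*(m_pw));            \
            if (tmp == 0)                                                                  \
                return 0; /* already zero: refuse to increment */                          \
            if (m_win_cmpxchg((m_win_type volatile *)(m_pw), tmp + 1, tmp) == tmp)         \
                return tmp + 1; /* CAS returned the old value: we won the race */          \
        }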
@@ -44,7 +44,7 @@
 /* Bogus implementation unaware of multiprocessing */

 template <class T>
-static _ALWAYS_INLINE_ T atomic_conditional_increment(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {

 	if (*pw == 0)
 		return 0;
@@ -55,7 +55,7 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(register T *pw) {
 }

 template <class T>
-static _ALWAYS_INLINE_ T atomic_decrement(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {

 	(*pw)--;

@@ -63,7 +63,7 @@ static _ALWAYS_INLINE_ T atomic_decrement(register T *pw) {
 }

 template <class T>
-static _ALWAYS_INLINE_ T atomic_increment(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {

 	(*pw)++;

@@ -71,7 +71,7 @@ static _ALWAYS_INLINE_ T atomic_increment(register T *pw) {
 }

 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_sub(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {

 	(*pw) -= val;

@@ -79,7 +79,7 @@ static _ALWAYS_INLINE_ T atomic_sub(register T *pw, register V val) {
 }

 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_add(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {

 	(*pw) += val;

@@ -87,7 +87,7 @@ static _ALWAYS_INLINE_ T atomic_add(register T *pw, register V val) {
 }

 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_exchange_if_greater(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) {

 	if (val > *pw)
 		*pw = val;
@@ -103,7 +103,7 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(register T *pw, register V val) {
 // Clang states it supports GCC atomic builtins.

 template <class T>
-static _ALWAYS_INLINE_ T atomic_conditional_increment(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {

 	while (true) {
 		T tmp = static_cast<T const volatile &>(*pw);
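The hunk cuts off inside this loop; the rest of the compare-and-swap cycle is unchanged and therefore not shown. A plausible completion, sketched as a standalone template using the GCC __sync builtins this section relies on (assumed, not part of the diff):

    // Assumed sketch of the complete loop; only its first two lines
    // appear in the hunk above.
    template <class T>
    static T conditional_increment_sketch(volatile T *pw) {
        while (true) {
            T tmp = static_cast<T const volatile &>(*pw);
            if (tmp == 0)
                return 0; // counter already at zero; do not revive it
            // __sync_val_compare_and_swap returns the previous value, so
            // equality with tmp means our increment was applied atomically.
            if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp)
                return tmp + 1;
        }
    }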
@@ -115,31 +115,31 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(register T *pw) {
 }

 template <class T>
-static _ALWAYS_INLINE_ T atomic_decrement(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {

 	return __sync_sub_and_fetch(pw, 1);
 }

 template <class T>
-static _ALWAYS_INLINE_ T atomic_increment(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {

 	return __sync_add_and_fetch(pw, 1);
 }

 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_sub(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {

 	return __sync_sub_and_fetch(pw, val);
 }

 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_add(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {

 	return __sync_add_and_fetch(pw, val);
 }

 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_exchange_if_greater(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) {

 	while (true) {
 		T tmp = static_cast<T const volatile &>(*pw);
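As above, atomic_exchange_if_greater's loop is truncated by the hunk boundary. Its tail presumably follows the same CAS pattern (assumed sketch, not code from this commit):

    // Assumed sketch of the full exchange-if-greater loop; only its first
    // two lines appear in the hunk above.
    template <class T, class V>
    static T exchange_if_greater_sketch(volatile T *pw, V val) {
        while (true) {
            T tmp = static_cast<T const volatile &>(*pw);
            if (tmp >= val)
                return tmp; // current value already wins; store nothing
            if (__sync_val_compare_and_swap(pw, tmp, (T)val) == tmp)
                return val;
        }
    }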
@@ -153,19 +153,19 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(register T *pw, register V val) {
 #elif defined(_MSC_VER)
 // For MSVC use a separate compilation unit to prevent windows.h from polluting
 // the global namespace.
-uint32_t atomic_conditional_increment(register uint32_t *pw);
-uint32_t atomic_decrement(register uint32_t *pw);
-uint32_t atomic_increment(register uint32_t *pw);
-uint32_t atomic_sub(register uint32_t *pw, register uint32_t val);
-uint32_t atomic_add(register uint32_t *pw, register uint32_t val);
-uint32_t atomic_exchange_if_greater(register uint32_t *pw, register uint32_t val);
+uint32_t atomic_conditional_increment(volatile uint32_t *pw);
+uint32_t atomic_decrement(volatile uint32_t *pw);
+uint32_t atomic_increment(volatile uint32_t *pw);
+uint32_t atomic_sub(volatile uint32_t *pw, volatile uint32_t val);
+uint32_t atomic_add(volatile uint32_t *pw, volatile uint32_t val);
+uint32_t atomic_exchange_if_greater(volatile uint32_t *pw, volatile uint32_t val);

-uint64_t atomic_conditional_increment(register uint64_t *pw);
-uint64_t atomic_decrement(register uint64_t *pw);
-uint64_t atomic_increment(register uint64_t *pw);
-uint64_t atomic_sub(register uint64_t *pw, register uint64_t val);
-uint64_t atomic_add(register uint64_t *pw, register uint64_t val);
-uint64_t atomic_exchange_if_greater(register uint64_t *pw, register uint64_t val);
+uint64_t atomic_conditional_increment(volatile uint64_t *pw);
+uint64_t atomic_decrement(volatile uint64_t *pw);
+uint64_t atomic_increment(volatile uint64_t *pw);
+uint64_t atomic_sub(volatile uint64_t *pw, volatile uint64_t val);
+uint64_t atomic_add(volatile uint64_t *pw, volatile uint64_t val);
+uint64_t atomic_exchange_if_greater(volatile uint64_t *pw, volatile uint64_t val);

 #else
 //no threads supported?
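Taken together, the API now uniformly takes volatile pointers. A caller-side view, as a minimal sketch (the wrapper type and its method names are illustrative, not part of this commit):

    #include <stdint.h>

    // Minimal illustrative wrapper over the atomic helpers declared above.
    struct RefCountSketch {
        volatile uint32_t count;

        void init() { count = 1; }
        void ref() { atomic_increment(&count); }
        // Returns true when the last reference is gone.
        bool unref() { return atomic_decrement(&count) == 0; }
        // Takes a reference only while the object is still alive.
        bool ref_if_alive() { return atomic_conditional_increment(&count) != 0; }
    };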