From 350fcb6d98144f2a15c454170563eeac3c9d6dd1 Mon Sep 17 00:00:00 2001 From: Hein-Pieter van Braam Date: Tue, 26 Sep 2017 18:01:24 +0200 Subject: [PATCH] Build MSVC safe_refcount in a separate compilation unit Including windows.h in a globally included header gives all kinds of issues. Move the MSVC implementation for safe_refcount back into a .cpp file to prevent this from happening. --- core/safe_refcount.cpp | 169 +++++++++++++++++++++++++++++++++++++++++ core/safe_refcount.h | 100 ++++-------------------- 2 files changed, 183 insertions(+), 86 deletions(-) create mode 100644 core/safe_refcount.cpp diff --git a/core/safe_refcount.cpp b/core/safe_refcount.cpp new file mode 100644 index 00000000000..c9acdb79706 --- /dev/null +++ b/core/safe_refcount.cpp @@ -0,0 +1,169 @@ +/*************************************************************************/ +/* safe_refcount.cpp */ +/*************************************************************************/ +/* This file is part of: */ +/* GODOT ENGINE */ +/* https://godotengine.org */ +/*************************************************************************/ +/* Copyright (c) 2007-2017 Juan Linietsky, Ariel Manzur. */ +/* Copyright (c) 2014-2017 Godot Engine contributors (cf. AUTHORS.md) */ +/* */ +/* Permission is hereby granted, free of charge, to any person obtaining */ +/* a copy of this software and associated documentation files (the */ +/* "Software"), to deal in the Software without restriction, including */ +/* without limitation the rights to use, copy, modify, merge, publish, */ +/* distribute, sublicense, and/or sell copies of the Software, and to */ +/* permit persons to whom the Software is furnished to do so, subject to */ +/* the following conditions: */ +/* */ +/* The above copyright notice and this permission notice shall be */ +/* included in all copies or substantial portions of the Software. */ +/* */ +/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ +/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ +/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ +/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ +/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ +/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ +/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +/*************************************************************************/ + +#include "safe_refcount.h" + +#if defined(_MSC_VER) + +/* Implementation for MSVC-Windows */ + +// don't pollute my namespace! 
+#include <windows.h>
+
+#define ATOMIC_CONDITIONAL_INCREMENT_BODY(m_pw, m_win_type, m_win_cmpxchg, m_cpp_type) \
+	/* try to increment until it actually works */ \
+	/* taken from boost */ \
+	while (true) { \
+		m_cpp_type tmp = static_cast<m_cpp_type>(*(m_pw)); \
+		if (tmp == 0) \
+			return 0; /* if zero, can't add to it anymore */ \
+		if (m_win_cmpxchg((m_win_type volatile *)(m_pw), tmp + 1, tmp) == tmp) \
+			return tmp + 1; \
+	}
+
+#define ATOMIC_EXCHANGE_IF_GREATER_BODY(m_pw, m_val, m_win_type, m_win_cmpxchg, m_cpp_type) \
+	while (true) { \
+		m_cpp_type tmp = static_cast<m_cpp_type>(*(m_pw)); \
+		if (tmp >= m_val) \
+			return tmp; /* already greater, or equal */ \
+		if (m_win_cmpxchg((m_win_type volatile *)(m_pw), m_val, tmp) == tmp) \
+			return m_val; \
+	}
+
+_ALWAYS_INLINE_ uint32_t _atomic_conditional_increment_impl(register uint32_t *pw) {
+
+	ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONG, InterlockedCompareExchange, uint32_t)
+}
+
+_ALWAYS_INLINE_ uint32_t _atomic_decrement_impl(register uint32_t *pw) {
+
+	return InterlockedDecrement((LONG volatile *)pw);
+}
+
+_ALWAYS_INLINE_ uint32_t _atomic_increment_impl(register uint32_t *pw) {
+
+	return InterlockedIncrement((LONG volatile *)pw);
+}
+
+_ALWAYS_INLINE_ uint32_t _atomic_sub_impl(register uint32_t *pw, register uint32_t val) {
+
+	return InterlockedExchangeAdd((LONG volatile *)pw, -(int32_t)val) - val;
+}
+
+_ALWAYS_INLINE_ uint32_t _atomic_add_impl(register uint32_t *pw, register uint32_t val) {
+
+	return InterlockedAdd((LONG volatile *)pw, val);
+}
+
+_ALWAYS_INLINE_ uint32_t _atomic_exchange_if_greater_impl(register uint32_t *pw, register uint32_t val) {
+
+	ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONG, InterlockedCompareExchange, uint32_t)
+}
+
+_ALWAYS_INLINE_ uint64_t _atomic_conditional_increment_impl(register uint64_t *pw) {
+
+	ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONGLONG, InterlockedCompareExchange64, uint64_t)
+}
+
+_ALWAYS_INLINE_ uint64_t _atomic_decrement_impl(register uint64_t *pw) {
+
+	return InterlockedDecrement64((LONGLONG volatile *)pw);
+}
+
+_ALWAYS_INLINE_ uint64_t _atomic_increment_impl(register uint64_t *pw) {
+
+	return InterlockedIncrement64((LONGLONG volatile *)pw);
+}
+
+_ALWAYS_INLINE_ uint64_t _atomic_sub_impl(register uint64_t *pw, register uint64_t val) {
+
+	return InterlockedExchangeAdd64((LONGLONG volatile *)pw, -(int64_t)val) - val;
+}
+
+_ALWAYS_INLINE_ uint64_t _atomic_add_impl(register uint64_t *pw, register uint64_t val) {
+
+	return InterlockedAdd64((LONGLONG volatile *)pw, val);
+}
+
+_ALWAYS_INLINE_ uint64_t _atomic_exchange_if_greater_impl(register uint64_t *pw, register uint64_t val) {
+
+	ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONGLONG, InterlockedCompareExchange64, uint64_t)
+}
+
+// The actual advertised functions; they'll call the right implementation
+
+uint32_t atomic_conditional_increment(register uint32_t *counter) {
+	return _atomic_conditional_increment_impl(counter);
+}
+
+uint32_t atomic_decrement(register uint32_t *pw) {
+	return _atomic_decrement_impl(pw);
+}
+
+uint32_t atomic_increment(register uint32_t *pw) {
+	return _atomic_increment_impl(pw);
+}
+
+uint32_t atomic_sub(register uint32_t *pw, register uint32_t val) {
+	return _atomic_sub_impl(pw, val);
+}
+
+uint32_t atomic_add(register uint32_t *pw, register uint32_t val) {
+	return _atomic_add_impl(pw, val);
+}
+
+uint32_t atomic_exchange_if_greater(register uint32_t *pw, register uint32_t val) {
+	return _atomic_exchange_if_greater_impl(pw, val);
+}
+
+uint64_t atomic_conditional_increment(register uint64_t *counter) {
+	return _atomic_conditional_increment_impl(counter);
+}
+
+uint64_t atomic_decrement(register uint64_t *pw) {
+	return _atomic_decrement_impl(pw);
+}
+
+uint64_t atomic_increment(register uint64_t *pw) {
+	return _atomic_increment_impl(pw);
+}
+
+uint64_t atomic_sub(register uint64_t *pw, register uint64_t val) {
+	return _atomic_sub_impl(pw, val);
+}
+
+uint64_t atomic_add(register uint64_t *pw, register uint64_t val) {
+	return _atomic_add_impl(pw, val);
+}
+
+uint64_t atomic_exchange_if_greater(register uint64_t *pw, register uint64_t val) {
+	return _atomic_exchange_if_greater_impl(pw, val);
+}
+#endif
diff --git a/core/safe_refcount.h b/core/safe_refcount.h
index 05126cc3196..39967d5ac4e 100644
--- a/core/safe_refcount.h
+++ b/core/safe_refcount.h
@@ -150,97 +150,25 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(register T *pw, register V v
 }
 
 #elif defined(_MSC_VER)
+// For MSVC use a separate compilation unit to prevent windows.h from polluting
+// the global namespace.
+uint32_t atomic_conditional_increment(register uint32_t *pw);
+uint32_t atomic_decrement(register uint32_t *pw);
+uint32_t atomic_increment(register uint32_t *pw);
+uint32_t atomic_sub(register uint32_t *pw, register uint32_t val);
+uint32_t atomic_add(register uint32_t *pw, register uint32_t val);
+uint32_t atomic_exchange_if_greater(register uint32_t *pw, register uint32_t val);
 
-/* Implementation for MSVC-Windows */
-
-// don't pollute my namespace!
-#include <windows.h>
-
-#define ATOMIC_CONDITIONAL_INCREMENT_BODY(m_pw, m_win_type, m_win_cmpxchg, m_cpp_type) \
-	/* try to increment until it actually works */ \
-	/* taken from boost */ \
-	while (true) { \
-		m_cpp_type tmp = static_cast<m_cpp_type>(*(m_pw)); \
-		if (tmp == 0) \
-			return 0; /* if zero, can't add to it anymore */ \
-		if (m_win_cmpxchg((m_win_type volatile *)(m_pw), tmp + 1, tmp) == tmp) \
-			return tmp + 1; \
-	}
-
-#define ATOMIC_EXCHANGE_IF_GREATER_BODY(m_pw, m_val, m_win_type, m_win_cmpxchg, m_cpp_type) \
-	while (true) { \
-		m_cpp_type tmp = static_cast<m_cpp_type>(*(m_pw)); \
-		if (tmp >= m_val) \
-			return tmp; /* already greater, or equal */ \
-		if (m_win_cmpxchg((m_win_type volatile *)(m_pw), m_val, tmp) == tmp) \
-			return m_val; \
-	}
-
-static _ALWAYS_INLINE_ uint32_t atomic_conditional_increment(register uint32_t *pw) {
-
-	ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONG, InterlockedCompareExchange, uint32_t)
-}
-
-static _ALWAYS_INLINE_ uint32_t atomic_decrement(register uint32_t *pw) {
-
-	return InterlockedDecrement((LONG volatile *)pw);
-}
-
-static _ALWAYS_INLINE_ uint32_t atomic_increment(register uint32_t *pw) {
-
-	return InterlockedIncrement((LONG volatile *)pw);
-}
-
-static _ALWAYS_INLINE_ uint32_t atomic_sub(register uint32_t *pw, register uint32_t val) {
-
-	return InterlockedExchangeAdd((LONG volatile *)pw, -(int32_t)val) - val;
-}
-
-static _ALWAYS_INLINE_ uint32_t atomic_add(register uint32_t *pw, register uint32_t val) {
-
-	return InterlockedAdd((LONG volatile *)pw, val);
-}
-
-static _ALWAYS_INLINE_ uint32_t atomic_exchange_if_greater(register uint32_t *pw, register uint32_t val) {
-
-	ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONG, InterlockedCompareExchange, uint32_t)
-}
-
-static _ALWAYS_INLINE_ uint64_t atomic_conditional_increment(register uint64_t *pw) {
-
-	ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONGLONG, InterlockedCompareExchange64, uint64_t)
-}
-
-static _ALWAYS_INLINE_ uint64_t atomic_decrement(register uint64_t *pw) {
-
-	return InterlockedDecrement64((LONGLONG volatile *)pw);
-}
-
-static _ALWAYS_INLINE_ uint64_t atomic_increment(register uint64_t *pw) {
-
-	return InterlockedIncrement64((LONGLONG volatile *)pw);
-}
-
-static _ALWAYS_INLINE_ uint64_t atomic_sub(register uint64_t *pw, register uint64_t val) {
-
-	return InterlockedExchangeAdd64((LONGLONG volatile *)pw, -(int64_t)val) - val;
-}
-
-static _ALWAYS_INLINE_ uint64_t atomic_add(register uint64_t *pw, register uint64_t val) {
-
-	return InterlockedAdd64((LONGLONG volatile *)pw, val);
-}
-
-static _ALWAYS_INLINE_ uint64_t atomic_exchange_if_greater(register uint64_t *pw, register uint64_t val) {
-
-	ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONGLONG, InterlockedCompareExchange64, uint64_t)
-}
+uint64_t atomic_conditional_increment(register uint64_t *pw);
+uint64_t atomic_decrement(register uint64_t *pw);
+uint64_t atomic_increment(register uint64_t *pw);
+uint64_t atomic_sub(register uint64_t *pw, register uint64_t val);
+uint64_t atomic_add(register uint64_t *pw, register uint64_t val);
+uint64_t atomic_exchange_if_greater(register uint64_t *pw, register uint64_t val);
 
 #else
-	//no threads supported?
 #error Must provide atomic functions for this platform or compiler!
-
 #endif
 
 struct SafeRefCount {
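
Note (illustration, not part of the patch): the commit applies a standard firewall pattern; the header exposes plain declarations only, and the single .cpp that defines them is the only translation unit that ever sees windows.h and its macro pollution (min/max, etc.). A minimal two-file sketch with hypothetical names (counter.h, counter.cpp, counter_increment):

// counter.h -- safe to include anywhere; never pulls in windows.h.
#pragma once
#include <cstdint>

uint32_t counter_increment(uint32_t *pw);

// counter.cpp -- the only file that sees windows.h.
#include "counter.h"

#include <windows.h>

uint32_t counter_increment(uint32_t *pw) {
	// Cast to the Win32 type the intrinsic expects, as the patch does.
	return InterlockedIncrement((LONG volatile *)pw);
}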
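The ATOMIC_CONDITIONAL_INCREMENT_BODY macro is a classic compare-and-swap retry loop: read the counter, bail out if it has already reached zero, otherwise try to publish tmp + 1 and retry if another thread won the race. A self-contained sketch of the same loop written against std::atomic rather than the Interlocked* intrinsics, so it compiles on any platform (conditional_increment is a hypothetical name, not from the patch):

#include <atomic>
#include <cstdint>

// Returns 0 if the counter was already 0, otherwise the incremented value.
uint32_t conditional_increment(std::atomic<uint32_t> &w) {
	uint32_t tmp = w.load();
	while (true) {
		if (tmp == 0)
			return 0; // if zero, can't add to it anymore
		// Store tmp + 1 only if w still holds tmp; on failure,
		// compare_exchange_weak reloads tmp and the loop retries.
		if (w.compare_exchange_weak(tmp, tmp + 1))
			return tmp + 1;
	}
}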
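One easy-to-miss detail in _atomic_sub_impl: InterlockedExchangeAdd returns the value the counter held *before* the addition, while atomic_sub reports the value *after* it, hence the trailing "- val". std::atomic's fetch_sub follows the same returns-the-previous-value convention, so the equivalent portable sketch looks like this (atomic_sub_sketch is a hypothetical helper, not from the patch):

#include <atomic>
#include <cassert>
#include <cstdint>

uint32_t atomic_sub_sketch(std::atomic<uint32_t> &w, uint32_t val) {
	// fetch_sub returns the previous value; subtract val once more
	// to report the post-subtraction value, as atomic_sub does.
	return w.fetch_sub(val) - val;
}

int main() {
	std::atomic<uint32_t> c{5};
	assert(atomic_sub_sketch(c, 2) == 3); // returns the new value
	assert(c.load() == 3);
	return 0;
}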