[Core] Allow locking/unlocking of `MutexLock`
This commit is contained in:
parent e53dc80f2f
commit 723f5500f4
@@ -81,17 +81,17 @@ struct _IP_ResolverPrivate {
                 continue;
             }

-            mutex.lock();
+            MutexLock lock(mutex);
             List<IPAddress> response;
             String hostname = queue[i].hostname;
             IP::Type type = queue[i].type;
-            mutex.unlock();
+            lock.temp_unlock();

             // We should not lock while resolving the hostname,
             // only when modifying the queue.
             IP::get_singleton()->_resolve_hostname(response, hostname, type);

-            MutexLock lock(mutex);
+            lock.temp_relock();
             // Could have been completed by another function, or deleted.
             if (queue[i].status.get() != IP::RESOLVER_STATUS_WAITING) {
                 continue;
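This first hunk shows the canonical use of the new API: take the lock once via RAII, then drop it only around slow work that must not hold it. A minimal sketch of the pattern, using the `Mutex`/`MutexLock` types this commit extends (the function name and comments are illustrative, not from the source):

    Mutex mutex;

    void process_one_request() { // Hypothetical caller.
        MutexLock lock(mutex); // Acquired here, auto-released at scope exit.
        // ... snapshot shared state under the lock ...
        lock.temp_unlock(); // Release while doing slow work (e.g. a DNS lookup).
        // ... blocking work runs unlocked here ...
        lock.temp_relock(); // Reacquire before touching shared state again.
        // ... publish results; the destructor performs the final unlock ...
    }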
@@ -131,21 +131,22 @@ PackedStringArray IP::resolve_hostname_addresses(const String &p_hostname, Type
     List<IPAddress> res;
     String key = _IP_ResolverPrivate::get_cache_key(p_hostname, p_type);

-    resolver->mutex.lock();
+    {
+        MutexLock lock(resolver->mutex);
         if (resolver->cache.has(key)) {
             res = resolver->cache[key];
         } else {
             // This should be run unlocked so the resolver thread can keep resolving
             // other requests.
-            resolver->mutex.unlock();
+            lock.temp_unlock();
             _resolve_hostname(res, p_hostname, p_type);
-            resolver->mutex.lock();
+            lock.temp_relock();
             // We might be overriding another result, but we don't care as long as the result is valid.
             if (res.size()) {
                 resolver->cache[key] = res;
             }
         }
-    resolver->mutex.unlock();
+    }

     PackedStringArray result;
     for (const IPAddress &E : res) {
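The explicit block introduced here bounds the guard's lifetime: the mutex is released at the closing brace, which is what replaces the old trailing `resolver->mutex.unlock()`. Schematically (condensed, not the literal source):

    {
        MutexLock lock(resolver->mutex);
        // ... cache lookup, with temp_unlock()/temp_relock() around the slow resolve ...
    } // The guard's destructor unlocks here, before the result conversion below.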
@@ -690,10 +690,10 @@ Ref<Resource> ResourceLoader::load_threaded_get(const String &p_path, Error *r_e
     if (Thread::is_main_thread() && !load_token->local_path.is_empty()) {
         const ThreadLoadTask &load_task = thread_load_tasks[load_token->local_path];
         while (load_task.status == THREAD_LOAD_IN_PROGRESS) {
-            thread_load_lock.~MutexLock();
+            thread_load_lock.temp_unlock();
             bool exit = !_ensure_load_progress();
             OS::get_singleton()->delay_usec(1000);
-            new (&thread_load_lock) MutexLock(thread_load_mutex);
+            thread_load_lock.temp_relock();
             if (exit) {
                 break;
             }
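This hunk retires a fragile idiom: unlocking by explicitly destroying the guard, then relocking by rebuilding it in place with placement new. Condensed from the lines above:

    // Before: the guard is a dead object between these two statements.
    thread_load_lock.~MutexLock();
    // ... any early exit here would run the destructor on a destroyed object ...
    new (&thread_load_lock) MutexLock(thread_load_mutex);

    // After: the guard stays alive, so its eventual destructor is always valid.
    thread_load_lock.temp_unlock();
    // ...
    thread_load_lock.temp_relock();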
@@ -754,7 +754,7 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
     bool loader_is_wtp = load_task.task_id != 0;
     if (loader_is_wtp) {
         // Loading thread is in the worker pool.
-        thread_load_mutex.unlock();
+        p_thread_load_lock.temp_unlock();

         PREPARE_FOR_WTP_WAIT
         Error wait_err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
@@ -770,7 +770,7 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
             _run_load_task(&load_task);
         }

-        thread_load_mutex.lock();
+        p_thread_load_lock.temp_relock();
         load_task.awaited = true;

         DEV_ASSERT(load_task.status == THREAD_LOAD_FAILED || load_task.status == THREAD_LOAD_LOADED);
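These two hunks imply that `_load_complete_inner` now receives the caller's guard (`p_thread_load_lock`) instead of touching `thread_load_mutex` directly, so the callee can release the mutex around the worker-pool wait. A hypothetical shape, since the full signature is outside this diff (the parameter type here is an assumption):

    // Sketch: the callee borrows the caller's RAII guard by reference and may
    // release the mutex temporarily, but never owns or destroys the guard.
    Ref<Resource> _load_complete_inner(LoadToken &p_load_token, Error *r_error,
            MutexLock<BinaryMutex> &p_thread_load_lock);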
@@ -1205,7 +1205,7 @@ void ResourceLoader::clear_translation_remaps() {
 void ResourceLoader::clear_thread_load_tasks() {
     // Bring the thing down as quickly as possible without causing deadlocks or leaks.

-    thread_load_mutex.lock();
+    MutexLock thread_load_lock(thread_load_mutex);
     cleaning_tasks = true;

     while (true) {
@@ -1224,9 +1224,9 @@ void ResourceLoader::clear_thread_load_tasks() {
         if (none_running) {
             break;
         }
-        thread_load_mutex.unlock();
+        thread_load_lock.temp_unlock();
         OS::get_singleton()->delay_usec(1000);
-        thread_load_mutex.lock();
+        thread_load_lock.temp_relock();
     }

     while (user_load_tokens.begin()) {
@@ -1241,7 +1241,6 @@ void ResourceLoader::clear_thread_load_tasks() {
     thread_load_tasks.clear();

     cleaning_tasks = false;
-    thread_load_mutex.unlock();
 }

 void ResourceLoader::load_path_remaps() {
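Taken together, the three `clear_thread_load_tasks` hunks replace manual lock()/unlock() pairing with one function-scope guard: only the polling sleep drops the lock, and the trailing unlock disappears because the destructor now performs it. A condensed sketch of the resulting shape (helper names are hypothetical):

    void clear_tasks_sketch() { // Stand-in for clear_thread_load_tasks().
        MutexLock lock(mutex);
        while (!all_tasks_done()) { // all_tasks_done() is illustrative.
            lock.temp_unlock(); // Let worker threads make progress while we sleep.
            OS::get_singleton()->delay_usec(1000);
            lock.temp_relock();
        }
        // No explicit unlock: the guard's destructor releases the mutex.
    }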
@@ -72,7 +72,7 @@ public:

 template <typename MutexT>
 class MutexLock {
-    THREADING_NAMESPACE::unique_lock<typename MutexT::StdMutexType> lock;
+    mutable THREADING_NAMESPACE::unique_lock<typename MutexT::StdMutexType> lock;

 public:
     explicit MutexLock(const MutexT &p_mutex) :
@@ -82,8 +82,18 @@ public:
     template <typename T = MutexT>
     _ALWAYS_INLINE_ THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &_get_lock(
             typename std::enable_if<std::is_same<T, THREADING_NAMESPACE::mutex>::value> * = nullptr) const {
-        return const_cast<THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &>(lock);
+        return lock;
     }

+    _ALWAYS_INLINE_ void temp_relock() const {
+        lock.lock();
+    }
+
+    _ALWAYS_INLINE_ void temp_unlock() const {
+        lock.unlock();
+    }
+
+    // TODO: Implement a `try_temp_relock` if needed (will also need a dummy method below).
 };

 using Mutex = MutexImpl<THREADING_NAMESPACE::recursive_mutex>; // Recursive, for general use
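Making the `unique_lock` member `mutable` is what lets the new `temp_relock()`/`temp_unlock()` methods, and `_get_lock()`, stay `const` while still mutating the lock; it is also why the old `const_cast` in `_get_lock()` can go. The shape, condensed to standard C++ (`THREADING_NAMESPACE` and `_ALWAYS_INLINE_` omitted for brevity):

    #include <mutex>

    class LockSketch { // Condensed stand-in for MutexLock.
        mutable std::unique_lock<std::mutex> lock; // mutable: const methods may (un)lock.

    public:
        explicit LockSketch(std::mutex &p_mutex) :
                lock(p_mutex) {}

        void temp_relock() const { lock.lock(); }
        void temp_unlock() const { lock.unlock(); }
    };

Because `std::unique_lock` tracks ownership, a guard destroyed after `temp_unlock()` (without a matching `temp_relock()`) does not unlock twice.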
@@ -109,6 +119,9 @@ template <typename MutexT>
 class MutexLock {
 public:
     MutexLock(const MutexT &p_mutex) {}
+
+    void temp_relock() const {}
+    void temp_unlock() const {}
 };

 using Mutex = MutexImpl;
@@ -103,6 +103,16 @@ public:
     ~MutexLock() {
         mutex.unlock();
     }
+
+    _ALWAYS_INLINE_ void temp_relock() const {
+        mutex.lock();
+    }
+
+    _ALWAYS_INLINE_ void temp_unlock() const {
+        mutex.unlock();
+    }
+
+    // TODO: Implement a `try_temp_relock` if needed (will also need a dummy method below).
 };

 #else // No threads.
@@ -119,6 +129,16 @@ public:
     void unlock() const {}
 };

+template <int Tag>
+class MutexLock<SafeBinaryMutex<Tag>> {
+public:
+    MutexLock(const SafeBinaryMutex<Tag> &p_mutex) {}
+    ~MutexLock() {}
+
+    void temp_relock() const {}
+    void temp_unlock() const {}
+};
+
 #endif // THREADS_ENABLED

 #endif // SAFE_BINARY_MUTEX_H
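The stub specialization added here in safe_binary_mutex.h mirrors the no-threads `MutexLock` earlier in the diff: when `THREADS_ENABLED` is off, every `temp_relock()`/`temp_unlock()` collapses to a no-op, so call sites compile unchanged without #ifdefs. In miniature (types are illustrative):

    // Sketch: single-threaded builds swap in do-nothing guards, so code using
    // temp_unlock()/temp_relock() needs no conditional compilation at the call site.
    struct DummyMutex {};

    class DummyLock {
    public:
        explicit DummyLock(const DummyMutex &) {}
        void temp_relock() const {} // No-op: nothing to lock without threads.
        void temp_unlock() const {}
    };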