From 723f5500f4c57546f96258fb5c5f5d98959693d4 Mon Sep 17 00:00:00 2001
From: A Thousand Ships <96648715+AThousandShips@users.noreply.github.com>
Date: Sat, 29 Jun 2024 13:01:01 +0200
Subject: [PATCH] [Core] Allow locking/unlocking of `MutexLock`

---
 core/io/ip.cpp              | 33 +++++++++++++++++----------------
 core/io/resource_loader.cpp | 15 +++++++--------
 core/os/mutex.h             | 17 +++++++++++++++--
 core/os/safe_binary_mutex.h | 20 ++++++++++++++++++++
 4 files changed, 59 insertions(+), 26 deletions(-)

diff --git a/core/io/ip.cpp b/core/io/ip.cpp
index f20d65bef90..38c71b19fa3 100644
--- a/core/io/ip.cpp
+++ b/core/io/ip.cpp
@@ -81,17 +81,17 @@ struct _IP_ResolverPrivate {
 				continue;
 			}
 
-			mutex.lock();
+			MutexLock lock(mutex);
 			List<IPAddress> response;
 			String hostname = queue[i].hostname;
 			IP::Type type = queue[i].type;
-			mutex.unlock();
+			lock.temp_unlock();
 
 			// We should not lock while resolving the hostname,
 			// only when modifying the queue.
 			IP::get_singleton()->_resolve_hostname(response, hostname, type);
 
-			MutexLock lock(mutex);
+			lock.temp_relock();
 			// Could have been completed by another function, or deleted.
 			if (queue[i].status.get() != IP::RESOLVER_STATUS_WAITING) {
 				continue;
@@ -131,21 +131,22 @@ PackedStringArray IP::resolve_hostname_addresses(const String &p_hostname, Type
 	List<IPAddress> res;
 	String key = _IP_ResolverPrivate::get_cache_key(p_hostname, p_type);
 
-	resolver->mutex.lock();
-	if (resolver->cache.has(key)) {
-		res = resolver->cache[key];
-	} else {
-		// This should be run unlocked so the resolver thread can keep resolving
-		// other requests.
-		resolver->mutex.unlock();
-		_resolve_hostname(res, p_hostname, p_type);
-		resolver->mutex.lock();
-		// We might be overriding another result, but we don't care as long as the result is valid.
-		if (res.size()) {
-			resolver->cache[key] = res;
+	{
+		MutexLock lock(resolver->mutex);
+		if (resolver->cache.has(key)) {
+			res = resolver->cache[key];
+		} else {
+			// This should be run unlocked so the resolver thread can keep resolving
+			// other requests.
+			lock.temp_unlock();
+			_resolve_hostname(res, p_hostname, p_type);
+			lock.temp_relock();
+			// We might be overriding another result, but we don't care as long as the result is valid.
+			if (res.size()) {
+				resolver->cache[key] = res;
+			}
 		}
 	}
-	resolver->mutex.unlock();
 
 	PackedStringArray result;
 	for (const IPAddress &E : res) {
diff --git a/core/io/resource_loader.cpp b/core/io/resource_loader.cpp
index eaee849a876..023153791a7 100644
--- a/core/io/resource_loader.cpp
+++ b/core/io/resource_loader.cpp
@@ -690,10 +690,10 @@ Ref<Resource> ResourceLoader::load_threaded_get(const String &p_path, Error *r_e
 	if (Thread::is_main_thread() && !load_token->local_path.is_empty()) {
 		const ThreadLoadTask &load_task = thread_load_tasks[load_token->local_path];
 		while (load_task.status == THREAD_LOAD_IN_PROGRESS) {
-			thread_load_lock.~MutexLock();
+			thread_load_lock.temp_unlock();
 			bool exit = !_ensure_load_progress();
 			OS::get_singleton()->delay_usec(1000);
-			new (&thread_load_lock) MutexLock(thread_load_mutex);
+			thread_load_lock.temp_relock();
 			if (exit) {
 				break;
 			}
@@ -754,7 +754,7 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
 		bool loader_is_wtp = load_task.task_id != 0;
 		if (loader_is_wtp) {
 			// Loading thread is in the worker pool.
-			thread_load_mutex.unlock();
+			p_thread_load_lock.temp_unlock();
 
 			PREPARE_FOR_WTP_WAIT
 			Error wait_err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
@@ -770,7 +770,7 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
 				_run_load_task(&load_task);
 			}
 
-			thread_load_mutex.lock();
+			p_thread_load_lock.temp_relock();
 			load_task.awaited = true;
 
 			DEV_ASSERT(load_task.status == THREAD_LOAD_FAILED || load_task.status == THREAD_LOAD_LOADED);
@@ -1205,7 +1205,7 @@ void ResourceLoader::clear_translation_remaps() {
 
 void ResourceLoader::clear_thread_load_tasks() {
 	// Bring the thing down as quickly as possible without causing deadlocks or leaks.
-	thread_load_mutex.lock();
+	MutexLock thread_load_lock(thread_load_mutex);
 	cleaning_tasks = true;
 
 	while (true) {
@@ -1224,9 +1224,9 @@ void ResourceLoader::clear_thread_load_tasks() {
 		if (none_running) {
 			break;
 		}
-		thread_load_mutex.unlock();
+		thread_load_lock.temp_unlock();
 		OS::get_singleton()->delay_usec(1000);
-		thread_load_mutex.lock();
+		thread_load_lock.temp_relock();
 	}
 
 	while (user_load_tokens.begin()) {
@@ -1241,7 +1241,6 @@ void ResourceLoader::clear_thread_load_tasks() {
 	thread_load_tasks.clear();
 
 	cleaning_tasks = false;
-	thread_load_mutex.unlock();
 }
 
 void ResourceLoader::load_path_remaps() {
diff --git a/core/os/mutex.h b/core/os/mutex.h
index 773b31828dc..a968fd7029f 100644
--- a/core/os/mutex.h
+++ b/core/os/mutex.h
@@ -72,7 +72,7 @@ public:
 
 template <class MutexT>
 class MutexLock {
-	THREADING_NAMESPACE::unique_lock<typename MutexT::StdMutexType> lock;
+	mutable THREADING_NAMESPACE::unique_lock<typename MutexT::StdMutexType> lock;
 
 public:
 	explicit MutexLock(const MutexT &p_mutex) :
@@ -82,8 +82,18 @@ public:
 	template <typename T = MutexT>
 	_ALWAYS_INLINE_ THREADING_NAMESPACE::unique_lock<typename MutexT::StdMutexType> &_get_lock(
 			typename std::enable_if<std::is_same<T, MutexImpl<THREADING_NAMESPACE::mutex>>::value> * = nullptr) const {
-		return const_cast<THREADING_NAMESPACE::unique_lock<typename MutexT::StdMutexType> &>(lock);
+		return lock;
 	}
+
+	_ALWAYS_INLINE_ void temp_relock() const {
+		lock.lock();
+	}
+
+	_ALWAYS_INLINE_ void temp_unlock() const {
+		lock.unlock();
+	}
+
+	// TODO: Implement a `try_temp_relock` if needed (will also need a dummy method below).
 };
 
 using Mutex = MutexImpl<THREADING_NAMESPACE::recursive_mutex>; // Recursive, for general use
@@ -109,6 +119,9 @@ template <class MutexT>
 class MutexLock {
 public:
 	MutexLock(const MutexT &p_mutex) {}
+
+	void temp_relock() const {}
+	void temp_unlock() const {}
 };
 
 using Mutex = MutexImpl;
diff --git a/core/os/safe_binary_mutex.h b/core/os/safe_binary_mutex.h
index 4ca4b50b02c..4a3464ce7f4 100644
--- a/core/os/safe_binary_mutex.h
+++ b/core/os/safe_binary_mutex.h
@@ -103,6 +103,16 @@ public:
 	~MutexLock() {
 		mutex.unlock();
 	}
+
+	_ALWAYS_INLINE_ void temp_relock() const {
+		mutex.lock();
+	}
+
+	_ALWAYS_INLINE_ void temp_unlock() const {
+		mutex.unlock();
+	}
+
+	// TODO: Implement a `try_temp_relock` if needed (will also need a dummy method below).
 };
 
 #else // No threads.
@@ -119,6 +129,16 @@ public:
 	void unlock() const {}
 };
 
+template <int Tag>
+class MutexLock<SafeBinaryMutex<Tag>> {
+public:
+	MutexLock(const SafeBinaryMutex<Tag> &p_mutex) {}
+	~MutexLock() {}
+
+	void temp_relock() const {}
+	void temp_unlock() const {}
+};
+
 #endif // THREADS_ENABLED
 
 #endif // SAFE_BINARY_MUTEX_H