diff --git a/core/io/resource_loader.cpp b/core/io/resource_loader.cpp
index fa964ade820..0dcde5509a2 100644
--- a/core/io/resource_loader.cpp
+++ b/core/io/resource_loader.cpp
@@ -302,7 +302,8 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
 	thread_load_mutex.unlock();
 
 	// Thread-safe either if it's the current thread or a brand new one.
-	CallQueue *mq_override = nullptr;
+	bool mq_override_present = false;
+	CallQueue *own_mq_override = nullptr;
 	if (load_nesting == 0) {
 		load_paths_stack = memnew(Vector<String>);
@@ -310,8 +311,12 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
 			load_paths_stack->push_back(load_task.dependent_path);
 		}
 		if (!Thread::is_main_thread()) {
-			mq_override = memnew(CallQueue);
-			MessageQueue::set_thread_singleton_override(mq_override);
+			// Let the caller thread use its own, for added flexibility. Provide one otherwise.
+			if (MessageQueue::get_singleton() == MessageQueue::get_main_singleton()) {
+				own_mq_override = memnew(CallQueue);
+				MessageQueue::set_thread_singleton_override(own_mq_override);
+			}
+			mq_override_present = true;
 			set_current_thread_safe_for_nodes(true);
 		}
 	} else {
@@ -324,8 +329,8 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
 	}
 
 	Ref<Resource> res = _load(load_task.remapped_path, load_task.remapped_path != load_task.local_path ? load_task.local_path : String(), load_task.type_hint, load_task.cache_mode, &load_task.error, load_task.use_sub_threads, &load_task.progress);
-	if (mq_override) {
-		mq_override->flush();
+	if (mq_override_present) {
+		MessageQueue::get_singleton()->flush();
 	}
 
 	thread_load_mutex.lock();
@@ -394,8 +399,9 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
 	thread_load_mutex.unlock();
 
 	if (load_nesting == 0) {
-		if (mq_override) {
-			memdelete(mq_override);
+		if (own_mq_override) {
+			MessageQueue::set_thread_singleton_override(nullptr);
+			memdelete(own_mq_override);
 		}
 		memdelete(load_paths_stack);
 	}
diff --git a/core/object/message_queue.cpp b/core/object/message_queue.cpp
index 762bab75e79..4b0b1ce63d8 100644
--- a/core/object/message_queue.cpp
+++ b/core/object/message_queue.cpp
@@ -481,10 +481,7 @@ CallQueue::~CallQueue() {
 	if (!allocator_is_custom) {
 		memdelete(allocator);
 	}
-	// This is done here to avoid a circular dependency between the safety checks and the thread singleton pointer.
-	if (this == MessageQueue::thread_singleton) {
-		MessageQueue::thread_singleton = nullptr;
-	}
+	DEV_ASSERT(!is_current_thread_override);
 }
 
 //////////////////////
@@ -493,7 +490,6 @@ CallQueue *MessageQueue::main_singleton = nullptr;
 thread_local CallQueue *MessageQueue::thread_singleton = nullptr;
 
 void MessageQueue::set_thread_singleton_override(CallQueue *p_thread_singleton) {
-	DEV_ASSERT(p_thread_singleton); // To unset the thread singleton, don't call this with nullptr, but just memfree() it.
 #ifdef DEV_ENABLED
 	if (thread_singleton) {
 		thread_singleton->is_current_thread_override = false;
diff --git a/core/object/worker_thread_pool.cpp b/core/object/worker_thread_pool.cpp
index 9c9e0fa8991..a7c0a0353e6 100644
--- a/core/object/worker_thread_pool.cpp
+++ b/core/object/worker_thread_pool.cpp
@@ -53,7 +53,9 @@ void WorkerThreadPool::_process_task(Task *p_task) {
 	int pool_thread_index = thread_ids[Thread::get_caller_id()];
 	ThreadData &curr_thread = threads[pool_thread_index];
 	Task *prev_task = nullptr; // In case this is recursively called.
+	bool safe_for_nodes_backup = is_current_thread_safe_for_nodes();
+	CallQueue *call_queue_backup = MessageQueue::get_singleton() != MessageQueue::get_main_singleton() ? MessageQueue::get_singleton() : nullptr;
 
 	{
 		// Tasks must start with this unset. They are free to set-and-forget otherwise.
 		set_current_thread_safe_for_nodes(false);
@@ -169,6 +171,7 @@ void WorkerThreadPool::_process_task(Task *p_task) {
 	}
 
 	set_current_thread_safe_for_nodes(safe_for_nodes_backup);
+	MessageQueue::set_thread_singleton_override(call_queue_backup);
 #endif
 }
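Reviewer note, not part of the patch: taken together, the three files establish one ownership rule for the per-thread CallQueue override. ResourceLoader now installs its own queue only when the worker thread is still pointing at the main singleton, flushes whichever queue is current, and unsets the override before deleting it (CallQueue's destructor now asserts it is no longer installed), while WorkerThreadPool backs up whatever override a task's thread had and restores it afterwards. The standalone sketch below mirrors that flow with simplified stand-in types; CallQueue, MessageQueue, thread_load_function, and process_task here are minimal mock-ups written for illustration, not the real engine classes or functions.

// Standalone illustration of the override-ownership pattern in this patch.
// All types below are simplified stand-ins, not Godot's real classes.
#include <cassert>
#include <cstdio>
#include <thread>

struct CallQueue {
	void flush() { std::printf("flush %p\n", static_cast<void *>(this)); }
};

struct MessageQueue {
	static CallQueue *main_singleton;
	static thread_local CallQueue *thread_singleton;
	static CallQueue *get_singleton() { return thread_singleton ? thread_singleton : main_singleton; }
	static CallQueue *get_main_singleton() { return main_singleton; }
	static void set_thread_singleton_override(CallQueue *p_override) { thread_singleton = p_override; }
};

CallQueue main_queue;
CallQueue *MessageQueue::main_singleton = &main_queue;
thread_local CallQueue *MessageQueue::thread_singleton = nullptr;

// Mirrors the patched ResourceLoader::_thread_load_function() (assuming a
// non-main thread, load_nesting == 0): only provide an override if the caller
// thread didn't bring its own.
void thread_load_function() {
	bool mq_override_present = false;
	CallQueue *own_mq_override = nullptr;
	if (MessageQueue::get_singleton() == MessageQueue::get_main_singleton()) {
		own_mq_override = new CallQueue;
		MessageQueue::set_thread_singleton_override(own_mq_override);
	}
	mq_override_present = true;

	// ... _load() would run here ...

	if (mq_override_present) {
		MessageQueue::get_singleton()->flush(); // The caller's queue or our own.
	}
	if (own_mq_override) {
		MessageQueue::set_thread_singleton_override(nullptr); // Unset first; the real
		delete own_mq_override; // patch asserts in ~CallQueue that it's uninstalled.
	}
}

// Mirrors the patched WorkerThreadPool::_process_task(): back up the thread's
// override (nullptr if it is on the main queue) and restore it afterwards.
void process_task() {
	CallQueue *call_queue_backup = MessageQueue::get_singleton() != MessageQueue::get_main_singleton() ? MessageQueue::get_singleton() : nullptr;
	thread_load_function();
	MessageQueue::set_thread_singleton_override(call_queue_backup);
}

int main() {
	// Case 1: a bare worker thread; the loader creates and removes its own queue.
	std::thread t1(process_task);
	t1.join();

	// Case 2: the caller brought its own override; the loader reuses it and
	// leaves it installed, as the new comment in resource_loader.cpp describes.
	std::thread t2([] {
		CallQueue own;
		MessageQueue::set_thread_singleton_override(&own);
		thread_load_function();
		assert(MessageQueue::get_singleton() == &own);
		MessageQueue::set_thread_singleton_override(nullptr);
	});
	t2.join();
	return 0;
}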