Merge pull request #93124 from RandomShaper/skull_trio
`ResourceLoader`: Let the caller thread use its own message queue override
Commit 087ef4b942
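Previously, `_thread_load_function` unconditionally allocated a private `CallQueue` override for every load performed on a non-main thread. With this change, if the calling thread has already installed its own message queue override (i.e. `MessageQueue::get_singleton()` differs from `MessageQueue::get_main_singleton()`), the loader reuses it; only otherwise does it allocate one of its own (`own_mq_override`), which it detaches and frees once the load finishes. In both cases the relevant queue is flushed after loading, tracked via `mq_override_present`.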
core/io/resource_loader.cpp:

@@ -302,7 +302,8 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
 	thread_load_mutex.unlock();
 
 	// Thread-safe either if it's the current thread or a brand new one.
-	CallQueue *mq_override = nullptr;
+	bool mq_override_present = false;
+	CallQueue *own_mq_override = nullptr;
 	if (load_nesting == 0) {
 		load_paths_stack = memnew(Vector<String>);
 
@@ -310,8 +311,12 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
 			load_paths_stack->push_back(load_task.dependent_path);
 		}
 		if (!Thread::is_main_thread()) {
-			mq_override = memnew(CallQueue);
-			MessageQueue::set_thread_singleton_override(mq_override);
+			// Let the caller thread use its own, for added flexibility. Provide one otherwise.
+			if (MessageQueue::get_singleton() == MessageQueue::get_main_singleton()) {
+				own_mq_override = memnew(CallQueue);
+				MessageQueue::set_thread_singleton_override(own_mq_override);
+			}
+			mq_override_present = true;
 			set_current_thread_safe_for_nodes(true);
 		}
 	} else {
@@ -324,8 +329,8 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
 	}
 
 	Ref<Resource> res = _load(load_task.remapped_path, load_task.remapped_path != load_task.local_path ? load_task.local_path : String(), load_task.type_hint, load_task.cache_mode, &load_task.error, load_task.use_sub_threads, &load_task.progress);
-	if (mq_override) {
-		mq_override->flush();
+	if (mq_override_present) {
+		MessageQueue::get_singleton()->flush();
 	}
 
 	thread_load_mutex.lock();
@@ -394,8 +399,9 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
 	thread_load_mutex.unlock();
 
 	if (load_nesting == 0) {
-		if (mq_override) {
-			memdelete(mq_override);
+		if (own_mq_override) {
+			MessageQueue::set_thread_singleton_override(nullptr);
+			memdelete(own_mq_override);
 		}
 		memdelete(load_paths_stack);
 	}
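As a rough illustration of what this enables, a caller thread can now pre-install its own `CallQueue` and have loads performed on that thread flush deferred calls into it, instead of into a loader-private queue. A minimal sketch, assuming an engine-side worker thread (the thread function and resource path are hypothetical, not part of this PR):

void my_loader_thread_main() { // Hypothetical thread entry point.
	// Install a per-thread message queue override up front.
	CallQueue *my_mq = memnew(CallQueue);
	MessageQueue::set_thread_singleton_override(my_mq);

	// Loads on this thread now reuse my_mq rather than allocating a
	// private override, so calls deferred during loading land here.
	Ref<Resource> res = ResourceLoader::load("res://heavy_scene.tscn");

	my_mq->flush(); // Drain anything the load deferred.

	// Detach before freeing: set_thread_singleton_override() now accepts
	// nullptr, and ~CallQueue() asserts the override was already unset.
	MessageQueue::set_thread_singleton_override(nullptr);
	memdelete(my_mq);
}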
core/object/message_queue.cpp:

@@ -481,10 +481,7 @@ CallQueue::~CallQueue() {
 	if (!allocator_is_custom) {
 		memdelete(allocator);
 	}
-	// This is done here to avoid a circular dependency between the safety checks and the thread singleton pointer.
-	if (this == MessageQueue::thread_singleton) {
-		MessageQueue::thread_singleton = nullptr;
-	}
+	DEV_ASSERT(!is_current_thread_override);
 }
 
 //////////////////////
@@ -493,7 +490,6 @@ CallQueue *MessageQueue::main_singleton = nullptr;
 thread_local CallQueue *MessageQueue::thread_singleton = nullptr;
 
 void MessageQueue::set_thread_singleton_override(CallQueue *p_thread_singleton) {
-	DEV_ASSERT(p_thread_singleton); // To unset the thread singleton, don't call this with nullptr, but just memfree() it.
 #ifdef DEV_ENABLED
 	if (thread_singleton) {
 		thread_singleton->is_current_thread_override = false;
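The destructor change above flips the teardown contract for overrides: before, `~CallQueue()` cleared `MessageQueue::thread_singleton` itself, so callers could simply `memdelete()` an override; now `set_thread_singleton_override()` accepts `nullptr` to detach, and the destructor only `DEV_ASSERT`s that detachment already happened. The teardown in `_thread_load_function` above follows that order:

// New teardown order for a thread-local override (mirrors the
// resource_loader.cpp hunk above):
MessageQueue::set_thread_singleton_override(nullptr); // Detach first; nullptr is now allowed.
memdelete(own_mq_override); // ~CallQueue() DEV_ASSERTs it is no longer the current override.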
core/object/worker_thread_pool.cpp:

@@ -53,7 +53,9 @@ void WorkerThreadPool::_process_task(Task *p_task) {
 	int pool_thread_index = thread_ids[Thread::get_caller_id()];
 	ThreadData &curr_thread = threads[pool_thread_index];
 	Task *prev_task = nullptr; // In case this is recursively called.
 
 	bool safe_for_nodes_backup = is_current_thread_safe_for_nodes();
+	CallQueue *call_queue_backup = MessageQueue::get_singleton() != MessageQueue::get_main_singleton() ? MessageQueue::get_singleton() : nullptr;
+
 	{
 		// Tasks must start with this unset. They are free to set-and-forget otherwise.
@@ -169,6 +171,7 @@ void WorkerThreadPool::_process_task(Task *p_task) {
 	}
 
 	set_current_thread_safe_for_nodes(safe_for_nodes_backup);
+	MessageQueue::set_thread_singleton_override(call_queue_backup);
 #endif
 }
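With pool tasks now able to run under arbitrary overrides, `_process_task` snapshots the calling thread's queue override before the task body and restores it afterwards, so one task's override cannot leak into the next task scheduled on the same pool thread. The save/restore pattern, as a simplified sketch (`run_task_body()` is a hypothetical stand-in, not the actual pool code):

// Snapshot: non-null only if this thread currently has its own override.
CallQueue *call_queue_backup = MessageQueue::get_singleton() != MessageQueue::get_main_singleton()
		? MessageQueue::get_singleton()
		: nullptr;

run_task_body(); // May install (and forget) its own override.

// Restore the previous state; nullptr routes get_singleton() back to the main queue.
MessageQueue::set_thread_singleton_override(call_queue_backup);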