Merge pull request #93082 from RandomShaper/fix_err_deadlock

ResourceLoader: Avoid deadlock when awaiting a loader thread that failed early
Rémi Verschelde 2024-06-14 17:13:28 +02:00
commit 08a21fda8d
GPG Key ID: C3336907360768E1
1 changed file with 37 additions and 26 deletions
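
The scenario in the title is the classic one of awaiting a completion that will never be signalled: per the new comment in the hunk below, if the load errored early, the status is no longer THREAD_LOAD_IN_PROGRESS and blocking on it would hang. A minimal, self-contained C++17 reduction of that pattern, using standard-library primitives and hypothetical names rather than anything from Godot's ResourceLoader:

	// Reduction of the bug class this commit guards against: if the loader has
	// already failed, blocking on its "done" signal without re-checking the
	// status would hang forever. Names and structure here are illustrative only.
	#include <condition_variable>
	#include <iostream>
	#include <mutex>
	#include <thread>

	enum class LoadStatus { IN_PROGRESS, LOADED, FAILED };

	std::mutex load_mutex;
	std::condition_variable load_done;
	LoadStatus status = LoadStatus::IN_PROGRESS;

	void loader_that_fails_early() {
		std::lock_guard<std::mutex> lock(load_mutex);
		status = LoadStatus::FAILED; // Bails out before doing any real work...
		load_done.notify_all();      // ...and never signals again after this point.
	}

	void await_load() {
		std::unique_lock<std::mutex> lock(load_mutex);
		// The predicate re-checks the status under the mutex before (and after)
		// sleeping; a bare wait() here would deadlock once the loader is gone.
		load_done.wait(lock, [] { return status != LoadStatus::IN_PROGRESS; });
		std::cout << "load ended with status " << static_cast<int>(status) << "\n";
	}

	int main() {
		std::thread loader(loader_that_fails_early);
		loader.join(); // The loader has long finished (and failed) before anyone awaits it.
		await_load();  // Returns immediately thanks to the status check.
		return 0;
	}

In the actual change, the equivalent of that predicate is the new `if (load_task.status == THREAD_LOAD_IN_PROGRESS)` check, and its `else` branch merely re-locks thread_load_mutex when it had been released for the worker-pool wait.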

core/io/resource_loader.cpp

@@ -656,39 +656,50 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
 				return Ref<Resource>();
 			}
 
-			if (load_task.task_id != 0) {
+			bool loader_is_wtp = load_task.task_id != 0;
+			Error wtp_task_err = FAILED;
+			if (loader_is_wtp) {
 				// Loading thread is in the worker pool.
 				thread_load_mutex.unlock();
-				Error err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
-				if (err == ERR_BUSY) {
-					// The WorkerThreadPool has reported that the current task wants to await on an older one.
-					// That't not allowed for safety, to avoid deadlocks. Fortunately, though, in the context of
-					// resource loading that means that the task to wait for can be restarted here to break the
-					// cycle, with as much recursion into this process as needed.
-					// When the stack is eventually unrolled, the original load will have been notified to go on.
-					// CACHE_MODE_IGNORE is needed because, otherwise, the new request would just see there's
-					// an ongoing load for that resource and wait for it again. This value forces a new load.
-					Ref<ResourceLoader::LoadToken> token = _load_start(load_task.local_path, load_task.type_hint, LOAD_THREAD_DISTRIBUTE, ResourceFormatLoader::CACHE_MODE_IGNORE);
-					Ref<Resource> resource = _load_complete(*token.ptr(), &err);
-					if (r_error) {
-						*r_error = err;
+				wtp_task_err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
+			}
+
+			if (load_task.status == THREAD_LOAD_IN_PROGRESS) { // If early errored, awaiting would deadlock.
+				if (loader_is_wtp) {
+					if (wtp_task_err == ERR_BUSY) {
+						// The WorkerThreadPool has reported that the current task wants to await on an older one.
+						// That't not allowed for safety, to avoid deadlocks. Fortunately, though, in the context of
+						// resource loading that means that the task to wait for can be restarted here to break the
+						// cycle, with as much recursion into this process as needed.
+						// When the stack is eventually unrolled, the original load will have been notified to go on.
+						// CACHE_MODE_IGNORE is needed because, otherwise, the new request would just see there's
+						// an ongoing load for that resource and wait for it again. This value forces a new load.
+						Ref<ResourceLoader::LoadToken> token = _load_start(load_task.local_path, load_task.type_hint, LOAD_THREAD_DISTRIBUTE, ResourceFormatLoader::CACHE_MODE_IGNORE);
+						Ref<Resource> resource = _load_complete(*token.ptr(), &wtp_task_err);
+						if (r_error) {
+							*r_error = wtp_task_err;
+						}
+						thread_load_mutex.lock();
+						return resource;
+					} else {
+						DEV_ASSERT(wtp_task_err == OK);
+						thread_load_mutex.lock();
+						load_task.awaited = true;
 					}
-					thread_load_mutex.lock();
-					return resource;
 				} else {
-					DEV_ASSERT(err == OK);
-					thread_load_mutex.lock();
-					load_task.awaited = true;
+					// Loading thread is main or user thread.
+					if (!load_task.cond_var) {
+						load_task.cond_var = memnew(ConditionVariable);
+					}
+					do {
+						load_task.cond_var->wait(p_thread_load_lock);
+						DEV_ASSERT(thread_load_tasks.has(p_load_token.local_path) && p_load_token.get_reference_count());
+					} while (load_task.cond_var);
 				}
 			} else {
-				// Loading thread is main or user thread.
-				if (!load_task.cond_var) {
-					load_task.cond_var = memnew(ConditionVariable);
+				if (loader_is_wtp) {
+					thread_load_mutex.lock();
 				}
-				do {
-					load_task.cond_var->wait(p_thread_load_lock);
-					DEV_ASSERT(thread_load_tasks.has(p_load_token.local_path) && p_load_token.get_reference_count());
-				} while (load_task.cond_var);
 			}
 		}
 
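
The comment block in the hunk above describes a second guard: when WorkerThreadPool refuses a nested wait with ERR_BUSY (because a pool task waiting on an older pool task could deadlock), the loader breaks the cycle by redoing the load on the current thread instead of waiting. A rough, self-contained sketch of that idea, with stand-in names rather than the real WorkerThreadPool/ResourceLoader APIs:

	// Illustrative only: stand-ins for wait_for_task_completion() and the
	// _load_start()/_load_complete() retry; the real code also passes
	// CACHE_MODE_IGNORE so the retry does not simply wait on the very load it
	// is trying to unblock.
	#include <iostream>
	#include <string>

	enum class WaitResult { OK, BUSY };

	// Pretend the pool refuses the wait because it would form a wait cycle.
	WaitResult wait_for_task(int /*task_id*/) {
		return WaitResult::BUSY;
	}

	// Stand-in for restarting the load on the calling thread, bypassing the cache.
	int load_ignoring_cache(const std::string &path) {
		std::cout << "redoing the load of " << path << " on this thread\n";
		return 42; // Pretend this is the loaded resource.
	}

	int complete_load(const std::string &path, int task_id) {
		if (wait_for_task(task_id) == WaitResult::BUSY) {
			// Waiting was refused: break the potential deadlock by loading here,
			// recursing into complete_load() as needed for nested dependencies.
			return load_ignoring_cache(path);
		}
		// Waiting succeeded: the real code would now read the task's shared result.
		return 0;
	}

	int main() {
		std::cout << complete_load("res://icon.svg", 1) << "\n";
		return 0;
	}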