diff --git a/core/io/resource_loader.cpp b/core/io/resource_loader.cpp
index c3c37aa89d5..fa964ade820 100644
--- a/core/io/resource_loader.cpp
+++ b/core/io/resource_loader.cpp
@@ -656,39 +656,50 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
 		return Ref<Resource>();
 	}
 
-	if (load_task.task_id != 0) {
+	bool loader_is_wtp = load_task.task_id != 0;
+	Error wtp_task_err = FAILED;
+	if (loader_is_wtp) {
 		// Loading thread is in the worker pool.
 		thread_load_mutex.unlock();
-		Error err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
-		if (err == ERR_BUSY) {
-			// The WorkerThreadPool has reported that the current task wants to await on an older one.
-			// That't not allowed for safety, to avoid deadlocks. Fortunately, though, in the context of
-			// resource loading that means that the task to wait for can be restarted here to break the
-			// cycle, with as much recursion into this process as needed.
-			// When the stack is eventually unrolled, the original load will have been notified to go on.
-			// CACHE_MODE_IGNORE is needed because, otherwise, the new request would just see there's
-			// an ongoing load for that resource and wait for it again. This value forces a new load.
-			Ref<LoadToken> token = _load_start(load_task.local_path, load_task.type_hint, LOAD_THREAD_DISTRIBUTE, ResourceFormatLoader::CACHE_MODE_IGNORE);
-			Ref<Resource> resource = _load_complete(*token.ptr(), &err);
-			if (r_error) {
-				*r_error = err;
+		wtp_task_err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
+	}
+
+	if (load_task.status == THREAD_LOAD_IN_PROGRESS) { // If early errored, awaiting would deadlock.
+		if (loader_is_wtp) {
+			if (wtp_task_err == ERR_BUSY) {
+				// The WorkerThreadPool has reported that the current task wants to await on an older one.
+				// That's not allowed for safety, to avoid deadlocks. Fortunately, though, in the context of
+				// resource loading that means that the task to wait for can be restarted here to break the
+				// cycle, with as much recursion into this process as needed.
+				// When the stack is eventually unrolled, the original load will have been notified to go on.
+				// CACHE_MODE_IGNORE is needed because, otherwise, the new request would just see there's
+				// an ongoing load for that resource and wait for it again. This value forces a new load.
+				Ref<LoadToken> token = _load_start(load_task.local_path, load_task.type_hint, LOAD_THREAD_DISTRIBUTE, ResourceFormatLoader::CACHE_MODE_IGNORE);
+				Ref<Resource> resource = _load_complete(*token.ptr(), &wtp_task_err);
+				if (r_error) {
+					*r_error = wtp_task_err;
+				}
+				thread_load_mutex.lock();
+				return resource;
+			} else {
+				DEV_ASSERT(wtp_task_err == OK);
+				thread_load_mutex.lock();
+				load_task.awaited = true;
 			}
-			thread_load_mutex.lock();
-			return resource;
 		} else {
-			DEV_ASSERT(err == OK);
-			thread_load_mutex.lock();
-			load_task.awaited = true;
+			// Loading thread is main or user thread.
+			if (!load_task.cond_var) {
+				load_task.cond_var = memnew(ConditionVariable);
+			}
+			do {
+				load_task.cond_var->wait(p_thread_load_lock);
+				DEV_ASSERT(thread_load_tasks.has(p_load_token.local_path) && p_load_token.get_reference_count());
+			} while (load_task.cond_var);
 		}
 	} else {
-		// Loading thread is main or user thread.
-		if (!load_task.cond_var) {
-			load_task.cond_var = memnew(ConditionVariable);
+		if (loader_is_wtp) {
+			thread_load_mutex.lock();
 		}
-		do {
-			load_task.cond_var->wait(p_thread_load_lock);
-			DEV_ASSERT(thread_load_tasks.has(p_load_token.local_path) && p_load_token.get_reference_count());
-		} while (load_task.cond_var);
 	}
 }
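
Note on the pattern (not part of the patch): the hunk distinguishes two ways of awaiting an in-flight load. A waiter that is itself a WorkerThreadPool task may get ERR_BUSY back, meaning the pool refuses a wait that would deadlock on an older task; the recovery is to redo the load inline with CACHE_MODE_IGNORE. A waiter on the main or a user thread instead blocks on the task's condition variable. The sketch below mirrors both paths with plain C++ primitives; every name in it (the wait_for_task_completion stub, load_ignoring_cache, the two await_* helpers) is an illustrative stand-in, not Godot API, and the ERR_BUSY result is hard-coded to force the recovery path.

// Standalone sketch of the two waiting strategies above (illustrative names,
// not Godot code). Compile with: g++ -std=c++17 -pthread sketch.cpp
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

enum Error { OK, ERR_BUSY };

struct LoadTask {
	std::mutex mutex;
	std::condition_variable cond_var;
	bool done = false;
	int result = 0;
};

// Stand-in for WorkerThreadPool::wait_for_task_completion(): hard-coded to
// report that waiting from this pool thread would deadlock.
static Error wait_for_task_completion(LoadTask &) {
	return ERR_BUSY;
}

// Stand-in for the CACHE_MODE_IGNORE restart: redo the work inline instead
// of waiting, which is what breaks the wait cycle.
static int load_ignoring_cache() {
	return 42;
}

// Path taken when the waiter is itself a worker-pool task.
static int await_from_pool_thread(LoadTask &task) {
	if (wait_for_task_completion(task) == ERR_BUSY) {
		return load_ignoring_cache(); // Waiting would deadlock; reload instead.
	}
	return task.result; // OK: the awaited task ran to completion.
}

// Path taken when the waiter is the main or a user thread: sleep on the
// task's condition variable until the loader signals completion.
static int await_from_user_thread(LoadTask &task) {
	std::unique_lock<std::mutex> lock(task.mutex);
	task.cond_var.wait(lock, [&] { return task.done; });
	return task.result;
}

int main() {
	LoadTask task;
	std::thread loader([&] {
		std::lock_guard<std::mutex> lock(task.mutex);
		task.result = 42;
		task.done = true;
		task.cond_var.notify_all();
	});
	std::printf("pool-thread path: %d\n", await_from_pool_thread(task));
	std::printf("user-thread path: %d\n", await_from_user_thread(task));
	loader.join();
	return 0;
}

What the patch adds on top of this pattern is the load_task.status == THREAD_LOAD_IN_PROGRESS guard: per its own comment ("If early errored, awaiting would deadlock"), a task that failed before properly starting will never signal its waiters, so neither awaiting path may be entered and a worker-pool waiter only re-acquires the mutex.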