Merge pull request #74405 from RandomShaper/fix_res_loader

Fix multi-threaded resource loading
Rémi Verschelde 2023-05-11 10:55:31 +02:00 committed by GitHub
commit fbb1a929a3
18 changed files with 598 additions and 476 deletions
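Since the diff below reworks the threaded-load path end to end, here is a minimal caller-side sketch of the public API it lands on. Signatures are as in resource_loader.h further down; the helper name and the polling loop are illustrative, not part of the PR:

#include "core/io/resource_loader.h"
#include "core/os/os.h"

Ref<Resource> load_scene_in_background(const String &p_path) {
	// p_use_sub_threads = true maps to LOAD_THREAD_DISTRIBUTE internally.
	Error err = ResourceLoader::load_threaded_request(p_path, "", true);
	if (err != OK) {
		return Ref<Resource>();
	}
	float progress = 0.0;
	while (ResourceLoader::load_threaded_get_status(p_path, &progress) == ResourceLoader::THREAD_LOAD_IN_PROGRESS) {
		OS::get_singleton()->delay_usec(1000); // A game would poll across frames instead.
	}
	// Collects the result and releases the user-facing token.
	return ResourceLoader::load_threaded_get(p_path, &err);
}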

View File

@ -445,13 +445,12 @@ Error ResourceLoaderBinary::parse_variant(Variant &r_v) {
WARN_PRINT("Broken external resource! (index out of size)");
r_v = Variant();
} else {
if (external_resources[erindex].cache.is_null()) {
//cache not here yet, wait for it?
if (use_sub_threads) {
Error err;
external_resources.write[erindex].cache = ResourceLoader::load_threaded_get(external_resources[erindex].path, &err);
if (err != OK || external_resources[erindex].cache.is_null()) {
Ref<ResourceLoader::LoadToken> &load_token = external_resources.write[erindex].load_token;
if (load_token.is_valid()) { // If not valid, it's OK since then we know this load accepts broken dependencies.
Error err;
Ref<Resource> res = ResourceLoader::_load_complete(*load_token.ptr(), &err);
if (res.is_null()) {
if (!ResourceLoader::is_cleaning_tasks()) {
if (!ResourceLoader::get_abort_on_missing_resources()) {
ResourceLoader::notify_dependency_error(local_path, external_resources[erindex].path, external_resources[erindex].type);
} else {
@ -459,12 +458,11 @@ Error ResourceLoaderBinary::parse_variant(Variant &r_v) {
ERR_FAIL_V_MSG(error, "Can't load dependency: " + external_resources[erindex].path + ".");
}
}
} else {
r_v = res;
}
}
r_v = external_resources[erindex].cache;
}
} break;
default: {
ERR_FAIL_V(ERR_FILE_CORRUPT);
@ -684,28 +682,13 @@ Error ResourceLoaderBinary::load() {
}
external_resources.write[i].path = path; //remap happens here, not on load because on load it can actually be used for filesystem dock resource remap
if (!use_sub_threads) {
external_resources.write[i].cache = ResourceLoader::load(path, external_resources[i].type);
if (external_resources[i].cache.is_null()) {
if (!ResourceLoader::get_abort_on_missing_resources()) {
ResourceLoader::notify_dependency_error(local_path, path, external_resources[i].type);
} else {
error = ERR_FILE_MISSING_DEPENDENCIES;
ERR_FAIL_V_MSG(error, "Can't load dependency: " + path + ".");
}
}
} else {
Error err = ResourceLoader::load_threaded_request(path, external_resources[i].type, use_sub_threads, ResourceFormatLoader::CACHE_MODE_REUSE, local_path);
if (err != OK) {
if (!ResourceLoader::get_abort_on_missing_resources()) {
ResourceLoader::notify_dependency_error(local_path, path, external_resources[i].type);
} else {
error = ERR_FILE_MISSING_DEPENDENCIES;
ERR_FAIL_V_MSG(error, "Can't load dependency: " + path + ".");
}
external_resources.write[i].load_token = ResourceLoader::_load_start(path, external_resources[i].type, use_sub_threads ? ResourceLoader::LOAD_THREAD_DISTRIBUTE : ResourceLoader::LOAD_THREAD_FROM_CURRENT, ResourceFormatLoader::CACHE_MODE_REUSE);
if (!external_resources[i].load_token.is_valid()) {
if (!ResourceLoader::get_abort_on_missing_resources()) {
ResourceLoader::notify_dependency_error(local_path, path, external_resources[i].type);
} else {
error = ERR_FILE_MISSING_DEPENDENCIES;
ERR_FAIL_V_MSG(error, "Can't load dependency: " + path + ".");
}
}
}

View File

@ -60,7 +60,7 @@ class ResourceLoaderBinary {
String path;
String type;
ResourceUID::ID uid = ResourceUID::INVALID_ID;
Ref<Resource> cache;
Ref<ResourceLoader::LoadToken> load_token;
};
bool using_named_scene_ids = false;
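The cache field becomes a token, so external resources are now resolved with the start/complete pair shown in the .cpp hunks above. Roughly, abridged from the diff with surrounding loader state elided:

// Start: kick off (or join) the dependency load.
Ref<ResourceLoader::LoadToken> token = ResourceLoader::_load_start(
		path, type,
		use_sub_threads ? ResourceLoader::LOAD_THREAD_DISTRIBUTE : ResourceLoader::LOAD_THREAD_FROM_CURRENT,
		ResourceFormatLoader::CACHE_MODE_REUSE);
// Later, complete: blocks (or runs the load inline) until the dependency is ready.
if (token.is_valid()) { // An invalid token means this load accepts broken dependencies.
	Error err = OK;
	Ref<Resource> res = ResourceLoader::_load_complete(*token.ptr(), &err);
}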

View File

@ -202,20 +202,71 @@ void ResourceFormatLoader::_bind_methods() {
///////////////////////////////////
// This should be robust enough to be called redundantly without issues.
void ResourceLoader::LoadToken::clear() {
thread_load_mutex.lock();
WorkerThreadPool::TaskID task_to_await = 0;
if (!local_path.is_empty()) { // Empty is used for the special case where the load task is not registered.
DEV_ASSERT(thread_load_tasks.has(local_path));
ThreadLoadTask &load_task = thread_load_tasks[local_path];
if (!load_task.awaited) {
task_to_await = load_task.task_id;
load_task.awaited = true;
}
thread_load_tasks.erase(local_path);
local_path.clear();
}
if (!user_path.is_empty()) {
DEV_ASSERT(user_load_tokens.has(user_path));
user_load_tokens.erase(user_path);
user_path.clear();
}
thread_load_mutex.unlock();
// If task is unused, await it here, locally, now that the token data is consistent.
if (task_to_await) {
WorkerThreadPool::get_singleton()->wait_for_task_completion(task_to_await);
}
}
ResourceLoader::LoadToken::~LoadToken() {
clear();
}
Ref<Resource> ResourceLoader::_load(const String &p_path, const String &p_original_path, const String &p_type_hint, ResourceFormatLoader::CacheMode p_cache_mode, Error *r_error, bool p_use_sub_threads, float *r_progress) {
bool found = false;
load_nesting++;
if (load_paths_stack.size()) {
thread_load_mutex.lock();
HashMap<String, ThreadLoadTask>::Iterator E = thread_load_tasks.find(load_paths_stack[load_paths_stack.size() - 1]);
if (E) {
E->value.sub_tasks.insert(p_path);
}
thread_load_mutex.unlock();
}
load_paths_stack.push_back(p_path);
// Try all loaders and pick the first match for the type hint
bool found = false;
Ref<Resource> res;
for (int i = 0; i < loader_count; i++) {
if (!loader[i]->recognize_path(p_path, p_type_hint)) {
continue;
}
found = true;
Ref<Resource> res = loader[i]->load(p_path, !p_original_path.is_empty() ? p_original_path : p_path, r_error, p_use_sub_threads, r_progress, p_cache_mode);
if (res.is_null()) {
continue;
res = loader[i]->load(p_path, !p_original_path.is_empty() ? p_original_path : p_path, r_error, p_use_sub_threads, r_progress, p_cache_mode);
if (!res.is_null()) {
break;
}
}
load_paths_stack.resize(load_paths_stack.size() - 1);
load_nesting--;
if (!res.is_null()) {
return res;
}
@ -232,47 +283,60 @@ Ref<Resource> ResourceLoader::_load(const String &p_path, const String &p_origin
void ResourceLoader::_thread_load_function(void *p_userdata) {
ThreadLoadTask &load_task = *(ThreadLoadTask *)p_userdata;
load_task.loader_id = Thread::get_caller_id();
if (load_task.cond_var) {
//this is an actual thread, so wait for OK from semaphore
thread_load_semaphore->wait(); //wait until it's OK to start loading
thread_load_mutex.lock();
caller_task_id = load_task.task_id;
if (cleaning_tasks) {
load_task.status = THREAD_LOAD_FAILED;
thread_load_mutex.unlock();
return;
}
load_task.resource = _load(load_task.remapped_path, load_task.remapped_path != load_task.local_path ? load_task.local_path : String(), load_task.type_hint, load_task.cache_mode, &load_task.error, load_task.use_sub_threads, &load_task.progress);
thread_load_mutex.unlock();
// Thread-safe either way: it's the current thread or a brand new one.
CallQueue *mq_override = nullptr;
if (load_nesting == 0) {
if (!load_task.dependent_path.is_empty()) {
load_paths_stack.push_back(load_task.dependent_path);
}
if (!Thread::is_main_thread()) {
mq_override = memnew(CallQueue);
MessageQueue::set_thread_singleton_override(mq_override);
}
} else {
DEV_ASSERT(load_task.dependent_path.is_empty());
}
// --
Ref<Resource> res = _load(load_task.remapped_path, load_task.remapped_path != load_task.local_path ? load_task.local_path : String(), load_task.type_hint, load_task.cache_mode, &load_task.error, load_task.use_sub_threads, &load_task.progress);
thread_load_mutex.lock();
load_task.resource = res;
load_task.progress = 1.0; //it was fully loaded at this point, so force progress to 1.0
thread_load_mutex->lock();
if (load_task.error != OK) {
load_task.status = THREAD_LOAD_FAILED;
} else {
load_task.status = THREAD_LOAD_LOADED;
}
if (load_task.cond_var) {
if (load_task.start_next && thread_waiting_count > 0) {
thread_waiting_count--;
//thread loading count remains constant, this ends but another one begins
thread_load_semaphore->post();
} else {
thread_loading_count--; //no threads waiting, just reduce loading count
}
print_lt("END: load count: " + itos(thread_loading_count) + " / wait count: " + itos(thread_waiting_count) + " / suspended count: " + itos(thread_suspended_count) + " / active: " + itos(thread_loading_count - thread_suspended_count));
load_task.cond_var->notify_all();
memdelete(load_task.cond_var);
load_task.cond_var = nullptr;
}
if (load_task.resource.is_valid()) {
load_task.resource->set_path(load_task.local_path);
if (load_task.cache_mode != ResourceFormatLoader::CACHE_MODE_IGNORE) {
load_task.resource->set_path(load_task.local_path);
}
if (load_task.xl_remapped) {
load_task.resource->set_as_translation_remapped(true);
}
#ifdef TOOLS_ENABLED
load_task.resource->set_edited(false);
if (timestamp_on_load) {
uint64_t mt = FileAccess::get_modified_time(load_task.remapped_path);
@ -286,7 +350,12 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
}
}
thread_load_mutex->unlock();
thread_load_mutex.unlock();
if (load_nesting == 0 && mq_override) {
memdelete(mq_override);
MessageQueue::set_thread_singleton_override(nullptr);
}
}
static String _validate_local_path(const String &p_path) {
@ -299,91 +368,127 @@ static String _validate_local_path(const String &p_path) {
return ProjectSettings::get_singleton()->localize_path(p_path);
}
}
Error ResourceLoader::load_threaded_request(const String &p_path, const String &p_type_hint, bool p_use_sub_threads, ResourceFormatLoader::CacheMode p_cache_mode, const String &p_source_resource) {
String local_path = _validate_local_path(p_path);
thread_load_mutex->lock();
if (!p_source_resource.is_empty()) {
//must be loading from this resource
if (!thread_load_tasks.has(p_source_resource)) {
thread_load_mutex->unlock();
ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "There is no thread loading source resource '" + p_source_resource + "'.");
}
//must not be already added as a sub-task
if (thread_load_tasks[p_source_resource].sub_tasks.has(local_path)) {
thread_load_mutex->unlock();
ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Thread loading source resource '" + p_source_resource + "' already is loading '" + local_path + "'.");
}
}
if (thread_load_tasks.has(local_path)) {
thread_load_tasks[local_path].requests++;
if (!p_source_resource.is_empty()) {
thread_load_tasks[p_source_resource].sub_tasks.insert(local_path);
}
thread_load_mutex->unlock();
Error ResourceLoader::load_threaded_request(const String &p_path, const String &p_type_hint, bool p_use_sub_threads, ResourceFormatLoader::CacheMode p_cache_mode) {
thread_load_mutex.lock();
if (user_load_tokens.has(p_path)) {
print_verbose("load_threaded_request(): Another threaded load for resource path '" + p_path + "' has been initiated. Not an error.");
user_load_tokens[p_path]->reference(); // Additional request.
thread_load_mutex.unlock();
return OK;
}
user_load_tokens[p_path] = nullptr;
thread_load_mutex.unlock();
Ref<ResourceLoader::LoadToken> token = _load_start(p_path, p_type_hint, p_use_sub_threads ? LOAD_THREAD_DISTRIBUTE : LOAD_THREAD_SPAWN_SINGLE, p_cache_mode);
if (token.is_valid()) {
thread_load_mutex.lock();
token->user_path = p_path;
token->reference(); // First request.
user_load_tokens[p_path] = token.ptr();
print_lt("REQUEST: user load tokens: " + itos(user_load_tokens.size()));
thread_load_mutex.unlock();
return OK;
} else {
return FAILED;
}
}
Ref<Resource> ResourceLoader::load(const String &p_path, const String &p_type_hint, ResourceFormatLoader::CacheMode p_cache_mode, Error *r_error) {
if (r_error) {
*r_error = OK;
}
Ref<LoadToken> load_token = _load_start(p_path, p_type_hint, LOAD_THREAD_FROM_CURRENT, p_cache_mode);
if (!load_token.is_valid()) {
if (r_error) {
*r_error = FAILED;
}
return Ref<Resource>();
}
Ref<Resource> res = _load_complete(*load_token.ptr(), r_error);
return res;
}
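// Note: with this change, the synchronous load() is just the token API driven
// inline. _load_start(..., LOAD_THREAD_FROM_CURRENT, ...) runs the load task on
// the calling thread and _load_complete() collects its result, so the sync and
// async paths now share one implementation instead of two divergent ones.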
Ref<ResourceLoader::LoadToken> ResourceLoader::_load_start(const String &p_path, const String &p_type_hint, LoadThreadMode p_thread_mode, ResourceFormatLoader::CacheMode p_cache_mode) {
String local_path = _validate_local_path(p_path);
Ref<LoadToken> load_token;
bool must_not_register = false;
ThreadLoadTask unregistered_load_task; // Once set, must be valid up to the call to do the load.
ThreadLoadTask *load_task_ptr = nullptr;
bool run_on_current_thread = false;
{
MutexLock thread_load_lock(thread_load_mutex);
if (thread_load_tasks.has(local_path)) {
load_token = Ref<LoadToken>(thread_load_tasks[local_path].load_token);
if (!load_token.is_valid()) {
// The token is dying (reached 0 on another thread).
// Ensure it's killed now so the path can be safely reused right away.
thread_load_tasks[local_path].load_token->clear();
} else {
if (p_cache_mode != ResourceFormatLoader::CACHE_MODE_IGNORE) {
return load_token;
}
}
}
load_token.instantiate();
load_token->local_path = local_path;
//create load task
{
ThreadLoadTask load_task;
load_task.requests = 1;
load_task.remapped_path = _path_remap(local_path, &load_task.xl_remapped);
load_task.local_path = local_path;
load_task.type_hint = p_type_hint;
load_task.cache_mode = p_cache_mode;
load_task.use_sub_threads = p_use_sub_threads;
{ //must check if resource is already loaded before attempting to load it in a thread
if (load_task.loader_id == Thread::get_caller_id()) {
thread_load_mutex->unlock();
ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Attempted to load a resource already being loaded from this thread, cyclic reference?");
load_task.remapped_path = _path_remap(local_path, &load_task.xl_remapped);
load_task.load_token = load_token.ptr();
load_task.local_path = local_path;
load_task.type_hint = p_type_hint;
load_task.cache_mode = p_cache_mode;
load_task.use_sub_threads = p_thread_mode == LOAD_THREAD_DISTRIBUTE;
if (p_cache_mode != ResourceFormatLoader::CACHE_MODE_IGNORE) {
Ref<Resource> existing = ResourceCache::get_ref(local_path);
if (existing.is_valid()) {
//referencing is fine
load_task.resource = existing;
load_task.status = THREAD_LOAD_LOADED;
load_task.progress = 1.0;
thread_load_tasks[local_path] = load_task;
return load_token;
}
}
Ref<Resource> existing = ResourceCache::get_ref(local_path);
if (existing.is_valid()) {
//referencing is fine
load_task.resource = existing;
load_task.status = THREAD_LOAD_LOADED;
load_task.progress = 1.0;
// If we want to ignore the cache, but there's another task already loading this path, we can't add this one to the map, and it also has to finish synchronously, unconditionally.
must_not_register = thread_load_tasks.has(local_path) && p_cache_mode == ResourceFormatLoader::CACHE_MODE_IGNORE;
if (must_not_register) {
load_token->local_path.clear();
unregistered_load_task = load_task;
} else {
thread_load_tasks[local_path] = load_task;
}
load_task_ptr = must_not_register ? &unregistered_load_task : &thread_load_tasks[local_path];
}
if (!p_source_resource.is_empty()) {
thread_load_tasks[p_source_resource].sub_tasks.insert(local_path);
}
run_on_current_thread = must_not_register || p_thread_mode == LOAD_THREAD_FROM_CURRENT;
thread_load_tasks[local_path] = load_task;
}
ThreadLoadTask &load_task = thread_load_tasks[local_path];
if (load_task.resource.is_null()) { //needs to be loaded in thread
load_task.cond_var = memnew(ConditionVariable);
if (thread_loading_count < thread_load_max) {
thread_loading_count++;
thread_load_semaphore->post(); //we have free threads, so allow one
if (run_on_current_thread) {
load_task_ptr->thread_id = Thread::get_caller_id();
if (must_not_register) {
load_token->res_if_unregistered = load_task_ptr->resource;
}
} else {
thread_waiting_count++;
load_task_ptr->task_id = WorkerThreadPool::get_singleton()->add_native_task(&ResourceLoader::_thread_load_function, load_task_ptr);
}
print_lt("REQUEST: load count: " + itos(thread_loading_count) + " / wait count: " + itos(thread_waiting_count) + " / suspended count: " + itos(thread_suspended_count) + " / active: " + itos(thread_loading_count - thread_suspended_count));
load_task.thread = memnew(Thread);
load_task.thread->start(_thread_load_function, &thread_load_tasks[local_path]);
load_task.loader_id = load_task.thread->get_id();
}
thread_load_mutex->unlock();
if (run_on_current_thread) {
_thread_load_function(load_task_ptr);
}
return OK;
return load_token;
}
float ResourceLoader::_dependency_get_progress(const String &p_path) {
@ -409,13 +514,22 @@ float ResourceLoader::_dependency_get_progress(const String &p_path) {
}
ResourceLoader::ThreadLoadStatus ResourceLoader::load_threaded_get_status(const String &p_path, float *r_progress) {
String local_path = _validate_local_path(p_path);
MutexLock thread_load_lock(thread_load_mutex);
thread_load_mutex->lock();
if (!thread_load_tasks.has(local_path)) {
thread_load_mutex->unlock();
if (!user_load_tokens.has(p_path)) {
print_verbose("load_threaded_get_status(): No threaded load for resource path '" + p_path + "' has been initiated or its result has already been collected.");
return THREAD_LOAD_INVALID_RESOURCE;
}
String local_path = _validate_local_path(p_path);
if (!thread_load_tasks.has(local_path)) {
#ifdef DEV_ENABLED
CRASH_NOW();
#endif
// On non-dev, be defensive and at least avoid crashing (at this point at least).
return THREAD_LOAD_INVALID_RESOURCE;
}
ThreadLoadTask &load_task = thread_load_tasks[local_path];
ThreadLoadStatus status;
status = load_task.status;
@ -423,198 +537,120 @@ ResourceLoader::ThreadLoadStatus ResourceLoader::load_threaded_get_status(const
*r_progress = _dependency_get_progress(local_path);
}
thread_load_mutex->unlock();
return status;
}
Ref<Resource> ResourceLoader::load_threaded_get(const String &p_path, Error *r_error) {
String local_path = _validate_local_path(p_path);
MutexLock thread_load_lock(*thread_load_mutex);
if (!thread_load_tasks.has(local_path)) {
if (r_error) {
*r_error = ERR_INVALID_PARAMETER;
}
return Ref<Resource>();
if (r_error) {
*r_error = OK;
}
ThreadLoadTask &load_task = thread_load_tasks[local_path];
Ref<Resource> res;
{
MutexLock thread_load_lock(thread_load_mutex);
if (load_task.status == THREAD_LOAD_IN_PROGRESS) {
if (load_task.loader_id == Thread::get_caller_id()) {
// Load is in progress, but this very thread is the one in charge.
// That means this is a cyclic load.
if (r_error) {
*r_error = ERR_BUSY;
}
return Ref<Resource>();
} else if (!load_task.cond_var) {
// Load is in progress, but a condition variable was never created for it.
// That happens when a load has been initiated with subthreads disabled,
// but now another load thread needs to interact with this one (either
// because of subthreads being used this time, or because it's simply a
// threaded load running on a different thread).
// Since we want to be notified when the load ends, we must create the
// condition variable now.
load_task.cond_var = memnew(ConditionVariable);
}
}
//cond var still exists, meaning it's still loading, request poll
if (load_task.cond_var) {
{
// As we got a cond var, this means we are going to have to wait
// until the sub-resource is done loading
//
// As this thread will become 'blocked' we should "exchange" its
// active status with a waiting one, to ensure load continues.
//
// This ensures loading is never blocked and that is also within
// the maximum number of active threads.
if (thread_waiting_count > 0) {
thread_waiting_count--;
thread_loading_count++;
thread_load_semaphore->post();
load_task.start_next = false; //do not start next since we are doing it here
}
thread_suspended_count++;
print_lt("GET: load count: " + itos(thread_loading_count) + " / wait count: " + itos(thread_waiting_count) + " / suspended count: " + itos(thread_suspended_count) + " / active: " + itos(thread_loading_count - thread_suspended_count));
}
bool still_valid = true;
bool was_thread = load_task.thread;
do {
load_task.cond_var->wait(thread_load_lock);
if (!thread_load_tasks.has(local_path)) { //may have been erased during unlock and this was always an invalid call
still_valid = false;
break;
}
} while (load_task.cond_var); // In case of spurious wakeup.
if (was_thread) {
thread_suspended_count--;
}
if (!still_valid) {
if (!user_load_tokens.has(p_path)) {
print_verbose("load_threaded_get(): No threaded load for resource path '" + p_path + "' has been initiated or its result has already been collected.");
if (r_error) {
*r_error = ERR_INVALID_PARAMETER;
}
return Ref<Resource>();
}
}
Ref<Resource> resource = load_task.resource;
if (r_error) {
*r_error = load_task.error;
}
load_task.requests--;
if (load_task.requests == 0) {
if (load_task.thread) { //thread may not have been used
load_task.thread->wait_to_finish();
memdelete(load_task.thread);
LoadToken *load_token = user_load_tokens[p_path];
if (!load_token) {
// This happens if requested from one thread and rapidly queried from another.
if (r_error) {
*r_error = ERR_BUSY;
}
return Ref<Resource>();
}
res = _load_complete_inner(*load_token, r_error, thread_load_lock);
if (load_token->unreference()) {
memdelete(load_token);
}
thread_load_tasks.erase(local_path);
}
return resource;
print_lt("GET: user load tokens: " + itos(user_load_tokens.size()));
return res;
}
Ref<Resource> ResourceLoader::load(const String &p_path, const String &p_type_hint, ResourceFormatLoader::CacheMode p_cache_mode, Error *r_error) {
Ref<Resource> ResourceLoader::_load_complete(LoadToken &p_load_token, Error *r_error) {
MutexLock thread_load_lock(thread_load_mutex);
return _load_complete_inner(p_load_token, r_error, thread_load_lock);
}
Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Error *r_error, MutexLock<SafeBinaryMutex<BINARY_MUTEX_TAG>> &p_thread_load_lock) {
if (r_error) {
*r_error = ERR_CANT_OPEN;
*r_error = OK;
}
String local_path = _validate_local_path(p_path);
if (p_cache_mode != ResourceFormatLoader::CACHE_MODE_IGNORE) {
thread_load_mutex->lock();
//Is it already being loaded? poll until done
if (thread_load_tasks.has(local_path)) {
Error err = load_threaded_request(p_path, p_type_hint);
if (err != OK) {
if (r_error) {
*r_error = err;
}
thread_load_mutex->unlock();
return Ref<Resource>();
}
thread_load_mutex->unlock();
return load_threaded_get(p_path, r_error);
}
//Is it cached?
Ref<Resource> existing = ResourceCache::get_ref(local_path);
if (existing.is_valid()) {
thread_load_mutex->unlock();
if (!p_load_token.local_path.is_empty()) {
if (!thread_load_tasks.has(p_load_token.local_path)) {
#ifdef DEV_ENABLED
CRASH_NOW();
#endif
// On non-dev, be defensive and at least avoid crashing (at this point at least).
if (r_error) {
*r_error = OK;
*r_error = ERR_BUG;
}
return existing; //use cached
}
//load using task (but this thread)
ThreadLoadTask load_task;
load_task.requests = 1;
load_task.local_path = local_path;
load_task.remapped_path = _path_remap(local_path, &load_task.xl_remapped);
load_task.type_hint = p_type_hint;
load_task.cache_mode = p_cache_mode; //ignore
load_task.loader_id = Thread::get_caller_id();
thread_load_tasks[local_path] = load_task;
thread_load_mutex->unlock();
_thread_load_function(&thread_load_tasks[local_path]);
return load_threaded_get(p_path, r_error);
} else {
bool xl_remapped = false;
String path = _path_remap(local_path, &xl_remapped);
if (path.is_empty()) {
ERR_FAIL_V_MSG(Ref<Resource>(), "Remapping '" + local_path + "' failed.");
}
print_verbose("Loading resource: " + path);
float p;
Ref<Resource> res = _load(path, local_path, p_type_hint, p_cache_mode, r_error, false, &p);
if (res.is_null()) {
print_verbose("Failed loading resource: " + path);
return Ref<Resource>();
}
if (xl_remapped) {
res->set_as_translation_remapped(true);
ThreadLoadTask &load_task = thread_load_tasks[p_load_token.local_path];
if (load_task.status == THREAD_LOAD_IN_PROGRESS) {
DEV_ASSERT((load_task.task_id == 0) != (load_task.thread_id == 0));
if ((load_task.task_id != 0 && load_task.task_id == caller_task_id) ||
(load_task.thread_id != 0 && load_task.thread_id == Thread::get_caller_id())) {
// Load is in progress, but this very thread is the one in charge.
// That means this is a cyclic load.
if (r_error) {
*r_error = ERR_BUSY;
}
return Ref<Resource>();
}
if (load_task.task_id != 0 && !load_task.awaited) {
// Loading thread is in the worker pool and still not awaited.
load_task.awaited = true;
thread_load_mutex.unlock();
WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
thread_load_mutex.lock();
} else {
// The loading thread is the main or a user thread, or it's in the worker pool but already awaited by some other thread.
if (!load_task.cond_var) {
load_task.cond_var = memnew(ConditionVariable);
}
do {
load_task.cond_var->wait(p_thread_load_lock);
DEV_ASSERT(thread_load_tasks.has(p_load_token.local_path) && p_load_token.get_reference_count());
} while (load_task.cond_var);
}
}
#ifdef TOOLS_ENABLED
res->set_edited(false);
if (timestamp_on_load) {
uint64_t mt = FileAccess::get_modified_time(path);
//printf("mt %s: %lli\n",remapped_path.utf8().get_data(),mt);
res->set_last_modified_time(mt);
if (cleaning_tasks) {
load_task.resource = Ref<Resource>();
load_task.error = FAILED;
}
#endif
return res;
Ref<Resource> resource = load_task.resource;
if (r_error) {
*r_error = load_task.error;
}
return resource;
} else {
// Special case of an unregistered task.
// The resource should have been loaded by now.
Ref<Resource> resource = p_load_token.res_if_unregistered;
if (!resource.is_valid()) {
if (r_error) {
*r_error = FAILED;
}
}
return resource;
}
}
@ -958,32 +994,42 @@ void ResourceLoader::clear_translation_remaps() {
}
void ResourceLoader::clear_thread_load_tasks() {
thread_load_mutex->lock();
// Bring the thing down as quickly as possible without causing deadlocks or leaks.
for (KeyValue<String, ResourceLoader::ThreadLoadTask> &E : thread_load_tasks) {
switch (E.value.status) {
case ResourceLoader::ThreadLoadStatus::THREAD_LOAD_LOADED: {
E.value.resource = Ref<Resource>();
} break;
thread_load_mutex.lock();
cleaning_tasks = true;
case ResourceLoader::ThreadLoadStatus::THREAD_LOAD_IN_PROGRESS: {
if (E.value.thread != nullptr) {
E.value.thread->wait_to_finish();
memdelete(E.value.thread);
E.value.thread = nullptr;
while (true) {
bool none_running = true;
if (thread_load_tasks.size()) {
for (KeyValue<String, ResourceLoader::ThreadLoadTask> &E : thread_load_tasks) {
if (E.value.status == THREAD_LOAD_IN_PROGRESS) {
if (E.value.cond_var) {
E.value.cond_var->notify_all();
memdelete(E.value.cond_var);
E.value.cond_var = nullptr;
}
none_running = false;
}
E.value.resource = Ref<Resource>();
} break;
case ResourceLoader::ThreadLoadStatus::THREAD_LOAD_FAILED:
default: {
// do nothing
}
}
if (none_running) {
break;
}
thread_load_mutex.unlock();
OS::get_singleton()->delay_usec(1000);
thread_load_mutex.lock();
}
for (KeyValue<String, LoadToken *> &E : user_load_tokens) {
memdelete(E.value);
}
user_load_tokens.clear();
thread_load_tasks.clear();
thread_load_mutex->unlock();
cleaning_tasks = false;
thread_load_mutex.unlock();
}
void ResourceLoader::load_path_remaps() {
@ -1080,20 +1126,14 @@ void ResourceLoader::remove_custom_loaders() {
}
}
void ResourceLoader::initialize() {
thread_load_mutex = memnew(SafeBinaryMutex<BINARY_MUTEX_TAG>);
thread_load_max = OS::get_singleton()->get_processor_count();
thread_loading_count = 0;
thread_waiting_count = 0;
thread_suspended_count = 0;
thread_load_semaphore = memnew(Semaphore);
bool ResourceLoader::is_cleaning_tasks() {
MutexLock lock(thread_load_mutex);
return cleaning_tasks;
}
void ResourceLoader::finalize() {
clear_thread_load_tasks();
memdelete(thread_load_mutex);
memdelete(thread_load_semaphore);
}
void ResourceLoader::initialize() {}
void ResourceLoader::finalize() {}
ResourceLoadErrorNotify ResourceLoader::err_notify = nullptr;
void *ResourceLoader::err_notify_ud = nullptr;
@ -1105,16 +1145,17 @@ bool ResourceLoader::create_missing_resources_if_class_unavailable = false;
bool ResourceLoader::abort_on_missing_resource = true;
bool ResourceLoader::timestamp_on_load = false;
thread_local int ResourceLoader::load_nesting = 0;
thread_local WorkerThreadPool::TaskID ResourceLoader::caller_task_id = 0;
thread_local Vector<String> ResourceLoader::load_paths_stack;
template <>
thread_local uint32_t SafeBinaryMutex<ResourceLoader::BINARY_MUTEX_TAG>::count = 0;
SafeBinaryMutex<ResourceLoader::BINARY_MUTEX_TAG> *ResourceLoader::thread_load_mutex = nullptr;
SafeBinaryMutex<ResourceLoader::BINARY_MUTEX_TAG> ResourceLoader::thread_load_mutex;
HashMap<String, ResourceLoader::ThreadLoadTask> ResourceLoader::thread_load_tasks;
Semaphore *ResourceLoader::thread_load_semaphore = nullptr;
bool ResourceLoader::cleaning_tasks = false;
int ResourceLoader::thread_loading_count = 0;
int ResourceLoader::thread_waiting_count = 0;
int ResourceLoader::thread_suspended_count = 0;
int ResourceLoader::thread_load_max = 0;
HashMap<String, ResourceLoader::LoadToken *> ResourceLoader::user_load_tokens;
SelfList<Resource>::List ResourceLoader::remapped_list;
HashMap<String, Vector<String>> ResourceLoader::translation_remaps;

View File

@ -34,6 +34,7 @@
#include "core/io/resource.h"
#include "core/object/gdvirtual.gen.inc"
#include "core/object/script_language.h"
#include "core/object/worker_thread_pool.h"
#include "core/os/semaphore.h"
#include "core/os/thread.h"
@ -107,9 +108,30 @@ public:
THREAD_LOAD_LOADED
};
enum LoadThreadMode {
LOAD_THREAD_FROM_CURRENT,
LOAD_THREAD_SPAWN_SINGLE,
LOAD_THREAD_DISTRIBUTE,
};
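// Mode behavior, as implemented in _load_start() in the .cpp above (this summary
// is editorial; the enumerators themselves are the PR's):
// - LOAD_THREAD_FROM_CURRENT: the load runs synchronously on the calling thread.
// - LOAD_THREAD_SPAWN_SINGLE: the load runs as a single WorkerThreadPool task.
// - LOAD_THREAD_DISTRIBUTE: like SPAWN_SINGLE, but use_sub_threads is set so
//   dependencies may be spread across threads as well.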
struct LoadToken : public RefCounted {
String local_path;
String user_path;
Ref<Resource> res_if_unregistered;
void clear();
virtual ~LoadToken();
};
static const int BINARY_MUTEX_TAG = 1;
static Ref<LoadToken> _load_start(const String &p_path, const String &p_type_hint, LoadThreadMode p_thread_mode, ResourceFormatLoader::CacheMode p_cache_mode);
static Ref<Resource> _load_complete(LoadToken &p_load_token, Error *r_error);
private:
static Ref<Resource> _load_complete_inner(LoadToken &p_load_token, Error *r_error, MutexLock<SafeBinaryMutex<BINARY_MUTEX_TAG>> &p_thread_load_lock);
static Ref<ResourceFormatLoader> loader[MAX_LOADERS];
static int loader_count;
static bool timestamp_on_load;
@ -129,8 +151,7 @@ private:
static SelfList<Resource>::List remapped_list;
friend class ResourceFormatImporter;
friend class ResourceInteractiveLoader;
// Internal load function.
static Ref<Resource> _load(const String &p_path, const String &p_original_path, const String &p_type_hint, ResourceFormatLoader::CacheMode p_cache_mode, Error *r_error, bool p_use_sub_threads, float *r_progress);
static ResourceLoadedCallback _loaded_callback;
@ -138,11 +159,14 @@ private:
static Ref<ResourceFormatLoader> _find_custom_resource_format_loader(String path);
struct ThreadLoadTask {
Thread *thread = nullptr;
Thread::ID loader_id = 0;
ConditionVariable *cond_var = nullptr;
WorkerThreadPool::TaskID task_id = 0; // Used if run on a worker thread from the pool.
Thread::ID thread_id = 0; // Used if running on a user thread (e.g., simple non-threaded load).
bool awaited = false; // If it's in the pool, this helps avoid awaiting from more than one dependent thread.
ConditionVariable *cond_var = nullptr; // If not in the worker pool, or already awaiting, this is used as a secondary awaiting mechanism.
LoadToken *load_token = nullptr;
String local_path;
String remapped_path;
String dependent_path;
String type_hint;
float progress = 0.0;
ThreadLoadStatus status = THREAD_LOAD_IN_PROGRESS;
@ -151,27 +175,29 @@ private:
Ref<Resource> resource;
bool xl_remapped = false;
bool use_sub_threads = false;
bool start_next = true;
int requests = 0;
HashSet<String> sub_tasks;
};
static void _thread_load_function(void *p_userdata);
static SafeBinaryMutex<BINARY_MUTEX_TAG> *thread_load_mutex;
static thread_local int load_nesting;
static thread_local WorkerThreadPool::TaskID caller_task_id;
static thread_local Vector<String> load_paths_stack;
static SafeBinaryMutex<BINARY_MUTEX_TAG> thread_load_mutex;
static HashMap<String, ThreadLoadTask> thread_load_tasks;
static Semaphore *thread_load_semaphore;
static int thread_waiting_count;
static int thread_loading_count;
static int thread_suspended_count;
static int thread_load_max;
static bool cleaning_tasks;
static HashMap<String, LoadToken *> user_load_tokens;
static float _dependency_get_progress(const String &p_path);
public:
static Error load_threaded_request(const String &p_path, const String &p_type_hint = "", bool p_use_sub_threads = false, ResourceFormatLoader::CacheMode p_cache_mode = ResourceFormatLoader::CACHE_MODE_REUSE, const String &p_source_resource = String());
static Error load_threaded_request(const String &p_path, const String &p_type_hint = "", bool p_use_sub_threads = false, ResourceFormatLoader::CacheMode p_cache_mode = ResourceFormatLoader::CACHE_MODE_REUSE);
static ThreadLoadStatus load_threaded_get_status(const String &p_path, float *r_progress = nullptr);
static Ref<Resource> load_threaded_get(const String &p_path, Error *r_error = nullptr);
static bool is_within_load() { return load_nesting > 0; };
static Ref<Resource> load(const String &p_path, const String &p_type_hint = "", ResourceFormatLoader::CacheMode p_cache_mode = ResourceFormatLoader::CACHE_MODE_REUSE, Error *r_error = nullptr);
static bool exists(const String &p_path, const String &p_type_hint = "");
@ -237,6 +263,8 @@ public:
static void set_create_missing_resources_if_class_unavailable(bool p_enable);
_FORCE_INLINE_ static bool is_creating_missing_resources_if_class_unavailable_enabled() { return create_missing_resources_if_class_unavailable; }
static bool is_cleaning_tasks();
static void initialize();
static void finalize();
};

View File

@ -35,14 +35,23 @@
#include "core/object/class_db.h"
#include "core/object/script_language.h"
#define LOCK_MUTEX \
if (this != MessageQueue::thread_singleton) { \
mutex.lock(); \
}
#define UNLOCK_MUTEX \
if (this != MessageQueue::thread_singleton) { \
mutex.unlock(); \
}
void CallQueue::_add_page() {
if (pages_used == page_messages.size()) {
if (pages_used == page_bytes.size()) {
pages.push_back(allocator->alloc());
page_messages.push_back(0);
page_bytes.push_back(0);
}
page_messages[pages_used] = 0;
page_bytes[pages_used] = 0;
pages_used++;
page_offset = 0;
}
Error CallQueue::push_callp(ObjectID p_id, const StringName &p_method, const Variant **p_args, int p_argcount, bool p_show_error) {
@ -66,15 +75,15 @@ Error CallQueue::push_callablep(const Callable &p_callable, const Variant **p_ar
ERR_FAIL_COND_V_MSG(room_needed > uint32_t(PAGE_SIZE_BYTES), ERR_INVALID_PARAMETER, "Message is too large to fit on a page (" + itos(PAGE_SIZE_BYTES) + " bytes), consider passing fewer arguments.");
mutex.lock();
LOCK_MUTEX;
_ensure_first_page();
if ((page_offset + room_needed) > uint32_t(PAGE_SIZE_BYTES)) {
if ((page_bytes[pages_used - 1] + room_needed) > uint32_t(PAGE_SIZE_BYTES)) {
if (pages_used == max_pages) {
ERR_PRINT("Failed method: " + p_callable + ". Message queue out of memory. " + error_text);
statistics();
mutex.unlock();
UNLOCK_MUTEX;
return ERR_OUT_OF_MEMORY;
}
_add_page();
@ -82,7 +91,7 @@ Error CallQueue::push_callablep(const Callable &p_callable, const Variant **p_ar
Page *page = pages[pages_used - 1];
uint8_t *buffer_end = &page->data[page_offset];
uint8_t *buffer_end = &page->data[page_bytes[pages_used - 1]];
Message *msg = memnew_placement(buffer_end, Message);
msg->args = p_argcount;
@ -104,21 +113,20 @@ Error CallQueue::push_callablep(const Callable &p_callable, const Variant **p_ar
*v = *p_args[i];
}
page_messages[pages_used - 1]++;
page_offset += room_needed;
page_bytes[pages_used - 1] += room_needed;
mutex.unlock();
UNLOCK_MUTEX;
return OK;
}
Error CallQueue::push_set(ObjectID p_id, const StringName &p_prop, const Variant &p_value) {
mutex.lock();
LOCK_MUTEX;
uint32_t room_needed = sizeof(Message) + sizeof(Variant);
_ensure_first_page();
if ((page_offset + room_needed) > uint32_t(PAGE_SIZE_BYTES)) {
if ((page_bytes[pages_used - 1] + room_needed) > uint32_t(PAGE_SIZE_BYTES)) {
if (pages_used == max_pages) {
String type;
if (ObjectDB::get_instance(p_id)) {
@ -127,14 +135,14 @@ Error CallQueue::push_set(ObjectID p_id, const StringName &p_prop, const Variant
ERR_PRINT("Failed set: " + type + ":" + p_prop + " target ID: " + itos(p_id) + ". Message queue out of memory. " + error_text);
statistics();
mutex.unlock();
UNLOCK_MUTEX;
return ERR_OUT_OF_MEMORY;
}
_add_page();
}
Page *page = pages[pages_used - 1];
uint8_t *buffer_end = &page->data[page_offset];
uint8_t *buffer_end = &page->data[page_bytes[pages_used - 1]];
Message *msg = memnew_placement(buffer_end, Message);
msg->args = 1;
@ -146,32 +154,31 @@ Error CallQueue::push_set(ObjectID p_id, const StringName &p_prop, const Variant
Variant *v = memnew_placement(buffer_end, Variant);
*v = p_value;
page_messages[pages_used - 1]++;
page_offset += room_needed;
mutex.unlock();
page_bytes[pages_used - 1] += room_needed;
UNLOCK_MUTEX;
return OK;
}
Error CallQueue::push_notification(ObjectID p_id, int p_notification) {
ERR_FAIL_COND_V(p_notification < 0, ERR_INVALID_PARAMETER);
mutex.lock();
LOCK_MUTEX;
uint32_t room_needed = sizeof(Message);
_ensure_first_page();
if ((page_offset + room_needed) > uint32_t(PAGE_SIZE_BYTES)) {
if ((page_bytes[pages_used - 1] + room_needed) > uint32_t(PAGE_SIZE_BYTES)) {
if (pages_used == max_pages) {
ERR_PRINT("Failed notification: " + itos(p_notification) + " target ID: " + itos(p_id) + ". Message queue out of memory. " + error_text);
statistics();
mutex.unlock();
UNLOCK_MUTEX;
return ERR_OUT_OF_MEMORY;
}
_add_page();
}
Page *page = pages[pages_used - 1];
uint8_t *buffer_end = &page->data[page_offset];
uint8_t *buffer_end = &page->data[page_bytes[pages_used - 1]];
Message *msg = memnew_placement(buffer_end, Message);
@ -180,9 +187,8 @@ Error CallQueue::push_notification(ObjectID p_id, int p_notification) {
//msg->target;
msg->notification = p_notification;
page_messages[pages_used - 1]++;
page_offset += room_needed;
mutex.unlock();
page_bytes[pages_used - 1] += room_needed;
UNLOCK_MUTEX;
return OK;
}
@ -205,26 +211,77 @@ void CallQueue::_call_function(const Callable &p_callable, const Variant *p_args
}
Error CallQueue::flush() {
mutex.lock();
LOCK_MUTEX;
// Non-main thread queues are not meant to be flushed; they are appended to the main one.
if (this != MessageQueue::main_singleton) {
if (pages.size() == 0) {
return OK;
}
CallQueue *mq = MessageQueue::main_singleton;
DEV_ASSERT(!mq->allocator_is_custom && !allocator_is_custom); // Transferring pages is only safe if using the same allocator parameters.
mq->mutex.lock();
// Here we're transferring the data from this queue to the main one.
// However, it's very unlikely that big amounts of messages will be queued here,
// so PagedArray/Pool would be overkill. Also, in most cases the data will fit in
// an already existing page of the main queue.
// Let's see if our first (likely only) page fits the current target queue page.
uint32_t src_page = 0;
{
if (mq->pages_used) {
uint32_t dst_page = mq->pages_used - 1;
uint32_t dst_offset = mq->page_bytes[dst_page];
if (dst_offset + page_bytes[0] < uint32_t(PAGE_SIZE_BYTES)) {
memcpy(mq->pages[dst_page] + dst_offset, pages[0], page_bytes[0]);
src_page++;
}
}
}
// Any other possibly existing source page needs to be added.
if (mq->pages_used + (pages_used - src_page) > mq->max_pages) {
ERR_PRINT("Failed appending thread queue. Message queue out of memory. " + mq->error_text);
mq->statistics();
mq->mutex.unlock();
return ERR_OUT_OF_MEMORY;
}
for (; src_page < pages_used; src_page++) {
mq->_add_page();
memcpy(mq->pages[mq->pages_used - 1], pages[src_page], page_bytes[src_page]);
mq->page_bytes[mq->pages_used - 1] = page_bytes[src_page];
}
mq->mutex.unlock();
page_bytes[0] = 0;
pages_used = 1;
return OK;
}
if (pages.size() == 0) {
// Never allocated
mutex.unlock();
UNLOCK_MUTEX;
return OK; // Do nothing.
}
if (flushing) {
mutex.unlock();
UNLOCK_MUTEX;
return ERR_BUSY;
}
flushing = true;
uint32_t i = 0;
uint32_t j = 0;
uint32_t offset = 0;
while (i < pages_used && j < page_messages[i]) {
while (i < pages_used && offset < page_bytes[i]) {
Page *page = pages[i];
//lock on each iteration, so a call can re-add itself to the message queue
@ -241,7 +298,7 @@ Error CallQueue::flush() {
Object *target = message->callable.get_object();
mutex.unlock();
UNLOCK_MUTEX;
switch (message->type & FLAG_MASK) {
case TYPE_CALL: {
@ -272,35 +329,32 @@ Error CallQueue::flush() {
message->~Message();
mutex.lock();
j++;
if (j == page_messages[i]) {
j = 0;
LOCK_MUTEX;
if (offset == page_bytes[i]) {
i++;
offset = 0;
}
}
page_messages[0] = 0;
page_offset = 0;
page_bytes[0] = 0;
pages_used = 1;
flushing = false;
mutex.unlock();
UNLOCK_MUTEX;
return OK;
}
void CallQueue::clear() {
mutex.lock();
LOCK_MUTEX;
if (pages.size() == 0) {
mutex.unlock();
UNLOCK_MUTEX;
return; // Nothing to clear.
}
for (uint32_t i = 0; i < pages_used; i++) {
uint32_t offset = 0;
for (uint32_t j = 0; j < page_messages[i]; j++) {
while (offset < page_bytes[i]) {
Page *page = pages[i];
//lock on each iteration, so a call can re-add itself to the message queue
@ -312,7 +366,6 @@ void CallQueue::clear() {
advance += sizeof(Variant) * message->args;
}
//pre-advance so this function is reentrant
offset += advance;
if ((message->type & FLAG_MASK) != TYPE_NOTIFICATION) {
@ -327,14 +380,13 @@ void CallQueue::clear() {
}
pages_used = 1;
page_offset = 0;
page_messages[0] = 0;
page_bytes[0] = 0;
mutex.unlock();
UNLOCK_MUTEX;
}
void CallQueue::statistics() {
mutex.lock();
LOCK_MUTEX;
HashMap<StringName, int> set_count;
HashMap<int, int> notify_count;
HashMap<Callable, int> call_count;
@ -342,7 +394,7 @@ void CallQueue::statistics() {
for (uint32_t i = 0; i < pages_used; i++) {
uint32_t offset = 0;
for (uint32_t j = 0; j < page_messages[i]; j++) {
while (offset < page_bytes[i]) {
Page *page = pages[i];
//lock on each iteration, so a call can re-add itself to the message queue
@ -397,7 +449,6 @@ void CallQueue::statistics() {
null_count++;
}
//pre-advance so this function is reentrant
offset += advance;
if ((message->type & FLAG_MASK) != TYPE_NOTIFICATION) {
@ -426,7 +477,7 @@ void CallQueue::statistics() {
print_line("NOTIFY " + itos(E.key) + ": " + itos(E.value));
}
mutex.unlock();
UNLOCK_MUTEX;
}
bool CallQueue::is_flushing() const {
@ -437,7 +488,7 @@ bool CallQueue::has_messages() const {
if (pages_used == 0) {
return false;
}
if (pages_used == 1 && page_messages[0] == 0) {
if (pages_used == 1 && page_bytes[0] == 0) {
return false;
}
@ -473,16 +524,21 @@ CallQueue::~CallQueue() {
//////////////////////
MessageQueue *MessageQueue::singleton = nullptr;
CallQueue *MessageQueue::main_singleton = nullptr;
thread_local CallQueue *MessageQueue::thread_singleton = nullptr;
void MessageQueue::set_thread_singleton_override(CallQueue *p_thread_singleton) {
thread_singleton = p_thread_singleton;
}
MessageQueue::MessageQueue() :
CallQueue(nullptr,
int(GLOBAL_DEF_RST(PropertyInfo(Variant::INT, "memory/limits/message_queue/max_size_mb", PROPERTY_HINT_RANGE, "1,512,1,or_greater"), 32)) * 1024 * 1024 / PAGE_SIZE_BYTES,
"Message queue out of memory. Try increasing 'memory/limits/message_queue/max_size_mb' in project settings.") {
ERR_FAIL_COND_MSG(singleton != nullptr, "A MessageQueue singleton already exists.");
singleton = this;
ERR_FAIL_COND_MSG(main_singleton != nullptr, "A MessageQueue singleton already exists.");
main_singleton = this;
}
MessageQueue::~MessageQueue() {
singleton = nullptr;
main_singleton = nullptr;
}
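The override is what lets ResourceLoader::_thread_load_function, earlier in this PR, give each loading thread its own queue. A condensed sketch of that usage, mirroring the diff:

// Give this worker thread a private queue so call_deferred() during a threaded
// load doesn't race the main queue.
CallQueue *mq_override = memnew(CallQueue);
MessageQueue::set_thread_singleton_override(mq_override);
// ... perform the load; deferred calls accumulate in mq_override ...
// (On flush, a non-main queue appends its pages to the main queue, per flush() above.)
memdelete(mq_override);
MessageQueue::set_thread_singleton_override(nullptr);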

View File

@ -70,10 +70,9 @@ private:
bool allocator_is_custom = false;
LocalVector<Page *> pages;
LocalVector<uint32_t> page_messages;
LocalVector<uint32_t> page_bytes;
uint32_t max_pages = 0;
uint32_t pages_used = 0;
uint32_t page_offset = 0;
bool flushing = false;
struct Message {
@ -88,7 +87,7 @@ private:
_FORCE_INLINE_ void _ensure_first_page() {
if (unlikely(pages.is_empty())) {
pages.push_back(allocator->alloc());
page_messages.push_back(0);
page_bytes.push_back(0);
pages_used = 1;
}
}
@ -153,10 +152,15 @@ public:
};
class MessageQueue : public CallQueue {
static MessageQueue *singleton;
static CallQueue *main_singleton;
static thread_local CallQueue *thread_singleton;
friend class CallQueue;
public:
_FORCE_INLINE_ static MessageQueue *get_singleton() { return singleton; }
_FORCE_INLINE_ static CallQueue *get_singleton() { return thread_singleton ? thread_singleton : main_singleton; }
static void set_thread_singleton_override(CallQueue *p_thread_singleton);
MessageQueue();
~MessageQueue();
};

View File

@ -119,8 +119,25 @@ class MutexLock {
public:
_ALWAYS_INLINE_ explicit MutexLock(const MutexT &p_mutex) :
lock(p_mutex.mutex){};
};
// This specialization is needed so manual locking and MutexLock can be used
// at the same time on a SafeBinaryMutex.
template <int Tag>
class MutexLock<SafeBinaryMutex<Tag>> {
friend class ConditionVariable;
std::unique_lock<std::mutex> lock;
public:
_ALWAYS_INLINE_ explicit MutexLock(const SafeBinaryMutex<Tag> &p_mutex) :
lock(p_mutex.mutex) {
SafeBinaryMutex<Tag>::count++;
};
_ALWAYS_INLINE_ ~MutexLock() {
SafeBinaryMutex<Tag>::count--;
};
};
using Mutex = MutexImpl<std::recursive_mutex>; // Recursive, for general use
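The body of SafeBinaryMutex is not shown in this diff, so the following is an assumption-laden sketch of why the specialization tracks count: lock()/unlock() presumably keep a thread_local depth so that only the outermost acquisition touches the underlying std::mutex, which is what lets manual locking nest with the RAII guard above.

// Hypothetical caller; 'mtx' stands for any SafeBinaryMutex instance, e.g. the
// ResourceLoader::thread_load_mutex this PR converts to a static member.
mtx.lock(); // Manual lock: depth 0 -> 1, underlying mutex acquired.
{
	MutexLock lock(mtx); // Same thread, depth 1 -> 2: no re-lock, no deadlock.
} // Guard destructor: depth 2 -> 1, mutex stays held.
mtx.unlock(); // Depth 1 -> 0, underlying mutex released.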

View File

@ -66,11 +66,12 @@ void Thread::callback(ID p_caller_id, const Settings &p_settings, Callback p_cal
}
}
void Thread::start(Thread::Callback p_callback, void *p_user, const Settings &p_settings) {
ERR_FAIL_COND_MSG(id != UNASSIGNED_ID, "A Thread object has been re-started without wait_to_finish() having been called on it.");
Thread::ID Thread::start(Thread::Callback p_callback, void *p_user, const Settings &p_settings) {
ERR_FAIL_COND_V_MSG(id != UNASSIGNED_ID, UNASSIGNED_ID, "A Thread object has been re-started without wait_to_finish() having been called on it.");
id = id_counter.increment();
std::thread new_thread(&Thread::callback, id, p_settings, p_callback, p_user);
thread.swap(new_thread);
return id;
}
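A one-line consequence for callers (callback and userdata names are placeholders):

Thread worker;
// start() now reports the spawned thread's ID, so callers can record it up front.
Thread::ID tid = worker.start(&my_callback, my_userdata);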
bool Thread::is_started() const {

View File

@ -109,7 +109,7 @@ public:
static Error set_name(const String &p_name);
void start(Thread::Callback p_callback, void *p_user, const Settings &p_settings = Settings());
ID start(Thread::Callback p_callback, void *p_user, const Settings &p_settings = Settings());
bool is_started() const;
///< waits until thread is finished, and deallocates it.
void wait_to_finish();

View File

@ -3446,6 +3446,8 @@ void Main::cleanup(bool p_force) {
movie_writer->end();
}
ResourceLoader::clear_thread_load_tasks();
ResourceLoader::remove_custom_loaders();
ResourceSaver::remove_custom_savers();
@ -3462,8 +3464,6 @@ void Main::cleanup(bool p_force) {
ResourceLoader::clear_translation_remaps();
ResourceLoader::clear_path_remaps();
ResourceLoader::clear_thread_load_tasks();
ScriptServer::finish_languages();
// Sync pending commands that may have been queued from a different thread during ScriptServer finalization

View File

@ -161,7 +161,7 @@ void CanvasItemMaterial::flush_changes() {
void CanvasItemMaterial::_queue_shader_change() {
MutexLock lock(material_mutex);
if (is_initialized && !element.in_list()) {
if (_is_initialized() && !element.in_list()) {
dirty_materials->add(&element);
}
}
@ -287,8 +287,8 @@ CanvasItemMaterial::CanvasItemMaterial() :
set_particles_anim_loop(false);
current_key.invalid_key = 1;
is_initialized = true;
_queue_shader_change();
_mark_initialized(callable_mp(this, &CanvasItemMaterial::_queue_shader_change));
}
CanvasItemMaterial::~CanvasItemMaterial() {

View File

@ -105,7 +105,6 @@ private:
_FORCE_INLINE_ void _queue_shader_change();
_FORCE_INLINE_ bool _is_shader_dirty() const;
bool is_initialized = false;
BlendMode blend_mode = BLEND_MODE_MIX;
LightMode light_mode = LIGHT_MODE_NORMAL;
bool particles_animation = false;

View File

@ -82,6 +82,23 @@ void Material::_validate_property(PropertyInfo &p_property) const {
}
}
void Material::_mark_initialized(const Callable &p_queue_shader_change_callable) {
// If this is happening as part of resource loading, it is not safe to queue the update
// as an addition to the dirty list, unless the load is happening on the main thread.
if (ResourceLoader::is_within_load() && Thread::get_caller_id() != Thread::get_main_id()) {
DEV_ASSERT(init_state != INIT_STATE_READY);
if (init_state == INIT_STATE_UNINITIALIZED) { // Prevent queueing twice.
// Queue an individual update of this material (the ResourceLoader knows how to handle deferred calls safely).
p_queue_shader_change_callable.call_deferred();
init_state = INIT_STATE_INITIALIZING;
}
} else {
// Straightforward conditions.
init_state = INIT_STATE_READY;
p_queue_shader_change_callable.callv(Array());
}
}
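The subclass constructors below all switch to the same call; schematically, with a hypothetical subclass name:

MyMaterial::MyMaterial() { // Hypothetical subclass, mirroring BaseMaterial3D below.
	// ... set up default parameters ...
	// Defers the dirty-list insertion when constructed inside a threaded load:
	_mark_initialized(callable_mp(this, &MyMaterial::_queue_shader_change));
}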
void Material::inspect_native_shader_code() {
SceneTree *st = Object::cast_to<SceneTree>(OS::get_singleton()->get_main_loop());
RID shader = get_shader_rid();
@ -1485,7 +1502,7 @@ void BaseMaterial3D::flush_changes() {
void BaseMaterial3D::_queue_shader_change() {
MutexLock lock(material_mutex);
if (is_initialized && !element.in_list()) {
if (_is_initialized() && !element.in_list()) {
dirty_materials->add(&element);
}
}
@ -3028,8 +3045,7 @@ BaseMaterial3D::BaseMaterial3D(bool p_orm) :
flags[FLAG_ALBEDO_TEXTURE_MSDF] = false;
flags[FLAG_USE_TEXTURE_REPEAT] = true;
is_initialized = true;
_queue_shader_change();
_mark_initialized(callable_mp(this, &BaseMaterial3D::_queue_shader_change));
}
BaseMaterial3D::~BaseMaterial3D() {

View File

@ -46,6 +46,12 @@ class Material : public Resource {
Ref<Material> next_pass;
int render_priority;
enum {
INIT_STATE_UNINITIALIZED,
INIT_STATE_INITIALIZING,
INIT_STATE_READY,
} init_state = INIT_STATE_UNINITIALIZED;
void inspect_native_shader_code();
protected:
@ -56,6 +62,9 @@ protected:
void _validate_property(PropertyInfo &p_property) const;
void _mark_initialized(const Callable &p_queue_shader_change_callable);
bool _is_initialized() { return init_state == INIT_STATE_READY; }
GDVIRTUAL0RC(RID, _get_shader_rid)
GDVIRTUAL0RC(Shader::Mode, _get_shader_mode)
GDVIRTUAL0RC(bool, _can_do_next_pass)
@ -452,7 +461,6 @@ private:
_FORCE_INLINE_ void _queue_shader_change();
_FORCE_INLINE_ bool _is_shader_dirty() const;
bool is_initialized = false;
bool orm;
Color albedo;

View File

@ -915,7 +915,7 @@ void ParticleProcessMaterial::flush_changes() {
void ParticleProcessMaterial::_queue_shader_change() {
MutexLock lock(material_mutex);
if (is_initialized && !element.in_list()) {
if (_is_initialized() && !element.in_list()) {
dirty_materials->add(&element);
}
}
@ -1889,8 +1889,7 @@ ParticleProcessMaterial::ParticleProcessMaterial() :
current_key.invalid_key = 1;
is_initialized = true;
_queue_shader_change();
_mark_initialized(callable_mp(this, &ParticleProcessMaterial::_queue_shader_change));
}
ParticleProcessMaterial::~ParticleProcessMaterial() {

View File

@ -261,7 +261,6 @@ private:
_FORCE_INLINE_ void _queue_shader_change();
_FORCE_INLINE_ bool _is_shader_dirty() const;
bool is_initialized = false;
Vector3 direction;
float spread = 0.0f;
float flatness = 0.0f;

View File

@ -150,32 +150,31 @@ Error ResourceLoaderText::_parse_ext_resource(VariantParser::Stream *p_stream, R
String path = ext_resources[id].path;
String type = ext_resources[id].type;
Ref<ResourceLoader::LoadToken> &load_token = ext_resources[id].load_token;
if (ext_resources[id].cache.is_valid()) {
r_res = ext_resources[id].cache;
} else if (use_sub_threads) {
Ref<Resource> res = ResourceLoader::load_threaded_get(path);
if (load_token.is_valid()) { // If not valid, it's OK since then we know this load accepts broken dependencies.
Ref<Resource> res = ResourceLoader::_load_complete(*load_token.ptr(), &err);
if (res.is_null()) {
if (ResourceLoader::get_abort_on_missing_resources()) {
error = ERR_FILE_MISSING_DEPENDENCIES;
error_text = "[ext_resource] referenced nonexistent resource at: " + path;
_printerr();
err = error;
} else {
ResourceLoader::notify_dependency_error(local_path, path, type);
if (!ResourceLoader::is_cleaning_tasks()) {
if (ResourceLoader::get_abort_on_missing_resources()) {
error = ERR_FILE_MISSING_DEPENDENCIES;
error_text = "[ext_resource] referenced non-existent resource at: " + path;
_printerr();
err = error;
} else {
ResourceLoader::notify_dependency_error(local_path, path, type);
}
}
} else {
ext_resources[id].cache = res;
#ifdef TOOLS_ENABLED
//remember ID for saving
res->set_id_for_path(path, id);
#endif
r_res = res;
}
} else {
error = ERR_FILE_MISSING_DEPENDENCIES;
error_text = "[ext_resource] referenced non-loaded resource at: " + path;
_printerr();
err = error;
r_res = Ref<Resource>();
}
} else {
r_res = Ref<Resource>();
}
VariantParser::get_token(p_stream, token, line, r_err_str);
@ -462,48 +461,20 @@ Error ResourceLoaderText::load() {
path = remaps[path];
}
ExtResource er;
er.path = path;
er.type = type;
if (use_sub_threads) {
Error err = ResourceLoader::load_threaded_request(path, type, use_sub_threads, ResourceFormatLoader::CACHE_MODE_REUSE, local_path);
if (err != OK) {
if (ResourceLoader::get_abort_on_missing_resources()) {
error = ERR_FILE_CORRUPT;
error_text = "[ext_resource] referenced broken resource at: " + path;
_printerr();
return error;
} else {
ResourceLoader::notify_dependency_error(local_path, path, type);
}
}
} else {
Ref<Resource> res = ResourceLoader::load(path, type);
if (res.is_null()) {
if (ResourceLoader::get_abort_on_missing_resources()) {
error = ERR_FILE_CORRUPT;
error_text = "[ext_resource] referenced nonexistent resource at: " + path;
_printerr();
return error;
} else {
ResourceLoader::notify_dependency_error(local_path, path, type);
}
ext_resources[id].path = path;
ext_resources[id].type = type;
ext_resources[id].load_token = ResourceLoader::_load_start(path, type, use_sub_threads ? ResourceLoader::LOAD_THREAD_DISTRIBUTE : ResourceLoader::LOAD_THREAD_FROM_CURRENT, ResourceFormatLoader::CACHE_MODE_REUSE);
if (!ext_resources[id].load_token.is_valid()) {
if (ResourceLoader::get_abort_on_missing_resources()) {
error = ERR_FILE_CORRUPT;
error_text = "[ext_resource] referenced non-existent resource at: " + path;
_printerr();
return error;
} else {
#ifdef TOOLS_ENABLED
//remember ID for saving
res->set_id_for_path(local_path, id);
#endif
ResourceLoader::notify_dependency_error(local_path, path, type);
}
er.cache = res;
}
ext_resources[id] = er;
error = VariantParser::parse_tag(&stream, lines, error_text, next_tag, &rp);
if (error) {

View File

@ -48,7 +48,7 @@ class ResourceLoaderText {
VariantParser::StreamFile stream;
struct ExtResource {
Ref<Resource> cache;
Ref<ResourceLoader::LoadToken> load_token;
String path;
String type;
};