Fix multiple issues in WorkerThreadPool

- Fix project settings being ignored.
- Made usages of `native_thread_allocator` thread-safe.
- Remove redundant thread-safety from `low_priority_threads_used`, `exit_threads`.
- Fix deadlock due to unintended extra lock of `task_mutex`.
Pedro J. Estébanez 2023-05-11 12:24:59 +02:00
parent f717cc0a38
commit 9077bb9232
5 changed files with 32 additions and 21 deletions
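The last three bullet points share one rationale: the pool's mutable state is either only touched while `task_mutex` is held, or (in the case of `exit_threads`) written before the semaphore posts that wake its readers, so the dedicated atomic wrappers (`SafeNumeric`, `SafeFlag`) add nothing. A minimal, self-contained sketch of the mutex-guarded-counter half of that idea, using hypothetical names rather than the pool's real API:

```cpp
#include <cstdint>
#include <mutex>

// Hypothetical illustration: a counter that is only ever read or written
// while one mutex is held needs no atomic wrapper, because the lock already
// makes every access mutually exclusive.
struct PoolStateSketch {
	std::mutex state_mutex;          // stand-in for WorkerThreadPool::task_mutex
	uint32_t low_prio_in_flight = 0; // plain integer, like the patched member

	void on_low_priority_posted() {
		std::lock_guard<std::mutex> guard(state_mutex);
		low_prio_in_flight++;
	}

	void on_low_priority_finished() {
		std::lock_guard<std::mutex> guard(state_mutex);
		low_prio_in_flight--;
	}
};
```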

core/object/worker_thread_pool.cpp

@@ -140,9 +140,9 @@ void WorkerThreadPool::_process_task(Task *p_task) {
 			task_queue.add_last(&low_prio_task->task_elem);
 			post = true;
 		} else {
-			low_priority_threads_used.decrement();
+			low_priority_threads_used--;
 		}
-		task_mutex.lock();
+		task_mutex.unlock();
 		if (post) {
 			task_available_semaphore.post();
 		}
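The deadlock fixed in this hunk comes from calling `lock()` where `unlock()` was intended: `task_mutex` is a binary (non-recursive) mutex, so the stray second `lock()` never returns and the mutex is never released for anyone else. A standalone sketch of that failure mode, with `std::mutex` standing in for the pool's mutex:

```cpp
#include <mutex>

std::mutex m; // non-recursive, like a binary mutex

void buggy_tail() {
	m.lock();
	// ... requeue a pending low-priority task, adjust counters ...
	m.lock(); // BUG: meant to be unlock(); in practice this blocks forever
	          // (re-locking an owned std::mutex is undefined behaviour).
}

void fixed_tail() {
	m.lock();
	// ... requeue a pending low-priority task, adjust counters ...
	m.unlock(); // release first, then signal other threads outside the lock
}
```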
@@ -152,7 +152,7 @@ void WorkerThreadPool::_process_task(Task *p_task) {
 void WorkerThreadPool::_thread_function(void *p_user) {
 	while (true) {
 		singleton->task_available_semaphore.wait();
-		if (singleton->exit_threads.is_set()) {
+		if (singleton->exit_threads) {
 			break;
 		}
 		singleton->_process_task_queue();
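`exit_threads` can be a plain `bool` here because workers only read it right after waking from `task_available_semaphore`, and shutdown writes it before posting that semaphore once per thread, so the post/wait pair orders the write before the reads that matter. A reduced sketch of the same handshake (C++20, hypothetical names):

```cpp
#include <semaphore>
#include <thread>

std::counting_semaphore<> work_available{0}; // stand-in for task_available_semaphore
bool exit_worker = false;                    // plain bool, like the patched flag

void worker() {
	while (true) {
		work_available.acquire();   // sleep until something is signalled
		if (exit_worker) {          // read happens after the matching release()
			break;
		}
		// ... pop and run one task ...
	}
}

int main() {
	std::thread t(worker);
	exit_worker = true;       // 1) publish the shutdown request...
	work_available.release(); // 2) ...then wake the worker so it can observe it
	t.join();
}
```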
@@ -168,14 +168,13 @@ void WorkerThreadPool::_post_task(Task *p_task, bool p_high_priority) {
 	task_mutex.lock();
 	p_task->low_priority = !p_high_priority;
 	if (!p_high_priority && use_native_low_priority_threads) {
-		task_mutex.unlock();
 		p_task->low_priority_thread = native_thread_allocator.alloc();
+		task_mutex.unlock();
 		p_task->low_priority_thread->start(_native_low_priority_thread_function, p_task); // Pask task directly to thread.
-	} else if (p_high_priority || low_priority_threads_used.get() < max_low_priority_threads) {
+	} else if (p_high_priority || low_priority_threads_used < max_low_priority_threads) {
 		task_queue.add_last(&p_task->task_elem);
 		if (!p_high_priority) {
-			low_priority_threads_used.increment();
+			low_priority_threads_used++;
 		}
 		task_mutex.unlock();
 		task_available_semaphore.post();
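The only change in the first branch is the order of two statements: the allocation from `native_thread_allocator` (which is not internally synchronized) now happens while `task_mutex` is still held, and the lock is released just before the comparatively slow `Thread::start()` call. The matching `free()` calls later in the commit get the same treatment. A generic sketch of the pattern, with placeholder types:

```cpp
#include <mutex>

// Placeholder allocator: not thread-safe on its own, so every call to it
// must be serialized by the caller's mutex.
struct UnsyncedAllocator {
	int *alloc() { return new int(0); }
	void free(int *p) { delete p; }
};

std::mutex pool_mutex;
UnsyncedAllocator allocator;

int *alloc_guarded() {
	std::unique_lock<std::mutex> guard(pool_mutex);
	int *obj = allocator.alloc(); // allocate while the lock is held...
	guard.unlock();               // ...release before doing slow follow-up work
	return obj;
}

void free_guarded(int *obj) {
	std::lock_guard<std::mutex> guard(pool_mutex);
	allocator.free(obj);          // frees go through the same mutex
}
```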
@@ -251,6 +250,8 @@ void WorkerThreadPool::wait_for_task_completion(TaskID p_task_id) {
 	if (use_native_low_priority_threads && task->low_priority) {
 		task->low_priority_thread->wait_to_finish();
+		task_mutex.lock();
 		native_thread_allocator.free(task->low_priority_thread);
 	} else {
 		int *index = thread_ids.getptr(Thread::get_caller_id());
@@ -272,9 +273,10 @@ void WorkerThreadPool::wait_for_task_completion(TaskID p_task_id) {
 		} else {
 			task->done_semaphore.wait();
 		}
+		task_mutex.lock();
 	}
-	task_mutex.lock();
 	tasks.erase(p_task_id);
 	task_allocator.free(task);
 	task_mutex.unlock();
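With the previous hunk taking `task_mutex` in the native-thread branch, the non-native branch is made to end the same way: both paths now reach the shared `tasks.erase()` / `task_allocator.free()` cleanup already holding the mutex, so the old unconditional `task_mutex.lock()` in front of that cleanup is dropped rather than becoming a second, deadlocking acquisition. A hypothetical outline of the resulting shape:

```cpp
#include <mutex>

std::mutex pool_mutex;

// Hypothetical outline, not the real function: every branch acquires the
// lock exactly once, so the shared cleanup never re-locks an owned mutex.
void wait_then_cleanup(bool native_path) {
	if (native_path) {
		// ... wait for the native thread to finish ...
		pool_mutex.lock();
	} else {
		// ... wait on the task's done semaphore ...
		pool_mutex.lock();
	}
	// ... erase the bookkeeping entry and free the task record ...
	pool_mutex.unlock();
}
```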
@@ -379,8 +381,8 @@ void WorkerThreadPool::wait_for_group_task_completion(GroupID p_group) {
 	if (group->low_priority_native_tasks.size() > 0) {
 		for (Task *task : group->low_priority_native_tasks) {
 			task->low_priority_thread->wait_to_finish();
-			native_thread_allocator.free(task->low_priority_thread);
 			task_mutex.lock();
+			native_thread_allocator.free(task->low_priority_thread);
 			task_allocator.free(task);
 			task_mutex.unlock();
 		}
@@ -443,7 +445,7 @@ void WorkerThreadPool::finish() {
 	}
 	task_mutex.unlock();
-	exit_threads.set_to(true);
+	exit_threads = true;
 	for (uint32_t i = 0; i < threads.size(); i++) {
 		task_available_semaphore.post();

core/object/worker_thread_pool.h

@@ -107,7 +107,7 @@ private:
 	};
 	TightLocalVector<ThreadData> threads;
-	SafeFlag exit_threads;
+	bool exit_threads = false;
 	HashMap<Thread::ID, int> thread_ids;
 	HashMap<TaskID, Task *> tasks;
@@ -115,7 +115,7 @@ private:
 	bool use_native_low_priority_threads = false;
 	uint32_t max_low_priority_threads = 0;
-	SafeNumeric<uint32_t> low_priority_threads_used;
+	uint32_t low_priority_threads_used = 0;
 	uint64_t last_task = 1;

core/register_core_types.cpp

@@ -302,15 +302,9 @@ void register_core_settings() {
 	GLOBAL_DEF_RST(PropertyInfo(Variant::INT, "network/limits/packet_peer_stream/max_buffer_po2", PROPERTY_HINT_RANGE, "0,64,1,or_greater"), (16));
 	GLOBAL_DEF(PropertyInfo(Variant::STRING, "network/tls/certificate_bundle_override", PROPERTY_HINT_FILE, "*.crt"), "");
-	int worker_threads = GLOBAL_DEF("threading/worker_pool/max_threads", -1);
-	bool low_priority_use_system_threads = GLOBAL_DEF("threading/worker_pool/use_system_threads_for_low_priority_tasks", true);
-	float low_property_ratio = GLOBAL_DEF("threading/worker_pool/low_priority_thread_ratio", 0.3);
-	if (Engine::get_singleton()->is_editor_hint() || Engine::get_singleton()->is_project_manager_hint()) {
-		worker_thread_pool->init();
-	} else {
-		worker_thread_pool->init(worker_threads, low_priority_use_system_threads, low_property_ratio);
-	}
+	GLOBAL_DEF("threading/worker_pool/max_threads", -1);
+	GLOBAL_DEF("threading/worker_pool/use_system_threads_for_low_priority_tasks", true);
+	GLOBAL_DEF("threading/worker_pool/low_priority_thread_ratio", 0.3);
 }
 
 void register_core_singletons() {

main/main.cpp

@@ -1454,6 +1454,19 @@ Error Main::setup(const char *execpath, int argc, char *argv[], bool p_second_ph
 #endif
 	}
 
+	// Initialize WorkerThreadPool.
+	{
+		int worker_threads = GLOBAL_GET("threading/worker_pool/max_threads");
+		bool low_priority_use_system_threads = GLOBAL_GET("threading/worker_pool/use_system_threads_for_low_priority_tasks");
+		float low_property_ratio = GLOBAL_GET("threading/worker_pool/low_priority_thread_ratio");
+
+		if (editor || project_manager) {
+			WorkerThreadPool::get_singleton()->init();
+		} else {
+			WorkerThreadPool::get_singleton()->init(worker_threads, low_priority_use_system_threads, low_property_ratio);
+		}
+	}
+
 	// Initialize user data dir.
 	OS::get_singleton()->ensure_user_data_dir();
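This is the part that fixes "project settings being ignored": `register_core_settings()` runs early in startup, before the project's settings file has been loaded, so values read there can only ever be the built-in defaults. The settings are still registered early (the remaining `GLOBAL_DEF` calls in `register_core_types.cpp`), but the pool is now initialized from `GLOBAL_GET` in `Main::setup()`, after project settings are available. A reduced, self-contained sketch of the register-early / read-late split, using a toy settings store rather than `ProjectSettings`:

```cpp
#include <iostream>
#include <map>
#include <string>

// Toy settings store standing in for ProjectSettings (illustration only).
std::map<std::string, int> settings;

// Like GLOBAL_DEF: record the default without overwriting an existing value,
// and return whatever the setting currently holds.
int def_setting(const std::string &name, int default_value) {
	settings.emplace(name, default_value);
	return settings[name];
}

// Like GLOBAL_GET: read the current (possibly overridden) value.
int get_setting(const std::string &name) {
	return settings.at(name);
}

int main() {
	// 1) Early registration: only the default exists at this point.
	int read_too_early = def_setting("worker_pool/max_threads", -1);

	// 2) The project file loads later and overrides the value.
	settings["worker_pool/max_threads"] = 8;

	// 3) Reading after the load sees the override; the early read never can.
	std::cout << read_too_early << "\n";                          // -1
	std::cout << get_setting("worker_pool/max_threads") << "\n";  // 8
}
```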

tests/test_main.cpp

@@ -135,6 +135,8 @@ int test_main(int argc, char *argv[]) {
 	OS::get_singleton()->set_cmdline("", args, List<String>());
 	DisplayServerMock::register_mock_driver();
+	WorkerThreadPool::get_singleton()->init();
+
 	// Run custom test tools.
 	if (test_commands) {
 		for (const KeyValue<String, TestFunc> &E : (*test_commands)) {