core/command_queue_mt: Fix crash/hang when buffer fills up
This patch fixes two related issues. One is the race condition in issue #42107.
The other is a crash which happens when the reader is lapped near the end of the buffer.
Backport of 48e8da4 to 3.2.
This commit is contained in:
parent
074a098df6
commit
f994666bbe
@ -80,7 +80,7 @@ CommandQueueMT::SyncSemaphore *CommandQueueMT::_alloc_sync_sem() {
|
||||
|
||||
bool CommandQueueMT::dealloc_one() {
|
||||
tryagain:
|
||||
if (dealloc_ptr == write_ptr) {
|
||||
if (dealloc_ptr == (write_ptr_and_epoch >> 1)) {
|
||||
// The queue is empty
|
||||
return false;
|
||||
}
|
||||
@ -104,8 +104,8 @@ tryagain:
|
||||
|
||||
CommandQueueMT::CommandQueueMT(bool p_sync) {
|
||||
|
||||
read_ptr = 0;
|
||||
write_ptr = 0;
|
||||
read_ptr_and_epoch = 0;
|
||||
write_ptr_and_epoch = 0;
|
||||
dealloc_ptr = 0;
|
||||
mutex = Mutex::create();
|
||||
|
||||
|
@ -316,8 +316,8 @@ class CommandQueueMT {
|
||||
};
|
||||
|
||||
uint8_t *command_mem;
|
||||
uint32_t read_ptr;
|
||||
uint32_t write_ptr;
|
||||
uint32_t read_ptr_and_epoch;
|
||||
uint32_t write_ptr_and_epoch;
|
||||
uint32_t dealloc_ptr;
|
||||
uint32_t command_mem_size;
|
||||
SyncSemaphore sync_sems[SYNC_SEMAPHORES];
|
||||
@ -330,7 +330,11 @@ class CommandQueueMT {
|
||||
// alloc size is size+T+safeguard
|
||||
uint32_t alloc_size = ((sizeof(T) + 8 - 1) & ~(8 - 1)) + 8;
|
||||
|
||||
// Assert that the buffer is big enough to hold at least two messages.
|
||||
ERR_FAIL_COND_V(alloc_size * 2 + sizeof(uint32_t) > command_mem_size, NULL);
|
||||
|
||||
tryagain:
|
||||
uint32_t write_ptr = write_ptr_and_epoch >> 1;
|
||||
|
||||
if (write_ptr < dealloc_ptr) {
|
||||
// behind dealloc_ptr, check that there is room
|
||||
@ -362,8 +366,13 @@ class CommandQueueMT {
|
||||
// zero means, wrap to beginning
|
||||
|
||||
uint32_t *p = (uint32_t *)&command_mem[write_ptr];
|
||||
*p = 0;
|
||||
write_ptr = 0;
|
||||
*p = 1;
|
||||
write_ptr_and_epoch = 0 | (1 & ~write_ptr_and_epoch); // Invert epoch.
|
||||
// See if we can get the thread to run and clear up some more space while we wait.
|
||||
// This is required if alloc_size * 2 + 4 > COMMAND_MEM_SIZE
|
||||
if (sync) {
|
||||
sync->post();
|
||||
}
|
||||
goto tryagain;
|
||||
}
|
||||
}
|
||||
@ -377,6 +386,7 @@ class CommandQueueMT {
|
||||
// allocate the command
|
||||
T *cmd = memnew_placement(&command_mem[write_ptr], T);
|
||||
write_ptr += size;
|
||||
write_ptr_and_epoch = (write_ptr << 1) | (write_ptr_and_epoch & 1);
|
||||
return cmd;
|
||||
}
|
||||
|
||||
@ -402,17 +412,19 @@ class CommandQueueMT {
|
||||
tryagain:
|
||||
|
||||
// tried to read an empty queue
|
||||
if (read_ptr == write_ptr) {
|
||||
if (read_ptr_and_epoch == write_ptr_and_epoch) {
|
||||
if (p_lock) unlock();
|
||||
return false;
|
||||
}
|
||||
|
||||
uint32_t read_ptr = read_ptr_and_epoch >> 1;
|
||||
uint32_t size_ptr = read_ptr;
|
||||
uint32_t size = *(uint32_t *)&command_mem[read_ptr] >> 1;
|
||||
|
||||
if (size == 0) {
|
||||
*(uint32_t *)&command_mem[read_ptr] = 0; // clear in-use bit.
|
||||
//end of ringbuffer, wrap
|
||||
read_ptr = 0;
|
||||
read_ptr_and_epoch = 0 | (1 & ~read_ptr_and_epoch); // Invert epoch.
|
||||
goto tryagain;
|
||||
}
|
||||
|
||||
@ -422,6 +434,8 @@ class CommandQueueMT {
|
||||
|
||||
read_ptr += size;
|
||||
|
||||
read_ptr_and_epoch = (read_ptr << 1) | (read_ptr_and_epoch & 1);
|
||||
|
||||
if (p_lock) unlock();
|
||||
cmd->call();
|
||||
if (p_lock) lock();
|
||||
|
Loading…
Reference in New Issue
Block a user