Fix deadlock in spinlock (add missing mfence in unlock); also spin for less time before sleeping

Former-commit-id: 8a9045c028128ad1ceaaa7a482ae4d241592d164
This commit is contained in:
John Sully 2019-11-30 00:51:58 -05:00
parent dd5acf2c0d
commit 0860489563
2 changed files with 4 additions and 3 deletions

View File

@ -279,7 +279,7 @@ extern "C" void fastlock_lock(struct fastlock *lock)
int tid = gettid();
unsigned myticket = __atomic_fetch_add(&lock->m_ticket.m_avail, 1, __ATOMIC_RELEASE);
unsigned mask = (1U << (myticket % 32));
int cloops = 0;
unsigned cloops = 0;
ticket ticketT;
for (;;)
@ -293,7 +293,7 @@ extern "C" void fastlock_lock(struct fastlock *lock)
#elif defined(__arm__)
__asm__ __volatile__ ("yield");
#endif
if ((++cloops % 1024*1024) == 0)
if ((++cloops % 0x10000) == 0)
{
fastlock_sleep(lock, tid, ticketT.u, mask);
}

View File

@ -45,7 +45,7 @@ fastlock_lock:
cmp dx, ax # is our ticket up?
je .LLocked # leave the loop
pause
add ecx, 0x1000 # Have we been waiting a long time? (oflow if we have)
add ecx, 0x10000 # Have we been waiting a long time? (oflow if we have)
                                 # 10000h is chosen so 32-bit ecx overflows (sets CF) on the 65536'th iteration (like the C code's 0x10000 modulus)
jnc .LLoop # If not, keep spinning; on overflow fall through and give up our timeslice to someone who's doing real work
# Like the compiler, you're probably thinking: "Hey! I should take these pushs out of the loop"
@ -140,6 +140,7 @@ fastlock_unlock:
mov ecx, [rdi+64] # get current active (this one)
inc ecx # bump it to the next thread
mov [rdi+64], cx # give up our ticket (note: lock is not required here because the spinlock itself guards this variable)
mfence # sync other threads
# At this point the lock is removed, however we must wake up any pending futexs
mov r9d, 1 # r9d is the bitmask for 2 threads
rol r9d, cl # place the mask in the right spot for the next 2 threads