Even the next-up thread should sleep in the futex

Former-commit-id: 8b76b01bfe710603bcdc101da6eb27afcee7e1b1
This commit is contained in:
John Sully 2019-06-16 00:00:34 -04:00
parent 5573f021c5
commit 0cd9bc48fc
2 changed files with 4 additions and 11 deletions

View File

@ -121,12 +121,9 @@ extern "C" void fastlock_lock(struct fastlock *lock)
#endif
if ((++cloops % 1024*1024) == 0)
{
if (static_cast<uint16_t>(ticketT.m_active+1U) != myticket)
{
__atomic_fetch_or(&lock->futex, mask, __ATOMIC_ACQUIRE);
futex(&lock->m_ticket.u, FUTEX_WAIT_BITSET_PRIVATE, ticketT.u, nullptr, mask);
__atomic_fetch_and(&lock->futex, ~mask, __ATOMIC_RELEASE);
}
__atomic_fetch_or(&lock->futex, mask, __ATOMIC_ACQUIRE);
futex(&lock->m_ticket.u, FUTEX_WAIT_BITSET_PRIVATE, ticketT.u, nullptr, mask);
__atomic_fetch_and(&lock->futex, ~mask, __ATOMIC_RELEASE);
++g_longwaits;
}
}

View File

@ -44,11 +44,7 @@ ALIGN 16
; But the compiler doesn't know that we rarely hit this, and when we do we know the lock is
; taking a long time to be released anyways. We optimize for the common case of short
; lock intervals. That's why we're using a spinlock in the first place
inc edx
cmp dx, ax
je .LLoop
dec edx ; restore the current ticket
.LFutexWait:
; If we get here we're going to sleep in the kernel with a futex
push rsi
push rax
; Setup the syscall args