diff --git a/src/fastlock.cpp b/src/fastlock.cpp
index 4d3327318..6be725f6d 100644
--- a/src/fastlock.cpp
+++ b/src/fastlock.cpp
@@ -121,12 +121,9 @@ extern "C" void fastlock_lock(struct fastlock *lock)
 #endif
         if ((++cloops % 1024*1024) == 0)
         {
-            if (static_cast<uint16_t>(ticketT.m_active+1U) != myticket)
-            {
-                __atomic_fetch_or(&lock->futex, mask, __ATOMIC_ACQUIRE);
-                futex(&lock->m_ticket.u, FUTEX_WAIT_BITSET_PRIVATE, ticketT.u, nullptr, mask);
-                __atomic_fetch_and(&lock->futex, ~mask, __ATOMIC_RELEASE);
-            }
+            __atomic_fetch_or(&lock->futex, mask, __ATOMIC_ACQUIRE);
+            futex(&lock->m_ticket.u, FUTEX_WAIT_BITSET_PRIVATE, ticketT.u, nullptr, mask);
+            __atomic_fetch_and(&lock->futex, ~mask, __ATOMIC_RELEASE);
             ++g_longwaits;
         }
     }
diff --git a/src/fastlock_x64.asm b/src/fastlock_x64.asm
index f7d1a3093..baf33654f 100644
--- a/src/fastlock_x64.asm
+++ b/src/fastlock_x64.asm
@@ -44,11 +44,7 @@ ALIGN 16
 ; But the compiler doesn't know that we rarely hit this, and when we do we know the lock is
 ; taking a long time to be released anyways. We optimize for the common case of short
 ; lock intervals. That's why we're using a spinlock in the first place
-        inc edx
-        cmp dx, ax
-        je .LLoop
-        dec edx                 ; restore the current ticket
-.LFutexWait:
+        ; If we get here we're going to sleep in the kernel with a futex
         push rsi
         push rax
         ; Setup the syscall args
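
For context on the C++ hunk: Linux exposes futex(2) only as a raw syscall, so the futex() used above must be a local wrapper. Below is a minimal, assumed sketch of such a wrapper matching the five-argument call site in the diff; the actual helper defined in fastlock.cpp may differ in signature and types. With FUTEX_WAIT_BITSET the final syscall argument (val3) is the wake bitmask, which is why the lock ORs `mask` into lock->futex before sleeping and clears it again after waking.

    // Assumed sketch of a futex() wrapper consistent with the call in the diff;
    // not necessarily the exact helper defined in fastlock.cpp.
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <ctime>

    static int futex(volatile unsigned *uaddr, int futex_op, unsigned val,
                     const struct timespec *timeout, unsigned mask)
    {
        // For FUTEX_WAIT_BITSET(_PRIVATE): sleep only while *uaddr == val, and
        // wake only for a FUTEX_WAKE_BITSET whose bitmask intersects 'mask'.
        // uaddr2 is ignored by this operation, so uaddr is passed as a placeholder.
        return syscall(SYS_futex, uaddr, futex_op, val, timeout, uaddr, mask);
    }

Presumably the unlock path pairs this with FUTEX_WAKE_BITSET_PRIVATE using the next ticket holder's bit, so only that waiter is woken rather than the whole queue.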