Merge branch 'unstable' into advanced_db

Former-commit-id: d1ae8f2d1b03c0a79a36d710e4bb6594c37028b4
Author: John Sully
Date: 2019-11-30 00:52:33 -05:00
commit b27fd8dfb1
2 changed files with 7 additions and 4 deletions


@@ -279,7 +279,7 @@ extern "C" void fastlock_lock(struct fastlock *lock)
int tid = gettid();
unsigned myticket = __atomic_fetch_add(&lock->m_ticket.m_avail, 1, __ATOMIC_RELEASE);
unsigned mask = (1U << (myticket % 32));
-int cloops = 0;
+unsigned cloops = 0;
ticket ticketT;
for (;;)
@@ -289,9 +289,11 @@ extern "C" void fastlock_lock(struct fastlock *lock)
break;
#if defined(__i386__) || defined(__amd64__)
-__asm__ ("pause");
+__asm__ __volatile__ ("pause");
+#elif defined(__arm__)
+__asm__ __volatile__ ("yield");
#endif
-if ((++cloops % 1024*1024) == 0)
+if ((++cloops % 0x10000) == 0)
{
fastlock_sleep(lock, tid, ticketT.u, mask);
}
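
Taken together, the C++ changes tighten the spin loop: the x86 pause hint is now explicitly __volatile__ and ARM gets a matching yield, the iteration counter becomes unsigned so its wraparound is well-defined, and the futex sleep fires every 0x10000 (65,536) spins. Note that the old test (++cloops % 1024*1024) == 0 actually parsed as ((++cloops % 1024) * 1024) == 0, since % and * have equal precedence, so it slept every 1,024 spins; the new power-of-two constant is unambiguous and matches the assembly below. A minimal, self-contained sketch of the same spin-then-sleep pattern (not KeyDB's actual fastlock: std::this_thread::yield() stands in for the futex-based fastlock_sleep(), and a plain pair of 32-bit atomics replaces the packed 16-bit ticket struct):

#include <atomic>
#include <thread>

struct ticket_lock
{
    std::atomic<unsigned> avail{0};    // next ticket to hand out
    std::atomic<unsigned> active{0};   // ticket currently allowed in

    void lock()
    {
        unsigned myticket = avail.fetch_add(1, std::memory_order_relaxed);
        unsigned cloops = 0;                       // unsigned: wraparound is defined
        while (active.load(std::memory_order_acquire) != myticket)
        {
#if defined(__i386__) || defined(__amd64__)
            __asm__ __volatile__ ("pause");        // spin hint: ease off the core
#elif defined(__arm__) || defined(__aarch64__)
            __asm__ __volatile__ ("yield");
#endif
            if ((++cloops % 0x10000) == 0)         // after 65,536 spins stop burning CPU
                std::this_thread::yield();         // stand-in for fastlock_sleep()
        }
    }

    void unlock()
    {
        active.fetch_add(1, std::memory_order_release);  // hand the lock to the next ticket
    }
};

Usage is the ordinary lock()/unlock() pairing; the only point of the sketch is the spin-hint plus periodic-sleep structure that this patch adjusts.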


@@ -45,7 +45,7 @@ fastlock_lock:
cmp dx, ax # is our ticket up?
je .LLocked # leave the loop
pause
-add ecx, 0x1000 # Have we been waiting a long time? (oflow if we have)
+add ecx, 0x10000 # Have we been waiting a long time? (oflow if we have)
# 10000h is chosen so we overflow on the 65536'th iteration (just like the C code)
jnc .LLoop # if not, keep spinning; on overflow, fall through and give up our timeslice to someone who's doing real work
# Like the compiler, you're probably thinking: "Hey! I should take these pushes out of the loop"
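
On the assembly side the same threshold comes from the carry flag: adding 0x10000 to a 32-bit register makes it wrap (setting CF) exactly once every 2^32 / 0x10000 = 65,536 additions, so jnc .LLoop keeps spinning until then and only afterwards falls through to the sleep path, matching the C side's (++cloops % 0x10000) == 0. A small, hypothetical C++ check of that cadence (not part of the patch):

#include <cstdint>
#include <cstdio>

int main()
{
    std::uint32_t ecx = 0;        // models the 32-bit counter register
    unsigned spins = 0;
    for (;;)
    {
        ++spins;
        std::uint32_t before = ecx;
        ecx += 0x10000;           // "add ecx, 0x10000"
        if (ecx < before)         // wrapped, i.e. the carry flag would have been set
            break;                // the asm falls past "jnc .LLoop" here and sleeps
    }
    std::printf("gave up the timeslice after %u spins\n", spins);  // prints 65536
    return 0;
}
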
@@ -140,6 +140,7 @@ fastlock_unlock:
mov ecx, [rdi+64] # get current active (this one)
inc ecx # bump it to the next thread
mov [rdi+64], cx # give up our ticket (note: lock is not required here because the spinlock itself guards this variable)
+mfence # sync other threads: StoreLoad barrier so the new ticket is visible before we check for sleeping waiters
# At this point the lock is released; however, we must wake up any pending futex waiters
mov r9d, 1 # r9d holds the futex wake bitmask
rol r9d, cl # rotate the mask into position for the next ticket's bit
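
The new mfence closes a missed-wakeup window: the ticket hand-off is a plain store, and x86 allows a later load to be reordered ahead of it, so without a full barrier the unlocking thread could read waiter state from before a sleeper registered itself, decide nobody needs waking, and leave that sleeper blocked on its futex. A hedged, Linux-only sketch of that ordering (the field names futexbits/futexword and the 32-slot bitmask, implied by the C side's 1U << (myticket % 32), are assumptions for illustration, not KeyDB's exact layout):

#include <atomic>
#include <climits>
#include <cstdint>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

struct lock_state
{
    std::atomic<std::uint32_t> active{0};     // ticket currently allowed to hold the lock
    std::atomic<std::uint32_t> futexbits{0};  // one bit per (ticket % 32) sleeping waiter (assumed)
    std::uint32_t futexword = 0;              // word the sleepers FUTEX_WAIT_BITSET on (assumed)
};

void unlock_and_wake(lock_state &l)
{
    std::uint32_t next = l.active.load(std::memory_order_relaxed) + 1;
    l.active.store(next, std::memory_order_release);         // "mov [rdi+64], cx"
    std::atomic_thread_fence(std::memory_order_seq_cst);     // "mfence": StoreLoad barrier
    std::uint32_t mask = 1u << (next % 32);                   // "rol r9d, cl"
    if (l.futexbits.load(std::memory_order_relaxed) & mask)  // is the next ticket asleep?
        syscall(SYS_futex, &l.futexword, FUTEX_WAKE_BITSET_PRIVATE,
                INT_MAX, nullptr, nullptr, mask);             // wake only that waiter
}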