Change fastlock implementation from naive spinlock to a ticket lock for better fairness and lower peak latency
This commit is contained in:
parent
02e7fe400c
commit
942510f1a5
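
For context on what the diff implements: a ticket lock gives each acquirer a
monotonically increasing ticket number, and the acquirer spins until a "now
serving" counter reaches that ticket, so the lock is granted in strict FIFO
order instead of going to whichever thread happens to win a compare-and-swap
race. A minimal standalone sketch of the idea (illustrative only; it uses
std::atomic rather than the GCC builtins the patch uses, and the TicketLock
name is ours, not the patch's):

    #include <atomic>
    #include <sched.h>

    struct TicketLock {
        std::atomic<unsigned> m_avail{0};   // next ticket to hand out
        std::atomic<unsigned> m_active{0};  // ticket currently being served

        void lock() {
            // Take a ticket, then spin until it is our turn.
            unsigned myticket = m_avail.fetch_add(1, std::memory_order_acq_rel);
            int cloops = 0;
            while (m_active.load(std::memory_order_acquire) != myticket) {
                if ((++cloops % (1024 * 1024)) == 0)
                    sched_yield();  // back off occasionally, as the patch does
            }
        }

        void unlock() {
            // Advance "now serving"; exactly one waiter proceeds next.
            m_active.fetch_add(1, std::memory_order_acq_rel);
        }
    };

A plain test-and-set spinlock lets the same lucky thread re-acquire the lock
repeatedly under contention; handing the lock to the longest waiter is what
improves fairness and flattens peak latency.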
fastlock.cpp
@@ -1,3 +1,32 @@
+/*
+ * Copyright (c) 2019, John Sully <john at eqalpha dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
 #include "fastlock.h"
 #include <unistd.h>
 #include <sys/syscall.h>
@@ -5,6 +34,13 @@
 #include <sched.h>
 #include <atomic>
 
+/****************************************************
+ *
+ * Implementation of a fair spinlock. To promote fairness we
+ * use a ticket lock instead of a raw spinlock
+ *
+ ****************************************************/
+
 static_assert(sizeof(pid_t) <= sizeof(fastlock::m_pidOwner), "fastlock::m_pidOwner not large enough");
 
 static pid_t gettid()
@@ -17,22 +53,25 @@ static pid_t gettid()
 
 extern "C" void fastlock_init(struct fastlock *lock)
 {
-    lock->m_lock = 0;
+    lock->m_active = 0;
+    lock->m_avail = 0;
     lock->m_depth = 0;
 }
 
 extern "C" void fastlock_lock(struct fastlock *lock)
 {
-    if (!__sync_bool_compare_and_swap(&lock->m_lock, 0, 1))
+    if ((int)__atomic_load_4(&lock->m_pidOwner, __ATOMIC_ACQUIRE) == gettid())
     {
-        if (lock->m_pidOwner == gettid())
-        {
-            ++lock->m_depth;
-            return;
-        }
+        ++lock->m_depth;
+        return;
+    }
 
+    unsigned myticket = __atomic_fetch_add(&lock->m_avail, 1, __ATOMIC_ACQ_REL);
+
+    if (__atomic_load_4(&lock->m_active, __ATOMIC_ACQUIRE) != myticket)
+    {
         int cloops = 1;
-        while (!__sync_bool_compare_and_swap(&lock->m_lock, 0, 1))
+        while (__atomic_load_4(&lock->m_active, __ATOMIC_ACQUIRE) != myticket)
         {
             if ((++cloops % 1024*1024) == 0)
                 sched_yield();
@@ -40,7 +79,8 @@ extern "C" void fastlock_lock(struct fastlock *lock)
+        }
     }
 
     lock->m_depth = 1;
-    lock->m_pidOwner = gettid();
+    __atomic_store_4(&lock->m_pidOwner, gettid(), __ATOMIC_RELEASE);
     __sync_synchronize();
 }
 
@@ -50,8 +90,7 @@ extern "C" void fastlock_unlock(struct fastlock *lock)
     {
         lock->m_pidOwner = -1;
         __sync_synchronize();
-        if (!__sync_bool_compare_and_swap(&lock->m_lock, 1, 0))
-            *((volatile int*)0) = -1;
+        __atomic_fetch_add(&lock->m_active, 1, __ATOMIC_ACQ_REL);
     }
 }
 
@@ -64,10 +103,5 @@ extern "C" void fastlock_free(struct fastlock *lock)
 
 bool fastlock::fOwnLock()
 {
-    if (__sync_bool_compare_and_swap(&m_lock, 0, 1))
-    {
-        __sync_bool_compare_and_swap(&m_lock, 1, 0);
-        return false; // it was never locked
-    }
     return gettid() == m_pidOwner;
 }
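
Before the header change below, one way to read the two counters that replace
m_lock (annotation ours, not part of the patch):

    // m_avail  - next ticket number to hand out (incremented by lock)
    // m_active - ticket number currently being served (incremented by unlock)
    //
    // The lock is free exactly when m_active == m_avail, and the number of
    // waiters at any instant is m_avail - m_active (unsigned arithmetic, so
    // the accounting survives wraparound).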
fastlock.h
@@ -18,7 +18,9 @@ void fastlock_free(struct fastlock *lock);
 
 struct fastlock
 {
-    volatile int m_lock;
+    volatile unsigned m_active;
+    volatile unsigned m_avail;
+
     volatile int m_pidOwner;
     volatile int m_depth;
 
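Finally, note that the patch keeps the recursive-acquisition fast path: a
thread that already owns the lock (identified via m_pidOwner) only bumps
m_depth instead of taking a new ticket. A hypothetical usage sketch, assuming
fastlock_unlock advances m_active only once the depth returns to zero (that
depth check lives in unchanged context lines not shown in this diff):

    fastlock l;
    fastlock_init(&l);
    fastlock_lock(&l);   // takes a ticket and becomes owner
    fastlock_lock(&l);   // same thread re-enters: m_depth -> 2, no new ticket
    fastlock_unlock(&l); // m_depth -> 1, lock still held
    fastlock_unlock(&l); // m_depth -> 0, m_active advances, next waiter runs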