Add support for sanitizers and fix issues they have found

Former-commit-id: 58971a430dd3e4e8c39c7b5288211bc2ba7699ed
John Sully 2019-07-18 18:39:42 -04:00
parent bd3cc0bb95
commit 5cb7ec841a
7 changed files with 147 additions and 24 deletions

src/Makefile (View File)

@@ -45,6 +45,17 @@ endif
 endif
 USEASM?=true
+ifneq ($(SANITIZE),)
+	CC=clang
+	CXX=clang++
+	CFLAGS+= -fsanitize=$(SANITIZE) -DSANITIZE
+	CXXFLAGS+= -fsanitize=$(SANITIZE) -DSANITIZE
+	LDFLAGS+= -fsanitize=$(SANITIZE)
+	MALLOC=libc
+	USEASM=false
+endif
+
 # Do we use our assembly spinlock? X64 only
 ifeq ($(uname_S),Linux)
 ifeq ($(uname_M),x86_64)
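
With this block, passing a non-empty SANITIZE variable (its value goes straight into clang's -fsanitize=) selects a sanitized build, e.g. make SANITIZE=address or make SANITIZE=thread. MALLOC is pinned to libc because a custom allocator such as jemalloc hides heap activity from the sanitizer runtime, and USEASM=false drops the hand-written assembly spinlock, whose internals the instrumentation cannot follow.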

src/fastlock.cpp (View File)

@@ -57,6 +57,7 @@
 #define UNUSED(x) ((void)x)
 #endif
+
 /****************************************************
  *
  * Implementation of a fair spinlock. To promote fairness we
@@ -64,12 +65,74 @@
  *
  ****************************************************/
 
+#if !defined(__has_feature)
+#define __has_feature(x) 0
+#endif
+
+#if __has_feature(thread_sanitizer)
+/* Report that a lock has been created at address "lock". */
+#define ANNOTATE_RWLOCK_CREATE(lock) \
+    AnnotateRWLockCreate(__FILE__, __LINE__, lock)
+
+/* Report that the lock at address "lock" is about to be destroyed. */
+#define ANNOTATE_RWLOCK_DESTROY(lock) \
+    AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
+
+/* Report that the lock at address "lock" has been acquired.
+   is_w=1 for writer lock, is_w=0 for reader lock. */
+#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+    AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
+
+/* Report that the lock at address "lock" is about to be released. */
+#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+    AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
+
+#if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
+#if defined(__GNUC__)
+#define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
+#else
+/* TODO(glider): for Windows support we may want to change this macro in order
+   to prepend __declspec(selectany) to the annotations' declarations. */
+#error weak annotations are not supported for your compiler
+#endif
+#else
+#define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
+#endif
+
+extern "C" {
+void AnnotateRWLockCreate(
+    const char *file, int line,
+    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void AnnotateRWLockDestroy(
+    const char *file, int line,
+    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void AnnotateRWLockAcquired(
+    const char *file, int line,
+    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void AnnotateRWLockReleased(
+    const char *file, int line,
+    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+}
+#else
+#define ANNOTATE_RWLOCK_CREATE(lock)
+#define ANNOTATE_RWLOCK_DESTROY(lock)
+#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
+#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)
+#endif
+
 static_assert(sizeof(pid_t) <= sizeof(fastlock::m_pidOwner), "fastlock::m_pidOwner not large enough");
 uint64_t g_longwaits = 0;
 
 uint64_t fastlock_getlongwaitcount()
 {
-    return g_longwaits;
+    uint64_t rval;
+    __atomic_load(&g_longwaits, &rval, __ATOMIC_RELAXED);
+    return rval;
 }
 
 #ifndef ASM_SPINLOCK
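
The block above vendors Google's dynamic-annotations interface so ThreadSanitizer can model fastlock as a reader-writer lock (acquired in writer mode here); outside TSan builds the macros expand to nothing. The g_longwaits counter is likewise read with the generic __atomic_load builtin, pairing with the __atomic_fetch_add in fastlock_lock below so the statistic is race-free. A minimal sketch of how these macros attach to a hand-rolled lock in this translation unit (ToyLock is illustrative, not part of the commit):

    #include <atomic>

    struct ToyLock {
        std::atomic<int> held{0};
        ToyLock()  { ANNOTATE_RWLOCK_CREATE(this); }
        ~ToyLock() { ANNOTATE_RWLOCK_DESTROY(this); }
        void lock() {
            while (held.exchange(1, std::memory_order_acquire)) { /* spin */ }
            ANNOTATE_RWLOCK_ACQUIRED(this, 1);   // 1 = exclusive (writer)
        }
        void unlock() {
            ANNOTATE_RWLOCK_RELEASED(this, 1);   // announce before releasing
            held.store(0, std::memory_order_release);
        }
    };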
@@ -107,12 +170,15 @@ extern "C" void fastlock_init(struct fastlock *lock)
     lock->m_depth = 0;
     lock->m_pidOwner = -1;
     lock->futex = 0;
+    ANNOTATE_RWLOCK_CREATE(lock);
 }
 
 #ifndef ASM_SPINLOCK
 extern "C" void fastlock_lock(struct fastlock *lock)
 {
-    if ((int)__atomic_load_4(&lock->m_pidOwner, __ATOMIC_ACQUIRE) == gettid())
+    int pidOwner;
+    __atomic_load(&lock->m_pidOwner, &pidOwner, __ATOMIC_ACQUIRE);
+    if (pidOwner == gettid())
     {
         ++lock->m_depth;
         return;
@@ -124,8 +190,12 @@ extern "C" void fastlock_lock(struct fastlock *lock)
 #endif
     int cloops = 0;
     ticket ticketT;
-    while (((ticketT.u = __atomic_load_4(&lock->m_ticket.m_active, __ATOMIC_ACQUIRE)) & 0xffff) != myticket)
+    for (;;)
     {
+        __atomic_load(&lock->m_ticket.u, &ticketT.u, __ATOMIC_ACQUIRE);
+        if ((ticketT.u & 0xffff) == myticket)
+            break;
 #if defined(__i386__) || defined(__amd64__)
         __asm__ ("pause");
 #endif
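
For context, fastlock is a ticket lock: m_avail hands out tickets and m_active announces whose turn it is. The rewrite above swaps a sized __atomic_load_4 of m_active for a generic __atomic_load of the whole ticket word, a form the sanitizer instrumentation understands. The core algorithm, reduced to std::atomic (a sketch, not KeyDB's implementation):

    #include <atomic>
    #include <cstdint>

    struct TicketLock {
        std::atomic<uint16_t> avail{0};   // next ticket to hand out
        std::atomic<uint16_t> active{0};  // ticket currently being served
        void lock() {
            uint16_t my = avail.fetch_add(1, std::memory_order_relaxed);
            while (active.load(std::memory_order_acquire) != my) { /* spin */ }
        }
        void unlock() {
            active.fetch_add(1, std::memory_order_release);  // serve next in line
        }
    };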
@@ -136,28 +206,34 @@ extern "C" void fastlock_lock(struct fastlock *lock)
             futex(&lock->m_ticket.u, FUTEX_WAIT_BITSET_PRIVATE, ticketT.u, nullptr, mask);
             __atomic_fetch_and(&lock->futex, ~mask, __ATOMIC_RELEASE);
 #endif
-            ++g_longwaits;
+            __atomic_fetch_add(&g_longwaits, 1, __ATOMIC_RELAXED);
         }
     }
     lock->m_depth = 1;
-    __atomic_store_4(&lock->m_pidOwner, gettid(), __ATOMIC_RELEASE);
+    int tid = gettid();
+    __atomic_store(&lock->m_pidOwner, &tid, __ATOMIC_RELEASE);
+    ANNOTATE_RWLOCK_ACQUIRED(lock, true);
     std::atomic_thread_fence(std::memory_order_acquire);
 }
 
 extern "C" int fastlock_trylock(struct fastlock *lock, int fWeak)
 {
-    if ((int)__atomic_load_4(&lock->m_pidOwner, __ATOMIC_ACQUIRE) == gettid())
+    int tid;
+    __atomic_load(&lock->m_pidOwner, &tid, __ATOMIC_ACQUIRE);
+    if (tid == gettid())
     {
         ++lock->m_depth;
         return true;
     }
 
     // cheap test
-    if (lock->m_ticket.m_active != lock->m_ticket.m_avail)
+    struct ticket ticketT;
+    __atomic_load(&lock->m_ticket.u, &ticketT.u, __ATOMIC_ACQUIRE);
+    if (ticketT.m_active != ticketT.m_avail)
         return false;
 
-    uint16_t active = __atomic_load_2(&lock->m_ticket.m_active, __ATOMIC_RELAXED);
+    uint16_t active = ticketT.m_active;
     uint16_t next = active + 1;
     struct ticket ticket_expect { { { active, active } } };
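
A pattern repeats through these hunks: the sized builtins (__atomic_load_4, __atomic_load_2, __atomic_store_4) give way to the generic __atomic_load/__atomic_store forms, which are type-checked and pass values by address, hence the tid temporaries; presumably the sized forms were not being instrumented cleanly under the sanitizers. The "cheap test" also changes in substance: rather than two separate plain reads of m_active and m_avail (which can race and tear), the whole ticket word is loaded once atomically and both halves are inspected from that snapshot. The builtin shapes, per the GCC documentation:

    struct owner { int m_pidOwner; };

    void set_owner(owner *lock, int tid) {
        // void __atomic_store(type *ptr, type *val, int memorder):
        // the new value is passed by address, so the callers above first
        // materialize gettid() into a local variable.
        __atomic_store(&lock->m_pidOwner, &tid, __ATOMIC_RELEASE);
    }

    int get_owner(owner *lock) {
        // void __atomic_load(type *ptr, type *ret, int memorder)
        int tid;
        __atomic_load(&lock->m_pidOwner, &tid, __ATOMIC_ACQUIRE);
        return tid;
    }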
@@ -165,7 +241,9 @@ extern "C" int fastlock_trylock(struct fastlock *lock, int fWeak)
     if (__atomic_compare_exchange(&lock->m_ticket, &ticket_expect, &ticket_setiflocked, fWeak /*weak*/, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
     {
         lock->m_depth = 1;
-        __atomic_store_4(&lock->m_pidOwner, gettid(), __ATOMIC_RELEASE);
+        tid = gettid();
+        __atomic_store(&lock->m_pidOwner, &tid, __ATOMIC_RELEASE);
+        ANNOTATE_RWLOCK_ACQUIRED(lock, true);
         return true;
     }
     return false;
@@ -176,13 +254,20 @@ extern "C" int fastlock_trylock(struct fastlock *lock, int fWeak)
 void unlock_futex(struct fastlock *lock, uint16_t ifutex)
 {
     unsigned mask = (1U << (ifutex % 32));
-    unsigned futexT = __atomic_load_4(&lock->futex, __ATOMIC_RELAXED) & mask;
+    unsigned futexT;
+    __atomic_load(&lock->futex, &futexT, __ATOMIC_RELAXED);
+    futexT &= mask;
 
     if (futexT == 0)
         return;
 
-    while (__atomic_load_4(&lock->futex, __ATOMIC_ACQUIRE) & mask)
+    for (;;)
     {
+        __atomic_load(&lock->futex, &futexT, __ATOMIC_ACQUIRE);
+        futexT &= mask;
+        if (!futexT)
+            break;
         if (futex(&lock->m_ticket.u, FUTEX_WAKE_BITSET_PRIVATE, INT_MAX, nullptr, mask) == 1)
             break;
     }
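
unlock_futex pairs with the FUTEX_WAIT_BITSET_PRIVATE wait in fastlock_lock: each sleeper publishes a bit (its ticket mod 32) in lock->futex, and the unlocker wakes only threads whose bitset intersects the mask, so one wake targets the next ticket holder instead of stampeding the whole queue. KeyDB defines its own futex() helper elsewhere in this file; a plausible shape for such a wrapper, assuming the standard six-argument syscall (a sketch, not the file's actual definition):

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <ctime>

    static int futex(volatile unsigned *uaddr, int op, unsigned val,
                     const struct timespec *timeout, unsigned mask)
    {
        // The final argument (val3) carries the bitset for the
        // FUTEX_WAIT_BITSET / FUTEX_WAKE_BITSET operations.
        return (int)syscall(SYS_futex, uaddr, op, val, timeout, nullptr, mask);
    }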
@@ -194,9 +279,13 @@ extern "C" void fastlock_unlock(struct fastlock *lock)
     --lock->m_depth;
     if (lock->m_depth == 0)
     {
-        assert((int)__atomic_load_4(&lock->m_pidOwner, __ATOMIC_RELAXED) >= 0);  // unlock after free
-        lock->m_pidOwner = -1;
+        int pidT;
+        __atomic_load(&lock->m_pidOwner, &pidT, __ATOMIC_RELAXED);
+        assert(pidT >= 0);  // unlock after free
+        int t = -1;
+        __atomic_store(&lock->m_pidOwner, &t, __ATOMIC_RELEASE);
         std::atomic_thread_fence(std::memory_order_release);
+        ANNOTATE_RWLOCK_RELEASED(lock, true);
         uint16_t activeNew = __atomic_add_fetch(&lock->m_ticket.m_active, 1, __ATOMIC_RELEASE);    // on x86 the atomic is not required here, but ASM handles that case
 #ifdef __linux__
         unlock_futex(lock, activeNew);
@@ -213,12 +302,15 @@ extern "C" void fastlock_free(struct fastlock *lock)
     assert((lock->m_ticket.m_active == lock->m_ticket.m_avail)    // Assert the lock is unlocked
         || (lock->m_pidOwner == gettid() && (lock->m_ticket.m_active == lock->m_ticket.m_avail-1)));    // OR we own the lock and nobody else is waiting
     lock->m_pidOwner = -2;  // sentinel value indicating free
+    ANNOTATE_RWLOCK_DESTROY(lock);
 }
 
 bool fastlock::fOwnLock()
 {
-    return gettid() == m_pidOwner;
+    int tid;
+    __atomic_load(&m_pidOwner, &tid, __ATOMIC_RELAXED);
+    return gettid() == tid;
 }
 
 int fastlock_unlock_recursive(struct fastlock *lock)

src/networking.cpp (View File)

@@ -1631,7 +1631,7 @@ int handleClientsWithPendingWrites(int iel) {
         if (writeToClient(c->fd,c,0) == C_ERR) {
             if (c->flags & CLIENT_CLOSE_ASAP)
             {
-                c->lock.lock();
+                lock.release(); // still locked
                 AeLocker ae;
                 ae.arm(c);
                 freeClient(c); // writeToClient will only async close, but there's no need to wait
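
This one-line change fixes a double acquisition: the surrounding code already holds c->lock through the guard object named lock, and taking it again before freeClient is what the sanitizer flagged. Assuming the guard follows the usual release() convention, the call detaches the guard while leaving the mutex held, so freeClient inherits ownership of the still-locked client. The semantics mirror std::unique_lock::release:

    #include <mutex>

    std::mutex m;

    void handoff() {
        std::unique_lock<std::mutex> guard(m);  // m is now locked
        guard.release();  // guard forgets m; m STAYS locked (no unlock here)
        // ownership has been handed off: someone must eventually unlock m
        m.unlock();
    }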

src/new.cpp (View File)

@@ -2,6 +2,14 @@
 #include "server.h"
 #include "new.h"
 
+#ifdef SANITIZE
+void *operator new(size_t size, enum MALLOC_CLASS mclass)
+{
+    (void)mclass;
+    return ::operator new(size);
+}
+#else
 [[deprecated]]
 void *operator new(size_t size)
 {
@@ -21,4 +29,6 @@ void operator delete(void * p) noexcept
 void operator delete(void *p, std::size_t) noexcept
 {
     zfree(p);
 }
+
+#endif

src/new.h (View File)

@@ -1,10 +1,12 @@
 #pragma once
 #include <cstddef> // std::size_t
 
+void *operator new(size_t size, enum MALLOC_CLASS mclass);
+
+#ifndef SANITIZE
 [[deprecated]]
 void *operator new(size_t size);
 
-void *operator new(size_t size, enum MALLOC_CLASS mclass);
-
 void operator delete(void * p) noexcept;
 void operator delete(void *p, std::size_t) noexcept;
+#endif
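
Taken together, the new.cpp and new.h changes route the MALLOC_CLASS placement form of operator new straight to the global allocator under SANITIZE and hide the zmalloc-backed replacements entirely, so the sanitizer's own allocator sees, tracks, and can poison every allocation. Call sites need no edits; the pass-through pattern looks roughly like this (the enumerator names are illustrative, the real ones live elsewhere in the tree):

    #include <cstddef>
    #include <new>

    enum MALLOC_CLASS { MALLOC_LOCAL, MALLOC_SHARED };  // illustrative names

    void *operator new(std::size_t size, MALLOC_CLASS)
    {
        return ::operator new(size);  // let the sanitizer allocator track it
    }

    struct obj { int x = 0; };

    int main()
    {
        obj *p = new (MALLOC_LOCAL) obj();  // call sites compile unchanged
        delete p;  // pairs with ::operator new above
    }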

src/object.cpp (View File)

@@ -356,7 +356,7 @@ void incrRefCount(robj_roptr o) {
 }
 
 void decrRefCount(robj_roptr o) {
-    if (o->refcount == 1) {
+    if (o->refcount.load(std::memory_order_acquire) == 1) {
         switch(o->type) {
         case OBJ_STRING: freeStringObject(o); break;
         case OBJ_LIST: freeListObject(o); break;
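
Loading refcount with memory_order_acquire before freeing is the standard reference-counting discipline: the acquire synchronizes with the release performed by the thread that previously dropped the count, so all of that thread's writes to the object are visible before the destructors run. The canonical shape of the idiom, for comparison (a generic sketch, not KeyDB's robj code):

    #include <atomic>

    struct refcounted {
        std::atomic<int> refcount{1};
        // ... payload ...
    };

    void decr(refcounted *o)
    {
        // release: publish our writes to whichever thread ends up freeing
        if (o->refcount.fetch_sub(1, std::memory_order_release) == 1) {
            // acquire: observe every other thread's writes before freeing
            std::atomic_thread_fence(std::memory_order_acquire);
            delete o;
        }
    }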

src/server.cpp (View File)

@@ -1023,7 +1023,7 @@ extern "C" void nolocks_localtime(struct tm *tmp, time_t t, time_t tz, int dst);
  * serverLog() is to prefer. */
 void serverLogRaw(int level, const char *msg) {
     const int syslogLevelMap[] = { LOG_DEBUG, LOG_INFO, LOG_NOTICE, LOG_WARNING };
-    const char *c = ".-*#";
+    const char *c = ".-*# ";
     FILE *fp;
     char buf[64];
     int rawmode = (level & LL_RAW);
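
The extra trailing space is a one-character bounds fix: c is indexed by the log level, and a level one past the four mapped symbols (presumably the out-of-range read the sanitizer caught) now yields a space instead of reading past the end of the string literal.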
@@ -1749,7 +1749,8 @@ void databasesCron(void) {
  * a lot faster than calling time(NULL) */
 void updateCachedTime(void) {
     g_pserver->unixtime = time(NULL);
-    g_pserver->mstime = mstime();
+    long long ms = mstime();
+    __atomic_store(&g_pserver->mstime, &ms, __ATOMIC_RELAXED);
 
     /* To get information about daylight saving time, we need to call
      * localtime_r and cache the result. However calling localtime_r in this
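
g_pserver->mstime is written here on the cron path but read from other threads (incrementMvccTstamp below does an __atomic_load of it), so the plain assignment was a data race under the memory model; the store is now published atomically, relaxed ordering being sufficient for a cached clock value.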
@@ -4941,13 +4942,20 @@ int redisIsSupervised(int mode) {
 
 uint64_t getMvccTstamp()
 {
-    return g_pserver->mvcc_tstamp;
+    uint64_t rval;
+    __atomic_load(&g_pserver->mvcc_tstamp, &rval, __ATOMIC_ACQUIRE);
+    return rval;
 }
 
 void incrementMvccTstamp()
 {
-    uint64_t msPrev = g_pserver->mvcc_tstamp >> 20;
-    if (msPrev >= (uint64_t)g_pserver->mstime) // we can be greater if the count overflows
+    uint64_t msPrev;
+    __atomic_load(&g_pserver->mvcc_tstamp, &msPrev, __ATOMIC_ACQUIRE);
+    msPrev >>= 20;  // convert to milliseconds
+
+    long long mst;
+    __atomic_load(&g_pserver->mstime, &mst, __ATOMIC_RELAXED);
+    if (msPrev >= (uint64_t)mst)  // we can be greater if the count overflows
     {
         atomicIncr(g_pserver->mvcc_tstamp, 1);
     }