/*
 * Copyright (c) 2019, John Sully <john at eqalpha dot com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Redis nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "fmacros.h"
#include "fastlock.h"
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sched.h>
#include <atomic>
#include <assert.h>
#include <pthread.h>
#include <limits.h>
#include <map>
#ifdef __linux__
#include <linux/futex.h>
#include <sys/sysinfo.h>
#endif
#include <string.h>
#include <stdarg.h>
#include <stdio.h>
#include "config.h"
#include "serverassert.h"

#ifdef __APPLE__
#include <TargetConditionals.h>
#ifdef TARGET_OS_MAC
/* The CLANG that ships with Mac OS doesn't have these builtins,
    but on x86 they are just normal reads/writes anyways */
#define __atomic_load_4(ptr, csq) (*(reinterpret_cast<const volatile uint32_t*>(ptr)))
#define __atomic_load_2(ptr, csq) (*(reinterpret_cast<const volatile uint16_t*>(ptr)))
#define __atomic_store_4(ptr, val, csq) (*(reinterpret_cast<volatile uint32_t*>(ptr)) = val)
#endif
#endif
#ifndef UNUSED
#define UNUSED(x) ((void)x)
#endif

#ifdef HAVE_BACKTRACE
#include <ucontext.h>
__attribute__((weak)) void logStackTrace(ucontext_t *) {}
#endif

extern int g_fInCrash;
extern int g_fTestMode;
int g_fHighCpuPressure = false;
/****************************************************
 *
 *  Implementation of a fair spinlock.  To promote fairness we
 *  use a ticket lock instead of a raw spinlock.
 *
 ****************************************************/
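
/* A minimal sketch of the ticket-lock idea (illustrative only, not part of
 * the build): each waiter draws a unique ticket and spins until the "now
 * serving" counter reaches it, so the lock is granted in strict FIFO order
 * and no thread can be starved by luckier spinners.
 *
 *   std::atomic<uint16_t> avail{0};   // next ticket to hand out
 *   std::atomic<uint16_t> active{0};  // ticket currently being served
 *
 *   void lock()   { uint16_t me = avail.fetch_add(1);  // draw a ticket
 *                   while (active.load() != me) {}     // wait for our turn
 *                 }
 *   void unlock() { active.fetch_add(1); }             // serve the next ticket
 */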
#if !defined(__has_feature)
#define __has_feature(x) 0
#endif

#ifdef __linux__
extern "C" void unlock_futex(struct fastlock *lock, uint16_t ifutex);
#endif

#if __has_feature(thread_sanitizer)

/* Report that a lock has been created at address "lock". */
#define ANNOTATE_RWLOCK_CREATE(lock) \
    AnnotateRWLockCreate(__FILE__, __LINE__, lock)

/* Report that the lock at address "lock" is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock) \
    AnnotateRWLockDestroy(__FILE__, __LINE__, lock)

/* Report that the lock at address "lock" has been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
    AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)

/* Report that the lock at address "lock" is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
    AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)

#if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
#if defined(__GNUC__)
#define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
#else
/* TODO(glider): for Windows support we may want to change this macro in order
   to prepend __declspec(selectany) to the annotations' declarations. */
#error weak annotations are not supported for your compiler
#endif
#else
#define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
#endif

extern "C" {
void AnnotateRWLockCreate(
    const char *file, int line,
    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
void AnnotateRWLockDestroy(
    const char *file, int line,
    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
void AnnotateRWLockAcquired(
    const char *file, int line,
    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
void AnnotateRWLockReleased(
    const char *file, int line,
    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
}

#else

#define ANNOTATE_RWLOCK_CREATE(lock)
#define ANNOTATE_RWLOCK_DESTROY(lock)
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)

#endif
extern "C" __attribute__((weak)) void _serverPanic(const char * /*file*/, int /*line*/, const char * /*msg*/, ...)
{
    *((char*)-1) = 'x';
}

__attribute__((weak)) void serverLog(int, const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    vprintf(fmt, args);
    va_end(args);
    printf("\n");
}
extern "C" pid_t gettid()
{
    static thread_local int pidCache = -1;
#ifdef __linux__
    if (pidCache == -1)
        pidCache = syscall(SYS_gettid);
#else
    if (pidCache == -1) {
        uint64_t tidT;
        pthread_threadid_np(nullptr, &tidT);
        serverAssert(tidT < UINT_MAX);
        pidCache = (int)tidT;
    }
#endif
    return pidCache;
}

void printTrace()
{
#ifdef HAVE_BACKTRACE
    serverLog(3 /*LL_WARNING*/, "printing backtrace for thread %d", gettid());
    ucontext_t ctxt;
    getcontext(&ctxt);
    logStackTrace(&ctxt);
#endif
}

#ifdef __linux__
static int futex(volatile unsigned *uaddr, int futex_op, int val,
    const struct timespec *timeout, int val3)
{
    return syscall(SYS_futex, uaddr, futex_op, val,
                    timeout, uaddr, val3);
}
#endif
class DeadlockDetector
{
    fastlock m_lock { "deadlock detector" };    // destruct this first
    std::map<pid_t, fastlock*> m_mapwait;

public:
    void registerwait(fastlock *lock, pid_t thispid)
    {
        static volatile bool fInDeadlock = false;

        if (lock == &m_lock || g_fInCrash)
            return;
        fastlock_lock(&m_lock);

        if (fInDeadlock)
        {
            printTrace();
            fastlock_unlock(&m_lock);
            return;
        }

        m_mapwait.insert(std::make_pair(thispid, lock));

        // Detect cycles
        pid_t pidCheck = thispid;
        size_t cchecks = 0;
        for (;;)
        {
            auto itr = m_mapwait.find(pidCheck);
            if (itr == m_mapwait.end())
                break;
            __atomic_load(&itr->second->m_pidOwner, &pidCheck, __ATOMIC_RELAXED);
            if (pidCheck == thispid)
            {
                // Deadlock detected, print out some debugging info and crash
                serverLog(3 /*LL_WARNING*/, "\n\n");
                serverLog(3 /*LL_WARNING*/, "!!! ERROR: Deadlock detected !!!");
                pidCheck = thispid;
                for (;;)
                {
                    auto itr = m_mapwait.find(pidCheck);
                    serverLog(3 /*LL_WARNING*/, "\t%d: (%p) %s", pidCheck, itr->second, itr->second->szName);
                    __atomic_load(&itr->second->m_pidOwner, &pidCheck, __ATOMIC_RELAXED);
                    if (pidCheck == thispid)
                        break;
                }

                // Wake all sleeping threads so they can print their callstacks
#ifdef HAVE_BACKTRACE
#ifdef __linux__
                int mask = -1;
                fInDeadlock = true;
                fastlock_unlock(&m_lock);
                futex(&lock->m_ticket.u, FUTEX_WAKE_BITSET_PRIVATE, INT_MAX, nullptr, mask);
                futex(&itr->second->m_ticket.u, FUTEX_WAKE_BITSET_PRIVATE, INT_MAX, nullptr, mask);
                sleep(2);
                fastlock_lock(&m_lock);
                printTrace();
#endif
#endif

                serverLog(3 /*LL_WARNING*/, "!!! KeyDB Will Now Crash !!!");
                _serverPanic(__FILE__, __LINE__, "Deadlock detected");
            }

            if (cchecks > m_mapwait.size())
                break;  // There is a cycle but we're not in it
            ++cchecks;
        }
        fastlock_unlock(&m_lock);
    }

    void clearwait(fastlock *lock, pid_t thispid)
    {
        if (lock == &m_lock || g_fInCrash)
            return;
        fastlock_lock(&m_lock);
        m_mapwait.erase(thispid);
        fastlock_unlock(&m_lock);
    }
};

DeadlockDetector g_dlock;
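
/* How the cycle walk above detects a deadlock (an illustrative trace, not
 * extra code): m_mapwait is a wait-for graph keyed by thread id. Suppose
 * thread A holds lock L1 and blocks on L2 while thread B holds L2 and
 * blocks on L1:
 *
 *   m_mapwait = { A -> L2, B -> L1 }
 *
 * Starting at A, registerwait() follows A -> L2, reads L2's owner (B),
 * follows B -> L1, reads L1's owner (A), and lands back on the starting
 * thread: a cycle, so the chain is logged and the server panics. The
 * cchecks counter bounds the walk so a cycle that doesn't include the
 * caller can't loop forever. */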
static_assert(sizeof(pid_t) <= sizeof(fastlock::m_pidOwner), "fastlock::m_pidOwner not large enough");

uint64_t g_longwaits = 0;

extern "C" void fastlock_panic(struct fastlock *lock)
{
    _serverPanic(__FILE__, __LINE__, "fastlock lock/unlock mismatch for: %s", lock->szName);
}

uint64_t fastlock_getlongwaitcount()
{
    uint64_t rval;
    __atomic_load(&g_longwaits, &rval, __ATOMIC_RELAXED);
    return rval;
}
extern "C" void fastlock_sleep(fastlock *lock, pid_t pid, unsigned wake, unsigned mask)
{
#ifdef __linux__
    g_dlock.registerwait(lock, pid);
    __atomic_fetch_or(&lock->futex, mask, __ATOMIC_ACQUIRE);
    futex(&lock->m_ticket.u, FUTEX_WAIT_BITSET_PRIVATE, wake, nullptr, mask);
    __atomic_fetch_and(&lock->futex, ~mask, __ATOMIC_RELEASE);
    g_dlock.clearwait(lock, pid);
#endif
    __atomic_fetch_add(&g_longwaits, 1, __ATOMIC_RELAXED);
}
extern "C" void fastlock_init(struct fastlock *lock, const char *name)
{
    lock->m_ticket.m_active = 0;
    lock->m_ticket.m_avail = 0;
    lock->m_depth = 0;
    lock->m_pidOwner = -1;
    lock->futex = 0;
    int cch = strlen(name);
    cch = std::min<int>(cch, sizeof(lock->szName)-1);
    memcpy(lock->szName, name, cch);
    lock->szName[cch] = '\0';
    ANNOTATE_RWLOCK_CREATE(lock);
}
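
/* A minimal usage sketch (illustrative only, not part of the build). The
 * lock is recursive within a thread: a nested fastlock_lock() by the owner
 * just bumps m_depth, and every lock must be paired with an unlock.
 *
 *   struct fastlock lock;
 *   fastlock_init(&lock, "example");
 *   fastlock_lock(&lock);
 *   // ... critical section ...
 *   fastlock_unlock(&lock);
 *   fastlock_free(&lock);
 */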
#ifndef ASM_SPINLOCK
extern "C" void fastlock_lock(struct fastlock *lock)
{
    int pidOwner;
    __atomic_load(&lock->m_pidOwner, &pidOwner, __ATOMIC_ACQUIRE);
    if (pidOwner == gettid())
    {
        ++lock->m_depth;
        return;
    }

    int tid = gettid();
    unsigned myticket = __atomic_fetch_add(&lock->m_ticket.m_avail, 1, __ATOMIC_RELEASE);
    unsigned mask = (1U << (myticket % 32));
    unsigned cloops = 0;
    ticket ticketT;
    unsigned loopLimit = g_fHighCpuPressure ? 0x10000 : 0x100000;

    for (;;)
    {
        __atomic_load(&lock->m_ticket.u, &ticketT.u, __ATOMIC_ACQUIRE);
        if ((ticketT.u & 0xffff) == myticket)
            break;

#if defined(__i386__) || defined(__amd64__)
        __asm__ __volatile__ ("pause");
#elif defined(__aarch64__)
        __asm__ __volatile__ ("yield");
#endif
        if ((++cloops % loopLimit) == 0)
        {
            fastlock_sleep(lock, tid, ticketT.u, mask);
        }
    }

    lock->m_depth = 1;
    __atomic_store(&lock->m_pidOwner, &tid, __ATOMIC_RELEASE);
    ANNOTATE_RWLOCK_ACQUIRED(lock, true);
    std::atomic_thread_fence(std::memory_order_acquire);
}
extern "C" int fastlock_trylock(struct fastlock *lock, int fWeak)
{
    int tid;
    __atomic_load(&lock->m_pidOwner, &tid, __ATOMIC_ACQUIRE);
    if (tid == gettid())
    {
        ++lock->m_depth;
        return true;
    }

    // cheap test
    struct ticket ticketT;
    __atomic_load(&lock->m_ticket.u, &ticketT.u, __ATOMIC_ACQUIRE);
    if (ticketT.m_active != ticketT.m_avail)
        return false;

    uint16_t active = ticketT.m_active;
    uint16_t next = active + 1;

    struct ticket ticket_expect { { { active, active } } };
    struct ticket ticket_setiflocked { { { active, next } } };
    if (__atomic_compare_exchange(&lock->m_ticket.u, &ticket_expect.u, &ticket_setiflocked.u, fWeak /*weak*/, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    {
        lock->m_depth = 1;
        tid = gettid();
        __atomic_store(&lock->m_pidOwner, &tid, __ATOMIC_RELEASE);
        ANNOTATE_RWLOCK_ACQUIRED(lock, true);
        return true;
    }
    return false;
}
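
/* A sketch of a trylock caller (illustrative only). fWeak selects a weak
 * compare-exchange, which may fail spuriously but can be cheaper when the
 * caller has a fallback path anyway.
 *
 *   if (fastlock_trylock(&lock, 1))  // 1 = allow weak CAS
 *   {
 *       // ... critical section ...
 *       fastlock_unlock(&lock);
 *   }
 *   else
 *   {
 *       // contended: do something else and retry later
 *   }
 */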
extern "C" void fastlock_unlock(struct fastlock *lock)
{
    --lock->m_depth;
    if (lock->m_depth == 0)
    {
        int pidT;
        __atomic_load(&lock->m_pidOwner, &pidT, __ATOMIC_RELAXED);
        serverAssert(pidT >= 0);    // unlock after free
        int t = -1;
        __atomic_store(&lock->m_pidOwner, &t, __ATOMIC_RELEASE);
        std::atomic_thread_fence(std::memory_order_release);
        ANNOTATE_RWLOCK_RELEASED(lock, true);
        uint16_t activeNew = __atomic_add_fetch(&lock->m_ticket.m_active, 1, __ATOMIC_RELEASE);    // on x86 the atomic is not required here, but ASM handles that case
#ifdef __linux__
        unlock_futex(lock, activeNew);
#else
        UNUSED(activeNew);
#endif
    }
}
#endif
#ifdef __linux__
#define ROL32(v, shift) ((v << shift) | (v >> (32-shift)))
extern "C" void unlock_futex(struct fastlock *lock, uint16_t ifutex)
{
    unsigned mask = (1U << (ifutex % 32));
    unsigned futexT;
    for (;;)
    {
        __atomic_load(&lock->futex, &futexT, __ATOMIC_ACQUIRE);
        futexT &= mask;
        if (!futexT)
            break;
        if (futex(&lock->m_ticket.u, FUTEX_WAKE_BITSET_PRIVATE, INT_MAX, nullptr, mask) == 1)
            break;
    }
}
#endif
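
/* A note on the bitset scheme used by fastlock_sleep() and unlock_futex()
 * (my reading of the code above): before sleeping, a waiter publishes its
 * bit (1 << (ticket % 32)) into lock->futex and passes the same bit as the
 * FUTEX_WAIT_BITSET mask. On unlock, only waiters whose bit matches the
 * ticket now being served are woken, so a wake typically targets the one
 * thread whose turn it is rather than stampeding the whole wait queue
 * (tickets can collide mod 32, so occasionally a few extra threads wake).
 * The retry loop covers the race where a waiter sets its bit after we load
 * the word but before it is actually asleep in the kernel. */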
extern "C" void fastlock_free(struct fastlock *lock)
{
    // No memory is allocated by fastlock_init, so there is nothing to release;
    // just sanity check that the lock is not still in use
    serverAssert((lock->m_ticket.m_active == lock->m_ticket.m_avail)    // Assert the lock is unlocked
        || (lock->m_pidOwner == gettid()
            && (lock->m_ticket.m_active == static_cast<uint16_t>(lock->m_ticket.m_avail-1U))));  // OR we own the lock and nobody else is waiting
    lock->m_pidOwner = -2;  // sentinel value indicating free
    ANNOTATE_RWLOCK_DESTROY(lock);
}
bool fastlock::fOwnLock()
{
    int tid;
    __atomic_load(&m_pidOwner, &tid, __ATOMIC_RELAXED);
    return gettid() == tid;
}

int fastlock_unlock_recursive(struct fastlock *lock)
{
    int rval = lock->m_depth;
    lock->m_depth = 1;
    fastlock_unlock(lock);
    return rval;
}

void fastlock_lock_recursive(struct fastlock *lock, int nesting)
{
    fastlock_lock(lock);
    lock->m_depth = nesting;
}
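
/* A sketch of the intended use of the recursive helpers (the blocking call
 * is hypothetical): fully release a lock that may be held at arbitrary
 * nesting depth around an operation that must not hold it, then restore
 * the same depth afterwards.
 *
 *   int nesting = fastlock_unlock_recursive(&lock);  // release all levels
 *   doBlockingWork();                                // hypothetical call
 *   fastlock_lock_recursive(&lock, nesting);         // reacquire at same depth
 */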
void fastlock_auto_adjust_waits()
{
#ifdef __linux__
    struct sysinfo sysinf;
    auto fHighPressurePrev = g_fHighCpuPressure;
    memset(&sysinf, 0, sizeof sysinf);
    if (!sysinfo(&sysinf)) {
        auto avgCoreLoad = sysinf.loads[0] / get_nprocs();
        g_fHighCpuPressure = (avgCoreLoad > ((1 << SI_LOAD_SHIFT) * 0.9));
        if (g_fHighCpuPressure)
            serverLog(!fHighPressurePrev ? 3 /*LL_WARNING*/ : 1 /*LL_VERBOSE*/, "NOTICE: Detuning locks due to high load per core: %.2f%%", avgCoreLoad / (double)(1 << SI_LOAD_SHIFT) * 100.0);
    }
    if (!g_fHighCpuPressure && fHighPressurePrev) {
        serverLog(3 /*LL_WARNING*/, "NOTICE: CPU pressure reduced");
    }
#else
    g_fHighCpuPressure = g_fTestMode;
#endif
}
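
/* A worked example of the threshold above (assuming SI_LOAD_SHIFT == 16, as
 * on Linux): sysinfo() reports load averages in fixed point where
 * 1 << 16 == 65536 means a load of 1.0. On an 8-core machine with
 * loads[0] == 500000 the per-core load is 500000 / 8 == 62500, and
 * 62500 > 65536 * 0.9 (~58982), so g_fHighCpuPressure is set and the spin
 * limit in fastlock_lock() drops from 0x100000 to 0x10000 iterations. */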