Use the concurrentqueue for multithreaded load instead of a mutex and condition variables

Former-commit-id: d5a59113dbfedaf7b62a650cff58a2e8ec01826f
John Sully 2021-10-04 07:33:03 +00:00
parent 8a2f2bcb91
commit d29df021b1
2 changed files with 45 additions and 57 deletions
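
To make the diff below easier to follow, here is a minimal standalone sketch of the produce/consume pattern the commit switches to. It is not KeyDB code: only the moodycamel::BlockingConcurrentQueue calls (enqueue_bulk, wait_dequeue_bulk_timed, size_approx) and the batch size of 64 come from the diff; the int payload and thread scaffolding are illustrative.

#include <blockingconcurrentqueue.h>   // header-only moodycamel library
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
    moodycamel::BlockingConcurrentQueue<int*> queueJobs;
    std::atomic<bool> fExit {false};

    // Consumer: drain up to 64 jobs per wakeup with a 5ms timed wait,
    // no mutex or condition variable on the hot path.
    std::thread worker([&] {
        int *rgjob[64];
        for (;;) {
            size_t cjobs = queueJobs.wait_dequeue_bulk_timed(
                rgjob, 64, std::chrono::milliseconds(5));
            for (size_t i = 0; i < cjobs; ++i)
                delete rgjob[i];
            // Exit only once the producer is done AND the queue is drained.
            if (cjobs == 0 && fExit.load(std::memory_order_seq_cst)
                           && queueJobs.size_approx() == 0)
                break;
        }
    });

    // Producer: stage jobs locally and publish them 64 at a time.
    std::vector<int*> vecbatch;
    for (int i = 0; i < 1000; ++i) {
        vecbatch.push_back(new int(i));
        if (vecbatch.size() >= 64) {
            queueJobs.enqueue_bulk(vecbatch.data(), vecbatch.size());
            vecbatch.clear();
        }
    }
    if (!vecbatch.empty())  // flush the partial batch, as endWork() does
        queueJobs.enqueue_bulk(vecbatch.data(), vecbatch.size());
    fExit.store(true, std::memory_order_seq_cst);

    worker.join();
    puts("all jobs processed");
    return 0;
}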

@@ -2639,17 +2639,15 @@ class rdbAsyncWorkThread
 {
     rdbSaveInfo *rsi;
     int rdbflags;
-    list *listJobs;
-    std::mutex mutex;
-    std::condition_variable cv;
-    std::condition_variable cvThrottle;
+    moodycamel::BlockingConcurrentQueue<JobBase*> queueJobs;
     fastlock m_lockPause { "rdbAsyncWork-Pause"};
     bool fLaunched = false;
-    bool fExit = false;
+    std::atomic<int> fExit {false};
     std::atomic<size_t> ckeysLoaded;
     std::atomic<int> cstorageWritesInFlight;
     std::atomic<bool> workerThreadDone;
     std::thread m_thread;
+    std::vector<JobBase*> vecbatch;

     long long now;
     long long lastPing = -1;
@@ -2664,14 +2662,11 @@ public:
     {
         ckeysLoaded = 0;
         cstorageWritesInFlight = 0;
-        listJobs = listCreate();
-        listSetFreeMethod(listJobs, listFreeMethod);
     }

     ~rdbAsyncWorkThread() {
         if (m_thread.joinable())
             endWork();
-        listRelease(listJobs);
     }

     void start() {
@@ -2680,26 +2675,24 @@ public:
         fLaunched = true;
     }

-    void throttle(std::unique_lock<std::mutex> &l) {
-        if (listLength(listJobs) > 0 && (listLength(listJobs) % 1024 == 0) && (getMaxmemoryState(NULL,NULL,NULL,NULL) != C_OK)) {
-            cvThrottle.wait(l);
-            while (cstorageWritesInFlight.load(std::memory_order_relaxed) && (getMaxmemoryState(NULL,NULL,NULL,NULL) != C_OK)) {
-                l.unlock();
+    void throttle() {
+        if (g_pserver->m_pstorageFactory && (getMaxmemoryState(NULL,NULL,NULL,NULL) != C_OK)) {
+            while ((cstorageWritesInFlight.load(std::memory_order_relaxed) || queueJobs.size_approx()) && (getMaxmemoryState(NULL,NULL,NULL,NULL) != C_OK)) {
                 usleep(1);
                 pauseExecution();
                 ProcessWhileBlocked();
                 resumeExecution();
-                l.lock();
             }
         }
     }

     void enqueue(std::unique_ptr<rdbInsertJob> &spjob) {
-        std::unique_lock<std::mutex> l(mutex);
-        throttle(l);
-        listAddNodeTail(listJobs, spjob.release());
-        if (listLength(listJobs) == 1)
-            cv.notify_one();
+        vecbatch.push_back(spjob.release());
+        if (vecbatch.size() >= 64) {
+            queueJobs.enqueue_bulk(vecbatch.data(), vecbatch.size());
+            vecbatch.clear();
+            throttle();
+        }
     }

     void pauseExecution() {
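
Worth noting in the new enqueue(): jobs are staged in vecbatch and published 64 at a time with enqueue_bulk, so the throttle() memory-pressure check now runs once per flushed batch rather than once per key, and up to 63 staged jobs stay invisible to the worker until the next flush (endWork() below flushes the remainder).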
@@ -2711,12 +2704,9 @@ public:
     }

     void enqueue(std::function<void()> &&fn) {
-        JobBase *pjob = new rdbFunctionJob(std::move(fn));
-        std::unique_lock<std::mutex> l(mutex);
-        throttle(l);
-        listAddNodeTail(listJobs, pjob);
-        if (listLength(listJobs) == 1)
-            cv.notify_one();
+        std::unique_ptr<JobBase> spjob = std::make_unique<rdbFunctionJob>(std::move(fn));
+        queueJobs.enqueue(spjob.release());
+        throttle();
     }

     void ProcessWhileBlocked() {
@ -2739,11 +2729,13 @@ public:
size_t ckeys() { return ckeysLoaded; } size_t ckeys() { return ckeysLoaded; }
size_t endWork() { size_t endWork() {
std::unique_lock<std::mutex> l(mutex); if (!vecbatch.empty()) {
queueJobs.enqueue_bulk(vecbatch.data(), vecbatch.size());
vecbatch.clear();
}
std::atomic_thread_fence(std::memory_order_seq_cst); // The queue must have transferred to the consumer before we call fExit
serverAssert(fLaunched); serverAssert(fLaunched);
fExit = true; fExit = true;
cv.notify_one();
l.unlock();
if (g_pserver->m_pstorageFactory) { if (g_pserver->m_pstorageFactory) {
// If we have a storage provider it can take some time to complete and we want to process events in the meantime // If we have a storage provider it can take some time to complete and we want to process events in the meantime
while (!workerThreadDone) { while (!workerThreadDone) {
@@ -2760,7 +2752,7 @@ public:
             }
         }
         fLaunched = false;
         fExit = false;
-        serverAssert(listLength(listJobs) == 0);
+        serverAssert(queueJobs.size_approx() == 0);
         return ckeysLoaded;
     }
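
The shutdown ordering in endWork() is deliberate: the partial vecbatch is flushed first, the seq_cst fence makes those enqueues visible, and only then is fExit raised, so the worker can never observe fExit while flushed jobs are still invisible to it.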
@@ -2863,40 +2855,35 @@ public:
     }

     for (;;) {
-        std::unique_lock<std::mutex> lock(queue.mutex);
-        if (listLength(queue.listJobs) == 0) {
-            if (queue.fExit)
-                break;
-            queue.cv.wait(lock);
-            if (listLength(queue.listJobs) == 0 && queue.fExit)
-                break;
-        }
-        pqueue->cvThrottle.notify_one();
-
-        list *listJobs = queue.listJobs;
-        queue.listJobs = listCreate();
-        listSetFreeMethod(queue.listJobs, listFreeMethod);
-        lock.unlock();
+        if (queue.queueJobs.size_approx() == 0) {
+            if (queue.fExit.load(std::memory_order_relaxed))
+                break;
+        }
+        if (queue.fExit.load(std::memory_order_seq_cst) && queue.queueJobs.size_approx() == 0)
+            break;

         vars.gcEpoch = g_pserver->garbageCollector.startEpoch();
-        while (listLength(listJobs)) {
+        JobBase *rgjob[64];
+        int cjobs = 0;
+        while ((cjobs = pqueue->queueJobs.wait_dequeue_bulk_timed(rgjob, 64, std::chrono::milliseconds(5))) > 0) {
             std::unique_lock<fastlock> ulPause(pqueue->m_lockPause);
-            JobBase *pjobBase = ((JobBase*)listNodeValue(listFirst(listJobs)));
-            switch (pjobBase->type)
-            {
-            case JobBase::JobType::Insert:
-                pqueue->processJob(*static_cast<rdbInsertJob*>(pjobBase));
-                break;
-            case JobBase::JobType::Function:
-                static_cast<rdbFunctionJob*>(pjobBase)->m_fn();
-                break;
+            for (int ijob = 0; ijob < cjobs; ++ijob) {
+                JobBase *pjob = rgjob[ijob];
+                switch (pjob->type)
+                {
+                case JobBase::JobType::Insert:
+                    pqueue->processJob(*static_cast<rdbInsertJob*>(pjob));
+                    break;
+                case JobBase::JobType::Function:
+                    static_cast<rdbFunctionJob*>(pjob)->m_fn();
+                    break;
+                }
+                delete pjob;
             }
-            // Pop from the list
-            listDelNode(listJobs, listFirst(listJobs));
         }
-        listRelease(listJobs);
         g_pserver->garbageCollector.endEpoch(vars.gcEpoch);
     }
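
Two design points in the new worker loop: wait_dequeue_bulk_timed pops up to 64 jobs at once with a 5 ms timeout, so an idle worker polls briefly rather than parking on a condition variable, and m_lockPause is now acquired once per batch instead of once per job.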
@@ -2906,8 +2893,6 @@ public:
     }

     queue.workerThreadDone = true;
-    std::unique_lock<std::mutex> lock(queue.mutex);
-    serverAssert(listLength(queue.listJobs) == 0);
     ProcessPendingAsyncWrites();
     listRelease(vars.clients_pending_asyncwrite);
     aeSetThreadOwnsLockOverride(false);

@@ -39,6 +39,9 @@
 #include "rio.h"
 #include "atomicvar.h"
+#include <concurrentqueue.h>
+#include <blockingconcurrentqueue.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <cmath>
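
Both new includes come from the header-only moodycamel library: concurrentqueue.h defines moodycamel::ConcurrentQueue, and blockingconcurrentqueue.h builds the BlockingConcurrentQueue used above, with its wait_dequeue* operations, on top of it.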