#pragma once

#include <condition_variable>
#include "fastlock.h"   /* fastlock spinlock; assumed to sit alongside this header in the source tree */

/* serverAssert() is expected to be provided by the including translation unit (e.g. server.h). */

/* A reader/writer lock built from two fastlocks and a condition variable: any number of
   readers may hold it at once, while a writer waits for readers to drain and, when
   exclusive, also excludes other writers. */
class readWriteLock {
    fastlock m_readLock;                // guards the shared state below
    fastlock m_writeLock;               // held for the duration of an exclusive write
    std::condition_variable_any m_cv;   // signalled whenever readers or writers change
    int m_readCount = 0;                // number of threads currently holding the read lock
    int m_writeCount = 0;               // number of threads currently holding the write lock
    bool m_writeWaiting = false;        // a writer is waiting; new readers must hold off

public:

    readWriteLock(const char *name) : m_readLock(name), m_writeLock(name) {}

    /* Block until no writer holds or is waiting for the lock, then register as a reader */
    void acquireRead() {
        std::unique_lock<fastlock> rm(m_readLock);
        while (m_writeCount > 0 || m_writeWaiting)
            m_cv.wait(rm);
        m_readCount++;
    }

    /* Non-blocking variant of acquireRead(): fails instead of waiting */
    bool tryAcquireRead() {
        std::unique_lock<fastlock> rm(m_readLock, std::defer_lock);
        if (!rm.try_lock())
            return false;
        if (m_writeCount > 0 || m_writeWaiting)
            return false;
        m_readCount++;
        return true;
    }

    /* Block until all readers have drained, then register as a writer.
       When exclusive, also take m_writeLock so only one writer can proceed at a time. */
    void acquireWrite(bool exclusive = true) {
        std::unique_lock<fastlock> rm(m_readLock);
        m_writeWaiting = true;
        while (m_readCount > 0)
            m_cv.wait(rm);
        if (exclusive) {
            /* Another thread might have the write lock while we have the read lock,
               but it won't be able to release it until it can acquire the read lock,
               so release the read lock and try again instead of waiting, to avoid deadlock */
            while (!m_writeLock.try_lock())
                m_cv.wait(rm);
        }
        m_writeCount++;
        m_writeWaiting = false;
    }

    /* Upgrade a read hold to a write hold. Not atomic: the read lock is released
       before the write lock is acquired, so another writer may run in between. */
    void upgradeWrite(bool exclusive = true) {
        releaseRead();
        acquireWrite(exclusive);
    }

    /* Non-blocking variant of acquireWrite(): fails if any reader holds the lock
       or, when exclusive, if the write lock cannot be taken immediately */
    bool tryAcquireWrite(bool exclusive = true) {
        std::unique_lock<fastlock> rm(m_readLock, std::defer_lock);
        if (!rm.try_lock())
            return false;
        if (m_readCount > 0)
            return false;
        if (exclusive)
            if (!m_writeLock.try_lock())
                return false;
        m_writeCount++;
        return true;
    }

    /* Drop a read hold and wake anyone waiting on the state change */
    void releaseRead() {
        std::unique_lock<fastlock> rm(m_readLock);
        m_readCount--;
        m_cv.notify_all();
    }

    /* Drop a write hold and wake anyone waiting on the state change */
    void releaseWrite(bool exclusive = true) {
        std::unique_lock<fastlock> rm(m_readLock);
        serverAssert(m_writeCount > 0);
        if (exclusive)
            m_writeLock.unlock();
        m_writeCount--;
        m_cv.notify_all();
    }

    /* Like releaseWrite(), but also clears the write-waiting flag and does not notify waiters */
    void releaseWriteChild(bool exclusive = true) {
        std::unique_lock<fastlock> rm(m_readLock);
        serverAssert(m_writeCount > 0);
        if (exclusive)
            m_writeLock.unlock();
        m_writeCount--;
        m_writeWaiting = false;
    }

    /* Downgrade a write hold to a read hold. Not atomic: the write lock is released
       before the read lock is reacquired. */
    void downgradeWrite(bool exclusive = true) {
        releaseWrite(exclusive);
        acquireRead();
    }

    void downgradeWriteChild(bool exclusive = true) {
        releaseWriteChild(exclusive);
        acquireRead();
    }

    bool hasReader() {
        return m_readCount > 0;
    }

    bool hasWriter() {
        return m_writeCount > 0;
    }

    bool writeWaiting() {
        return m_writeWaiting;
    }
};
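
/* Usage sketch (illustrative only; "rwlock" and the surrounding threads are assumptions,
   not part of this header):

       readWriteLock rwlock("example");

       // Reader thread: many readers may hold the lock concurrently
       rwlock.acquireRead();
       // ... read shared state ...
       rwlock.releaseRead();

       // Writer thread: waits for readers to drain, then has exclusive access
       rwlock.acquireWrite();
       // ... modify shared state ...
       rwlock.releaseWrite();

       // A reader may upgrade (non-atomically) and later downgrade back
       rwlock.acquireRead();
       rwlock.upgradeWrite();
       // ... modify shared state ...
       rwlock.downgradeWrite();
       rwlock.releaseRead();
*/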