Fix crash in RDB save
Former-commit-id: b032809b3e978fe571b791179d32ecdc9c067045
parent e15f035bfb
commit 0b6a66ca55

Changed files:
    src/gc.h       6
    src/server.h  31
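In brief: the GC's epoch-bookkeeping checks are upgraded from assert() to serverAssert() so they also fire in release builds; CPtrCollectable gains a move constructor and a virtual destructor so that ownership of m_pv transfers cleanly instead of being double-freed; Epoch gains guarded copy/assignment; and GarbageCollectorCollection::endEpoch() now resets the caller's epoch before running the queued destructors, which can themselves enqueue more data. The defrag tests' expected fragmentation threshold is also relaxed from 1.7 to 1.5.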
--- a/src/gc.h
+++ b/src/gc.h
@@ -52,7 +52,7 @@ public:
     void endEpoch(uint64_t epoch, bool fNoFree = false)
     {
         std::unique_lock<fastlock> lock(m_lock);
-        assert(m_setepochOutstanding.find(epoch) != m_setepochOutstanding.end());
+        serverAssert(m_setepochOutstanding.find(epoch) != m_setepochOutstanding.end());
         bool fMinElement = *std::min_element(m_setepochOutstanding.begin(), m_setepochOutstanding.end());
         m_setepochOutstanding.erase(epoch);
         if (fNoFree)
@@ -91,8 +91,8 @@ public:
     void enqueue(uint64_t epoch, std::unique_ptr<T> &&sp)
     {
         std::unique_lock<fastlock> lock(m_lock);
-        assert(m_setepochOutstanding.find(epoch) != m_setepochOutstanding.end());
-        assert(sp->FWillFreeChildDebug() == false);
+        serverAssert(m_setepochOutstanding.find(epoch) != m_setepochOutstanding.end());
+        serverAssert(sp->FWillFreeChildDebug() == false);
 
         auto itr = std::find(m_vecepochs.begin(), m_vecepochs.end(), m_epochNext+1);
         if (itr == m_vecepochs.end())
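Commentary (not part of the diff): both hunks swap the standard assert() for serverAssert(). assert() compiles to nothing when NDEBUG is defined, so release builds silently skipped these epoch-bookkeeping checks; serverAssert() is KeyDB's always-on assertion, which reports the failed expression and location before aborting. A minimal sketch of the distinction, assuming a simplified panic handler (panicStub is hypothetical; the real macro lives in src/server.h and also dumps a stack trace):

// Sketch only: panicStub stands in for KeyDB's _serverAssert() machinery.
#include <cstdio>
#include <cstdlib>

[[noreturn]] static void panicStub(const char *expr, const char *file, int line) {
    std::fprintf(stderr, "=== ASSERTION FAILED === %s (%s:%d)\n", expr, file, line);
    std::abort();
}

// Unlike <cassert>'s assert(), this check is never compiled out under NDEBUG.
#define serverAssertSketch(_e) ((_e) ? (void)0 : panicStub(#_e, __FILE__, __LINE__))

int main() {
    serverAssertSketch(1 + 1 == 2);  // passes; a false expression would abort
}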
--- a/src/server.h
+++ b/src/server.h
@@ -1794,7 +1794,13 @@ class GarbageCollectorCollection
     CPtrCollectable(void *pv)
         : m_pv(pv)
         {}
-    ~CPtrCollectable() {
+
+    CPtrCollectable(CPtrCollectable &&move) {
+        m_pv = move.m_pv;
+        move.m_pv = nullptr;
+    }
+
+    virtual ~CPtrCollectable() {
         zfree(m_pv);
     }
 };
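Commentary (not part of the diff): CPtrCollectable previously had only the implicit copy operations, so passing one by value left two objects pointing at the same m_pv, and the second destructor double-freed it via zfree(). The new move constructor transfers ownership and nulls the source, and the destructor becomes virtual so that deleting through a base-class pointer (the collectables are presumably held polymorphically by the GC) runs the derived destructor. A self-contained sketch of the pattern, assuming std::malloc/std::free in place of KeyDB's zmalloc/zfree (PtrHolder is an illustrative name):

#include <cstdlib>
#include <utility>

class PtrHolder {
    void *m_pv = nullptr;
public:
    explicit PtrHolder(void *pv) : m_pv(pv) {}
    PtrHolder(PtrHolder &&move) noexcept : m_pv(move.m_pv) {
        move.m_pv = nullptr;                // source gives up ownership
    }
    PtrHolder(const PtrHolder &) = delete;  // a copy would double-free m_pv
    virtual ~PtrHolder() { std::free(m_pv); }  // std::free(nullptr) is a no-op
};

int main() {
    PtrHolder a(std::malloc(64));
    PtrHolder b(std::move(a));  // ownership moves; a's destructor frees nothing
}

(Deleting the copy constructor here makes the single-owner intent explicit; the real class simply relies on callers moving.)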
@@ -1810,6 +1816,20 @@ public:
         epochGeneric = 0;
     }
 
+    Epoch() = default;
+
+    Epoch (const Epoch &other) {
+        epochSnapshot = other.epochSnapshot;
+        epochGeneric = other.epochGeneric;
+    }
+
+    Epoch &operator=(const Epoch &other) {
+        serverAssert(isReset());
+        epochSnapshot = other.epochSnapshot;
+        epochGeneric = other.epochGeneric;
+        return *this;
+    }
+
     bool isReset() const {
         return epochSnapshot == 0 && epochGeneric == 0;
     }
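Commentary (not part of the diff): the new copy-assignment operator refuses to overwrite an Epoch that is still live (serverAssert(isReset())), so a caller that abandons an epoch without ending it now fails loudly instead of silently leaking GC state. A minimal sketch of the invariant, using <cassert> in place of serverAssert (EpochSketch is an illustrative name):

#include <cassert>
#include <cstdint>

struct EpochSketch {
    uint64_t epochSnapshot = 0;
    uint64_t epochGeneric = 0;
    bool isReset() const { return epochSnapshot == 0 && epochGeneric == 0; }
    void reset() { epochSnapshot = 0; epochGeneric = 0; }
    EpochSketch &operator=(const EpochSketch &other) {
        assert(isReset());  // overwriting a live epoch would leak GC state
        epochSnapshot = other.epochSnapshot;
        epochGeneric = other.epochGeneric;
        return *this;
    }
};

int main() {
    EpochSketch live, next;
    live.epochSnapshot = 42;  // pretend startEpoch() handed this out
    live.reset();             // the epoch must be ended/reset before reuse...
    live = next;              // ...otherwise this assignment would assert
}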
@@ -1823,10 +1843,13 @@ public:
         return e;
     }
 
-    void endEpoch(Epoch e, bool fNoFree = false)
+    void endEpoch(Epoch &e, bool fNoFree = false)
     {
-        garbageCollectorSnapshot.endEpoch(e.epochSnapshot, fNoFree);
-        garbageCollectorGeneric.endEpoch(e.epochGeneric, fNoFree);
+        auto epochSnapshot = e.epochSnapshot;
+        auto epochGeneric = e.epochGeneric;
+        e.reset();  // We must do this early as GC'd dtors can themselves try to enqueue more data
+        garbageCollectorSnapshot.endEpoch(epochSnapshot, fNoFree);
+        garbageCollectorGeneric.endEpoch(epochGeneric, fNoFree);
     }
 
     void shutdown()
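Commentary (not part of the diff): this hunk is the heart of the fix. endEpoch() now takes the Epoch by reference, copies out the two epoch ids, and resets the caller's handle before the per-collector endEpoch() calls run. As the in-diff comment says, the destructors run by the GC can themselves try to enqueue more data, i.e. re-enter the collector while the old epoch still looks live. A heavily simplified, hypothetical sketch of that reentrancy hazard (all names illustrative):

#include <cstdint>
#include <functional>
#include <vector>

struct Epoch {
    uint64_t id = 0;
    void reset() { id = 0; }
};

std::vector<std::function<void()>> g_deferred;  // work queued under the epoch

void endEpoch(Epoch &e) {
    e.reset();                          // clear the caller's handle FIRST, and
    auto work = std::move(g_deferred);  // detach the queue, because the queued
    g_deferred.clear();
    for (auto &fn : work) fn();         // destructors may re-enter the collector
}

int main() {
    Epoch e;
    e.id = 1;
    g_deferred.push_back([] {
        g_deferred.push_back([] {});    // a GC'd dtor enqueuing more garbage
    });
    endEpoch(e);
}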
--- a/tests/unit/memefficiency.tcl
+++ b/tests/unit/memefficiency.tcl
@@ -154,7 +154,7 @@ start_server {tags {"defrag"} overrides {server-threads 1} } {
         $rd read ; # Discard replies
     }
 
-    set expected_frag 1.7
+    set expected_frag 1.5
     if {$::accurate} {
         # scale the hash to 1m fields in order to have a measurable the latency
         for {set j 10000} {$j < 1000000} {incr j} {
@@ -265,7 +265,7 @@ start_server {tags {"defrag"} overrides {server-threads 1} } {
     # create big keys with 10k items
     set rd [redis_deferring_client]
 
-    set expected_frag 1.7
+    set expected_frag 1.5
     # add a mass of list nodes to two lists (allocations are interlaced)
     set val [string repeat A 100] ;# 5 items of 100 bytes puts us in the 640 bytes bin, which has 32 regs, so high potential for fragmentation
     set elements 500000
@@ -543,4 +543,5 @@ start_server {tags {"defrag"} overrides {server-threads 1 active-replica yes} }
         } {OK}
     }
 }
+} ;# run solo
 } ;# run solo