diff --git a/src/gc.h b/src/gc.h
index 4715bc8de..5d92e38cb 100644
--- a/src/gc.h
+++ b/src/gc.h
@@ -52,7 +52,7 @@ public:
     void endEpoch(uint64_t epoch, bool fNoFree = false)
     {
         std::unique_lock lock(m_lock);
-        assert(m_setepochOutstanding.find(epoch) != m_setepochOutstanding.end());
+        serverAssert(m_setepochOutstanding.find(epoch) != m_setepochOutstanding.end());
         bool fMinElement = *std::min_element(m_setepochOutstanding.begin(), m_setepochOutstanding.end());
         m_setepochOutstanding.erase(epoch);
         if (fNoFree)
@@ -91,8 +91,8 @@ public:
     void enqueue(uint64_t epoch, std::unique_ptr &&sp)
     {
         std::unique_lock lock(m_lock);
-        assert(m_setepochOutstanding.find(epoch) != m_setepochOutstanding.end());
-        assert(sp->FWillFreeChildDebug() == false);
+        serverAssert(m_setepochOutstanding.find(epoch) != m_setepochOutstanding.end());
+        serverAssert(sp->FWillFreeChildDebug() == false);
 
         auto itr = std::find(m_vecepochs.begin(), m_vecepochs.end(), m_epochNext+1);
         if (itr == m_vecepochs.end())
diff --git a/src/server.h b/src/server.h
index 3612d93f0..1fbb77c60 100644
--- a/src/server.h
+++ b/src/server.h
@@ -1794,7 +1794,13 @@ class GarbageCollectorCollection
         CPtrCollectable(void *pv)
             : m_pv(pv)
             {}
-        ~CPtrCollectable() {
+
+        CPtrCollectable(CPtrCollectable &&move) {
+            m_pv = move.m_pv;
+            move.m_pv = nullptr;
+        }
+
+        virtual ~CPtrCollectable() {
             zfree(m_pv);
         }
     };
@@ -1810,6 +1816,20 @@ public:
             epochGeneric = 0;
         }
 
+        Epoch() = default;
+
+        Epoch (const Epoch &other) {
+            epochSnapshot = other.epochSnapshot;
+            epochGeneric = other.epochGeneric;
+        }
+
+        Epoch &operator=(const Epoch &other) {
+            serverAssert(isReset());
+            epochSnapshot = other.epochSnapshot;
+            epochGeneric = other.epochGeneric;
+            return *this;
+        }
+
         bool isReset() const {
             return epochSnapshot == 0 && epochGeneric == 0;
         }
@@ -1823,10 +1843,13 @@ public:
         return e;
     }
 
-    void endEpoch(Epoch e, bool fNoFree = false)
+    void endEpoch(Epoch &e, bool fNoFree = false)
     {
-        garbageCollectorSnapshot.endEpoch(e.epochSnapshot, fNoFree);
-        garbageCollectorGeneric.endEpoch(e.epochGeneric, fNoFree);
+        auto epochSnapshot = e.epochSnapshot;
+        auto epochGeneric = e.epochGeneric;
+        e.reset(); // We must do this early as GC'd dtors can themselves try to enqueue more data
+        garbageCollectorSnapshot.endEpoch(epochSnapshot, fNoFree);
+        garbageCollectorGeneric.endEpoch(epochGeneric, fNoFree);
     }
 
     void shutdown()
diff --git a/tests/unit/memefficiency.tcl b/tests/unit/memefficiency.tcl
index c0a6ec4d7..5bf69787b 100644
--- a/tests/unit/memefficiency.tcl
+++ b/tests/unit/memefficiency.tcl
@@ -154,7 +154,7 @@ start_server {tags {"defrag"} overrides {server-threads 1} } {
            $rd read ; # Discard replies
        }
 
-        set expected_frag 1.7
+        set expected_frag 1.5
        if {$::accurate} {
            # scale the hash to 1m fields in order to have a measurable the latency
            for {set j 10000} {$j < 1000000} {incr j} {
@@ -265,7 +265,7 @@ start_server {tags {"defrag"} overrides {server-threads 1} } {
            # create big keys with 10k items
            set rd [redis_deferring_client]
 
-            set expected_frag 1.7
+            set expected_frag 1.5
            # add a mass of list nodes to two lists (allocations are interlaced)
            set val [string repeat A 100] ;# 5 items of 100 bytes puts us in the 640 bytes bin, which has 32 regs, so high potential for fragmentation
            set elements 500000
@@ -543,4 +543,5 @@ start_server {tags {"defrag"} overrides {server-threads 1 active-replica yes} } {
            } {OK}
        }
    }
-} ;# run solo
\ No newline at end of file
+} ;# run solo
+