diff --git a/include/rapidjson/allocators.h b/include/rapidjson/allocators.h
index 05e3b0c..8cde8f4 100644
--- a/include/rapidjson/allocators.h
+++ b/include/rapidjson/allocators.h
@@ -194,6 +194,9 @@ public:
         if (newSize == 0)
             return NULL;
 
+        originalSize = RAPIDJSON_ALIGN(originalSize);
+        newSize = RAPIDJSON_ALIGN(newSize);
+
         // Do not shrink if new size is smaller than original
         if (originalSize >= newSize)
             return originalPtr;
@@ -201,7 +204,6 @@ public:
         // Simply expand it if it is the last allocation and there is sufficient space
         if (originalPtr == reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) {
             size_t increment = static_cast<size_t>(newSize - originalSize);
-            increment = RAPIDJSON_ALIGN(increment);
             if (chunkHead_->size + increment <= chunkHead_->capacity) {
                 chunkHead_->size += increment;
                 return originalPtr;
diff --git a/test/unittest/allocatorstest.cpp b/test/unittest/allocatorstest.cpp
index f70e672..a5958de 100644
--- a/test/unittest/allocatorstest.cpp
+++ b/test/unittest/allocatorstest.cpp
@@ -81,3 +81,22 @@ TEST(Allocator, Alignment) {
     }
 #endif
 }
+
+TEST(Allocator, Issue399) {
+    MemoryPoolAllocator<> a;
+    void* p = a.Malloc(100);
+    void* q = a.Realloc(p, 100, 200);
+    EXPECT_EQ(p, q);
+
+    // exhaustive testing
+    for (size_t j = 1; j < 32; j++) {
+        a.Clear();
+        a.Malloc(j); // some unaligned size
+        p = a.Malloc(1);
+        for (size_t i = 1; i < 1024; i++) {
+            q = a.Realloc(p, i, i + 1);
+            EXPECT_EQ(p, q);
+            p = q;
+        }
+    }
+}
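
A minimal sketch (not part of the patch) of the behavior the Issue399 test exercises: once both `originalSize` and `newSize` are rounded up with `RAPIDJSON_ALIGN` inside `Realloc`, growing the most recent allocation by a small amount stays within its aligned slot or extends the chunk in place, so the pointer is expected to stay stable. Assumes the default `MemoryPoolAllocator<>` and the stock `RAPIDJSON_ALIGN` (8-byte alignment by default).

```cpp
#include "rapidjson/allocators.h"
#include <cassert>

int main() {
    rapidjson::MemoryPoolAllocator<> a;
    void* p = a.Malloc(1);         // requested 1 byte; internally aligned up (8 bytes by default)
    void* q = a.Realloc(p, 1, 2);  // 1 -> 2 still fits in the aligned slot, so no move is expected
    assert(p == q);
    return 0;
}
```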