Provide StdAllocator, an STL-compatible allocator, for use with STL types.

ylavic 2021-03-12 15:16:02 +01:00
parent d51dd2d0e9
commit 49e4dd619f
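
As a quick illustration of what the new StdAllocator enables, here is a minimal usage sketch (hypothetical example code, not taken from the commit), plugging the allocator into a standard container both with the default CrtAllocator and with a MemoryPoolAllocator pool:

    #include <vector>
    #include "rapidjson/allocators.h"

    int main() {
        // Default base allocator (CrtAllocator): behaves much like std::allocator.
        std::vector<int, rapidjson::StdAllocator<int, rapidjson::CrtAllocator> > v;
        v.push_back(42);

        // Pool-backed: element allocations are served from the MemoryPoolAllocator.
        rapidjson::MemoryPoolAllocator<> pool;
        typedef rapidjson::StdAllocator<int, rapidjson::MemoryPoolAllocator<> > PoolAlloc;
        PoolAlloc alloc(pool);                  // copies share the refcounted pool
        std::vector<int, PoolAlloc> pv(alloc);
        pv.push_back(1);
        return 0;
    }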

include/rapidjson/allocators.h

@@ -17,6 +17,8 @@
#include "rapidjson.h"
#include <memory>
RAPIDJSON_NAMESPACE_BEGIN
///////////////////////////////////////////////////////////////////////////////
@@ -89,7 +91,14 @@ public:
}
return RAPIDJSON_REALLOC(originalPtr, newSize);
}
static void Free(void *ptr) { RAPIDJSON_FREE(ptr); }
static void Free(void *ptr) RAPIDJSON_NOEXCEPT { RAPIDJSON_FREE(ptr); }
bool operator==(const CrtAllocator&) const RAPIDJSON_NOEXCEPT {
return true;
}
bool operator!=(const CrtAllocator&) const RAPIDJSON_NOEXCEPT {
return false;
}
};
///////////////////////////////////////////////////////////////////////////////
@@ -113,6 +122,36 @@ public:
*/
template <typename BaseAllocator = CrtAllocator>
class MemoryPoolAllocator {
//! Chunk header for prepending to each chunk.
/*! Chunks are stored as a singly linked list.
*/
struct ChunkHeader {
size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself).
size_t size; //!< Current size of allocated memory in bytes.
ChunkHeader *next; //!< Next chunk in the linked list.
};
struct SharedData {
ChunkHeader *chunkHead; //!< Head of the chunk linked-list. Only the head chunk serves allocation.
BaseAllocator* ownBaseAllocator; //!< base allocator created by this object.
size_t refcount;
bool ownBuffer;
};
static const size_t SIZEOF_SHARED_DATA = RAPIDJSON_ALIGN(sizeof(SharedData));
static const size_t SIZEOF_CHUNK_HEADER = RAPIDJSON_ALIGN(sizeof(ChunkHeader));
static inline ChunkHeader *GetChunkHead(SharedData *shared)
{
return reinterpret_cast<ChunkHeader*>(reinterpret_cast<uint8_t*>(shared) + SIZEOF_SHARED_DATA);
}
static inline uint8_t *GetChunkBuffer(SharedData *shared)
{
return reinterpret_cast<uint8_t*>(shared->chunkHead) + SIZEOF_CHUNK_HEADER;
}
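// Illustrative layout sketch (not part of this commit): the shared bookkeeping
// and the first chunk header live in one contiguous block,
//
//   shared_ -> [ SharedData ][ ChunkHeader ][ payload of the first chunk ... ]
//              ^              ^
//              |              GetChunkHead(shared_)
//
// GetChunkBuffer(shared_) points just past the header of the current head
// chunk (shared_->chunkHead), which may be a later chunk added by AddChunk().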
static const size_t kDefaultChunkCapacity = RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY; //!< Default chunk capacity.
public:
static const bool kNeedFree = false; //!< Tell users that no need to call Free() with this allocator. (concept Allocator)
@@ -120,9 +159,26 @@ public:
/*! \param chunkSize The size of memory chunk. The default is kDefaultChunkCapacity.
\param baseAllocator The allocator for allocating memory chunks.
*/
explicit
MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(0), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
chunk_capacity_(chunkSize),
baseAllocator_(baseAllocator ? baseAllocator : RAPIDJSON_NEW(BaseAllocator)()),
shared_(static_cast<SharedData*>(baseAllocator_ ? baseAllocator_->Malloc(SIZEOF_SHARED_DATA + SIZEOF_CHUNK_HEADER) : 0))
{
RAPIDJSON_ASSERT(baseAllocator_ != 0);
RAPIDJSON_ASSERT(shared_ != 0);
if (baseAllocator) {
shared_->ownBaseAllocator = 0;
}
else {
shared_->ownBaseAllocator = baseAllocator_;
}
shared_->chunkHead = GetChunkHead(shared_);
shared_->chunkHead->capacity = 0;
shared_->chunkHead->size = 0;
shared_->chunkHead->next = 0;
shared_->ownBuffer = true;
shared_->refcount = 1;
}
//! Constructor with user-supplied buffer.
@@ -136,41 +192,77 @@ public:
\param baseAllocator The allocator for allocating memory chunks.
*/
MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(buffer), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
chunk_capacity_(chunkSize),
baseAllocator_(baseAllocator),
shared_(static_cast<SharedData*>(AlignBuffer(buffer, size)))
{
RAPIDJSON_ASSERT(buffer != 0);
RAPIDJSON_ASSERT(size > sizeof(ChunkHeader));
chunkHead_ = reinterpret_cast<ChunkHeader*>(buffer);
chunkHead_->capacity = size - sizeof(ChunkHeader);
chunkHead_->size = 0;
chunkHead_->next = 0;
RAPIDJSON_ASSERT(size >= SIZEOF_SHARED_DATA + SIZEOF_CHUNK_HEADER);
shared_->chunkHead = GetChunkHead(shared_);
shared_->chunkHead->capacity = size - SIZEOF_SHARED_DATA - SIZEOF_CHUNK_HEADER;
shared_->chunkHead->size = 0;
shared_->chunkHead->next = 0;
shared_->ownBaseAllocator = 0;
shared_->ownBuffer = false;
shared_->refcount = 1;
}
MemoryPoolAllocator(const MemoryPoolAllocator& rhs) RAPIDJSON_NOEXCEPT :
chunk_capacity_(rhs.chunk_capacity_),
baseAllocator_(rhs.baseAllocator_),
shared_(rhs.shared_)
{
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
++shared_->refcount;
}
MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) RAPIDJSON_NOEXCEPT
{
RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
++rhs.shared_->refcount;
this->~MemoryPoolAllocator();
baseAllocator_ = rhs.baseAllocator_;
chunk_capacity_ = rhs.chunk_capacity_;
shared_ = rhs.shared_;
return *this;
}
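// Illustrative sketch (not part of this commit): copies are shallow and
// reference-counted, so both objects draw from the same chunks, e.g.
//
//   MemoryPoolAllocator<> a;
//   MemoryPoolAllocator<> b(a);   // shared_->refcount becomes 2
//   void* p = b.Malloc(16);       // also visible through a.Size()
//   assert(a == b);               // equality compares the shared_ pointer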
//! Destructor.
/*! This deallocates all memory chunks, excluding the user-supplied buffer.
*/
~MemoryPoolAllocator() {
~MemoryPoolAllocator() RAPIDJSON_NOEXCEPT {
if (shared_->refcount > 1) {
--shared_->refcount;
return;
}
Clear();
RAPIDJSON_DELETE(ownBaseAllocator_);
BaseAllocator *a = shared_->ownBaseAllocator;
if (shared_->ownBuffer) {
baseAllocator_->Free(shared_);
}
RAPIDJSON_DELETE(a);
}
//! Deallocates all memory chunks, excluding the user-supplied buffer.
void Clear() {
while (chunkHead_ && chunkHead_ != userBuffer_) {
ChunkHeader* next = chunkHead_->next;
baseAllocator_->Free(chunkHead_);
chunkHead_ = next;
//! Deallocates all memory chunks, excluding the first/user one.
void Clear() RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
for (;;) {
ChunkHeader* c = shared_->chunkHead;
if (!c->next) {
break;
}
shared_->chunkHead = c->next;
baseAllocator_->Free(c);
}
if (chunkHead_ && chunkHead_ == userBuffer_)
chunkHead_->size = 0; // Clear user buffer
shared_->chunkHead->size = 0;
}
//! Computes the total capacity of allocated memory chunks.
/*! \return total capacity in bytes.
*/
size_t Capacity() const {
size_t Capacity() const RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
size_t capacity = 0;
for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
for (ChunkHeader* c = shared_->chunkHead; c != 0; c = c->next)
capacity += c->capacity;
return capacity;
}
@@ -178,25 +270,35 @@ public:
//! Computes the memory blocks allocated.
/*! \return total used bytes.
*/
size_t Size() const {
size_t Size() const RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
size_t size = 0;
for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
for (ChunkHeader* c = shared_->chunkHead; c != 0; c = c->next)
size += c->size;
return size;
}
//! Whether the allocator is shared.
/*! \return true or false.
*/
bool Shared() const RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
return shared_->refcount > 1;
}
//! Allocates a memory block. (concept Allocator)
void* Malloc(size_t size) {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
if (!size)
return NULL;
size = RAPIDJSON_ALIGN(size);
if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity)
if (RAPIDJSON_UNLIKELY(shared_->chunkHead->size + size > shared_->chunkHead->capacity))
if (!AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size))
return NULL;
void *buffer = reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size;
chunkHead_->size += size;
void *buffer = GetChunkBuffer(shared_) + shared_->chunkHead->size;
shared_->chunkHead->size += size;
return buffer;
}
@@ -205,6 +307,7 @@ public:
if (originalPtr == 0)
return Malloc(newSize);
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
if (newSize == 0)
return NULL;
@@ -216,10 +319,10 @@ public:
return originalPtr;
// Simply expand it if it is the last allocation and there is sufficient space
if (originalPtr == reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) {
if (originalPtr == GetChunkBuffer(shared_) + shared_->chunkHead->size - originalSize) {
size_t increment = static_cast<size_t>(newSize - originalSize);
if (chunkHead_->size + increment <= chunkHead_->capacity) {
chunkHead_->size += increment;
if (shared_->chunkHead->size + increment <= shared_->chunkHead->capacity) {
shared_->chunkHead->size += increment;
return originalPtr;
}
}
@@ -235,50 +338,264 @@ public:
}
//! Frees a memory block (concept Allocator)
static void Free(void *ptr) { (void)ptr; } // Do nothing
static void Free(void *ptr) RAPIDJSON_NOEXCEPT { (void)ptr; } // Do nothing
//! Compare (equality) with another MemoryPoolAllocator
bool operator==(const MemoryPoolAllocator& rhs) const RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
return shared_ == rhs.shared_;
}
//! Compare (inequality) with another MemoryPoolAllocator
bool operator!=(const MemoryPoolAllocator& rhs) const RAPIDJSON_NOEXCEPT {
return !operator==(rhs);
}
private:
//! Copy constructor is not permitted.
MemoryPoolAllocator(const MemoryPoolAllocator& rhs) /* = delete */;
//! Copy assignment operator is not permitted.
MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) /* = delete */;
//! Creates a new chunk.
/*! \param capacity Capacity of the chunk in bytes.
\return true if success.
*/
bool AddChunk(size_t capacity) {
if (!baseAllocator_)
ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator)();
if (ChunkHeader* chunk = reinterpret_cast<ChunkHeader*>(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity))) {
shared_->ownBaseAllocator = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator)();
if (ChunkHeader* chunk = static_cast<ChunkHeader*>(baseAllocator_->Malloc(SIZEOF_CHUNK_HEADER + capacity))) {
chunk->capacity = capacity;
chunk->size = 0;
chunk->next = chunkHead_;
chunkHead_ = chunk;
chunk->next = shared_->chunkHead;
shared_->chunkHead = chunk;
return true;
}
else
return false;
}
static const int kDefaultChunkCapacity = RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY; //!< Default chunk capacity.
static inline void* AlignBuffer(void* buf, size_t &size)
{
RAPIDJSON_NOEXCEPT_ASSERT(buf != 0);
const uintptr_t mask = sizeof(void*) - 1;
const uintptr_t ubuf = reinterpret_cast<uintptr_t>(buf);
if (RAPIDJSON_UNLIKELY(ubuf & mask)) {
const uintptr_t abuf = (ubuf + mask) & ~mask;
RAPIDJSON_ASSERT(size >= abuf - ubuf);
buf = reinterpret_cast<void*>(abuf);
size -= abuf - ubuf;
}
return buf;
}
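// Worked example (illustrative, not part of this commit), with 8-byte pointers:
//   buf = 0x1003, size = 100  ->  mask = 7, abuf = (0x1003 + 7) & ~7 = 0x1008,
//   so the aligned buffer starts at 0x1008 and size shrinks by 5 to 95.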
//! Chunk header for prepending to each chunk.
/*! Chunks are stored as a singly linked list.
*/
struct ChunkHeader {
size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself).
size_t size; //!< Current size of allocated memory in bytes.
ChunkHeader *next; //!< Next chunk in the linked list.
};
ChunkHeader *chunkHead_; //!< Head of the chunk linked-list. Only the head chunk serves allocation.
void *userBuffer_; //!< User supplied buffer.
BaseAllocator* ownBaseAllocator_; //!< base allocator created by this object.
size_t chunk_capacity_; //!< The minimum capacity of chunk when they are allocated.
BaseAllocator* baseAllocator_; //!< base allocator for allocating memory chunks.
SharedData *shared_; //!< The shared data of the allocator
};
template<typename T, typename A>
inline T* Realloc(A& a, T* old_p, size_t old_n, size_t new_n)
{
RAPIDJSON_NOEXCEPT_ASSERT(old_n <= SIZE_MAX / sizeof(T) && new_n <= SIZE_MAX / sizeof(T));
return static_cast<T*>(a.Realloc(old_p, old_n * sizeof(T), new_n * sizeof(T)));
}
template<typename T, typename A>
inline T *Malloc(A& a, size_t n = 1)
{
return Realloc<T, A>(a, NULL, 0, n);
}
template<typename T, typename A>
inline void Free(A& a, T *p, size_t n = 1)
{
static_cast<void>(Realloc<T, A>(a, p, n, 0));
}
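// Illustrative sketch (not part of this commit): these helpers give typed,
// element-counted allocation on top of any rapidjson Allocator, e.g.
//
//   CrtAllocator a;
//   int* p = Malloc<int>(a, 4);    // room for 4 ints
//   p = Realloc<int>(a, p, 4, 8);  // grow to 8 ints
//   Free<int>(a, p, 8);            // release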
#ifdef __GNUC__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(effc++) // std::allocator can safely be inherited
#endif
template <typename T, typename BaseAllocator = CrtAllocator>
class StdAllocator :
public std::allocator<T>
{
typedef std::allocator<T> allocator_type;
public:
typedef BaseAllocator BaseAllocatorType;
StdAllocator() RAPIDJSON_NOEXCEPT :
allocator_type(),
baseAllocator_()
{ }
StdAllocator(const StdAllocator& rhs) RAPIDJSON_NOEXCEPT :
allocator_type(rhs),
baseAllocator_(rhs.baseAllocator_)
{ }
template<typename U>
StdAllocator(const StdAllocator<U, BaseAllocator>& rhs) RAPIDJSON_NOEXCEPT :
allocator_type(rhs),
baseAllocator_(rhs.baseAllocator_)
{ }
/* implicit */
StdAllocator(const BaseAllocator& allocator) RAPIDJSON_NOEXCEPT :
allocator_type(),
baseAllocator_(allocator)
{ }
~StdAllocator() RAPIDJSON_NOEXCEPT
{ }
typedef typename allocator_type::value_type value_type;
typedef typename allocator_type::pointer pointer;
typedef typename allocator_type::const_pointer const_pointer;
typedef typename allocator_type::reference reference;
typedef typename allocator_type::const_reference const_reference;
typedef typename allocator_type::size_type size_type;
typedef typename allocator_type::difference_type difference_type;
template<typename U>
struct rebind {
typedef StdAllocator<U, BaseAllocator> other;
};
#if RAPIDJSON_HAS_CXX11
using allocator_type::max_size;
using allocator_type::address;
using allocator_type::construct;
using allocator_type::destroy;
#else
size_t max_size() const RAPIDJSON_NOEXCEPT
{
return allocator_type::max_size();
}
pointer address(reference r) const RAPIDJSON_NOEXCEPT
{
return allocator_type::address(r);
}
const_pointer address(const_reference r) const RAPIDJSON_NOEXCEPT
{
return allocator_type::address(r);
}
void construct(pointer p, const_reference r)
{
allocator_type::construct(p, r);
}
void destroy(pointer p)
{
allocator_type::destroy(p);
}
#endif
template <typename U>
U* allocate(size_type n = 1, const void* = 0)
{
return RAPIDJSON_NAMESPACE::Malloc<U>(baseAllocator_, n);
}
template <typename U>
void deallocate(U* p, size_type n = 1)
{
RAPIDJSON_NAMESPACE::Free<U>(baseAllocator_, p, n);
}
pointer allocate(size_type n = 1, const void* = 0)
{
return allocate<value_type>(n);
}
void deallocate(pointer p, size_type n = 1)
{
deallocate<value_type>(p, n);
}
template<typename U>
bool operator==(const StdAllocator<U, BaseAllocator>& rhs) const RAPIDJSON_NOEXCEPT
{
return baseAllocator_ == rhs.baseAllocator_;
}
template<typename U>
bool operator!=(const StdAllocator<U, BaseAllocator>& rhs) const RAPIDJSON_NOEXCEPT
{
return !operator==(rhs);
}
//! rapidjson Allocator concept
void* Malloc(size_t size)
{
return baseAllocator_.Malloc(size);
}
void* Realloc(void* originalPtr, size_t originalSize, size_t newSize)
{
return baseAllocator_.Realloc(originalPtr, originalSize, newSize);
}
static void Free(void *ptr) RAPIDJSON_NOEXCEPT
{
BaseAllocator::Free(ptr);
}
private:
template <typename, typename>
friend class StdAllocator; // access to StdAllocator<!T>.*
BaseAllocator baseAllocator_;
};
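// Illustrative sketch (not part of this commit): besides the std::allocator
// interface, StdAllocator exposes Malloc/Realloc/Free, so it can also be used
// where rapidjson's Allocator concept is expected, e.g.
//
//   StdAllocator<char, CrtAllocator> a;
//   void* p = a.Malloc(64);
//   p = a.Realloc(p, 64, 128);
//   StdAllocator<char, CrtAllocator>::Free(p);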
template <typename BaseAllocator>
class StdAllocator<void, BaseAllocator> :
public std::allocator<void>
{
typedef std::allocator<void> allocator_type;
public:
typedef BaseAllocator BaseAllocatorType;
StdAllocator() RAPIDJSON_NOEXCEPT :
allocator_type(),
baseAllocator_()
{ }
StdAllocator(const StdAllocator& rhs) RAPIDJSON_NOEXCEPT :
allocator_type(rhs),
baseAllocator_(rhs.baseAllocator_)
{ }
template<typename U>
StdAllocator(const StdAllocator<U, BaseAllocator>& rhs) RAPIDJSON_NOEXCEPT :
allocator_type(rhs),
baseAllocator_(rhs.baseAllocator_)
{ }
/* implicit */
StdAllocator(const BaseAllocator& allocator) RAPIDJSON_NOEXCEPT :
allocator_type(),
baseAllocator_(allocator)
{ }
~StdAllocator() RAPIDJSON_NOEXCEPT
{ }
typedef typename allocator_type::value_type value_type;
template<typename U>
struct rebind {
typedef StdAllocator<U, BaseAllocator> other;
};
private:
template <typename, typename>
friend class StdAllocator; // access to StdAllocator<!T>.*
BaseAllocator baseAllocator_;
};
#ifdef __GNUC__
RAPIDJSON_DIAG_POP
#endif
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_ENCODINGS_H_