Provide StdAllocator, STL compatible, for use with STL types.

ylavic 2021-03-12 15:16:02 +01:00
parent d51dd2d0e9
commit 49e4dd619f


@@ -17,6 +17,8 @@
#include "rapidjson.h"
#include <memory>
RAPIDJSON_NAMESPACE_BEGIN
///////////////////////////////////////////////////////////////////////////////
@@ -89,7 +91,14 @@ public:
}
return RAPIDJSON_REALLOC(originalPtr, newSize);
}
static void Free(void *ptr) RAPIDJSON_NOEXCEPT { RAPIDJSON_FREE(ptr); }
bool operator==(const CrtAllocator&) const RAPIDJSON_NOEXCEPT {
return true;
}
bool operator!=(const CrtAllocator&) const RAPIDJSON_NOEXCEPT {
return false;
}
};
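For context: CrtAllocator has no per-instance state, so the new comparison operators always report equality, and memory obtained from one instance may be released through any other or through the static Free(). A minimal sketch, with an illustrative function name:

    #include <cassert>
    #include "rapidjson/allocators.h"

    void crt_equality_sketch() {
        rapidjson::CrtAllocator a, b;
        assert(a == b);                    // stateless: every instance compares equal
        void* p = a.Malloc(64);            // allocate through one instance...
        rapidjson::CrtAllocator::Free(p);  // ...release through the static Free
    }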
///////////////////////////////////////////////////////////////////////////////
@@ -113,6 +122,36 @@ public:
*/
template <typename BaseAllocator = CrtAllocator>
class MemoryPoolAllocator {
//! Chunk header prepended to each chunk.
/*! Chunks are stored as a singly linked list.
*/
struct ChunkHeader {
size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself).
size_t size; //!< Current size of allocated memory in bytes.
ChunkHeader *next; //!< Next chunk in the linked list.
};
struct SharedData {
ChunkHeader *chunkHead; //!< Head of the chunk linked-list. Only the head chunk serves allocation.
BaseAllocator* ownBaseAllocator; //!< base allocator created by this object.
size_t refcount;
bool ownBuffer;
};
static const size_t SIZEOF_SHARED_DATA = RAPIDJSON_ALIGN(sizeof(SharedData));
static const size_t SIZEOF_CHUNK_HEADER = RAPIDJSON_ALIGN(sizeof(ChunkHeader));
static inline ChunkHeader *GetChunkHead(SharedData *shared)
{
return reinterpret_cast<ChunkHeader*>(reinterpret_cast<uint8_t*>(shared) + SIZEOF_SHARED_DATA);
}
static inline uint8_t *GetChunkBuffer(SharedData *shared)
{
return reinterpret_cast<uint8_t*>(shared->chunkHead) + SIZEOF_CHUNK_HEADER;
}
static const size_t kDefaultChunkCapacity = RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY; //!< Default chunk capacity.
public:
static const bool kNeedFree = false; //!< Tell users that there is no need to call Free() with this allocator. (concept Allocator)
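For orientation, the shared block that backs each allocator is laid out as sketched below; the byte counts are illustrative for a typical 64-bit build with the default 8-byte RAPIDJSON_ALIGN:

    //  shared_                   GetChunkHead(shared_)        GetChunkBuffer(shared_)
    //  |                         |                            |
    //  [ SharedData, aligned     ][ ChunkHeader, aligned      ][ chunk payload ... ]
    //  |<- SIZEOF_SHARED_DATA  ->||<- SIZEOF_CHUNK_HEADER   ->|
    //
    //  e.g. SIZEOF_SHARED_DATA == 32 and SIZEOF_CHUNK_HEADER == 24 on a common LP64
    //  build; only the head chunk is embedded here, later chunks are separate
    //  base-allocator blocks prefixed by their own ChunkHeader.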
@@ -120,9 +159,26 @@ public:
/*! \param chunkSize The size of memory chunk. The default is kDefaultChunkCapacity.
\param baseAllocator The allocator for allocating memory chunks.
*/
explicit
MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
chunk_capacity_(chunkSize),
baseAllocator_(baseAllocator ? baseAllocator : RAPIDJSON_NEW(BaseAllocator)()),
shared_(static_cast<SharedData*>(baseAllocator_ ? baseAllocator_->Malloc(SIZEOF_SHARED_DATA + SIZEOF_CHUNK_HEADER) : 0))
{
RAPIDJSON_ASSERT(baseAllocator_ != 0);
RAPIDJSON_ASSERT(shared_ != 0);
if (baseAllocator) {
shared_->ownBaseAllocator = 0;
}
else {
shared_->ownBaseAllocator = baseAllocator_;
}
shared_->chunkHead = GetChunkHead(shared_);
shared_->chunkHead->capacity = 0;
shared_->chunkHead->size = 0;
shared_->chunkHead->next = 0;
shared_->ownBuffer = true;
shared_->refcount = 1;
}
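A minimal sketch of the new construction behaviour (sizes illustrative): the shared block plus an empty head chunk are allocated up front, so the first Malloc() always goes through AddChunk():

    #include <cassert>
    #include "rapidjson/allocators.h"

    void default_ctor_sketch() {
        rapidjson::MemoryPoolAllocator<> pool(64 * 1024); // base allocator defaults to CrtAllocator
        assert(pool.Capacity() == 0);                     // the embedded head chunk starts empty
        assert(pool.Size() == 0 && !pool.Shared());       // refcount is 1 until the pool is copied
        void* p = pool.Malloc(100);                       // triggers AddChunk(64 * 1024)
        assert(p != 0 && pool.Size() == RAPIDJSON_ALIGN(100));
    }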
//! Constructor with user-supplied buffer.
@@ -136,41 +192,77 @@ public:
\param baseAllocator The allocator for allocating memory chunks.
*/
MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
chunk_capacity_(chunkSize),
baseAllocator_(baseAllocator),
shared_(static_cast<SharedData*>(AlignBuffer(buffer, size)))
{
RAPIDJSON_ASSERT(size >= SIZEOF_SHARED_DATA + SIZEOF_CHUNK_HEADER);
shared_->chunkHead = GetChunkHead(shared_);
shared_->chunkHead->capacity = size - SIZEOF_SHARED_DATA - SIZEOF_CHUNK_HEADER;
shared_->chunkHead->size = 0;
shared_->chunkHead->next = 0;
shared_->ownBaseAllocator = 0;
shared_->ownBuffer = false;
shared_->refcount = 1;
}
MemoryPoolAllocator(const MemoryPoolAllocator& rhs) RAPIDJSON_NOEXCEPT :
chunk_capacity_(rhs.chunk_capacity_),
baseAllocator_(rhs.baseAllocator_),
shared_(rhs.shared_)
{
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
++shared_->refcount;
}
MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) RAPIDJSON_NOEXCEPT
{
RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
++rhs.shared_->refcount;
this->~MemoryPoolAllocator();
baseAllocator_ = rhs.baseAllocator_;
chunk_capacity_ = rhs.chunk_capacity_;
shared_ = rhs.shared_;
return *this;
} }
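The new copy constructor and copy assignment give the pool shared-ownership semantics: copies point at the same SharedData and only bump the reference count. A sketch, assuming the default 8-byte alignment:

    #include <cassert>
    #include "rapidjson/allocators.h"

    void sharing_sketch() {
        rapidjson::MemoryPoolAllocator<> a;
        {
            rapidjson::MemoryPoolAllocator<> b(a); // shares a's pool, refcount -> 2
            assert(a == b && a.Shared() && b.Shared());
            b.Malloc(32);
            assert(a.Size() == 32);                // allocations are visible through both handles
        }                                          // b's destructor only decrements the refcount
        assert(!a.Shared() && a.Size() == 32);     // the pool itself stays alive
    }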
//! Destructor.
/*! This deallocates all memory chunks, excluding the user-supplied buffer.
*/
~MemoryPoolAllocator() RAPIDJSON_NOEXCEPT {
if (shared_->refcount > 1) {
--shared_->refcount;
return;
}
Clear();
BaseAllocator *a = shared_->ownBaseAllocator;
if (shared_->ownBuffer) {
baseAllocator_->Free(shared_);
}
RAPIDJSON_DELETE(a);
}
//! Deallocates all memory chunks, excluding the first chunk (the one embedded in the shared block or the user-supplied buffer).
void Clear() RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
for (;;) {
ChunkHeader* c = shared_->chunkHead;
if (!c->next) {
break;
}
shared_->chunkHead = c->next;
baseAllocator_->Free(c);
}
shared_->chunkHead->size = 0;
}
//! Computes the total capacity of allocated memory chunks.
/*! \return total capacity in bytes.
*/
size_t Capacity() const RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
size_t capacity = 0;
for (ChunkHeader* c = shared_->chunkHead; c != 0; c = c->next)
capacity += c->capacity;
return capacity;
}
@@ -178,25 +270,35 @@ public:
//! Computes the memory blocks allocated.
/*! \return total used bytes.
*/
size_t Size() const RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
size_t size = 0;
for (ChunkHeader* c = shared_->chunkHead; c != 0; c = c->next)
size += c->size;
return size;
}
//! Whether the allocator is shared.
/*! \return true if the allocator's internal state is shared by more than one allocator instance.
*/
bool Shared() const RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
return shared_->refcount > 1;
}
//! Allocates a memory block. (concept Allocator)
void* Malloc(size_t size) {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
if (!size)
return NULL;
size = RAPIDJSON_ALIGN(size);
if (RAPIDJSON_UNLIKELY(shared_->chunkHead->size + size > shared_->chunkHead->capacity))
if (!AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size))
return NULL;
void *buffer = GetChunkBuffer(shared_) + shared_->chunkHead->size;
shared_->chunkHead->size += size;
return buffer;
}
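Malloc() remains a bump allocator within the head chunk: the request is aligned, served at chunkHead->size, and a new chunk is added only when the aligned request does not fit. Sketch, assuming the default 8-byte alignment and both requests landing in the same chunk:

    #include <cassert>
    #include "rapidjson/allocators.h"

    void bump_sketch() {
        rapidjson::MemoryPoolAllocator<> pool;
        char* p1 = static_cast<char*>(pool.Malloc(10)); // rounded up to 16 bytes
        char* p2 = static_cast<char*>(pool.Malloc(10));
        assert(p2 == p1 + 16);                          // consecutive bumps in the same chunk
        assert(pool.Size() == 32);
    }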
@@ -205,6 +307,7 @@ public:
if (originalPtr == 0)
return Malloc(newSize);
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
if (newSize == 0)
return NULL;
@@ -216,10 +319,10 @@ public:
return originalPtr;
// Simply expand it if it is the last allocation and there is sufficient space
if (originalPtr == GetChunkBuffer(shared_) + shared_->chunkHead->size - originalSize) {
size_t increment = static_cast<size_t>(newSize - originalSize);
if (shared_->chunkHead->size + increment <= shared_->chunkHead->capacity) {
shared_->chunkHead->size += increment;
return originalPtr;
}
}
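Realloc() keeps its fast path for the most recent allocation: if originalPtr is the last block handed out of the head chunk and the chunk still has room, it grows in place without copying. Sketch (sizes illustrative):

    #include <cassert>
    #include "rapidjson/allocators.h"

    void realloc_sketch() {
        rapidjson::MemoryPoolAllocator<> pool;
        void* p = pool.Malloc(16);
        void* q = pool.Realloc(p, 16, 48); // p is the last allocation -> expanded in place
        assert(q == p && pool.Size() == 48);
    }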
@@ -235,50 +338,264 @@ public:
}
//! Frees a memory block (concept Allocator)
static void Free(void *ptr) RAPIDJSON_NOEXCEPT { (void)ptr; } // Do nothing
//! Compare (equality) with another MemoryPoolAllocator
bool operator==(const MemoryPoolAllocator& rhs) const RAPIDJSON_NOEXCEPT {
RAPIDJSON_NOEXCEPT_ASSERT(shared_->refcount > 0);
RAPIDJSON_NOEXCEPT_ASSERT(rhs.shared_->refcount > 0);
return shared_ == rhs.shared_;
}
//! Compare (inequality) with another MemoryPoolAllocator
bool operator!=(const MemoryPoolAllocator& rhs) const RAPIDJSON_NOEXCEPT {
return !operator==(rhs);
}
private:
//! Creates a new chunk.
/*! \param capacity Capacity of the chunk in bytes.
\return true if success.
*/
bool AddChunk(size_t capacity) {
if (!baseAllocator_)
shared_->ownBaseAllocator = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator)();
if (ChunkHeader* chunk = static_cast<ChunkHeader*>(baseAllocator_->Malloc(SIZEOF_CHUNK_HEADER + capacity))) {
chunk->capacity = capacity;
chunk->size = 0;
chunk->next = shared_->chunkHead;
shared_->chunkHead = chunk;
return true;
}
else
return false;
}
static inline void* AlignBuffer(void* buf, size_t &size)
{
RAPIDJSON_NOEXCEPT_ASSERT(buf != 0);
const uintptr_t mask = sizeof(void*) - 1;
const uintptr_t ubuf = reinterpret_cast<uintptr_t>(buf);
if (RAPIDJSON_UNLIKELY(ubuf & mask)) {
const uintptr_t abuf = (ubuf + mask) & ~mask;
RAPIDJSON_ASSERT(size >= abuf - ubuf);
buf = reinterpret_cast<void*>(abuf);
size -= abuf - ubuf;
}
return buf;
}
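AlignBuffer() lets the user-buffer constructor accept a buffer whose start is not pointer-aligned: the start is rounded up to the next sizeof(void*) boundary and the usable size shrinks by the same amount before SharedData and the first ChunkHeader are placed in it. Sketch, with an illustrative buffer size:

    #include "rapidjson/allocators.h"

    void user_buffer_sketch() {
        char raw[512];
        void*  buffer = raw + 1;           // deliberately (most likely) misaligned start
        size_t size   = sizeof(raw) - 1;
        // The constructor aligns buffer up by at most sizeof(void*) - 1 bytes, reduces
        // size accordingly, and the remainder becomes the first chunk's capacity.
        rapidjson::MemoryPoolAllocator<> pool(buffer, size);
        pool.Malloc(64);                   // served from the user buffer, no heap allocation
    }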
size_t chunk_capacity_; //!< The minimum capacity of chunk when they are allocated.
BaseAllocator* baseAllocator_; //!< base allocator for allocating memory chunks.
SharedData *shared_; //!< The shared data of the allocator
};
template<typename T, typename A>
inline T* Realloc(A& a, T* old_p, size_t old_n, size_t new_n)
{
RAPIDJSON_NOEXCEPT_ASSERT(old_n <= SIZE_MAX / sizeof(T) && new_n <= SIZE_MAX / sizeof(T));
return static_cast<T*>(a.Realloc(old_p, old_n * sizeof(T), new_n * sizeof(T)));
}
template<typename T, typename A>
inline T *Malloc(A& a, size_t n = 1)
{
return Realloc<T, A>(a, NULL, 0, n);
}
template<typename T, typename A>
inline void Free(A& a, T *p, size_t n = 1)
{
static_cast<void>(Realloc<T, A>(a, p, n, 0));
}
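The new free functions Malloc/Realloc/Free are thin typed wrappers over the Allocator concept: element counts are converted to byte counts and forwarded to a.Realloc(). Sketch:

    #include "rapidjson/allocators.h"

    void typed_helpers_sketch() {
        rapidjson::CrtAllocator a;
        int* p = rapidjson::Malloc<int>(a, 4);        // room for 4 ints
        p      = rapidjson::Realloc<int>(a, p, 4, 8); // grow to 8 ints
        rapidjson::Free<int>(a, p, 8);                // release (a Realloc to 0 elements)
    }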
#ifdef __GNUC__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(effc++) // std::allocator can safely be inherited
#endif
template <typename T, typename BaseAllocator = CrtAllocator>
class StdAllocator :
public std::allocator<T>
{
typedef std::allocator<T> allocator_type;
public:
typedef BaseAllocator BaseAllocatorType;
StdAllocator() RAPIDJSON_NOEXCEPT :
allocator_type(),
baseAllocator_()
{ }
StdAllocator(const StdAllocator& rhs) RAPIDJSON_NOEXCEPT :
allocator_type(rhs),
baseAllocator_(rhs.baseAllocator_)
{ }
template<typename U>
StdAllocator(const StdAllocator<U, BaseAllocator>& rhs) RAPIDJSON_NOEXCEPT :
allocator_type(rhs),
baseAllocator_(rhs.baseAllocator_)
{ }
/* implicit */
StdAllocator(const BaseAllocator& allocator) RAPIDJSON_NOEXCEPT :
allocator_type(),
baseAllocator_(allocator)
{ }
~StdAllocator() RAPIDJSON_NOEXCEPT
{ }
typedef typename allocator_type::value_type value_type;
typedef typename allocator_type::pointer pointer;
typedef typename allocator_type::const_pointer const_pointer;
typedef typename allocator_type::reference reference;
typedef typename allocator_type::const_reference const_reference;
typedef typename allocator_type::size_type size_type;
typedef typename allocator_type::difference_type difference_type;
template<typename U>
struct rebind {
typedef StdAllocator<U, BaseAllocator> other;
};
#if RAPIDJSON_HAS_CXX11
using allocator_type::max_size;
using allocator_type::address;
using allocator_type::construct;
using allocator_type::destroy;
#else
size_t max_size() const RAPIDJSON_NOEXCEPT
{
return allocator_type::max_size();
}
pointer address(reference r) const RAPIDJSON_NOEXCEPT
{
return allocator_type::address(r);
}
const_pointer address(const_reference r) const RAPIDJSON_NOEXCEPT
{
return allocator_type::address(r);
}
void construct(pointer p, const_reference r)
{
allocator_type::construct(p, r);
}
void destroy(pointer p)
{
allocator_type::destroy(p);
}
#endif
template <typename U>
U* allocate(size_type n = 1, const void* = 0)
{
return RAPIDJSON_NAMESPACE::Malloc<U>(baseAllocator_, n);
}
template <typename U>
void deallocate(U* p, size_type n = 1)
{
RAPIDJSON_NAMESPACE::Free<U>(baseAllocator_, p, n);
}
pointer allocate(size_type n = 1, const void* = 0)
{
return allocate<value_type>(n);
}
void deallocate(pointer p, size_type n = 1)
{
deallocate<value_type>(p, n);
}
template<typename U>
bool operator==(const StdAllocator<U, BaseAllocator>& rhs) const RAPIDJSON_NOEXCEPT
{
return baseAllocator_ == rhs.baseAllocator_;
}
template<typename U>
bool operator!=(const StdAllocator<U, BaseAllocator>& rhs) const RAPIDJSON_NOEXCEPT
{
return !operator==(rhs);
}
//! rapidjson Allocator concept
void* Malloc(size_t size)
{
return baseAllocator_.Malloc(size);
}
void* Realloc(void* originalPtr, size_t originalSize, size_t newSize)
{
return baseAllocator_.Realloc(originalPtr, originalSize, newSize);
}
static void Free(void *ptr) RAPIDJSON_NOEXCEPT
{
BaseAllocator::Free(ptr);
}
private:
template <typename, typename>
friend class StdAllocator; // access to StdAllocator<!T>.*
BaseAllocator baseAllocator_;
};
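This is the point of the commit: StdAllocator lets a RapidJSON allocator back STL containers while still satisfying the RapidJSON Allocator concept itself. A usage sketch (typedef and function names illustrative):

    #include <vector>
    #include "rapidjson/allocators.h"

    typedef rapidjson::StdAllocator<int, rapidjson::CrtAllocator> IntStdAllocator;

    void std_allocator_sketch() {
        std::vector<int, IntStdAllocator> v; // STL container backed by CrtAllocator
        v.push_back(42);

        IntStdAllocator a;                   // also usable where RapidJSON expects an Allocator
        void* p = a.Malloc(64);
        p = a.Realloc(p, 64, 128);
        IntStdAllocator::Free(p);
    }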
template <typename BaseAllocator>
class StdAllocator<void, BaseAllocator> :
public std::allocator<void>
{
typedef std::allocator<void> allocator_type;
public:
typedef BaseAllocator BaseAllocatorType;
StdAllocator() RAPIDJSON_NOEXCEPT :
allocator_type(),
baseAllocator_()
{ }
StdAllocator(const StdAllocator& rhs) RAPIDJSON_NOEXCEPT :
allocator_type(rhs),
baseAllocator_(rhs.baseAllocator_)
{ }
template<typename U>
StdAllocator(const StdAllocator<U, BaseAllocator>& rhs) RAPIDJSON_NOEXCEPT :
allocator_type(rhs),
baseAllocator_(rhs.baseAllocator_)
{ }
/* implicit */
StdAllocator(const BaseAllocator& allocator) RAPIDJSON_NOEXCEPT :
allocator_type(),
baseAllocator_(allocator)
{ }
~StdAllocator() RAPIDJSON_NOEXCEPT
{ }
typedef typename allocator_type::value_type value_type;
template<typename U>
struct rebind {
typedef StdAllocator<U, BaseAllocator> other;
};
private:
template <typename, typename>
friend class StdAllocator; // access to StdAllocator<!T>.*
BaseAllocator baseAllocator_;
};
#ifdef __GNUC__
RAPIDJSON_DIAG_POP
#endif
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_ENCODINGS_H_