LockedPool: avoid quadratic-time allocation

Use separate maps for used/free chunks so that each allocation no longer
requires a linear scan through all previously allocated chunks.
Kaz Wesley 2016-11-02 14:09:03 -07:00
parent 0b59f80625
commit b3ddc5e76f
3 changed files with 59 additions and 89 deletions
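The core of the change: the old Arena kept a single map of all chunks, with an in-use flag packed into the high bit of the size, so every alloc() also walked over used chunks; a sweep of N small allocations therefore cost O(N²). With separate chunks_free/chunks_used maps, alloc() only ever touches free chunks. Below is a minimal standalone sketch of the scheme, assuming a simplified MiniArena (illustration only, not the patch's Arena class):

// Minimal sketch of the two-map scheme. Keys are chunk base addresses,
// values are chunk sizes.
#include <cstddef>
#include <map>

class MiniArena
{
    std::map<char*, size_t> chunks_free;
    std::map<char*, size_t> chunks_used;
public:
    MiniArena(char* base, size_t size) { chunks_free.emplace(base, size); }

    void* alloc(size_t size)
    {
        if (size == 0)
            return nullptr;
        // Only free chunks are scanned; used chunks never slow this loop.
        for (auto it = chunks_free.begin(); it != chunks_free.end(); ++it) {
            if (it->second >= size) {
                // Carve from the end of the free chunk: its key (the base
                // address) is unchanged, so the entry shrinks in place.
                char* p = it->first + it->second - size;
                chunks_used.emplace(p, size);
                if ((it->second -= size) == 0)
                    chunks_free.erase(it);
                return p;
            }
        }
        return nullptr;
    }
};

Carving the allocation from the end of the free chunk is what makes the free map workable: the chunk's key never changes, so the entry is updated in place and only erased once fully consumed.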

src/support/lockedpool.cpp

@@ -26,6 +26,8 @@
 #include <unistd.h> // for sysconf
 #endif
 
+#include <algorithm>
+
 LockedPoolManager* LockedPoolManager::_instance = NULL;
 std::once_flag LockedPoolManager::init_flag;
@@ -45,7 +47,7 @@ Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
     base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
 {
     // Start with one free chunk that covers the entire arena
-    chunks.emplace(base, Chunk(size_in, false));
+    chunks_free.emplace(base, size_in);
 }
 
 Arena::~Arena()
@@ -57,24 +59,30 @@ void* Arena::alloc(size_t size)
 {
     // Round to next multiple of alignment
     size = align_up(size, alignment);
-    // Don't handle zero-sized chunks, or those bigger than MAX_SIZE
-    if (size == 0 || size >= Chunk::MAX_SIZE) {
-        return nullptr;
-    }
-    for (auto& chunk: chunks) {
-        if (!chunk.second.isInUse() && size <= chunk.second.getSize()) {
-            char* _base = chunk.first;
-            size_t leftover = chunk.second.getSize() - size;
-            if (leftover > 0) { // Split chunk
-                chunks.emplace(_base + size, Chunk(leftover, false));
-                chunk.second.setSize(size);
-            }
-            chunk.second.setInUse(true);
-            return reinterpret_cast<void*>(_base);
-        }
-    }
-    return nullptr;
+
+    // Don't handle zero-sized chunks
+    if (size == 0)
+        return nullptr;
+
+    // Pick a large enough free-chunk
+    auto it = std::find_if(chunks_free.begin(), chunks_free.end(),
+        [=](const std::map<char*, size_t>::value_type& chunk){ return chunk.second >= size; });
+    if (it == chunks_free.end())
+        return nullptr;
+
+    // Create the used-chunk, taking its space from the end of the free-chunk
+    auto alloced = chunks_used.emplace(it->first + it->second - size, size).first;
+    if (!(it->second -= size))
+        chunks_free.erase(it);
+    return reinterpret_cast<void*>(alloced->first);
+}
+
+/* extend the Iterator if other begins at its end */
+template <class Iterator, class Pair> bool extend(Iterator it, const Pair& other) {
+    if (it->first + it->second == other.first) {
+        it->second += other.second;
+        return true;
+    }
+    return false;
 }
 
 void Arena::free(void *ptr)
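The extend() helper added above is the entire coalescing primitive: it grows the chunk at `it` by `other` only when `other` begins exactly where `*it` ends. A self-contained demonstration (toy code for illustration, not from the patch):

#include <cassert>
#include <cstddef>
#include <map>
#include <utility>

template <class Iterator, class Pair> bool extend(Iterator it, const Pair& other) {
    if (it->first + it->second == other.first) {
        it->second += other.second;
        return true;
    }
    return false;
}

int main()
{
    static char arena[256];
    std::map<char*, size_t> chunks_free;
    auto it = chunks_free.emplace(arena, 64).first;           // [0, 64)
    std::pair<char* const, size_t> touching{arena + 64, 32};  // [64, 96), contiguous
    assert(extend(it, touching) && it->second == 96);         // absorbed
    std::pair<char* const, size_t> gap{arena + 128, 16};      // hole at [96, 128)
    assert(!extend(it, gap));                                 // left unchanged
}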
@@ -83,65 +91,49 @@ void Arena::free(void *ptr)
     if (ptr == nullptr) {
         return;
     }
-    auto i = chunks.find(static_cast<char*>(ptr));
-    if (i == chunks.end() || !i->second.isInUse()) {
+
+    // Remove chunk from used map
+    auto i = chunks_used.find(static_cast<char*>(ptr));
+    if (i == chunks_used.end()) {
         throw std::runtime_error("Arena: invalid or double free");
     }
+    auto freed = *i;
+    chunks_used.erase(i);
 
-    i->second.setInUse(false);
-
-    if (i != chunks.begin()) { // Absorb into previous chunk if exists and free
-        auto prev = i;
-        --prev;
-        if (!prev->second.isInUse()) {
-            // Absorb current chunk size into previous chunk.
-            prev->second.setSize(prev->second.getSize() + i->second.getSize());
-            // Erase current chunk. Erasing does not invalidate current
-            // iterators for a map, except for that pointing to the object
-            // itself, which will be overwritten in the next statement.
-            chunks.erase(i);
-            // From here on, the previous chunk is our current chunk.
-            i = prev;
-        }
-    }
-    auto next = i;
-    ++next;
-    if (next != chunks.end()) { // Absorb next chunk if exists and free
-        if (!next->second.isInUse()) {
-            // Absurb next chunk size into current chunk
-            i->second.setSize(i->second.getSize() + next->second.getSize());
-            // Erase next chunk.
-            chunks.erase(next);
-        }
-    }
+    // Add space to free map, coalescing contiguous chunks
+    auto next = chunks_free.upper_bound(freed.first);
+    auto prev = (next == chunks_free.begin()) ? chunks_free.end() : std::prev(next);
+    if (prev == chunks_free.end() || !extend(prev, freed))
+        prev = chunks_free.emplace_hint(next, freed);
+    if (next != chunks_free.end() && extend(prev, *next))
+        chunks_free.erase(next);
 }
 
 Arena::Stats Arena::stats() const
 {
-    Arena::Stats r;
-    r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0;
-    for (const auto& chunk: chunks) {
-        if (chunk.second.isInUse()) {
-            r.used += chunk.second.getSize();
-            r.chunks_used += 1;
-        } else {
-            r.free += chunk.second.getSize();
-            r.chunks_free += 1;
-        }
-        r.total += chunk.second.getSize();
-    }
+    Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() };
+    for (const auto& chunk: chunks_used)
+        r.used += chunk.second;
+    for (const auto& chunk: chunks_free)
+        r.free += chunk.second;
+    r.total = r.used + r.free;
     return r;
 }
 
 #ifdef ARENA_DEBUG
+void printchunk(char* base, size_t sz, bool used) {
+    std::cout <<
+        "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
+        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
+        " 0x" << used << std::endl;
+}
 void Arena::walk() const
 {
-    for (const auto& chunk: chunks) {
-        std::cout <<
-            "0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.first <<
-            " 0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.second.getSize() <<
-            " 0x" << chunk.second.isInUse() << std::endl;
-    }
+    for (const auto& chunk: chunks_used)
+        printchunk(chunk.first, chunk.second, true);
+    std::cout << std::endl;
+    for (const auto& chunk: chunks_free)
+        printchunk(chunk.first, chunk.second, false);
     std::cout << std::endl;
 }
 #endif
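The upper_bound/std::prev dance in the new free() is the subtle part: next is the first free chunk starting above the freed address, prev (if it exists) is the free chunk just below, and the freed block is merged into either or both neighbours. Here is a standalone sketch of the same pattern; add_free_chunk is a hypothetical name, and extend() is repeated so the snippet compiles on its own:

#include <cassert>
#include <cstddef>
#include <iterator>
#include <map>
#include <utility>

template <class Iterator, class Pair> bool extend(Iterator it, const Pair& other) {
    if (it->first + it->second == other.first) {
        it->second += other.second;
        return true;
    }
    return false;
}

// Same coalescing pattern as Arena::free() above, lifted out for illustration.
void add_free_chunk(std::map<char*, size_t>& chunks_free,
                    const std::pair<char* const, size_t>& freed)
{
    auto next = chunks_free.upper_bound(freed.first);
    auto prev = (next == chunks_free.begin()) ? chunks_free.end() : std::prev(next);
    if (prev == chunks_free.end() || !extend(prev, freed))
        prev = chunks_free.emplace_hint(next, freed);
    if (next != chunks_free.end() && extend(prev, *next))
        chunks_free.erase(next);
}

int main()
{
    static char arena[96];
    std::map<char*, size_t> chunks_free;
    // Free the middle block last: both neighbours must be absorbed.
    add_free_chunk(chunks_free, {arena, 32});
    add_free_chunk(chunks_free, {arena + 64, 32});
    assert(chunks_free.size() == 2);
    add_free_chunk(chunks_free, {arena + 32, 32});
    assert(chunks_free.size() == 1 && chunks_free.begin()->second == 96);
}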
@@ -312,9 +304,7 @@ void LockedPool::free(void *ptr)
 LockedPool::Stats LockedPool::stats() const
 {
     std::lock_guard<std::mutex> lock(mutex);
-    LockedPool::Stats r;
-    r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0;
-    r.locked = cumulative_bytes_locked;
+    LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
     for (const auto &arena: arenas) {
         Arena::Stats i = arena.stats();
         r.used += i.used;
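One caveat with these brace-initializations: they rely on member declaration order. Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() } assumes the members are declared as used, free, total, chunks_used, chunks_free, and LockedPool::Stats additionally has locked in the fourth position; reordering either struct would silently assign the chunk counts or cumulative_bytes_locked to the wrong field.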

src/support/lockedpool.h

@@ -50,27 +50,6 @@ public:
     Arena(void *base, size_t size, size_t alignment);
     virtual ~Arena();
 
-    /** A chunk of memory.
-     */
-    struct Chunk
-    {
-        /** Most significant bit of size_t. This is used to mark
-         * in-usedness of chunk.
-         */
-        const static size_t SIZE_MSB = 1LLU << ((sizeof(size_t)*8)-1);
-        /** Maximum size of a chunk */
-        const static size_t MAX_SIZE = SIZE_MSB - 1;
-
-        Chunk(size_t size_in, bool used_in):
-            size(size_in | (used_in ? SIZE_MSB : 0)) {}
-
-        bool isInUse() const { return size & SIZE_MSB; }
-        void setInUse(bool used_in) { size = (size & ~SIZE_MSB) | (used_in ? SIZE_MSB : 0); }
-        size_t getSize() const { return size & ~SIZE_MSB; }
-        void setSize(size_t size_in) { size = (size & SIZE_MSB) | size_in; }
-    private:
-        size_t size;
-    };
-
     /** Memory statistics. */
     struct Stats
     {
@@ -112,7 +91,8 @@ private:
     /** Map of chunk address to chunk information. This class makes use of the
      * sorted order to merge previous and next chunks during deallocation.
      */
-    std::map<char*, Chunk> chunks;
+    std::map<char*, size_t> chunks_free;
+    std::map<char*, size_t> chunks_used;
 
     /** Base address of arena */
     char* base;
     /** End address of arena */

src/test/allocator_tests.cpp

@@ -39,7 +39,6 @@ BOOST_AUTO_TEST_CASE(arena_tests)
     }
     void *a0 = b.alloc(128);
-    BOOST_CHECK(a0 == synth_base); // first allocation must start at beginning
     void *a1 = b.alloc(256);
     void *a2 = b.alloc(512);
     BOOST_CHECK(b.stats().used == 896);
@@ -63,8 +62,10 @@ BOOST_AUTO_TEST_CASE(arena_tests)
     BOOST_CHECK(b.stats().used == 128);
     b.free(a3);
     BOOST_CHECK(b.stats().used == 0);
+    BOOST_CHECK_EQUAL(b.stats().chunks_used, 0);
     BOOST_CHECK(b.stats().total == synth_size);
     BOOST_CHECK(b.stats().free == synth_size);
+    BOOST_CHECK_EQUAL(b.stats().chunks_free, 1);
 
     std::vector<void*> addr;
     BOOST_CHECK(b.alloc(0) == nullptr); // allocating 0 always returns nullptr
@@ -74,7 +75,6 @@ BOOST_AUTO_TEST_CASE(arena_tests)
     // Sweeping allocate all memory
     for (int x=0; x<1024; ++x)
         addr.push_back(b.alloc(1024));
-    BOOST_CHECK(addr[0] == synth_base); // first allocation must start at beginning
    BOOST_CHECK(b.stats().free == 0);
     BOOST_CHECK(b.alloc(1024) == nullptr); // memory is full, this must return nullptr
     BOOST_CHECK(b.alloc(0) == nullptr);
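Both removed "first allocation must start at beginning" checks follow directly from the new allocation strategy: alloc() now takes space from the end of a free chunk, so the first allocation no longer lands at synth_base, and the assertions would fail even though the arena behaves correctly.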