created batch allocator that leverages background worker pool

Francisco 2021-02-25 14:38:40 +00:00 committed by Francisco Paisana
parent d41b6187c1
commit 20928651c7
5 changed files with 104 additions and 13 deletions

View File

@@ -13,6 +13,7 @@
#ifndef SRSLTE_MEM_POOL_H
#define SRSLTE_MEM_POOL_H
#include "srslte/common/thread_pool.h"
#include <cassert>
#include <cstdint>
#include <memory>
@@ -196,6 +197,96 @@ public:
}
};
/**
* Pool specialized in allocating batches of objects preemptively in a background thread to minimize latency.
* Note: Current implementation assumes that the pool object will outlive the background callbacks to allocate new
* batches
* @tparam T individual object type that is being allocated
* @tparam BatchSize number of T objects in a batch
* @tparam ThresholdSize number of T objects below which a new batch needs to be allocated
*/
template <typename T, size_t BatchSize, size_t ThresholdSize>
class background_allocator_obj_pool
{
static_assert(ThresholdSize > 0, "ThresholdSize needs to be positive");
static_assert(BatchSize > 1, "BatchSize needs to be higher than 1");
public:
background_allocator_obj_pool(bool lazy_start = false)
{
if (not lazy_start) {
allocate_batch_in_background();
}
}
background_allocator_obj_pool(background_allocator_obj_pool&&) = delete;
background_allocator_obj_pool(const background_allocator_obj_pool&) = delete;
background_allocator_obj_pool& operator=(background_allocator_obj_pool&&) = delete;
background_allocator_obj_pool& operator=(const background_allocator_obj_pool&) = delete;
~background_allocator_obj_pool()
{
std::lock_guard<std::mutex> lock(mutex);
batches.clear();
}
/// Allocate space for a new object. If no memory is pre-reserved in the pool, a new batch is allocated in the caller's thread.
void* allocate_node(size_t sz)
{
assert(sz == sizeof(T));
std::lock_guard<std::mutex> lock(mutex);
uint8_t* block = obj_cache.try_pop();
if (block != nullptr) {
// allocation successful
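// if the cache has dropped below the threshold, schedule a background task to pre-allocate another batch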
if (obj_cache.size() < ThresholdSize) {
get_background_workers().push_task([this]() {
std::lock_guard<std::mutex> lock(mutex);
allocate_batch_();
});
}
return block;
}
// try allocation of new batch in same thread as caller.
allocate_batch_();
return obj_cache.try_pop();
}
void deallocate_node(void* p)
{
std::lock_guard<std::mutex> lock(mutex);
if (p != nullptr) {
obj_cache.push(static_cast<uint8_t*>(p));
}
}
void allocate_batch_in_background()
{
get_background_workers().push_task([this]() {
std::lock_guard<std::mutex> lock(mutex);
allocate_batch_();
});
}
private:
using obj_storage_t = typename std::aligned_storage<sizeof(T), alignof(T)>::type;
using batch_obj_t = std::array<obj_storage_t, BatchSize>;
/// Unprotected allocation of new Batch of Objects
void allocate_batch_()
{
batches.emplace_back(new batch_obj_t());
batch_obj_t& batch = *batches.back();
for (obj_storage_t& obj_store : batch) {
obj_cache.push(reinterpret_cast<uint8_t*>(&obj_store));
}
}
// memory stack used to cache allocated memory chunks
std::mutex mutex;
memblock_stack obj_cache;
std::vector<std::unique_ptr<batch_obj_t> > batches;
};
} // namespace srslte
#endif // SRSLTE_MEM_POOL_H
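
A minimal usage sketch of the new pool's raw API (not part of this commit); the foo type, the 8/2 template parameters and the include path are illustrative assumptions:

#include "srslte/common/mem_pool.h" // assumed path; include from wherever mem_pool.h lives in the tree
#include <new>

struct foo {
  int id;
  explicit foo(int id_) : id(id_) {}
};

// The pool must outlive any background allocation callbacks, so it gets static storage
// duration here, mirroring the function-local static used in rrc::ue::get_ue_pool().
static srslte::background_allocator_obj_pool<foo, 8, 2> pool(true); // lazy_start: no batch yet

void foo_example()
{
  // Pre-allocate the first batch in the background worker pool (as the rrc constructor does).
  pool.allocate_batch_in_background();

  // Raw storage comes from the pool; the object itself is built with placement new.
  // If the background batch is not ready yet, allocate_node falls back to allocating
  // a batch in this thread.
  void* mem = pool.allocate_node(sizeof(foo));
  foo*  obj = new (mem) foo(1);

  // ... use obj ...

  // Destroy the object explicitly and hand its storage back to the pool.
  obj->~foo();
  pool.deallocate_node(obj);
}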

View File

@@ -200,8 +200,6 @@ private:
void rem_user_thread(uint16_t rnti);
std::mutex paging_mutex;
static srslte::big_obj_pool<ue, false> ue_pool;
};
} // namespace srsenb

View File

@@ -119,6 +119,8 @@ public:
void operator delete(void* ptr)noexcept;
void operator delete[](void* ptr) = delete;
static srslte::background_allocator_obj_pool<ue, 16, 4>* get_ue_pool();
private:
// args
srslte::timer_handler::unique_timer activity_timer;

View File

@@ -35,7 +35,7 @@ rrc::rrc(srslte::task_sched_handle task_sched_) :
logger(srslog::fetch_basic_logger("RRC")), task_sched(task_sched_), rx_pdu_queue(64)
{
pending_paging.clear();
ue_pool.reserve(16);
rrc::ue::get_ue_pool()->allocate_batch_in_background();
}
rrc::~rrc() {}
@@ -1019,7 +1019,4 @@ void rrc::tti_clock()
}
}
// definition of rrc static member
srslte::big_obj_pool<rrc::ue, false> rrc::ue_pool;
} // namespace srsenb

View File

@@ -63,18 +63,21 @@ int rrc::ue::init()
return SRSLTE_SUCCESS;
}
srslte::background_allocator_obj_pool<rrc::ue, 16, 4>* rrc::ue::get_ue_pool()
{
// Note: batch allocation is explicitly triggered during eNB class construction. The pool object, therefore,
// will only be initialized if an eNB is instantiated
static srslte::background_allocator_obj_pool<rrc::ue, 16, 4> ue_pool(true);
return &ue_pool;
}
void* rrc::ue::operator new(size_t sz)
{
assert(sz == sizeof(ue));
void* memchunk = rrc::ue_pool.allocate_node(sz);
if (ue_pool.capacity() <= 4) {
srslte::get_background_workers().push_task([]() { rrc::ue_pool.reserve(4); });
}
return memchunk;
return rrc::ue::get_ue_pool()->allocate_node(sz);
}
void rrc::ue::operator delete(void* ptr)noexcept
{
rrc::ue_pool.deallocate_node(ptr);
rrc::ue::get_ue_pool()->deallocate_node(ptr);
}
rrc_state_t rrc::ue::get_state()