From 20928651c7523d76f9bf126c892eae4eb730d426 Mon Sep 17 00:00:00 2001
From: Francisco
Date: Thu, 25 Feb 2021 14:38:40 +0000
Subject: [PATCH] created batch allocator that leverages background worker pool

---
 lib/include/srslte/adt/mem_pool.h | 91 +++++++++++++++++++++++++++++++
 srsenb/hdr/stack/rrc/rrc.h        |  2 -
 srsenb/hdr/stack/rrc/rrc_ue.h     |  2 +
 srsenb/src/stack/rrc/rrc.cc       |  5 +-
 srsenb/src/stack/rrc/rrc_ue.cc    | 17 +++---
 5 files changed, 104 insertions(+), 13 deletions(-)

diff --git a/lib/include/srslte/adt/mem_pool.h b/lib/include/srslte/adt/mem_pool.h
index a73de0249..1277e3d8d 100644
--- a/lib/include/srslte/adt/mem_pool.h
+++ b/lib/include/srslte/adt/mem_pool.h
@@ -13,6 +13,7 @@
 #ifndef SRSLTE_MEM_POOL_H
 #define SRSLTE_MEM_POOL_H
 
+#include "srslte/common/thread_pool.h"
 #include
 #include
 #include
@@ -196,6 +197,96 @@ public:
   }
 };
 
+/**
+ * Pool specialized in allocating batches of objects in a preemptive way in a background thread to minimize latency.
+ * Note: Current implementation assumes that the pool object will outlive the background callbacks to allocate new
+ * batches
+ * @tparam T individual object type that is being allocated
+ * @tparam BatchSize number of T objects in a batch
+ * @tparam ThresholdSize number of T objects below which a new batch needs to be allocated
+ */
+template <typename T, size_t BatchSize, size_t ThresholdSize>
+class background_allocator_obj_pool
+{
+  static_assert(ThresholdSize > 0, "ThresholdSize needs to be positive");
+  static_assert(BatchSize > 1, "BatchSize needs to be higher than 1");
+
+public:
+  background_allocator_obj_pool(bool lazy_start = false)
+  {
+    if (not lazy_start) {
+      allocate_batch_in_background();
+    }
+  }
+  background_allocator_obj_pool(background_allocator_obj_pool&&)      = delete;
+  background_allocator_obj_pool(const background_allocator_obj_pool&) = delete;
+  background_allocator_obj_pool& operator=(background_allocator_obj_pool&&) = delete;
+  background_allocator_obj_pool& operator=(const background_allocator_obj_pool&) = delete;
+  ~background_allocator_obj_pool()
+  {
+    std::lock_guard<std::mutex> lock(mutex);
+    batches.clear();
+  }
+
+  /// alloc new object space. If no memory is pre-reserved in the pool, malloc is called to allocate a new batch.
+  void* allocate_node(size_t sz)
+  {
+    assert(sz == sizeof(T));
+    std::lock_guard<std::mutex> lock(mutex);
+    uint8_t* block = obj_cache.try_pop();
+
+    if (block != nullptr) {
+      // allocation successful
+      if (obj_cache.size() < ThresholdSize) {
+        get_background_workers().push_task([this]() {
+          std::lock_guard<std::mutex> lock(mutex);
+          allocate_batch_();
+        });
+      }
+      return block;
+    }
+
+    // try allocation of new batch in same thread as caller.
+    allocate_batch_();
+    return obj_cache.try_pop();
+  }
+
+  void deallocate_node(void* p)
+  {
+    std::lock_guard<std::mutex> lock(mutex);
+    if (p != nullptr) {
+      obj_cache.push(static_cast<uint8_t*>(p));
+    }
+  }
+
+  void allocate_batch_in_background()
+  {
+    get_background_workers().push_task([this]() {
+      std::lock_guard<std::mutex> lock(mutex);
+      allocate_batch_();
+    });
+  }
+
+private:
+  using obj_storage_t = typename std::aligned_storage<sizeof(T), alignof(T)>::type;
+  using batch_obj_t   = std::array<obj_storage_t, BatchSize>;
+
+  /// Unprotected allocation of new Batch of Objects
+  void allocate_batch_()
+  {
+    batches.emplace_back(new batch_obj_t());
+    batch_obj_t& batch = *batches.back();
+    for (obj_storage_t& obj_store : batch) {
+      obj_cache.push(reinterpret_cast<uint8_t*>(&obj_store));
+    }
+  }
+
+  // memory stack to cache allocated memory chunks
+  std::mutex                                 mutex;
+  memblock_stack                             obj_cache;
+  std::vector<std::unique_ptr<batch_obj_t> > batches;
+};
+
 } // namespace srslte
 
 #endif // SRSLTE_MEM_POOL_H
diff --git a/srsenb/hdr/stack/rrc/rrc.h b/srsenb/hdr/stack/rrc/rrc.h
index 15ad5ebcb..a8685eb8f 100644
--- a/srsenb/hdr/stack/rrc/rrc.h
+++ b/srsenb/hdr/stack/rrc/rrc.h
@@ -200,8 +200,6 @@ private:
   void rem_user_thread(uint16_t rnti);
 
   std::mutex paging_mutex;
-
-  static srslte::big_obj_pool<ue> ue_pool;
 };
 
 } // namespace srsenb
diff --git a/srsenb/hdr/stack/rrc/rrc_ue.h b/srsenb/hdr/stack/rrc/rrc_ue.h
index 725abaeb5..9505fcd06 100644
--- a/srsenb/hdr/stack/rrc/rrc_ue.h
+++ b/srsenb/hdr/stack/rrc/rrc_ue.h
@@ -119,6 +119,8 @@ public:
   void operator delete(void* ptr)noexcept;
   void operator delete[](void* ptr) = delete;
 
+  static srslte::background_allocator_obj_pool<ue, 16, 4>* get_ue_pool();
+
 private:
   // args
   srslte::timer_handler::unique_timer activity_timer;
diff --git a/srsenb/src/stack/rrc/rrc.cc b/srsenb/src/stack/rrc/rrc.cc
index 9674dd95e..36840741c 100644
--- a/srsenb/src/stack/rrc/rrc.cc
+++ b/srsenb/src/stack/rrc/rrc.cc
@@ -35,7 +35,7 @@ rrc::rrc(srslte::task_sched_handle task_sched_) :
   logger(srslog::fetch_basic_logger("RRC")), task_sched(task_sched_), rx_pdu_queue(64)
 {
   pending_paging.clear();
-  ue_pool.reserve(16);
+  rrc::ue::get_ue_pool()->allocate_batch_in_background();
 }
 
 rrc::~rrc() {}
@@ -1019,7 +1019,4 @@ void rrc::tti_clock()
   }
 }
 
-// definition of rrc static member
-srslte::big_obj_pool<rrc::ue> rrc::ue_pool;
-
 } // namespace srsenb
diff --git a/srsenb/src/stack/rrc/rrc_ue.cc b/srsenb/src/stack/rrc/rrc_ue.cc
index 9233e8a6c..da1eaacb9 100644
--- a/srsenb/src/stack/rrc/rrc_ue.cc
+++ b/srsenb/src/stack/rrc/rrc_ue.cc
@@ -63,18 +63,21 @@ int rrc::ue::init()
   return SRSLTE_SUCCESS;
 }
 
+srslte::background_allocator_obj_pool<rrc::ue, 16, 4>* rrc::ue::get_ue_pool()
+{
+  // Note: batch allocation is going to be explicitly called in enb class construction. The pool object, therefore,
+  // will only be initialized if we instantiate an eNB
+  static srslte::background_allocator_obj_pool<rrc::ue, 16, 4> ue_pool(true);
+  return &ue_pool;
+}
+
 void* rrc::ue::operator new(size_t sz)
 {
-  assert(sz == sizeof(ue));
-  void* memchunk = rrc::ue_pool.allocate_node(sz);
-  if (ue_pool.capacity() <= 4) {
-    srslte::get_background_workers().push_task([]() { rrc::ue_pool.reserve(4); });
-  }
-  return memchunk;
+  return rrc::ue::get_ue_pool()->allocate_node(sz);
 }
 
 void rrc::ue::operator delete(void* ptr)noexcept
 {
-  rrc::ue_pool.deallocate_node(ptr);
+  rrc::ue::get_ue_pool()->deallocate_node(ptr);
 }
 
 rrc_state_t rrc::ue::get_state()
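
Usage sketch (illustrative; not part of the patch). The snippet below shows how a class other than rrc::ue could route its class-specific operator new/delete through background_allocator_obj_pool, mirroring the pattern in rrc_ue.cc above. The class name demo_obj, its payload field, and the <16, 4> batch/threshold sizes are hypothetical placeholders; it also assumes the background worker pool behind srslte::get_background_workers() is running, as it is inside the eNB.

// Illustrative sketch only: demo_obj, its payload and the <16, 4> sizing are
// made up for this example; the pool and worker-pool APIs are the ones used above.
#include "srslte/adt/mem_pool.h"
#include <cstddef>
#include <cstdint>

class demo_obj
{
public:
  // Class-specific allocation hooks, mirroring rrc::ue::operator new/delete.
  void* operator new(std::size_t sz) { return get_pool()->allocate_node(sz); }
  void  operator delete(void* ptr) noexcept { get_pool()->deallocate_node(ptr); }

  static srslte::background_allocator_obj_pool<demo_obj, 16, 4>* get_pool()
  {
    // lazy_start=true: no batch is allocated until explicitly requested, so the
    // pool only spends memory if demo_obj is actually used.
    static srslte::background_allocator_obj_pool<demo_obj, 16, 4> pool(true);
    return &pool;
  }

private:
  uint64_t payload[128] = {}; // stand-in for a large object body
};

int main()
{
  // Warm the pool before the first latency-critical allocation (assumes the
  // background workers are running; otherwise the task simply stays queued).
  demo_obj::get_pool()->allocate_batch_in_background();

  demo_obj* obj = new demo_obj(); // served from the cached batch when available,
                                  // otherwise a batch is allocated in this thread
  delete obj;                     // block goes back to the pool, not to the heap
  return 0;
}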