From f1c67f5b2b51cb9c8c0abd5550c6ee1b8c2456a5 Mon Sep 17 00:00:00 2001
From: Francisco
Date: Thu, 8 Apr 2021 14:37:16 +0100
Subject: [PATCH] pool - avoid concurrent batch allocations in background threads

---
 lib/include/srsran/adt/pool/batch_mem_pool.h | 32 +++++++++++++++-----
 lib/include/srsran/adt/pool/obj_pool.h       | 20 +++++++++---
 2 files changed, 39 insertions(+), 13 deletions(-)

diff --git a/lib/include/srsran/adt/pool/batch_mem_pool.h b/lib/include/srsran/adt/pool/batch_mem_pool.h
index a74b8ef41..1829fca26 100644
--- a/lib/include/srsran/adt/pool/batch_mem_pool.h
+++ b/lib/include/srsran/adt/pool/batch_mem_pool.h
@@ -121,7 +121,7 @@ public:
     void* node = grow_pool.allocate_node();
 
     if (grow_pool.size() < batch_threshold) {
-      allocate_batch_in_background();
+      allocate_batch_in_background_unlocked();
     }
     return node;
   }
@@ -139,23 +139,39 @@ public:
   }
 
   size_t get_node_max_size() const { return grow_pool.get_node_max_size(); }
-  size_t cache_size() const { return grow_pool.cache_size(); }
+  size_t cache_size() const
+  {
+    std::lock_guard<std::mutex> lock(state->mutex);
+    return grow_pool.cache_size();
+  }
 
 private:
-  void allocate_batch_in_background()
+  void allocate_batch_in_background_unlocked()
   {
-    std::shared_ptr<detached_pool_state> state_copy = state;
-    get_background_workers().push_task([state_copy]() {
-      std::lock_guard<std::mutex> lock(state_copy->mutex);
-      if (state_copy->pool != nullptr) {
-        state_copy->pool->grow_pool.allocate_batch();
+    if (state->dispatched) {
+      // new batch allocation already ongoing
+      return;
+    }
+    state->dispatched = true;
+    std::shared_ptr<detached_pool_state> state_sptr = state;
+    get_background_workers().push_task([state_sptr]() {
+      std::lock_guard<std::mutex> lock(state_sptr->mutex);
+      // check if pool has not been destroyed
+      if (state_sptr->pool != nullptr) {
+        auto* pool = state_sptr->pool;
+        do {
+          pool->grow_pool.allocate_batch();
+        } while (pool->grow_pool.cache_size() < pool->batch_threshold);
       }
+      state_sptr->dispatched = false;
     });
   }
 
+  // State is stored in a shared_ptr that may outlive the pool.
   struct detached_pool_state {
     std::mutex           mutex;
     background_mem_pool* pool;
+    bool                 dispatched = false;
     explicit detached_pool_state(background_mem_pool* pool_) : pool(pool_) {}
   };
   std::shared_ptr<detached_pool_state> state;
diff --git a/lib/include/srsran/adt/pool/obj_pool.h b/lib/include/srsran/adt/pool/obj_pool.h
index 4dc9de821..769479365 100644
--- a/lib/include/srsran/adt/pool/obj_pool.h
+++ b/lib/include/srsran/adt/pool/obj_pool.h
@@ -182,12 +182,21 @@ private:
 
   void allocate_batch_in_background_()
   {
-    std::shared_ptr<detached_pool_state> state_copy = state;
-    get_background_workers().push_task([state_copy]() {
-      std::lock_guard<std::mutex> lock(state_copy->mutex);
-      if (state_copy->pool != nullptr) {
-        state_copy->pool->grow_pool.allocate_batch();
+    if (state->dispatched) {
+      // new batch allocation already ongoing
+      return;
+    }
+    state->dispatched = true;
+    std::shared_ptr<detached_pool_state> state_sptr = state;
+    get_background_workers().push_task([state_sptr]() {
+      std::lock_guard<std::mutex> lock(state_sptr->mutex);
+      if (state_sptr->pool != nullptr) {
+        auto* pool = state_sptr->pool;
+        do {
+          pool->grow_pool.allocate_batch();
+        } while (pool->grow_pool.cache_size() < pool->thres);
       }
+      state_sptr->dispatched = false;
     });
   }
 
@@ -197,6 +206,7 @@ private:
   struct detached_pool_state {
     std::mutex           mutex;
     background_obj_pool* pool;
+    bool                 dispatched = false;
     explicit detached_pool_state(background_obj_pool* pool_) : pool(pool_) {}
   };
   std::shared_ptr<detached_pool_state> state;
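
Illustrative sketch, not part of the patch: the change above stores a dispatched flag in the detached state shared between the pool and its background tasks, so at most one batch-refill task is pending at a time, and each task keeps allocating batches until the cache is back above the threshold before clearing the flag. The _unlocked suffix suggests the caller is expected to already hold the state mutex. The standalone C++ program below reproduces that pattern with hypothetical names (toy_pool, detached_state, refill_in_background_unlocked) and a plain std::thread standing in for srsRAN's get_background_workers() task queue.

#include <chrono>
#include <cstddef>
#include <cstdio>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>

class toy_pool
{
  // Shared, detached state: may outlive the pool, so background tasks
  // can check whether the pool is still alive before touching it.
  struct detached_state {
    std::mutex mutex;
    toy_pool*  pool;
    bool       dispatched = false; // true while a refill task is queued or running
    explicit detached_state(toy_pool* p) : pool(p) {}
  };

public:
  explicit toy_pool(size_t threshold_) :
    threshold(threshold_), state(std::make_shared<detached_state>(this))
  {}

  ~toy_pool()
  {
    // Detach: an in-flight refill task will see pool == nullptr and do nothing.
    std::lock_guard<std::mutex> lock(state->mutex);
    state->pool = nullptr;
  }

  int allocate()
  {
    std::lock_guard<std::mutex> lock(state->mutex);
    if (cache.empty()) {
      grow_batch();
    }
    int v = cache.back();
    cache.pop_back();
    if (cache.size() < threshold) {
      refill_in_background_unlocked(); // lock is already held, as the name implies
    }
    return v;
  }

private:
  void grow_batch()
  {
    for (int i = 0; i < 8; ++i) {
      cache.push_back(next++);
    }
  }

  // Caller must hold state->mutex.
  void refill_in_background_unlocked()
  {
    if (state->dispatched) {
      return; // a refill is already pending; avoid piling up concurrent batch allocations
    }
    state->dispatched = true;
    std::shared_ptr<detached_state> s = state;
    // std::thread stands in for a background worker queue
    // (e.g. srsRAN's get_background_workers().push_task(...)).
    std::thread([s]() {
      std::lock_guard<std::mutex> lock(s->mutex);
      if (s->pool != nullptr) {
        do {
          s->pool->grow_batch();
        } while (s->pool->cache.size() < s->pool->threshold);
      }
      s->dispatched = false;
    }).detach();
  }

  size_t                          threshold;
  int                             next = 0;
  std::vector<int>                cache;
  std::shared_ptr<detached_state> state;
};

int main()
{
  toy_pool pool(4);
  for (int i = 0; i < 20; ++i) {
    std::printf("allocated %d\n", pool.allocate());
  }
  // Give detached refill tasks time to finish before the pool is destroyed
  // (a real implementation would join its worker threads instead).
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
  return 0;
}

Destroying the pool only nulls the pointer inside the shared state, which is why a refill task that outlives the pool can safely do nothing instead of touching freed memory, mirroring the "pool has not been destroyed" check in the patch.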