Integration of an object pool in mac::ue for the allocation of softbuffers. Definition of a common object pool interface class.

Francisco 2021-04-01 16:28:29 +01:00 committed by Francisco Paisana
parent 460d7a8f4f
commit b004c2024d
7 changed files with 179 additions and 87 deletions
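
The new srsran::obj_pool_itf<T> interface decouples mac::ue from the concrete pool implementation: the MAC owns a background_obj_pool of ue_cc_softbuffers and hands each ue only the interface pointer. A minimal usage sketch of the interface added in this commit, assuming a pool has already been set up as in mac::init below (variable names are illustrative only):

    // "pool" stands for the background_obj_pool created in mac::init.
    srsran::obj_pool_itf<ue_cc_softbuffers>* pool = softbuffer_pool.get();

    // allocate_object() returns a unique_pool_ptr<T>, i.e. a std::unique_ptr with a
    // type-erased deleter, so callers never depend on the concrete pool type.
    srsran::unique_pool_ptr<ue_cc_softbuffers> sb = pool->allocate_object();

    // When sb goes out of scope, the deleter invokes the pool's recycle function
    // (here ue_cc_softbuffers::clear()) and returns the block to the pool cache
    // instead of freeing it.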

View File

@@ -13,6 +13,7 @@
 #ifndef SRSRAN_BACKGROUND_MEM_POOL_H
 #define SRSRAN_BACKGROUND_MEM_POOL_H
 
+#include "common_pool.h"
 #include "memblock_cache.h"
 #include "pool_utils.h"
 #include "srsran/common/srsran_assert.h"
@@ -41,10 +42,12 @@ class base_background_pool
   using pool_type = base_background_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;
 
 public:
   explicit base_background_pool(size_t initial_size = BatchSize,
-                                CtorFunc ctor_func_ = {},
-                                RecycleFunc recycle_func_ = {}) :
-    ctor_func(ctor_func_), recycle_func(recycle_func_), state(std::make_shared<detached_pool_state>(this))
+                                CtorFunc&& ctor_func_ = {},
+                                RecycleFunc&& recycle_func_ = {}) :
+    ctor_func(std::forward<CtorFunc>(ctor_func_)),
+    recycle_func(std::forward<RecycleFunc>(recycle_func_)),
+    state(std::make_shared<detached_pool_state>(this))
   {
     int nof_batches = ceilf(initial_size / (float)BatchSize);
     while (nof_batches-- > 0) {
@@ -90,7 +93,7 @@ public:
   void deallocate_node(void* p)
   {
     std::lock_guard<std::mutex> lock(state->mutex);
-    recycle_func(static_cast<void*>(p));
+    recycle_func(*static_cast<T*>(p));
     obj_cache.push(static_cast<void*>(p));
   }
@@ -148,9 +151,9 @@ using background_mem_pool =
 template <typename T,
           size_t BatchSize,
           size_t ThresholdSize,
-          typename CtorFunc = detail::default_ctor_operator<T>,
+          typename CtorFunc = detail::inplace_default_ctor_operator<T>,
           typename RecycleFunc = detail::noop_operator>
-class background_obj_pool
+class background_obj_pool : public obj_pool_itf<T>
 {
   using pool_type = background_obj_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;
   using mem_pool_type = detail::base_background_pool<T, BatchSize, ThresholdSize, CtorFunc, RecycleFunc>;
@@ -171,7 +174,7 @@ public:
     pool(initial_size, std::forward<CtorFunc>(ctor_func), std::forward<RecycleFunc>(recycle_func))
   {}
 
-  unique_pool_ptr<T> allocate_object()
+  unique_pool_ptr<T> allocate_object() final
   {
     void* ptr = pool.allocate_node(sizeof(T));
     return std::unique_ptr<T, pool_deleter>(static_cast<T*>(ptr), pool_deleter(&pool));

View File

@@ -0,0 +1,43 @@
+/**
+ *
+ * \section COPYRIGHT
+ *
+ * Copyright 2013-2021 Software Radio Systems Limited
+ *
+ * By using this file, you agree to the terms and conditions set
+ * forth in the LICENSE file which can be found at the top level of
+ * the distribution.
+ *
+ */
+
+#ifndef SRSRAN_COMMON_POOL_H
+#define SRSRAN_COMMON_POOL_H
+
+#include "srsran/adt/move_callback.h"
+
+namespace srsran {
+
+/// unique ptr with type-erased dtor, so that it can be used by any pool
+template <typename T>
+using unique_pool_ptr = std::unique_ptr<T, srsran::move_callback<void(void*)> >;
+
+/// Common object pool interface
+template <typename T>
+class obj_pool_itf
+{
+public:
+  using object_type = T;
+
+  obj_pool_itf() = default;
+  obj_pool_itf(const obj_pool_itf&) = delete;
+  obj_pool_itf(obj_pool_itf&&) = delete;
+  obj_pool_itf& operator=(const obj_pool_itf&) = delete;
+  obj_pool_itf& operator=(obj_pool_itf&&) = delete;
+
+  virtual ~obj_pool_itf() = default;
+
+  virtual unique_pool_ptr<T> allocate_object() = 0;
+};
+
+} // namespace srsran
+
+#endif // SRSRAN_COMMON_POOL_H

View File

@@ -21,7 +21,7 @@ namespace srsran {
 namespace detail {
 
 template <typename T>
-struct default_ctor_operator {
+struct inplace_default_ctor_operator {
   void operator()(void* ptr) { new (ptr) T(); }
 };
@@ -35,10 +35,6 @@ struct noop_operator {
 } // namespace detail
 
-/// unique ptr with type-erased dtor, so that it can be used by any pool
-template <typename T>
-using unique_pool_ptr = std::unique_ptr<T, srsran::move_callback<void(void*)> >;
-
 } // namespace srsran
 
 #endif // SRSRAN_POOL_UTILS_H

View File

@@ -16,6 +16,7 @@
 #include "sched.h"
 #include "srsenb/hdr/stack/mac/schedulers/sched_time_rr.h"
 #include "srsran/adt/circular_map.h"
+#include "srsran/adt/pool/background_mem_pool.h"
 #include "srsran/common/mac_pcap.h"
 #include "srsran/common/mac_pcap_net.h"
 #include "srsran/common/task_scheduler.h"
@@ -180,6 +181,9 @@ private:
   // Number of rach preambles detected for a cc.
   std::vector<uint32_t> detected_rachs;
 
+  // Softbuffer pool
+  std::unique_ptr<srsran::obj_pool_itf<ue_cc_softbuffers> > softbuffer_pool;
+
 };
 } // namespace srsenb

View File

@@ -16,6 +16,7 @@
 #include "mac_metrics.h"
 #include "srsran/adt/circular_array.h"
 #include "srsran/adt/circular_map.h"
+#include "srsran/adt/pool/common_pool.h"
 #include "srsran/common/block_queue.h"
 #include "srsran/common/mac_pcap.h"
 #include "srsran/common/mac_pcap_net.h"
@@ -35,6 +36,28 @@ class rrc_interface_mac;
 class rlc_interface_mac;
 class phy_interface_stack_lte;
 
+struct ue_cc_softbuffers {
+  // List of Tx softbuffers for all HARQ processes of one carrier
+  using cc_softbuffer_tx_list_t = std::vector<srsran_softbuffer_tx_t>;
+  // List of Rx softbuffers for all HARQ processes of one carrier
+  using cc_softbuffer_rx_list_t = std::vector<srsran_softbuffer_rx_t>;
+
+  const uint32_t nof_tx_harq_proc;
+  const uint32_t nof_rx_harq_proc;
+  cc_softbuffer_tx_list_t softbuffer_tx_list;
+  cc_softbuffer_rx_list_t softbuffer_rx_list;
+
+  ue_cc_softbuffers(uint32_t nof_prb, uint32_t nof_tx_harq_proc_, uint32_t nof_rx_harq_proc_);
+  ~ue_cc_softbuffers();
+
+  void clear();
+
+  srsran_softbuffer_tx_t& get_tx(uint32_t pid, uint32_t tb_idx)
+  {
+    return softbuffer_tx_list.at(pid * SRSRAN_MAX_TB + tb_idx);
+  }
+  srsran_softbuffer_rx_t& get_rx(uint32_t tti) { return softbuffer_rx_list.at(tti % nof_rx_harq_proc); }
+};
+
 class cc_used_buffers_map
 {
 public:
@@ -66,24 +89,19 @@ private:
 class cc_buffer_handler
 {
 public:
-  // List of Tx softbuffers for all HARQ processes of one carrier
-  using cc_softbuffer_tx_list_t = std::vector<srsran_softbuffer_tx_t>;
-  // List of Rx softbuffers for all HARQ processes of one carrier
-  using cc_softbuffer_rx_list_t = std::vector<srsran_softbuffer_rx_t>;
-
   explicit cc_buffer_handler(srsran::pdu_queue& shared_pdu_queue_);
   ~cc_buffer_handler();
 
   void reset();
-  void allocate_cc(uint32_t nof_prb, uint32_t nof_rx_harq_proc, uint32_t nof_tx_harq_proc);
+  void allocate_cc(srsran::unique_pool_ptr<ue_cc_softbuffers> cc_softbuffers_);
   void deallocate_cc();
 
-  bool empty() const { return softbuffer_tx_list.empty() and softbuffer_rx_list.empty(); }
+  bool empty() const { return cc_softbuffers == nullptr; }
 
   srsran_softbuffer_tx_t& get_tx_softbuffer(uint32_t pid, uint32_t tb_idx)
   {
-    return softbuffer_tx_list.at(pid * SRSRAN_MAX_TB + tb_idx);
+    return cc_softbuffers->get_tx(pid, tb_idx);
   }
-  srsran_softbuffer_rx_t& get_rx_softbuffer(uint32_t tti) { return softbuffer_rx_list.at(tti % nof_rx_harq_proc); }
+  srsran_softbuffer_rx_t& get_rx_softbuffer(uint32_t tti) { return cc_softbuffers->get_rx(tti); }
 
   srsran::byte_buffer_t* get_tx_payload_buffer(size_t harq_pid, size_t tb)
   {
     return tx_payload_buffer[harq_pid][tb].get();
@@ -91,15 +109,11 @@ public:
   cc_used_buffers_map& get_rx_used_buffers() { return rx_used_buffers; }
 
 private:
-  // args
-  uint32_t nof_prb;
-  uint32_t nof_rx_harq_proc;
-  uint32_t nof_tx_harq_proc;
+  // CC softbuffers
+  srsran::unique_pool_ptr<ue_cc_softbuffers> cc_softbuffers;
 
   // buffers
-  cc_softbuffer_tx_list_t softbuffer_tx_list; ///< List of softbuffer lists for Tx
-  cc_softbuffer_rx_list_t softbuffer_rx_list; ///< List of softbuffer lists for Rx
-  cc_used_buffers_map     rx_used_buffers;
+  cc_used_buffers_map rx_used_buffers;
 
   // One buffer per TB per HARQ process and per carrier is needed for each UE.
   std::array<std::array<srsran::unique_byte_buffer_t, SRSRAN_MAX_TB>, SRSRAN_FDD_NOF_HARQ> tx_payload_buffer;
@@ -108,16 +122,17 @@ private:
 class ue : public srsran::read_pdu_interface, public srsran::pdu_queue::process_callback, public mac_ta_ue_interface
 {
 public:
   ue(uint16_t rnti,
      uint32_t nof_prb,
      sched_interface* sched,
      rrc_interface_mac* rrc_,
      rlc_interface_mac* rlc,
      phy_interface_stack_lte* phy_,
      srslog::basic_logger& logger,
      uint32_t nof_cells_,
-     uint32_t nof_rx_harq_proc = SRSRAN_FDD_NOF_HARQ,
-     uint32_t nof_tx_harq_proc = SRSRAN_FDD_NOF_HARQ);
+     srsran::obj_pool_itf<ue_cc_softbuffers>* softbuffer_pool,
+     uint32_t nof_rx_harq_proc = SRSRAN_FDD_NOF_HARQ,
+     uint32_t nof_tx_harq_proc = SRSRAN_FDD_NOF_HARQ);
 
   virtual ~ue();
   void reset();
@@ -182,6 +197,8 @@ private:
   int nof_rx_harq_proc = 0;
   int nof_tx_harq_proc = 0;
 
+  srsran::obj_pool_itf<ue_cc_softbuffers>* softbuffer_pool = nullptr;
+
   srsran::bounded_vector<cc_buffer_handler, SRSRAN_MAX_CARRIERS> cc_buffers;
 
   std::mutex rx_buffers_mutex;

View File

@@ -76,6 +76,21 @@ bool mac::init(const mac_args_t& args_,
   reset();
 
+  // Initiate common pool of softbuffers
+  using softbuffer_pool_t = srsran::background_obj_pool<ue_cc_softbuffers,
+                                                        16,
+                                                        4,
+                                                        srsran::move_callback<void(void*)>,
+                                                        srsran::move_callback<void(ue_cc_softbuffers&)> >;
+  uint32_t nof_prb = args.nof_prb;
+  auto init_softbuffers = [nof_prb](void* ptr) {
+    new (ptr) ue_cc_softbuffers(nof_prb, SRSRAN_FDD_NOF_HARQ, SRSRAN_FDD_NOF_HARQ);
+  };
+  auto recycle_softbuffers = [](ue_cc_softbuffers& softbuffers) { softbuffers.clear(); };
+  softbuffer_pool.reset(new softbuffer_pool_t(std::min(args.max_nof_ues, 16U), // initial allocation size
+                                              init_softbuffers,
+                                              recycle_softbuffers));
+
   // Pre-alloc UE objects for first attaching users
   prealloc_ue(10);
@@ -570,8 +585,8 @@ void mac::rach_detected(uint32_t tti, uint32_t enb_cc_idx, uint32_t preamble_idx
 void mac::prealloc_ue(uint32_t nof_ue)
 {
   for (uint32_t i = 0; i < nof_ue; i++) {
-    std::unique_ptr<ue> ptr = std::unique_ptr<ue>(
-        new ue(allocate_rnti(), args.nof_prb, &scheduler, rrc_h, rlc_h, phy_h, logger, cells.size()));
+    std::unique_ptr<ue> ptr = std::unique_ptr<ue>(new ue(
+        allocate_rnti(), args.nof_prb, &scheduler, rrc_h, rlc_h, phy_h, logger, cells.size(), softbuffer_pool.get()));
     if (not ue_pool.try_push(std::move(ptr))) {
       logger.info("Cannot preallocate more UEs as pool is full");
       return;
@@ -1000,8 +1015,8 @@ void mac::write_mcch(const srsran::sib2_mbms_t* sib2_,
   sib13 = *sib13_;
   memcpy(mcch_payload_buffer, mcch_payload, mcch_payload_length * sizeof(uint8_t));
   current_mcch_length = mcch_payload_length;
-  ue_db[SRSRAN_MRNTI] =
-      std::unique_ptr<ue>{new ue(SRSRAN_MRNTI, args.nof_prb, &scheduler, rrc_h, rlc_h, phy_h, logger, cells.size())};
+  ue_db[SRSRAN_MRNTI] = std::unique_ptr<ue>{
+      new ue(SRSRAN_MRNTI, args.nof_prb, &scheduler, rrc_h, rlc_h, phy_h, logger, cells.size(), softbuffer_pool.get())};
   rrc_h->add_user(SRSRAN_MRNTI, {});
 }

View File

@@ -23,6 +23,45 @@
 namespace srsenb {
 
+ue_cc_softbuffers::ue_cc_softbuffers(uint32_t nof_prb, uint32_t nof_tx_harq_proc_, uint32_t nof_rx_harq_proc_) :
+  nof_tx_harq_proc(nof_tx_harq_proc_), nof_rx_harq_proc(nof_rx_harq_proc_)
+{
+  // Create and init Rx buffers
+  softbuffer_rx_list.resize(nof_rx_harq_proc);
+  for (srsran_softbuffer_rx_t& buffer : softbuffer_rx_list) {
+    srsran_softbuffer_rx_init(&buffer, nof_prb);
+  }
+
+  // Create and init Tx buffers
+  softbuffer_tx_list.resize(nof_tx_harq_proc * SRSRAN_MAX_TB);
+  for (auto& buffer : softbuffer_tx_list) {
+    srsran_softbuffer_tx_init(&buffer, nof_prb);
+  }
+}
+
+ue_cc_softbuffers::~ue_cc_softbuffers()
+{
+  for (auto& buffer : softbuffer_rx_list) {
+    srsran_softbuffer_rx_free(&buffer);
+  }
+  softbuffer_rx_list.clear();
+
+  for (auto& buffer : softbuffer_tx_list) {
+    srsran_softbuffer_tx_free(&buffer);
+  }
+  softbuffer_tx_list.clear();
+}
+
+void ue_cc_softbuffers::clear()
+{
+  for (auto& buffer : softbuffer_rx_list) {
+    srsran_softbuffer_rx_reset(&buffer);
+  }
+  for (auto& buffer : softbuffer_tx_list) {
+    srsran_softbuffer_tx_reset(&buffer);
+  }
+}
+
 cc_used_buffers_map::cc_used_buffers_map(srsran::pdu_queue& shared_pdu_queue_) :
   shared_pdu_queue(&shared_pdu_queue_), logger(&srslog::fetch_basic_logger("MAC"))
 {}
@@ -136,59 +175,33 @@ cc_buffer_handler::~cc_buffer_handler()
  * @param num_cc Number of carriers to add buffers for (default 1)
  * @return number of carriers
  */
-void cc_buffer_handler::allocate_cc(uint32_t nof_prb_, uint32_t nof_rx_harq_proc_, uint32_t nof_tx_harq_proc_)
+void cc_buffer_handler::allocate_cc(srsran::unique_pool_ptr<ue_cc_softbuffers> cc_softbuffers_)
 {
   srsran_assert(empty(), "Cannot allocate softbuffers in CC that is already initialized");
-  nof_prb = nof_prb_;
-  nof_rx_harq_proc = nof_rx_harq_proc_;
-  nof_tx_harq_proc = nof_tx_harq_proc_;
-
-  // Create and init Rx buffers
-  softbuffer_rx_list.resize(nof_rx_harq_proc);
-  for (srsran_softbuffer_rx_t& buffer : softbuffer_rx_list) {
-    srsran_softbuffer_rx_init(&buffer, nof_prb);
-  }
-
-  // Create and init Tx buffers
-  softbuffer_tx_list.resize(nof_tx_harq_proc * SRSRAN_MAX_TB);
-  for (auto& buffer : softbuffer_tx_list) {
-    srsran_softbuffer_tx_init(&buffer, nof_prb);
-  }
+  cc_softbuffers = std::move(cc_softbuffers_);
 }
 
 void cc_buffer_handler::deallocate_cc()
 {
-  for (auto& buffer : softbuffer_rx_list) {
-    srsran_softbuffer_rx_free(&buffer);
-  }
-  softbuffer_rx_list.clear();
-  for (auto& buffer : softbuffer_tx_list) {
-    srsran_softbuffer_tx_free(&buffer);
-  }
-  softbuffer_tx_list.clear();
+  cc_softbuffers.reset();
 }
 
 void cc_buffer_handler::reset()
 {
-  for (auto& buffer : softbuffer_rx_list) {
-    srsran_softbuffer_rx_reset(&buffer);
-  }
-  for (auto& buffer : softbuffer_tx_list) {
-    srsran_softbuffer_tx_reset(&buffer);
-  }
+  cc_softbuffers->clear();
 }
 
 ue::ue(uint16_t rnti_,
        uint32_t nof_prb_,
        sched_interface* sched_,
       rrc_interface_mac* rrc_,
       rlc_interface_mac* rlc_,
       phy_interface_stack_lte* phy_,
       srslog::basic_logger& logger_,
       uint32_t nof_cells_,
-      uint32_t nof_rx_harq_proc_,
-      uint32_t nof_tx_harq_proc_) :
+      srsran::obj_pool_itf<ue_cc_softbuffers>* softbuffer_pool_,
+      uint32_t nof_rx_harq_proc_,
+      uint32_t nof_tx_harq_proc_) :
   rnti(rnti_),
   nof_prb(nof_prb_),
   sched(sched_),
@@ -202,7 +215,8 @@ ue::ue(uint16_t rnti_,
   pdus(logger_),
   nof_rx_harq_proc(nof_rx_harq_proc_),
   nof_tx_harq_proc(nof_tx_harq_proc_),
-  ta_fsm(this)
+  ta_fsm(this),
+  softbuffer_pool(softbuffer_pool_)
 {
   for (size_t i = 0; i < nof_cells_; ++i) {
     cc_buffers.emplace_back(pdus);
@@ -210,7 +224,7 @@ ue::ue(uint16_t rnti_,
   pdus.init(this);
 
   // Allocate buffer for PCell
-  cc_buffers[0].allocate_cc(nof_prb, nof_rx_harq_proc, nof_tx_harq_proc);
+  cc_buffers[0].allocate_cc(softbuffer_pool->allocate_object());
 }
 
 ue::~ue()
@@ -540,7 +554,7 @@ void ue::allocate_ce(srsran::sch_pdu* pdu, uint32_t lcid)
     // Allocate and initialize Rx/Tx softbuffers for new carriers (exclude PCell)
     for (size_t i = 0; i < std::min(active_scell_list.size(), cc_buffers.size()); ++i) {
       if (active_scell_list[i] and cc_buffers[i].empty()) {
-        cc_buffers[i].allocate_cc(nof_prb, nof_rx_harq_proc, nof_tx_harq_proc);
+        cc_buffers[i].allocate_cc(softbuffer_pool->allocate_object());
       }
     }
   } else {