Simplified the mutexing of the scheduler: one single mutex now protects everything, and the rwlock has been removed.

This commit is contained in:
Francisco Paisana 2020-03-02 16:38:53 +00:00
parent 7210c35c6c
commit 3cc94c3694
6 changed files with 30 additions and 95 deletions

View File

@ -121,7 +121,7 @@ public:
************************************************************/
sched();
~sched();
~sched() override;
void init(rrc_interface_mac* rrc);
int cell_cfg(const std::vector<cell_cfg_t>& cell_cfg) override;
@ -187,8 +187,6 @@ protected:
sched_args_t sched_cfg = {};
std::vector<sched_cell_params_t> sched_cell_params;
pthread_rwlock_t rwlock;
// Helper methods
template <typename Func>
int ue_db_access(uint16_t rnti, Func);
@ -200,6 +198,7 @@ protected:
std::array<uint32_t, 10> pdsch_re = {};
uint32_t current_tti = 0;
std::mutex sched_mutex;
bool configured = false;
};

View File

@ -70,9 +70,6 @@ private:
std::unique_ptr<bc_sched> bc_sched_ptr;
std::unique_ptr<ra_sched> ra_sched_ptr;
// protects access to bc/ra schedulers and harqs
std::mutex carrier_mutex;
};
//! Broadcast (SIB + paging) scheduler

View File

@ -29,7 +29,6 @@
#include "scheduler_harq.h"
#include <deque>
#include <mutex>
namespace srsenb {
@ -99,8 +98,6 @@ private:
/** This class is designed to be thread-safe because it is called from workers through scheduler thread and from
* higher layers and mac threads.
*
* 1 mutex is created for every user and only access to same user variables are mutexed
*/
class sched_ue
{
@ -264,8 +261,6 @@ private:
const std::vector<sched_cell_params_t>* cell_params_list = nullptr;
const sched_cell_params_t* main_cc_params = nullptr;
std::mutex mutex;
/* Buffer states */
bool sr = false;
int buf_mac = 0;

View File

@ -127,20 +127,14 @@ bool sched_cell_params_t::set_cfg(uint32_t enb_cc_id
* Initialization and sched configuration functions
*
*******************************************************/
sched::sched()
{
pthread_rwlock_init(&rwlock, nullptr);
}
sched::~sched()
{
pthread_rwlock_destroy(&rwlock);
}
sched::sched() : log_h(srslte::logmap::get("MAC")) {}
sched::~sched() {}
void sched::init(rrc_interface_mac* rrc_)
{
log_h = srslte::logmap::get("MAC");
rrc = rrc_;
rrc = rrc_;
// Initialize first carrier scheduler
carrier_schedulers.emplace_back(new carrier_sched{rrc, &ue_db, 0});
@ -150,18 +144,18 @@ void sched::init(rrc_interface_mac* rrc_)
int sched::reset()
{
std::lock_guard<std::mutex> lock(sched_mutex);
configured = false;
for (std::unique_ptr<carrier_sched>& c : carrier_schedulers) {
c->reset();
}
pthread_rwlock_wrlock(&rwlock);
ue_db.clear();
pthread_rwlock_unlock(&rwlock);
return 0;
}
void sched::set_sched_cfg(sched_interface::sched_args_t* sched_cfg_)
{
std::lock_guard<std::mutex> lock(sched_mutex);
if (sched_cfg_ != nullptr) {
sched_cfg = *sched_cfg_;
}
@ -169,6 +163,7 @@ void sched::set_sched_cfg(sched_interface::sched_args_t* sched_cfg_)
int sched::cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg)
{
std::lock_guard<std::mutex> lock(sched_mutex);
// Setup derived config params
sched_cell_params.resize(cell_cfg.size());
for (uint32_t cc_idx = 0; cc_idx < cell_cfg.size(); ++cc_idx) {
@ -202,34 +197,29 @@ int sched::cell_cfg(const std::vector<sched_interface::cell_cfg_t>& cell_cfg)
int sched::ue_cfg(uint16_t rnti, const sched_interface::ue_cfg_t& ue_cfg)
{
std::lock_guard<std::mutex> lock(sched_mutex);
// Add or config user
pthread_rwlock_rdlock(&rwlock);
auto it = ue_db.find(rnti);
if (it == ue_db.end()) {
pthread_rwlock_unlock(&rwlock);
// create new user
pthread_rwlock_wrlock(&rwlock);
ue_db[rnti].init(rnti, sched_cell_params);
it = ue_db.find(rnti);
pthread_rwlock_rdlock(&rwlock);
}
it->second.set_cfg(ue_cfg);
pthread_rwlock_unlock(&rwlock);
return 0;
}
int sched::ue_rem(uint16_t rnti)
{
int ret = 0;
pthread_rwlock_wrlock(&rwlock);
std::lock_guard<std::mutex> lock(sched_mutex);
int ret = 0;
if (ue_db.count(rnti) > 0) {
ue_db.erase(rnti);
} else {
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_rwlock_unlock(&rwlock);
return ret;
}
@ -240,13 +230,12 @@ bool sched::ue_exists(uint16_t rnti)
void sched::ue_needs_ta_cmd(uint16_t rnti, uint32_t nof_ta_cmd)
{
pthread_rwlock_rdlock(&rwlock);
std::lock_guard<std::mutex> lock(sched_mutex);
if (ue_db.count(rnti) > 0) {
ue_db[rnti].set_needs_ta_cmd(nof_ta_cmd);
} else {
Error("User rnti=0x%x not found\n", rnti);
}
pthread_rwlock_unlock(&rwlock);
}
void sched::phy_config_enabled(uint16_t rnti, bool enabled)
@ -323,6 +312,7 @@ int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_
int sched::dl_rach_info(uint32_t enb_cc_idx, dl_sched_rar_info_t rar_info)
{
std::lock_guard<std::mutex> lock(sched_mutex);
return carrier_schedulers[enb_cc_idx]->dl_rach_info(rar_info);
}
@ -354,6 +344,7 @@ int sched::ul_sr_info(uint32_t tti, uint16_t rnti)
void sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs)
{
std::lock_guard<std::mutex> lock(sched_mutex);
carrier_schedulers[0]->set_dl_tti_mask(tti_mask, nof_sfs);
}
@ -387,14 +378,13 @@ int sched::dl_sched(uint32_t tti, uint32_t cc_idx, sched_interface::dl_sched_res
return 0;
}
uint32_t tti_rx = sched_utils::tti_subtract(tti, TX_DELAY);
current_tti = sched_utils::max_tti(current_tti, tti_rx);
std::lock_guard<std::mutex> lock(sched_mutex);
uint32_t tti_rx = sched_utils::tti_subtract(tti, TX_DELAY);
current_tti = sched_utils::max_tti(current_tti, tti_rx);
if (cc_idx < carrier_schedulers.size()) {
// Compute scheduling Result for tti_rx
pthread_rwlock_rdlock(&rwlock);
sf_sched* tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
pthread_rwlock_unlock(&rwlock);
// copy result
sched_result = tti_sched->dl_sched_result;
@ -410,13 +400,12 @@ int sched::ul_sched(uint32_t tti, uint32_t cc_idx, srsenb::sched_interface::ul_s
return 0;
}
std::lock_guard<std::mutex> lock(sched_mutex);
// Compute scheduling Result for tti_rx
uint32_t tti_rx = sched_utils::tti_subtract(tti, 2 * FDD_HARQ_DELAY_MS);
if (cc_idx < carrier_schedulers.size()) {
pthread_rwlock_rdlock(&rwlock);
sf_sched* tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
pthread_rwlock_unlock(&rwlock);
// copy result
sched_result = tti_sched->ul_sched_result;
@ -459,16 +448,15 @@ void sched::generate_cce_location(srslte_regs_t* regs_,
template <typename Func>
int sched::ue_db_access(uint16_t rnti, Func f)
{
int ret = 0;
pthread_rwlock_rdlock(&rwlock);
auto it = ue_db.find(rnti);
int ret = 0;
std::lock_guard<std::mutex> lock(sched_mutex);
auto it = ue_db.find(rnti);
if (it != ue_db.end()) {
f(it->second);
} else {
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_rwlock_unlock(&rwlock);
return ret;
}

View File

@ -297,7 +297,6 @@ sched::carrier_sched::carrier_sched(rrc_interface_mac* rrc_,
void sched::carrier_sched::reset()
{
std::lock_guard<std::mutex> lock(carrier_mutex);
ra_sched_ptr.reset();
bc_sched_ptr.reset();
}
@ -307,8 +306,6 @@ void sched::carrier_sched::carrier_cfg(const sched_cell_params_t& cell_params_)
// carrier_sched is now fully set
cc_cfg = &cell_params_;
std::lock_guard<std::mutex> lock(carrier_mutex);
// init Broadcast/RA schedulers
bc_sched_ptr.reset(new bc_sched{*cc_cfg, rrc});
ra_sched_ptr.reset(new ra_sched{*cc_cfg, *ue_db});
@ -349,9 +346,6 @@ sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
bool dl_active = sf_dl_mask[tti_sched->get_tti_tx_dl() % sf_dl_mask.size()] == 0;
tti_sched->new_tti(tti_rx, start_cfi);
// Protects access to pending_rar[], pending_msg3[], ra_sched, bc_sched, rlc buffers
std::lock_guard<std::mutex> lock(carrier_mutex);
/* Schedule PHICH */
generate_phich(tti_sched);
@ -474,7 +468,6 @@ int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched)
int sched::carrier_sched::dl_rach_info(dl_sched_rar_info_t rar_info)
{
std::lock_guard<std::mutex> lock(carrier_mutex);
return ra_sched_ptr->dl_rach_info(rar_info);
}

View File

@ -74,7 +74,6 @@ sched_ue::sched_ue() : log_h(srslte::logmap::get("MAC "))
void sched_ue::init(uint16_t rnti_, const std::vector<sched_cell_params_t>& cell_list_params_)
{
{
std::lock_guard<std::mutex> lock(mutex);
rnti = rnti_;
cell_params_list = &cell_list_params_;
}
@ -84,8 +83,6 @@ void sched_ue::init(uint16_t rnti_, const std::vector<sched_cell_params_t>& cell
void sched_ue::set_cfg(const sched_interface::ue_cfg_t& cfg_)
{
{
std::unique_lock<std::mutex> lock(mutex);
// for the first configured cc, set it as primary cc
if (cfg.supported_cc_list.empty()) {
uint32_t primary_cc_idx = 0;
@ -137,7 +134,6 @@ void sched_ue::set_cfg(const sched_interface::ue_cfg_t& cfg_)
void sched_ue::reset()
{
{
std::lock_guard<std::mutex> lock(mutex);
cfg = {};
sr = false;
next_tpc_pusch = 1;
@ -164,14 +160,12 @@ void sched_ue::reset()
void sched_ue::set_bearer_cfg(uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg_)
{
std::lock_guard<std::mutex> lock(mutex);
cfg.ue_bearers[lc_id] = *cfg_;
set_bearer_cfg_unlocked(lc_id, *cfg_);
}
void sched_ue::rem_bearer(uint32_t lc_id)
{
std::lock_guard<std::mutex> lock(mutex);
cfg.ue_bearers[lc_id] = sched_interface::ue_bearer_cfg_t{};
set_bearer_cfg_unlocked(lc_id, sched_interface::ue_bearer_cfg_t{});
}
@ -186,7 +180,6 @@ void sched_ue::phy_config_enabled(uint32_t tti, bool enabled)
void sched_ue::ul_buffer_state(uint8_t lc_id, uint32_t bsr, bool set_value)
{
std::lock_guard<std::mutex> lock(mutex);
if (lc_id < sched_interface::MAX_LC) {
if (set_value) {
lch[lc_id].bsr = bsr;
@ -204,7 +197,6 @@ void sched_ue::ul_phr(int phr)
void sched_ue::dl_buffer_state(uint8_t lc_id, uint32_t tx_queue, uint32_t retx_queue)
{
std::lock_guard<std::mutex> lock(mutex);
if (lc_id < sched_interface::MAX_LC) {
lch[lc_id].buf_retx = retx_queue;
lch[lc_id].buf_tx = tx_queue;
@ -214,7 +206,6 @@ void sched_ue::dl_buffer_state(uint8_t lc_id, uint32_t tx_queue, uint32_t retx_q
void sched_ue::mac_buffer_state(uint32_t ce_code)
{
std::lock_guard<std::mutex> lock(mutex);
buf_mac++;
}
@ -249,9 +240,8 @@ bool sched_ue::pucch_sr_collision(uint32_t current_tti, uint32_t n_cce)
int sched_ue::set_ack_info(uint32_t tti, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack)
{
std::lock_guard<std::mutex> lock(mutex);
int ret = -1;
auto p = get_cell_index(enb_cc_idx);
int ret = -1;
auto p = get_cell_index(enb_cc_idx);
if (p.first) {
ret = carriers[p.second].set_ack_info(tti, tb_idx, ack);
} else {
@ -262,8 +252,6 @@ int sched_ue::set_ack_info(uint32_t tti, uint32_t enb_cc_idx, uint32_t tb_idx, b
void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
{
std::lock_guard<std::mutex> lock(mutex);
// Remove PDCP header??
if (len > 4) {
len -= 4;
@ -282,8 +270,7 @@ void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
void sched_ue::set_ul_crc(uint32_t tti, uint32_t enb_cc_idx, bool crc_res)
{
std::lock_guard<std::mutex> lock(mutex);
auto p = get_cell_index(enb_cc_idx);
auto p = get_cell_index(enb_cc_idx);
if (p.first) {
get_ul_harq(tti, p.second)->set_ack(0, crc_res);
} else {
@ -293,8 +280,7 @@ void sched_ue::set_ul_crc(uint32_t tti, uint32_t enb_cc_idx, bool crc_res)
void sched_ue::set_dl_ri(uint32_t tti, uint32_t enb_cc_idx, uint32_t ri)
{
std::lock_guard<std::mutex> lock(mutex);
auto p = get_cell_index(enb_cc_idx);
auto p = get_cell_index(enb_cc_idx);
if (p.first) {
carriers[p.second].dl_ri = ri;
carriers[p.second].dl_ri_tti = tti;
@ -305,8 +291,7 @@ void sched_ue::set_dl_ri(uint32_t tti, uint32_t enb_cc_idx, uint32_t ri)
void sched_ue::set_dl_pmi(uint32_t tti, uint32_t enb_cc_idx, uint32_t pmi)
{
std::lock_guard<std::mutex> lock(mutex);
auto p = get_cell_index(enb_cc_idx);
auto p = get_cell_index(enb_cc_idx);
if (p.first) {
carriers[p.second].dl_pmi = pmi;
carriers[p.second].dl_pmi_tti = tti;
@ -317,8 +302,7 @@ void sched_ue::set_dl_pmi(uint32_t tti, uint32_t enb_cc_idx, uint32_t pmi)
void sched_ue::set_dl_cqi(uint32_t tti, uint32_t enb_cc_idx, uint32_t cqi)
{
std::lock_guard<std::mutex> lock(mutex);
auto p = get_cell_index(enb_cc_idx);
auto p = get_cell_index(enb_cc_idx);
if (p.second != std::numeric_limits<uint32_t>::max()) {
carriers[p.second].dl_cqi = cqi;
carriers[p.second].dl_cqi_tti = tti;
@ -330,8 +314,7 @@ void sched_ue::set_dl_cqi(uint32_t tti, uint32_t enb_cc_idx, uint32_t cqi)
void sched_ue::set_ul_cqi(uint32_t tti, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code)
{
std::lock_guard<std::mutex> lock(mutex);
auto p = get_cell_index(enb_cc_idx);
auto p = get_cell_index(enb_cc_idx);
if (p.first) {
carriers[p.second].ul_cqi = cqi;
carriers[p.second].ul_cqi_tti = tti;
@ -342,7 +325,6 @@ void sched_ue::set_ul_cqi(uint32_t tti, uint32_t enb_cc_idx, uint32_t cqi, uint3
void sched_ue::tpc_inc()
{
std::lock_guard<std::mutex> lock(mutex);
if (power_headroom > 0) {
next_tpc_pusch = 3;
next_tpc_pucch = 3;
@ -352,7 +334,6 @@ void sched_ue::tpc_inc()
void sched_ue::tpc_dec()
{
std::lock_guard<std::mutex> lock(mutex);
next_tpc_pusch = 0;
next_tpc_pucch = 0;
log_h->info("SCHED: Set TCP=%d for rnti=0x%x\n", next_tpc_pucch, rnti);
@ -400,8 +381,6 @@ int sched_ue::generate_format1(dl_harq_proc* h,
uint32_t cfi,
const rbgmask_t& user_mask)
{
std::lock_guard<std::mutex> lock(mutex);
srslte_dci_dl_t* dci = &data->dci;
int mcs = 0;
@ -497,8 +476,7 @@ int sched_ue::generate_format2a(dl_harq_proc* h,
uint32_t cfi,
const rbgmask_t& user_mask)
{
std::lock_guard<std::mutex> lock(mutex);
int ret = generate_format2a_unlocked(h, data, tti, cc_idx, cfi, user_mask);
int ret = generate_format2a_unlocked(h, data, tti, cc_idx, cfi, user_mask);
return ret;
}
@ -612,9 +590,6 @@ int sched_ue::generate_format2(dl_harq_proc* h,
uint32_t cfi,
const rbgmask_t& user_mask)
{
std::lock_guard<std::mutex> lock(mutex);
/* Call Format 2a (common) */
int ret = generate_format2a_unlocked(h, data, tti, cc_idx, cfi, user_mask);
@ -637,8 +612,6 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
srslte_dci_location_t dci_pos,
int explicit_mcs)
{
std::lock_guard<std::mutex> lock(mutex);
ul_harq_proc* h = get_ul_harq(tti, cc_idx);
srslte_dci_ul_t* dci = &data->dci;
@ -734,7 +707,6 @@ bool sched_ue::is_first_dl_tx()
bool sched_ue::needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_be_sent)
{
std::lock_guard<std::mutex> lock(mutex);
return needs_cqi_unlocked(tti, cc_idx, will_be_sent);
}
@ -766,7 +738,6 @@ bool sched_ue::is_conres_ce_pending() const
uint32_t sched_ue::get_pending_dl_new_data()
{
std::lock_guard<std::mutex> lock(mutex);
return get_pending_dl_new_data_unlocked();
}
@ -775,7 +746,6 @@ uint32_t sched_ue::get_pending_dl_new_data()
/// \return number of bytes to be allocated
uint32_t sched_ue::get_pending_dl_new_data_total()
{
std::lock_guard<std::mutex> lock(mutex);
return get_pending_dl_new_data_total_unlocked();
}
@ -808,13 +778,11 @@ uint32_t sched_ue::get_pending_dl_new_data_unlocked()
uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
{
std::lock_guard<std::mutex> lock(mutex);
return get_pending_ul_new_data_unlocked(tti);
}
uint32_t sched_ue::get_pending_ul_old_data(uint32_t cc_idx)
{
std::lock_guard<std::mutex> lock(mutex);
return get_pending_ul_old_data_unlocked(cc_idx);
}
@ -865,8 +833,6 @@ uint32_t sched_ue::get_pending_ul_old_data_unlocked(uint32_t cc_idx)
uint32_t sched_ue::get_required_prb_dl(uint32_t cc_idx, uint32_t req_bytes, uint32_t nof_ctrl_symbols)
{
std::lock_guard<std::mutex> lock(mutex);
int mcs = 0;
uint32_t nof_re = 0;
int tbs = 0;
@ -893,7 +859,6 @@ uint32_t sched_ue::get_required_prb_dl(uint32_t cc_idx, uint32_t req_bytes, uint
uint32_t sched_ue::get_required_prb_ul(uint32_t cc_idx, uint32_t req_bytes)
{
std::lock_guard<std::mutex> lock(mutex);
return carriers[cc_idx].get_required_prb_ul(req_bytes);
}
@ -926,7 +891,6 @@ void sched_ue::reset_pending_pids(uint32_t tti_rx, uint32_t cc_idx)
/* Gets HARQ process with oldest pending retx */
dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t ue_cc_idx)
{
std::lock_guard<std::mutex> lock(mutex);
if (ue_cc_idx < carriers.size() and carriers[ue_cc_idx].is_active()) {
return carriers[ue_cc_idx].get_pending_dl_harq(tti_tx_dl);
}
@ -935,7 +899,6 @@ dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t ue_cc_i
dl_harq_proc* sched_ue::get_empty_dl_harq(uint32_t tti_tx_dl, uint32_t ue_cc_idx)
{
std::lock_guard<std::mutex> lock(mutex);
if (ue_cc_idx < carriers.size() and carriers[ue_cc_idx].is_active()) {
return carriers[ue_cc_idx].get_empty_dl_harq(tti_tx_dl);
}