From ad0b3ce6e2a9c93212c5feb0c8618e228d42e7f2 Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Tue, 15 Oct 2019 14:07:53 +0100 Subject: [PATCH 1/8] modernize the random sched test --- srsenb/test/mac/scheduler_test_rand.cc | 174 +++++++++++++------------ 1 file changed, 92 insertions(+), 82 deletions(-) diff --git a/srsenb/test/mac/scheduler_test_rand.cc b/srsenb/test/mac/scheduler_test_rand.cc index cf744de9d..438b6e2dd 100644 --- a/srsenb/test/mac/scheduler_test_rand.cc +++ b/srsenb/test/mac/scheduler_test_rand.cc @@ -22,7 +22,6 @@ #include "srsenb/hdr/stack/mac/scheduler.h" #include "srsenb/hdr/stack/mac/scheduler_ue.h" #include -#include #include #include #include @@ -37,14 +36,46 @@ #include "srslte/phy/utils/debug.h" #include "srslte/radio/radio.h" +/******************************************************** + * Random Tester for Scheduler. + * Current Checks: + * - Check if users are only added during a PRACH TTI + * - Allocation (DCI+RBs) of users that no longer exist + * - RAR is scheduled within the RAR window + * - Msg3 checks: + * - scheduled/received at expected TTI + * - with the correct RNTI and without PDCCH alloc + * - unexpected msg3 arrival + * - Users without data to Tx cannot be allocated in UL + * - Retxs always take precedence + * - DCI: + * - collisions detected + * - mismatch between the union of all dcis and + * scheduler class aggregate dci value + * - Invalid BC SIB index or TBS + * - Harqs: + * - invalid pids scheduled + * - empty harqs scheduled + * - invalid harq TTI + * - consistent NCCE loc + * - invalid retx number + * - DL adaptive retx/new tx <=> PDCCH alloc + * ... + *******************************************************/ + +/*************************** + * Setup Random generators + **************************/ // uint32_t const seed = std::random_device()(); -uint32_t const seed = 2452071795; // time(NULL); +uint32_t const seed = 2452071795; // time(nullptr); std::default_random_engine rand_gen(seed); std::uniform_real_distribution unif_dist(0, 1.0); -float randf() + +float randf() { return unif_dist(rand_gen); } + uint32_t err_counter = 0; uint32_t warn_counter = 0; struct ue_stats_t { @@ -73,42 +104,26 @@ class log_tester : public srslte::log_filter { public: explicit log_tester(std::string layer) : srslte::log_filter(layer) {} - void error(const char* message, ...) __attribute__((format(printf, 2, 3))); + ~log_tester() final + { + info("[TESTER] UE stats:\n"); + for (auto& e : ue_stats) { + info("0x%x: {DL RBs: %lu, UL RBs: %lu}\n", e.first, e.second.nof_dl_rbs, e.second.nof_ul_rbs); + } + info("[TESTER] This was the seed: %u\n", seed); + } }; -void log_tester::error(const char* message, ...) -{ - if (level >= srslte::LOG_LEVEL_ERROR) { - char* args_msg = NULL; - va_list args; - va_start(args, message); - if (vasprintf(&args_msg, message, args) > 0) - all_log(srslte::LOG_LEVEL_ERROR, tti, args_msg); - va_end(args); - free(args_msg); - } -} log_tester log_out("ALL"); -void log_on_exit() -{ - log_out.info("[TESTER] UE stats:\n"); - for (auto& e : ue_stats) { - log_out.info("0x%x: {DL RBs: %lu, UL RBs: %lu}\n", e.first, e.second.nof_dl_rbs, e.second.nof_ul_rbs); - } - log_out.info("[TESTER] This was the seed: %u\n", seed); -} - #define Warning(fmt, ...) \ log_out.warning(fmt, ##__VA_ARGS__); \ warn_counter++; #define TestError(fmt, ...) \ log_out.error(fmt, ##__VA_ARGS__); \ - log_on_exit(); \ exit(-1); #define CondError(cond, fmt, ...) 
\ if (cond) { \ log_out.error(fmt, ##__VA_ARGS__); \ - log_on_exit(); \ exit(-1); \ } @@ -119,17 +134,15 @@ void log_on_exit() struct sched_sim_args { struct tti_event_t { struct user_event_t { - uint32_t sr_data; - uint32_t dl_data; - uint32_t dl_nof_retxs; - user_event_t() : sr_data(0), dl_data(0), dl_nof_retxs(0) {} + uint32_t sr_data = 0; + uint32_t dl_data = 0; + uint32_t dl_nof_retxs = 0; }; std::map users; - bool new_user; - bool rem_user; + bool new_user = false; + bool rem_user = false; uint32_t new_rnti; uint32_t rem_rnti; - tti_event_t() : new_user(false), rem_user(false) {} }; std::vector tti_events; @@ -150,8 +163,8 @@ struct sched_tester : public srsenb::sched { bool has_ul_retx = false; bool has_ul_newtx = false; ///< *no* retx, but has tx bool ul_retx_got_delayed = false; - srsenb::sched_interface::ul_sched_data_t* ul_sched = NULL; // fast lookup - srsenb::sched_interface::dl_sched_data_t* dl_sched = NULL; // fast lookup + srsenb::sched_interface::ul_sched_data_t* ul_sched = nullptr; // fast lookup + srsenb::sched_interface::dl_sched_data_t* dl_sched = nullptr; // fast lookup srsenb::dl_harq_proc dl_harqs[2 * FDD_HARQ_DELAY_MS]; srsenb::ul_harq_proc ul_harq; }; @@ -172,20 +185,18 @@ struct sched_tester : public srsenb::sched { typedef std::map::iterator ue_it_t; }; struct ue_info { - int prach_tti, rar_tti, msg3_tti; + int prach_tti = -1, rar_tti = -1, msg3_tti = -1; srsenb::sched_interface::ue_bearer_cfg_t bearer_cfg; srsenb::sched_interface::ue_cfg_t user_cfg; - uint32_t dl_data; - uint32_t ul_data; - ue_info() : prach_tti(-1), rar_tti(-1), msg3_tti(-1), dl_data(0), ul_data(0) {} + uint32_t dl_data = 0; + uint32_t ul_data = 0; }; struct ack_info_t { - uint16_t rnti; - uint32_t tti; - bool dl_ack; - uint32_t retx_delay; + uint16_t rnti; + uint32_t tti; + bool dl_ack = false; + uint32_t retx_delay = 0; srsenb::dl_harq_proc dl_harq; - ack_info_t() : dl_ack(false), retx_delay(0) {} }; struct ul_ack_info_t { uint16_t rnti; @@ -234,8 +245,8 @@ void sched_tester::add_user(uint16_t rnti, info.user_cfg = ue_cfg_; tester_ues.insert(std::make_pair(rnti, info)); - if (ue_cfg(rnti, &ue_cfg_)) { - TestError("[TESTER] Registering new user rnti=0x%x to SCHED\n", rnti); + if (ue_cfg(rnti, &ue_cfg_) != SRSLTE_SUCCESS) { + TestError("[TESTER] Registering new user rnti=0x%x to SCHED\n", rnti) } dl_sched_rar_info_t rar_info = {}; rar_info.prach_tti = tti_data.tti_rx; @@ -312,12 +323,10 @@ void sched_tester::process_tti_args() void sched_tester::before_sched() { - typedef std::map::iterator it_t; - // check pending data buffers - for (it_t it = ue_db.begin(); it != ue_db.end(); ++it) { - uint16_t rnti = it->first; - srsenb::sched_ue* user = &it->second; + for (auto& it : ue_db) { + uint16_t rnti = it.first; + srsenb::sched_ue* user = &it.second; tester_user_results d; srsenb::ul_harq_proc* hul = user->get_ul_harq(tti_data.tti_tx_ul); d.ul_pending_data = get_ul_buffer(rnti); @@ -326,9 +335,9 @@ void sched_tester::before_sched() d.has_ul_retx = hul->has_pending_retx(); d.has_ul_tx = d.has_ul_retx or d.ul_pending_data > 0; srsenb::dl_harq_proc* hdl = user->get_pending_dl_harq(tti_data.tti_tx_dl); - d.has_dl_retx = (hdl != NULL) and hdl->has_pending_retx(0, tti_data.tti_tx_dl); - d.has_dl_tx = (hdl != NULL) or (it->second.get_empty_dl_harq() != NULL and d.dl_pending_data > 0); - d.has_ul_newtx = not d.has_ul_retx and d.ul_pending_data > 0; + d.has_dl_retx = (hdl != nullptr) and hdl->has_pending_retx(0, tti_data.tti_tx_dl); + d.has_dl_tx = (hdl != nullptr) or (it.second.get_empty_dl_harq() != 
nullptr and d.dl_pending_data > 0); + d.has_ul_newtx = not d.has_ul_retx and d.ul_pending_data > 0; tti_data.ue_data.insert(std::make_pair(rnti, d)); tti_data.total_ues.dl_pending_data += d.dl_pending_data; tti_data.total_ues.ul_pending_data += d.ul_pending_data; @@ -449,15 +458,15 @@ void sched_tester::assert_no_empty_allocs() { // Test if allocations only take place for users with pending data or in RAR for (auto& iter : tti_data.ue_data) { - uint16_t rnti = iter.first; - srsenb::sched_ue* user = &ue_db[rnti]; + uint16_t rnti = iter.first; + // srsenb::sched_ue* user = &ue_db[rnti]; - if (!iter.second.has_ul_tx and tti_data.ue_data[rnti].ul_sched != NULL and + if (!iter.second.has_ul_tx and tti_data.ue_data[rnti].ul_sched != nullptr and tti_data.ue_data[rnti].ul_sched->needs_pdcch) { // FIXME: This test does not work for adaptive re-tx TestError("[TESTER] There was a user without data that got allocated in UL\n"); } - srsenb::ul_harq_proc* hul = user->get_ul_harq(tti_data.tti_tx_ul); + // srsenb::ul_harq_proc* hul = user->get_ul_harq(tti_data.tti_tx_ul); iter.second.ul_retx_got_delayed = iter.second.has_ul_retx and iter.second.ul_harq.is_empty(0); tti_data.total_ues.ul_retx_got_delayed |= iter.second.ul_retx_got_delayed; // Retxs cannot give space to newtx allocations @@ -467,9 +476,8 @@ void sched_tester::assert_no_empty_allocs() // There must be allocations if there is pending data/retxs. bool no_dl_allocs = true; - for (std::map::iterator it = tti_data.ue_data.begin(); it != tti_data.ue_data.end(); - ++it) { - if (it->second.dl_sched != NULL) { + for (auto& it : tti_data.ue_data) { + if (it.second.dl_sched != nullptr) { no_dl_allocs = false; } } @@ -629,13 +637,15 @@ void sched_tester::test_harqs() } for (const auto& ue : ue_db) { const auto& hprev = tti_data.ue_data[ue.first].ul_harq; - if (not hprev.has_pending_ack()) + if (not hprev.has_pending_ack()) { continue; + } uint32_t i = 0; for (; i < tti_data.sched_result_ul.nof_phich_elems; ++i) { const auto& phich = tti_data.sched_result_ul.phich[i]; - if (phich.rnti == ue.first) + if (phich.rnti == ue.first) { break; + } } CondError(i == tti_data.sched_result_ul.nof_phich_elems, "[TESTER] harq had pending ack but no phich was allocked\n"); @@ -777,11 +787,11 @@ void sched_tester::test_collisions() CondError(srslte_ra_dl_dci_to_grant( &cfg.cell, &dl_sf, SRSLTE_TM1, false, &tti_data.sched_result_dl.data[i].dci, &grant) == SRSLTE_ERROR, "Failed to decode PDSCH grant\n"); - for (uint32_t i = 0; i < alloc_mask.size(); ++i) { - if (grant.prb_idx[0][i]) { - alloc_mask.set(i); + for (uint32_t j = 0; j < alloc_mask.size(); ++j) { + if (grant.prb_idx[0][j]) { + alloc_mask.set(j); } else { - alloc_mask.reset(i); + alloc_mask.reset(j); } } if ((dl_allocs & alloc_mask).any()) { @@ -798,9 +808,9 @@ void sched_tester::test_collisions() &cfg.cell, &dl_sf, SRSLTE_TM1, false, &tti_data.sched_result_dl.bc[i].dci, &grant) == SRSLTE_ERROR, "Failed to decode PDSCH grant\n"); alloc_mask.reset(); - for (uint32_t i = 0; i < alloc_mask.size(); ++i) { - if (grant.prb_idx[0][i]) { - alloc_mask.set(i); + for (uint32_t j = 0; j < alloc_mask.size(); ++j) { + if (grant.prb_idx[0][j]) { + alloc_mask.set(j); } } if ((dl_allocs & alloc_mask).any()) { @@ -816,11 +826,11 @@ void sched_tester::test_collisions() CondError(srslte_ra_dl_dci_to_grant( &cfg.cell, &dl_sf, SRSLTE_TM1, false, &tti_data.sched_result_dl.rar[i].dci, &grant) == SRSLTE_ERROR, "Failed to decode PDSCH grant\n"); - for (uint32_t i = 0; i < alloc_mask.size(); ++i) { - if (grant.prb_idx[0][i]) { - 
alloc_mask.set(i); + for (uint32_t j = 0; j < alloc_mask.size(); ++j) { + if (grant.prb_idx[0][j]) { + alloc_mask.set(j); } else { - alloc_mask.reset(i); + alloc_mask.reset(j); } } if ((dl_allocs & alloc_mask).any()) { @@ -924,7 +934,7 @@ void sched_tester::ack_txs() // for (auto it = ue_db.begin(); it != ue_db.end(); ++it) { // uint16_t rnti = it->first; // srsenb::ul_harq_proc* h = ue_db[rnti].get_ul_harq(tti_data.tti_rx); - // if (h != NULL and not h->is_empty()) { + // if (h != nullptr and not h->is_empty()) { // ul_crc_info(tti_data.tti_rx, rnti, ack); // } // } @@ -974,7 +984,7 @@ void test_scheduler_rand(srsenb::sched_interface::cell_cfg_t cell_cfg, const sch srsenb::sched_interface::dl_sched_res_t& sched_result_dl = tester.tti_data.sched_result_dl; srsenb::sched_interface::ul_sched_res_t& sched_result_ul = tester.tti_data.sched_result_ul; - tester.init(NULL, &log_out); + tester.init(nullptr, &log_out); tester.set_metric(&dl_metric, &ul_metric); tester.cell_cfg(&cell_cfg); @@ -1005,6 +1015,7 @@ sched_sim_args rand_sim_params(const srsenb::sched_interface::cell_cfg_t& cell_c float ul_sr_exps[] = {1, 4}; // log rand float dl_data_exps[] = {1, 4}; // log rand uint32_t max_nof_users = 500; + std::uniform_int_distribution<> connection_dur_dist(min_conn_dur, max_conn_dur); bzero(&sim_args.ue_cfg, sizeof(srsenb::sched_interface::ue_cfg_t)); sim_args.ue_cfg.aperiodic_cqi_period = 40; @@ -1029,8 +1040,8 @@ sched_sim_args rand_sim_params(const srsenb::sched_interface::cell_cfg_t& cell_c } } - for (uint32_t i = 0; i < current_rntis.size(); ++i) { - uint32_t rnti = current_rntis[i][0]; + for (auto& current_rnti : current_rntis) { + uint32_t rnti = current_rnti[0]; if (randf() < P_ul_sr) { float exp = ul_sr_exps[0] + randf() * (ul_sr_exps[1] - ul_sr_exps[0]); sim_args.tti_events[tti].users[rnti].sr_data = (uint32_t)pow(10, exp); @@ -1048,7 +1059,7 @@ sched_sim_args rand_sim_params(const srsenb::sched_interface::cell_cfg_t& cell_c std::vector elem(3); elem[0] = rnti_start; elem[1] = tti; - elem[2] = min_conn_dur + rand() % (max_conn_dur - min_conn_dur); + elem[2] = connection_dur_dist(rand_gen); current_rntis.push_back(elem); sim_args.tti_events[tti].new_user = true; sim_args.tti_events[tti].new_rnti = rnti_start++; @@ -1058,11 +1069,10 @@ sched_sim_args rand_sim_params(const srsenb::sched_interface::cell_cfg_t& cell_c return sim_args; } -int main(int argc, char* argv[]) +int main() { printf("[TESTER] This is the chosen seed: %u\n", seed); /* initialize random seed: */ - srand(seed); uint32_t N_runs = 1, nof_ttis = 10240 + 10; for (uint32_t n = 0; n < N_runs; ++n) { From 84ac16826fca2e4c1c38e58cd6472034943ac606 Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Tue, 15 Oct 2019 16:35:12 +0100 Subject: [PATCH 2/8] modernized the scheduler code, and removed some clang-tidy warnings --- srsenb/hdr/stack/mac/scheduler.h | 81 +++++++++--------------- srsenb/hdr/stack/mac/scheduler_grid.h | 41 +++++++++--- srsenb/hdr/stack/mac/scheduler_metric.h | 20 +++--- srsenb/src/stack/mac/scheduler.cc | 73 +++++++++++---------- srsenb/src/stack/mac/scheduler_metric.cc | 36 ++++++----- srsenb/test/mac/scheduler_test_rand.cc | 33 +++++----- 6 files changed, 143 insertions(+), 141 deletions(-) diff --git a/srsenb/hdr/stack/mac/scheduler.h b/srsenb/hdr/stack/mac/scheduler.h index 26483283c..a0c7b09bc 100644 --- a/srsenb/hdr/stack/mac/scheduler.h +++ b/srsenb/hdr/stack/mac/scheduler.h @@ -46,31 +46,10 @@ namespace srsenb { class sched : public sched_interface { public: - // handle for DL metric - class 
dl_tti_sched_t - { - public: - virtual alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) = 0; - virtual const rbgmask_t& get_dl_mask() const = 0; - virtual uint32_t get_tti_tx_dl() const = 0; - virtual uint32_t get_nof_ctrl_symbols() const = 0; - virtual bool is_dl_alloc(sched_ue* user) const = 0; - }; - - // handle for UL metric - class ul_tti_sched_t - { - public: - virtual alloc_outcome_t alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) = 0; - virtual const prbmask_t& get_ul_mask() const = 0; - virtual uint32_t get_tti_tx_ul() const = 0; - virtual bool is_ul_alloc(sched_ue* user) const = 0; - }; - /************************************************************* - * + * * Scheduling metric interface definition - * + * ************************************************************/ class metric_dl @@ -100,41 +79,41 @@ public: sched(); ~sched(); - void init(rrc_interface_mac *rrc, srslte::log *log); - void set_metric(metric_dl *dl_metric, metric_ul *ul_metric); - int cell_cfg(cell_cfg_t *cell_cfg); - void set_sched_cfg(sched_args_t *sched_cfg); - int reset(); + void init(rrc_interface_mac* rrc, srslte::log* log); + void set_metric(metric_dl* dl_metric, metric_ul* ul_metric); + int cell_cfg(cell_cfg_t* cell_cfg) final; + void set_sched_cfg(sched_args_t* sched_cfg); + int reset() final; - int ue_cfg(uint16_t rnti, ue_cfg_t *ue_cfg); - int ue_rem(uint16_t rnti); - bool ue_exists(uint16_t rnti); + int ue_cfg(uint16_t rnti, ue_cfg_t* ue_cfg) final; + int ue_rem(uint16_t rnti) final; + bool ue_exists(uint16_t rnti) final; void ue_needs_ta_cmd(uint16_t rnti, uint32_t nof_ta_cmd); - void phy_config_enabled(uint16_t rnti, bool enabled); - - int bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, ue_bearer_cfg_t *cfg); - int bearer_ue_rem(uint16_t rnti, uint32_t lc_id); + void phy_config_enabled(uint16_t rnti, bool enabled); - uint32_t get_ul_buffer(uint16_t rnti); - uint32_t get_dl_buffer(uint16_t rnti); + int bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, ue_bearer_cfg_t* cfg) final; + int bearer_ue_rem(uint16_t rnti, uint32_t lc_id) final; - int dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t retx_queue); - int dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code); + uint32_t get_ul_buffer(uint16_t rnti) final; + uint32_t get_dl_buffer(uint16_t rnti) final; + + int dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t retx_queue) final; + int dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code) final; int dl_ant_info(uint16_t rnti, asn1::rrc::phys_cfg_ded_s::ant_info_c_* dedicated); - int dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack); - int dl_rach_info(dl_sched_rar_info_t rar_info); - int dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value); - int dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value); - int dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value); - - int ul_crc_info(uint32_t tti, uint16_t rnti, bool crc); - int ul_sr_info(uint32_t tti, uint16_t rnti); - int ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value = true); - int ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len); - int ul_phr(uint16_t rnti, int phr); - int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code); + int dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack) final; + int dl_rach_info(dl_sched_rar_info_t rar_info) final; + int dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value) final; + int dl_pmi_info(uint32_t tti, 
uint16_t rnti, uint32_t pmi_value) final; + int dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value) final; + + int ul_crc_info(uint32_t tti, uint16_t rnti, bool crc) final; + int ul_sr_info(uint32_t tti, uint16_t rnti) override; + int ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value = true) final; + int ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len) final; + int ul_phr(uint16_t rnti, int phr) final; + int ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code) final; int dl_sched(uint32_t tti, dl_sched_res_t* sched_result) final; int ul_sched(uint32_t tti, ul_sched_res_t* sched_result) final; diff --git a/srsenb/hdr/stack/mac/scheduler_grid.h b/srsenb/hdr/stack/mac/scheduler_grid.h index ced9b7d0a..e65af20fd 100644 --- a/srsenb/hdr/stack/mac/scheduler_grid.h +++ b/srsenb/hdr/stack/mac/scheduler_grid.h @@ -30,10 +30,10 @@ namespace srsenb { -// Type of Allocation +//! Type of Allocation enum class alloc_type_t { DL_BC, DL_PCCH, DL_RAR, DL_DATA, UL_DATA }; -// Result of alloc attempt +//! Result of alloc attempt struct alloc_outcome_t { enum result_enum { SUCCESS, DCI_COLLISION, RB_COLLISION, ERROR }; result_enum result = ERROR; @@ -41,19 +41,20 @@ struct alloc_outcome_t { alloc_outcome_t(result_enum e) : result(e) {} operator result_enum() { return result; } operator bool() { return result == SUCCESS; } - const char* to_string() const; + const char* to_string() const; }; +//! Class responsible for managing a PDCCH CCE grid, namely cce allocs, and avoid collisions. class pdcch_grid_t { public: struct alloc_t { uint16_t rnti; srslte_dci_location_t dci_pos = {0, 0}; - pdcch_mask_t current_mask; - pdcch_mask_t total_mask; + pdcch_mask_t current_mask; + pdcch_mask_t total_mask; }; - typedef std::vector alloc_result_t; + using alloc_result_t = std::vector; void init(srslte::log* log_, srslte_regs_t* regs, @@ -99,10 +100,11 @@ private: size_t nof_dci_allocs = 0; }; +//! manages a full TTI grid, namely CCE and RB allocations class tti_grid_t { public: - typedef std::pair ctrl_alloc_t; + using ctrl_alloc_t = std::pair; void init(srslte::log* log_, sched_interface::cell_cfg_t* cell_, const pdcch_grid_t& pdcch_grid); void new_tti(uint32_t tti_rx_, uint32_t start_cfi); @@ -125,14 +127,14 @@ public: uint32_t get_sf_idx() const { return pdcch_alloc.get_sf_idx(); } private: - alloc_outcome_t alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user = NULL); + alloc_outcome_t alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user = nullptr); // consts srslte::log* log_h = nullptr; sched_interface::cell_cfg_t* cell_cfg = nullptr; uint32_t nof_prbs = 0; uint32_t nof_rbgs = 0; - uint32_t si_n_rbg, rar_n_rbg = 0; + uint32_t si_n_rbg = 0, rar_n_rbg = 0; // tti const uint32_t tti_rx = 10241; @@ -147,6 +149,27 @@ private: prbmask_t ul_mask = {}; }; +//! generic interface used by DL scheduler algorithm +class dl_tti_sched_t +{ +public: + virtual alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) = 0; + virtual const rbgmask_t& get_dl_mask() const = 0; + virtual uint32_t get_tti_tx_dl() const = 0; + virtual uint32_t get_nof_ctrl_symbols() const = 0; + virtual bool is_dl_alloc(sched_ue* user) const = 0; +}; + +//! 
generic interface used by UL scheduler algorithm +class ul_tti_sched_t +{ +public: + virtual alloc_outcome_t alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) = 0; + virtual const prbmask_t& get_ul_mask() const = 0; + virtual uint32_t get_tti_tx_ul() const = 0; + virtual bool is_ul_alloc(sched_ue* user) const = 0; +}; + } // namespace srsenb #endif // SRSLTE_SCHEDULER_GRID_H diff --git a/srsenb/hdr/stack/mac/scheduler_metric.h b/srsenb/hdr/stack/mac/scheduler_metric.h index 29332c8a3..c0cff65bb 100644 --- a/srsenb/hdr/stack/mac/scheduler_metric.h +++ b/srsenb/hdr/stack/mac/scheduler_metric.h @@ -32,34 +32,32 @@ class dl_metric_rr : public sched::metric_dl public: void set_log(srslte::log* log_) final; - void sched_users(std::map& ue_db, sched::dl_tti_sched_t* tti_sched) final; + void sched_users(std::map& ue_db, dl_tti_sched_t* tti_sched) final; private: bool find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask); dl_harq_proc* allocate_user(sched_ue* user); - srslte::log* log_h = nullptr; - sched::dl_tti_sched_t* tti_alloc = nullptr; + srslte::log* log_h = nullptr; + dl_tti_sched_t* tti_alloc = nullptr; }; class ul_metric_rr : public sched::metric_ul { public: void set_log(srslte::log* log_) final; - void sched_users(std::map& ue_db, sched::ul_tti_sched_t* tti_sched) final; + void sched_users(std::map& ue_db, ul_tti_sched_t* tti_sched) final; private: bool find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc); ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user); - ul_harq_proc* allocate_user_retx_prbs(sched_ue *user); + ul_harq_proc* allocate_user_retx_prbs(sched_ue* user); - srslte::log* log_h = nullptr; - sched::ul_tti_sched_t* tti_alloc = nullptr; - uint32_t current_tti; + srslte::log* log_h = nullptr; + ul_tti_sched_t* tti_alloc = nullptr; + uint32_t current_tti; }; - -} +} // namespace srsenb #endif // SRSENB_SCHEDULER_METRIC_H - diff --git a/srsenb/src/stack/mac/scheduler.cc b/srsenb/src/stack/mac/scheduler.cc index 68ed2b5ea..98e35bdaa 100644 --- a/srsenb/src/stack/mac/scheduler.cc +++ b/srsenb/src/stack/mac/scheduler.cc @@ -164,7 +164,7 @@ sched::tti_sched_t::alloc_rar(uint32_t aggr_lvl, const dl_sched_rar_t& rar_grant ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, buf_rar, ra_rnti); if (not ret.first) { Warning("SCHED: Could not allocate RAR for L=%d, cause=%s\n", aggr_lvl, ret.first.to_string()); - return {ret.first, NULL}; + return {ret.first, nullptr}; } // Allocation successful @@ -529,7 +529,8 @@ int sched::tti_sched_t::generate_format1a( mcs = i; tbs = srslte_ra_tbs_from_idx(i, 2); break; - } else if (srslte_ra_tbs_from_idx(i, 3) >= tbs) { + } + if (srslte_ra_tbs_from_idx(i, 3) >= tbs) { dci->type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_3; mcs = i; tbs = srslte_ra_tbs_from_idx(i, 3); @@ -568,10 +569,10 @@ int sched::tti_sched_t::generate_format1a( sched::sched() : bc_aggr_level(0), rar_aggr_level(0), P(0), si_n_rbg(0), rar_n_rbg(0), nof_rbg(0) { current_tti = 0; - log_h = NULL; - dl_metric = NULL; - ul_metric = NULL; - rrc = NULL; + log_h = nullptr; + dl_metric = nullptr; + ul_metric = nullptr; + rrc = nullptr; bzero(&cfg, sizeof(cfg)); bzero(®s, sizeof(regs)); @@ -584,7 +585,7 @@ sched::sched() : bc_aggr_level(0), rar_aggr_level(0), P(0), si_n_rbg(0), rar_n_r bzero(rar_locations[i], sizeof(sched_ue::sched_dci_cce_t) * 10); } - pthread_rwlock_init(&rwlock, NULL); + pthread_rwlock_init(&rwlock, nullptr); reset(); } @@ -627,7 +628,7 @@ int sched::reset() void sched::set_sched_cfg(sched_interface::sched_args_t* sched_cfg_) { - if (sched_cfg_) 
{ + if (sched_cfg_ != nullptr) { sched_cfg = *sched_cfg_; } } @@ -651,7 +652,7 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg) cfg = *cell_cfg; // Get DCI locations - if (srslte_regs_init(®s, cfg.cell)) { + if (srslte_regs_init(®s, cfg.cell) != LIBLTE_SUCCESS) { Error("Getting DCI locations\n"); return SRSLTE_ERROR; } @@ -733,7 +734,7 @@ int sched::ue_rem(uint16_t rnti) { int ret = 0; pthread_rwlock_wrlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db.erase(rnti); } else { Error("User rnti=0x%x not found\n", rnti); @@ -764,7 +765,7 @@ void sched::ue_needs_ta_cmd(uint16_t rnti, uint32_t nof_ta_cmd) { void sched::phy_config_enabled(uint16_t rnti, bool enabled) { pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].phy_config_enabled(current_tti, enabled); } else { Error("User rnti=0x%x not found\n", rnti); @@ -776,7 +777,7 @@ int sched::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bear { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].set_bearer_cfg(lc_id, cfg_); } else { Error("User rnti=0x%x not found\n", rnti); @@ -790,7 +791,7 @@ int sched::bearer_ue_rem(uint16_t rnti, uint32_t lc_id) { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].rem_bearer(lc_id); } else { Error("User rnti=0x%x not found\n", rnti); @@ -804,7 +805,7 @@ uint32_t sched::get_dl_buffer(uint16_t rnti) { uint32_t ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ret = ue_db[rnti].get_pending_dl_new_data(current_tti); } else { Error("User rnti=0x%x not found\n", rnti); @@ -817,7 +818,7 @@ uint32_t sched::get_ul_buffer(uint16_t rnti) { uint32_t ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ret = ue_db[rnti].get_pending_ul_new_data(current_tti); } else { Error("User rnti=0x%x not found\n", rnti); @@ -830,7 +831,7 @@ int sched::dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].dl_buffer_state(lc_id, tx_queue, retx_queue); } else { Error("User rnti=0x%x not found\n", rnti); @@ -844,7 +845,7 @@ int sched::dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code) { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].mac_buffer_state(ce_code); } else { Error("User rnti=0x%x not found\n", rnti); @@ -858,7 +859,7 @@ int sched::dl_ant_info(uint16_t rnti, asn1::rrc::phys_cfg_ded_s::ant_info_c_* dl { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].set_dl_ant_info(dl_ant_info); } else { Error("User rnti=0x%x not found\n", rnti); @@ -872,7 +873,7 @@ int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack) { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ret = ue_db[rnti].set_ack_info(tti, tb_idx, ack); } else { Error("User rnti=0x%x not found\n", rnti); @@ -886,7 +887,7 @@ int sched::ul_crc_info(uint32_t tti, uint16_t rnti, bool crc) { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].set_ul_crc(tti, crc); } else { Error("User rnti=0x%x not found\n", rnti); @@ -900,7 +901,7 @@ int 
sched::dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value) { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].set_dl_ri(tti, cqi_value); } else { Error("User rnti=0x%x not found\n", rnti); @@ -914,7 +915,7 @@ int sched::dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value) { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].set_dl_pmi(tti, pmi_value); } else { Error("User rnti=0x%x not found\n", rnti); @@ -928,7 +929,7 @@ int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value) { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].set_dl_cqi(tti, cqi_value); } else { Error("User rnti=0x%x not found\n", rnti); @@ -950,7 +951,7 @@ int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].set_ul_cqi(tti, cqi, ul_ch_code); } else { Error("User rnti=0x%x not found\n", rnti); @@ -964,7 +965,7 @@ int sched::ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value) { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].ul_buffer_state(lcid, bsr, set_value); } else { Error("User rnti=0x%x not found\n", rnti); @@ -978,7 +979,7 @@ int sched::ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len) { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].ul_recv_len(lcid, len); } else { Error("User rnti=0x%x not found\n", rnti); @@ -992,7 +993,7 @@ int sched::ul_phr(uint16_t rnti, int phr) { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].ul_phr(phr); } else { Error("User rnti=0x%x not found\n", rnti); @@ -1006,7 +1007,7 @@ int sched::ul_sr_info(uint32_t tti, uint16_t rnti) { int ret = 0; pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].set_sr(); } else { Error("User rnti=0x%x not found\n", rnti); @@ -1024,7 +1025,7 @@ void sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) void sched::tpc_inc(uint16_t rnti) { pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].tpc_inc(); } else { Error("User rnti=0x%x not found\n", rnti); @@ -1035,7 +1036,7 @@ void sched::tpc_inc(uint16_t rnti) void sched::tpc_dec(uint16_t rnti) { pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti)) { + if (ue_db.count(rnti) > 0) { ue_db[rnti].tpc_dec(); } else { Error("User rnti=0x%x not found\n", rnti); @@ -1127,7 +1128,7 @@ void sched::dl_sched_bc(tti_sched_t* tti_sched) /* Allocate DCIs and RBGs for each SIB */ for (int i = 0; i < MAX_SIBS; i++) { - if (cfg.sibs[i].len && pending_sibs[i].is_in_window && pending_sibs[i].n_tx < 4) { + if (cfg.sibs[i].len > 0 && pending_sibs[i].is_in_window && pending_sibs[i].n_tx < 4) { uint32_t nof_tx = (i > 0) ? 
SRSLTE_MIN(CEILFRAC(cfg.si_window_ms, 10), 4) : 4; uint32_t n_sf = (tti_sched->get_tti_tx_dl() - pending_sibs[i].window_start); @@ -1146,9 +1147,9 @@ void sched::dl_sched_bc(tti_sched_t* tti_sched) } /* Allocate DCIs and RBGs for paging */ - if (rrc) { + if (rrc != nullptr) { uint32_t paging_payload = 0; - if (rrc->is_paging_opportunity(current_tti, &paging_payload) and paging_payload) { + if (rrc->is_paging_opportunity(current_tti, &paging_payload) and paging_payload > 0) { tti_sched->alloc_paging(bc_aggr_level, paging_payload); } } @@ -1305,7 +1306,7 @@ void sched::ul_sched_msg3(tti_sched_t* tti_sched) } uint16_t rnti = pending_msg3[pending_tti].rnti; - if (not ue_db.count(rnti)) { + if (ue_db.count(rnti) == 0) { log_h->warning("SCHED: Msg3 allocated for user rnti=0x%x that no longer exists\n", rnti); return; } @@ -1417,6 +1418,4 @@ void sched::generate_cce_location( } } -} - - +} // namespace srsenb diff --git a/srsenb/src/stack/mac/scheduler_metric.cc b/srsenb/src/stack/mac/scheduler_metric.cc index 97fc6febd..60597692f 100644 --- a/srsenb/src/stack/mac/scheduler_metric.cc +++ b/srsenb/src/stack/mac/scheduler_metric.cc @@ -41,7 +41,7 @@ void dl_metric_rr::set_log(srslte::log* log_) log_h = log_; } -void dl_metric_rr::sched_users(std::map& ue_db, sched::dl_tti_sched_t* tti_sched) +void dl_metric_rr::sched_users(std::map& ue_db, dl_tti_sched_t* tti_sched) { typedef std::map::iterator it_t; @@ -102,7 +102,7 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user) return h; } else if (code == alloc_outcome_t::DCI_COLLISION) { // No DCIs available for this user. Move to next - return NULL; + return nullptr; } // If previous mask does not fit, find another with exact same number of rbgs @@ -112,7 +112,7 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user) if (code == alloc_outcome_t::SUCCESS) { return h; } else if (code == alloc_outcome_t::DCI_COLLISION) { - return NULL; + return nullptr; } } } @@ -138,12 +138,12 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user) } } - return NULL; + return nullptr; } /***************************************************************** * - * Uplink Metric + * Uplink Metric * *****************************************************************/ @@ -152,7 +152,7 @@ void ul_metric_rr::set_log(srslte::log* log_) log_h = log_; } -void ul_metric_rr::sched_users(std::map& ue_db, sched::ul_tti_sched_t* tti_sched) +void ul_metric_rr::sched_users(std::map& ue_db, ul_tti_sched_t* tti_sched) { typedef std::map::iterator it_t; @@ -225,10 +225,10 @@ bool ul_metric_rr::find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc) return alloc->L == L; } -ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue *user) +ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue* user) { if (tti_alloc->is_ul_alloc(user)) { - return NULL; + return nullptr; } alloc_outcome_t ret; ul_harq_proc* h = user->get_ul_harq(current_tti); @@ -241,34 +241,36 @@ ul_harq_proc* ul_metric_rr::allocate_user_retx_prbs(sched_ue *user) ret = tti_alloc->alloc_ul_user(user, alloc); if (ret == alloc_outcome_t::SUCCESS) { return h; - } else if (ret == alloc_outcome_t::DCI_COLLISION) { + } + if (ret == alloc_outcome_t::DCI_COLLISION) { log_h->warning("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x\n", user->get_rnti()); - return NULL; + return nullptr; } if (find_allocation(alloc.L, &alloc)) { ret = tti_alloc->alloc_ul_user(user, alloc); if (ret == alloc_outcome_t::SUCCESS) { return h; - } else if (ret == alloc_outcome_t::DCI_COLLISION) { + } + if (ret == 
alloc_outcome_t::DCI_COLLISION) { log_h->warning("SCHED: Couldn't find space in PDCCH for UL tx of rnti=0x%x\n", user->get_rnti()); } } } - return NULL; + return nullptr; } ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user) { if (tti_alloc->is_ul_alloc(user)) { - return NULL; + return nullptr; } uint32_t pending_data = user->get_pending_ul_new_data(current_tti); ul_harq_proc* h = user->get_ul_harq(current_tti); // find an empty PID - if (h->is_empty(0) and pending_data) { - uint32_t pending_rb = user->get_required_prb_ul(pending_data); + if (h->is_empty(0) and pending_data > 0) { + uint32_t pending_rb = user->get_required_prb_ul(pending_data); ul_harq_proc::ul_alloc_t alloc; find_allocation(pending_rb, &alloc); @@ -281,7 +283,7 @@ ul_harq_proc* ul_metric_rr::allocate_user_newtx_prbs(sched_ue* user) } } } - return NULL; + return nullptr; } -} +} // namespace srsenb diff --git a/srsenb/test/mac/scheduler_test_rand.cc b/srsenb/test/mac/scheduler_test_rand.cc index 438b6e2dd..94e9492b3 100644 --- a/srsenb/test/mac/scheduler_test_rand.cc +++ b/srsenb/test/mac/scheduler_test_rand.cc @@ -117,15 +117,17 @@ log_tester log_out("ALL"); #define Warning(fmt, ...) \ log_out.warning(fmt, ##__VA_ARGS__); \ - warn_counter++; + warn_counter++ #define TestError(fmt, ...) \ log_out.error(fmt, ##__VA_ARGS__); \ - exit(-1); + exit(-1) #define CondError(cond, fmt, ...) \ - if (cond) { \ - log_out.error(fmt, ##__VA_ARGS__); \ - exit(-1); \ - } + do { \ + if (cond) { \ + log_out.error(fmt, ##__VA_ARGS__); \ + exit(-1); \ + } \ + } while (0) /******************* * Dummies * @@ -182,7 +184,6 @@ struct sched_tester : public srsenb::sched { tester_user_results total_ues; ///< stores combined UL/DL buffer state srsenb::sched_interface::ul_sched_res_t sched_result_ul; srsenb::sched_interface::dl_sched_res_t sched_result_dl; - typedef std::map::iterator ue_it_t; }; struct ue_info { int prach_tti = -1, rar_tti = -1, msg3_tti = -1; @@ -246,7 +247,7 @@ void sched_tester::add_user(uint16_t rnti, tester_ues.insert(std::make_pair(rnti, info)); if (ue_cfg(rnti, &ue_cfg_) != SRSLTE_SUCCESS) { - TestError("[TESTER] Registering new user rnti=0x%x to SCHED\n", rnti) + TestError("[TESTER] Registering new user rnti=0x%x to SCHED\n", rnti); } dl_sched_rar_info_t rar_info = {}; rar_info.prach_tti = tti_data.tti_rx; @@ -310,7 +311,7 @@ void sched_tester::process_tti_args() if (e.second.sr_data > 0) { uint32_t tot_ul_data = ue_db[e.first].get_pending_ul_new_data(tti_data.tti_tx_ul) + e.second.sr_data; uint32_t lcid = 0; - ul_bsr(e.first, lcid, tot_ul_data); + ul_bsr(e.first, lcid, tot_ul_data, true); } if (e.second.dl_data > 0) { uint32_t lcid = 0; @@ -599,8 +600,8 @@ void sched_tester::test_harqs() } for (uint32_t i = 0; i < tti_data.sched_result_ul.nof_dci_elems; ++i) { - const auto& pusch = tti_data.sched_result_ul.pusch[i]; - uint16_t rnti = pusch.dci.rnti; + const auto& pusch = tti_data.sched_result_ul.pusch[i]; + uint16_t rnti = pusch.dci.rnti; const auto& ue_data = tti_data.ue_data[rnti]; const srsenb::ul_harq_proc* h = ue_db[rnti].get_ul_harq(tti_data.tti_tx_ul); CondError(h == nullptr or h->is_empty(), "[TESTER] scheduled UL harq does not exist or is empty\n"); @@ -778,7 +779,7 @@ void sched_tester::test_collisions() } srslte::bounded_bitset<100, true> dl_allocs(cfg.cell.nof_prb), alloc_mask(cfg.cell.nof_prb); - srslte_dl_sf_cfg_t dl_sf; + srslte_dl_sf_cfg_t dl_sf; ZERO_OBJECT(dl_sf); for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_data_elems; ++i) { @@ -979,10 +980,10 @@ void 
test_scheduler_rand(srsenb::sched_interface::cell_cfg_t cell_cfg, const sch log_out.set_level(srslte::LOG_LEVEL_INFO); - tester.sim_args = args; - srslte_cell_t& cell_cfg_phy = cell_cfg.cell; - srsenb::sched_interface::dl_sched_res_t& sched_result_dl = tester.tti_data.sched_result_dl; - srsenb::sched_interface::ul_sched_res_t& sched_result_ul = tester.tti_data.sched_result_ul; + tester.sim_args = args; + // srslte_cell_t& cell_cfg_phy = cell_cfg.cell; + // srsenb::sched_interface::dl_sched_res_t& sched_result_dl = tester.tti_data.sched_result_dl; + // srsenb::sched_interface::ul_sched_res_t& sched_result_ul = tester.tti_data.sched_result_ul; tester.init(nullptr, &log_out); tester.set_metric(&dl_metric, &ul_metric); From 837c16557f6e63eca46b7c6e334f9c234f5cf2ad Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Tue, 15 Oct 2019 19:27:49 +0100 Subject: [PATCH 3/8] started to implement separate SIB scheduler, and its tests. This is particularly important for CA --- srsenb/hdr/stack/mac/scheduler.h | 1 + srsenb/hdr/stack/mac/scheduler_ctrl.h | 60 ++++++++++++++ srsenb/hdr/stack/mac/scheduler_grid.h | 2 +- srsenb/src/stack/mac/scheduler_ctrl.cc | 109 +++++++++++++++++++++++++ srsenb/src/stack/mac/scheduler_grid.cc | 21 +++-- srsenb/test/mac/scheduler_test.cc | 10 +-- srsenb/test/mac/scheduler_test_rand.cc | 36 ++++++++ 7 files changed, 225 insertions(+), 14 deletions(-) create mode 100644 srsenb/hdr/stack/mac/scheduler_ctrl.h create mode 100644 srsenb/src/stack/mac/scheduler_ctrl.cc diff --git a/srsenb/hdr/stack/mac/scheduler.h b/srsenb/hdr/stack/mac/scheduler.h index a0c7b09bc..5a496f6ca 100644 --- a/srsenb/hdr/stack/mac/scheduler.h +++ b/srsenb/hdr/stack/mac/scheduler.h @@ -153,6 +153,7 @@ protected: uint32_t window_start; uint32_t n_tx; } sched_sib_t; + class bc_sched_t; class tti_sched_t : public dl_tti_sched_t, public ul_tti_sched_t { diff --git a/srsenb/hdr/stack/mac/scheduler_ctrl.h b/srsenb/hdr/stack/mac/scheduler_ctrl.h new file mode 100644 index 000000000..94f243680 --- /dev/null +++ b/srsenb/hdr/stack/mac/scheduler_ctrl.h @@ -0,0 +1,60 @@ +/* + * Copyright 2013-2019 Software Radio Systems Limited + * + * This file is part of srsLTE. + * + * srsLTE is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * srsLTE is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * A copy of the GNU Affero General Public License can be found in + * the LICENSE file in the top-level directory of this distribution + * and at http://www.gnu.org/licenses/.
+ * + */ + +#ifndef SRSLTE_SCHEDULER_CTRL_H +#define SRSLTE_SCHEDULER_CTRL_H + +#include "scheduler.h" + +namespace srsenb { + +class sched::bc_sched_t +{ +public: + bc_sched_t(cell_cfg_t* cfg_); + + void dl_sched(tti_sched_t* tti_sched); + void reset(); + +private: + struct sched_sib_t { + bool is_in_window = false; + uint32_t window_start = 0; + uint32_t n_tx = 0; + }; + + void update_si_windows(tti_sched_t* tti_sched); + void alloc_sibs(tti_sched_t* tti_sched); + + // args + cell_cfg_t* cfg; + + std::array pending_sibs; + + // TTI specific + uint32_t current_sfn = 0, current_sf_idx = 0; + uint32_t current_tti = 0; + uint32_t bc_aggr_level = 2; +}; + +} // namespace srsenb + +#endif // SRSLTE_SCHEDULER_CTRL_H diff --git a/srsenb/hdr/stack/mac/scheduler_grid.h b/srsenb/hdr/stack/mac/scheduler_grid.h index e65af20fd..8156058e8 100644 --- a/srsenb/hdr/stack/mac/scheduler_grid.h +++ b/srsenb/hdr/stack/mac/scheduler_grid.h @@ -100,7 +100,7 @@ private: size_t nof_dci_allocs = 0; }; -//! manages a full TTI grid, namely CCE and RB allocations +//! manages a full TTI grid resources, namely CCE and DL/UL RB allocations class tti_grid_t { public: diff --git a/srsenb/src/stack/mac/scheduler_ctrl.cc b/srsenb/src/stack/mac/scheduler_ctrl.cc new file mode 100644 index 000000000..e7a496fdb --- /dev/null +++ b/srsenb/src/stack/mac/scheduler_ctrl.cc @@ -0,0 +1,109 @@ +/* + * Copyright 2013-2019 Software Radio Systems Limited + * + * This file is part of srsLTE. + * + * srsLTE is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of + * the License, or (at your option) any later version. + * + * srsLTE is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * A copy of the GNU Affero General Public License can be found in + * the LICENSE file in the top-level directory of this distribution + * and at http://www.gnu.org/licenses/. 
+ * + */ + +#include "srsenb/hdr/stack/mac/scheduler_ctrl.h" + +namespace srsenb { + +sched::bc_sched_t::bc_sched_t(cell_cfg_t* cfg_) : cfg(cfg_) {} + +void sched::bc_sched_t::dl_sched(sched::tti_sched_t* tti_sched) +{ + current_sf_idx = tti_sched->get_sf_idx(); + current_sfn = tti_sched->get_sfn(); + current_tti = tti_sched->get_tti_tx_dl(); + bc_aggr_level = 2; + + /* Activate/deactivate SI windows */ + update_si_windows(tti_sched); + + /* Allocate DCIs and RBGs for each SIB */ + alloc_sibs(tti_sched); +} + +void sched::bc_sched_t::update_si_windows(tti_sched_t* tti_sched) +{ + uint32_t tti_tx_dl = tti_sched->get_tti_tx_dl(); + + for (uint32_t i = 0; i < pending_sibs.size(); ++i) { + // There is SIB data + if (cfg->sibs[i].len == 0) { + continue; + } + + if (not pending_sibs[i].is_in_window) { + uint32_t sf = 5; + uint32_t x = 0; + if (i > 0) { + x = (i - 1) * cfg->si_window_ms; + sf = x % 10; + } + if ((current_sfn % (cfg->sibs[i].period_rf)) == x / 10 && current_sf_idx == sf) { + pending_sibs[i].is_in_window = true; + pending_sibs[i].window_start = tti_tx_dl; + pending_sibs[i].n_tx = 0; + } + } else { + if (i > 0) { + if (srslte_tti_interval(tti_tx_dl, pending_sibs[i].window_start) > cfg->si_window_ms) { + // the si window has passed + pending_sibs[i] = {}; + } + } else { + // SIB1 is always in window + if (pending_sibs[0].n_tx == 4) { + pending_sibs[0].n_tx = 0; + } + } + } + } +} + +void sched::bc_sched_t::alloc_sibs(tti_sched_t* tti_sched) +{ + for (uint32_t i = 0; i < pending_sibs.size(); i++) { + if (cfg->sibs[i].len > 0 and pending_sibs[i].is_in_window and pending_sibs[i].n_tx < 4) { + uint32_t nof_tx = (i > 0) ? SRSLTE_MIN(srslte::ceil_div(cfg->si_window_ms, 10), 4) : 4; + uint32_t n_sf = (tti_sched->get_tti_tx_dl() - pending_sibs[i].window_start); + + // Check if there is any SIB to tx + bool sib1_flag = (i == 0) and (current_sfn % 2) == 0 and current_sf_idx == 5; + bool other_sibs_flag = + (i > 0) and (n_sf >= (cfg->si_window_ms / nof_tx) * pending_sibs[i].n_tx) and current_sf_idx == 9; + if (not sib1_flag and not other_sibs_flag) { + continue; + } + + // Schedule SIB + tti_sched->alloc_bc(bc_aggr_level, i, pending_sibs[i].n_tx); + pending_sibs[i].n_tx++; + } + } +} + +void sched::bc_sched_t::reset() +{ + for (auto& sib : pending_sibs) { + sib = {}; + } +} + +} // namespace srsenb diff --git a/srsenb/src/stack/mac/scheduler_grid.cc b/srsenb/src/stack/mac/scheduler_grid.cc index 67c763f97..4d62e5d4d 100644 --- a/srsenb/src/stack/mac/scheduler_grid.cc +++ b/srsenb/src/stack/mac/scheduler_grid.cc @@ -89,7 +89,7 @@ const sched_ue::sched_dci_cce_t* pdcch_grid_t::get_cce_loc_table(alloc_type_t al case alloc_type_t::UL_DATA: return user->get_locations(current_cfix + 1, sf_idx); } - return NULL; + return nullptr; } bool pdcch_grid_t::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user) @@ -98,7 +98,7 @@ bool pdcch_grid_t::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_u /* Get DCI Location Table */ const sched_ue::sched_dci_cce_t* dci_locs = get_cce_loc_table(alloc_type, user); - if (!dci_locs) { + if (dci_locs == nullptr) { return false; } @@ -199,16 +199,17 @@ void pdcch_grid_t::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_ { // if alloc tree is empty if (prev_start == prev_end) { - if (vec) + if (vec != nullptr) { vec->clear(); - if (tot_mask) { + } + if (tot_mask != nullptr) { tot_mask->reset(); } return; } // set vector of allocations - if (vec) { + if (vec != nullptr) { vec->clear(); size_t i = prev_start + idx; while 
(dci_alloc_tree[i].first >= 0) { @@ -220,7 +221,7 @@ void pdcch_grid_t::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_ } // set final cce mask - if (tot_mask) { + if (tot_mask != nullptr) { *tot_mask = dci_alloc_tree[prev_start + idx].second.total_mask; } } @@ -293,6 +294,7 @@ void tti_grid_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi) pdcch_alloc.new_tti(tti_rx, start_cfi); } +//! Allocates CCEs and RBs for the given mask and allocation type (e.g. data, BC, RAR, paging) alloc_outcome_t tti_grid_t::alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user) { // Check RBG collision @@ -312,6 +314,7 @@ alloc_outcome_t tti_grid_t::alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, return alloc_outcome_t::SUCCESS; } +//! Allocates CCEs and RBs for control allocs. It allocates RBs in a contiguous manner. tti_grid_t::ctrl_alloc_t tti_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type) { rbg_range_t range; @@ -334,10 +337,11 @@ tti_grid_t::ctrl_alloc_t tti_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type return {alloc_dl(aggr_lvl, alloc_type, new_mask), range}; } +//! Allocates CCEs and RBs for a user DL data alloc. alloc_outcome_t tti_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask) { srslte_dci_format_t dci_format = user->get_dci_format(); - uint32_t aggr_level = user->get_aggr_level(srslte_dci_format_sizeof(&cell_cfg->cell, NULL, NULL, dci_format)); + uint32_t aggr_level = user->get_aggr_level(srslte_dci_format_sizeof(&cell_cfg->cell, nullptr, nullptr, dci_format)); return alloc_dl(aggr_level, alloc_type_t::DL_DATA, user_mask, user); } @@ -355,7 +359,8 @@ alloc_outcome_t tti_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc // Generate PDCCH except for RAR and non-adaptive retx if (needs_pdcch) { - uint32_t aggr_idx = user->get_aggr_level(srslte_dci_format_sizeof(&cell_cfg->cell, NULL, NULL, SRSLTE_DCI_FORMAT0)); + uint32_t aggr_idx = + user->get_aggr_level(srslte_dci_format_sizeof(&cell_cfg->cell, nullptr, nullptr, SRSLTE_DCI_FORMAT0)); if (not pdcch_alloc.alloc_dci(alloc_type_t::UL_DATA, aggr_idx, user)) { return alloc_outcome_t::DCI_COLLISION; } diff --git a/srsenb/test/mac/scheduler_test.cc b/srsenb/test/mac/scheduler_test.cc index 536bd44d9..7b1800538 100644 --- a/srsenb/test/mac/scheduler_test.cc +++ b/srsenb/test/mac/scheduler_test.cc @@ -127,15 +127,15 @@ int main(int argc, char *argv[]) srsenb::sched_interface::ue_bearer_cfg_t bearer_cfg; bzero(&bearer_cfg, sizeof(srsenb::sched_interface::ue_bearer_cfg_t)); - bearer_cfg.direction = srsenb::sched_interface::ue_bearer_cfg_t::BOTH; + bearer_cfg.direction = srsenb::sched_interface::ue_bearer_cfg_t::BOTH; my_sched.ue_cfg(rnti, &ue_cfg); my_sched.bearer_ue_cfg(rnti, 0, &bearer_cfg); - //my_sched.dl_rlc_buffer_state(rnti, 0, 1e6, 0); - my_sched.ul_bsr(rnti, 0, 1e6); + // my_sched.dl_rlc_buffer_state(rnti, 0, 1e6, 0); + my_sched.ul_bsr(rnti, 0, 1e6, true); - bool running = true; - uint32_t tti = 0; + bool running = true; + uint32_t tti = 0; while(running) { log_out.step(tti); if (tti > 50) { diff --git a/srsenb/test/mac/scheduler_test_rand.cc b/srsenb/test/mac/scheduler_test_rand.cc index 94e9492b3..9123b8f53 100644 --- a/srsenb/test/mac/scheduler_test_rand.cc +++ b/srsenb/test/mac/scheduler_test_rand.cc @@ -226,6 +226,7 @@ struct sched_tester : public srsenb::sched { void assert_no_empty_allocs(); void test_collisions(); void test_harqs(); + void test_sibs(); void run_tti(uint32_t tti_rx); private: @@ -381,6 +382,7 @@ void 
sched_tester::process_results() test_collisions(); assert_no_empty_allocs(); test_harqs(); + test_sibs(); } void sched_tester::run_tti(uint32_t tti_rx) @@ -712,6 +714,40 @@ void sched_tester::test_harqs() // } } +void sched_tester::test_sibs() +{ + uint32_t sfn = tti_data.tti_tx_dl / 10; + uint32_t sf_idx = TTI_TX(tti_data.tti_rx) % 10; + bool sib1_present = ((sfn % 2) == 0) and sf_idx == 5; + + using bc_elem = sched_interface::dl_sched_bc_t; + bc_elem* bc_begin = &tti_data.sched_result_dl.bc[0]; + bc_elem* bc_end = &tti_data.sched_result_dl.bc[tti_data.sched_result_dl.nof_bc_elems]; + + /* Test if SIB1 was correctly scheduled */ + if (sib1_present) { + auto it = std::find_if(bc_begin, bc_end, [](bc_elem& elem) { return elem.index == 0; }); + CondError(it == bc_end, "Failed to allocate SIB1 in even sfn, sf_idx==5\n"); + } + + /* Test if any SIB was scheduled outside of its window */ + for (bc_elem* bc = bc_begin; bc != bc_end; ++bc) { + if (bc->index == 0) { + continue; + } + uint32_t x = (bc->index - 1) * cfg.si_window_ms; + uint32_t sf = x % 10; + uint32_t sfn_start = sfn; + while ((sfn_start % cfg.sibs[bc->index].period_rf) != x / 10) { + sfn_start--; + } + uint32_t win_start = sfn_start * 10 + sf; + uint32_t win_end = win_start + cfg.si_window_ms; + CondError(tti_data.tti_tx_dl < win_start or tti_data.tti_tx_dl > win_end, + "Scheduled SIB is outside of its SIB window\n"); + } +} + void sched_tester::test_collisions() { tti_sched_t* tti_sched = get_tti_sched(tti_data.tti_rx); From d79e49ed4cdca9e2d436b713585c9fb4a444c2bc Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Wed, 16 Oct 2019 12:03:30 +0100 Subject: [PATCH 4/8] moved the BC scheduling to a separate class/file --- srsenb/hdr/stack/mac/scheduler.h | 18 ++---- srsenb/hdr/stack/mac/scheduler_ctrl.h | 5 +- srsenb/src/stack/mac/scheduler.cc | 90 +++++--------------------- srsenb/src/stack/mac/scheduler_ctrl.cc | 20 ++++++ srsenb/src/stack/rrc/rrc.cc | 4 +- srsenb/test/mac/scheduler_test_rand.cc | 24 +++---- 6 files changed, 60 insertions(+), 101 deletions(-) diff --git a/srsenb/hdr/stack/mac/scheduler.h b/srsenb/hdr/stack/mac/scheduler.h index 5a496f6ca..92d6c2851 100644 --- a/srsenb/hdr/stack/mac/scheduler.h +++ b/srsenb/hdr/stack/mac/scheduler.h @@ -148,11 +148,6 @@ protected: // This is for computing DCI locations srslte_regs_t regs; - typedef struct { - bool is_in_window; - uint32_t window_start; - uint32_t n_tx; - } sched_sib_t; class bc_sched_t; class tti_sched_t : public dl_tti_sched_t, public ul_tti_sched_t @@ -266,16 +261,14 @@ protected: void generate_phich(tti_sched_t* tti_sched); int generate_dl_sched(tti_sched_t* tti_sched); int generate_ul_sched(tti_sched_t* tti_sched); - void dl_sched_bc(tti_sched_t* tti_sched); void dl_sched_rar(tti_sched_t* tti_sched); void dl_sched_data(tti_sched_t* tti_sched); void ul_sched_msg3(tti_sched_t* tti_sched); - std::map ue_db; - sched_sib_t pending_sibs[MAX_SIBS]; + std::map ue_db; typedef struct { - bool enabled; + bool enabled; uint16_t rnti; uint32_t L; uint32_t n_prb; @@ -297,9 +290,10 @@ protected: prbmask_t prach_mask; prbmask_t pucch_mask; - uint32_t bc_aggr_level; - uint32_t rar_aggr_level; - + uint32_t bc_aggr_level; + uint32_t rar_aggr_level; + std::unique_ptr bc_sched; + uint32_t pdsch_re[10]; uint32_t current_tti; diff --git a/srsenb/hdr/stack/mac/scheduler_ctrl.h b/srsenb/hdr/stack/mac/scheduler_ctrl.h index 94f243680..5890d2078 100644 --- a/srsenb/hdr/stack/mac/scheduler_ctrl.h +++ b/srsenb/hdr/stack/mac/scheduler_ctrl.h @@ -30,6 +30,7 @@ class sched::bc_sched_t { 
public: bc_sched_t(cell_cfg_t* cfg_); + void init(rrc_interface_mac* rrc_); void dl_sched(tti_sched_t* tti_sched); void reset(); @@ -43,9 +44,11 @@ private: void update_si_windows(tti_sched_t* tti_sched); void alloc_sibs(tti_sched_t* tti_sched); + void alloc_paging(tti_sched_t* tti_sched); // args - cell_cfg_t* cfg; + cell_cfg_t* cfg; + rrc_interface_mac* rrc = nullptr; std::array pending_sibs; diff --git a/srsenb/src/stack/mac/scheduler.cc b/srsenb/src/stack/mac/scheduler.cc index 98e35bdaa..7b9ce8c5d 100644 --- a/srsenb/src/stack/mac/scheduler.cc +++ b/srsenb/src/stack/mac/scheduler.cc @@ -23,6 +23,7 @@ #include #include "srsenb/hdr/stack/mac/scheduler.h" +#include "srsenb/hdr/stack/mac/scheduler_ctrl.h" #include "srslte/common/pdu.h" #include "srslte/srslte.h" @@ -562,11 +563,18 @@ int sched::tti_sched_t::generate_format1a( } /******************************************************* - * - * Initialization and sched configuration functions - * + * + * Initialization and sched configuration functions + * *******************************************************/ -sched::sched() : bc_aggr_level(0), rar_aggr_level(0), P(0), si_n_rbg(0), rar_n_rbg(0), nof_rbg(0) +sched::sched() : + bc_aggr_level(0), + rar_aggr_level(0), + P(0), + si_n_rbg(0), + rar_n_rbg(0), + nof_rbg(0), + bc_sched(new bc_sched_t{&cfg}) { current_tti = 0; log_h = nullptr; @@ -608,13 +616,14 @@ void sched::init(rrc_interface_mac* rrc_, srslte::log* log) sched_cfg.max_aggr_level = 3; log_h = log; rrc = rrc_; + + bc_sched->init(rrc); reset(); } int sched::reset() { bzero(pending_msg3, sizeof(pending_msg3_t) * TTIMOD_SZ); - bzero(pending_sibs, sizeof(sched_sib_t) * MAX_SIBS); while (not pending_rars.empty()) { pending_rars.pop(); } @@ -1088,73 +1097,6 @@ sched::tti_sched_t* sched::new_tti(uint32_t tti_rx) return tti_sched; } -// Schedules Broadcast messages (SIB) -void sched::dl_sched_bc(tti_sched_t* tti_sched) -{ - /* Activate/Deactivate SI windows */ - for (int i = 0; i < MAX_SIBS; i++) { - // There is SIB data - if (cfg.sibs[i].len == 0) { - continue; - } - - if (!pending_sibs[i].is_in_window) { - uint32_t sf = 5; - uint32_t x = 0; - if (i > 0) { - x = (i - 1) * cfg.si_window_ms; - sf = x % 10; - } - if ((tti_sched->get_sfn() % (cfg.sibs[i].period_rf)) == x / 10 && tti_sched->get_sf_idx() == sf) { - pending_sibs[i].is_in_window = true; - pending_sibs[i].window_start = tti_sched->get_tti_tx_dl(); - pending_sibs[i].n_tx = 0; - } - } else { - if (i > 0) { - if (srslte_tti_interval(tti_sched->get_tti_tx_dl(), pending_sibs[i].window_start) > cfg.si_window_ms) { - // the si window has passed - pending_sibs[i].is_in_window = false; - pending_sibs[i].window_start = 0; - } - } else { - // SIB1 is always in window - if (pending_sibs[0].n_tx == 4) { - pending_sibs[0].n_tx = 0; - } - } - } - } - - /* Allocate DCIs and RBGs for each SIB */ - for (int i = 0; i < MAX_SIBS; i++) { - if (cfg.sibs[i].len > 0 && pending_sibs[i].is_in_window && pending_sibs[i].n_tx < 4) { - uint32_t nof_tx = (i > 0) ? 
SRSLTE_MIN(CEILFRAC(cfg.si_window_ms, 10), 4) : 4; - uint32_t n_sf = (tti_sched->get_tti_tx_dl() - pending_sibs[i].window_start); - - // Check if there is any SIB to tx - bool sib1_flag = i == 0 and (tti_sched->get_sfn() % 2) == 0 and tti_sched->get_sf_idx() == 5; - bool other_sibs_flag = - i > 0 and n_sf >= (cfg.si_window_ms / nof_tx) * pending_sibs[i].n_tx and tti_sched->get_sf_idx() == 9; - if (!sib1_flag and !other_sibs_flag) { - continue; - } - - // Schedule SIB - tti_sched->alloc_bc(bc_aggr_level, i, pending_sibs[i].n_tx); - pending_sibs[i].n_tx++; - } - } - - /* Allocate DCIs and RBGs for paging */ - if (rrc != nullptr) { - uint32_t paging_payload = 0; - if (rrc->is_paging_opportunity(current_tti, &paging_payload) and paging_payload > 0) { - tti_sched->alloc_paging(bc_aggr_level, paging_payload); - } - } -} - bool is_in_tti_interval(uint32_t tti, uint32_t tti1, uint32_t tti2) { tti %= 10240; @@ -1260,8 +1202,8 @@ int sched::generate_dl_sched(tti_sched_t* tti_sched) bc_aggr_level = 2; rar_aggr_level = 2; - /* Schedule Broadcast data */ - dl_sched_bc(tti_sched); + /* Schedule Broadcast data (SIB and paging) */ + bc_sched->dl_sched(tti_sched); /* Schedule RAR */ dl_sched_rar(tti_sched); diff --git a/srsenb/src/stack/mac/scheduler_ctrl.cc b/srsenb/src/stack/mac/scheduler_ctrl.cc index e7a496fdb..0c6aa6626 100644 --- a/srsenb/src/stack/mac/scheduler_ctrl.cc +++ b/srsenb/src/stack/mac/scheduler_ctrl.cc @@ -25,6 +25,11 @@ namespace srsenb { sched::bc_sched_t::bc_sched_t(cell_cfg_t* cfg_) : cfg(cfg_) {} +void sched::bc_sched_t::init(srsenb::rrc_interface_mac* rrc_) +{ + rrc = rrc_; +} + void sched::bc_sched_t::dl_sched(sched::tti_sched_t* tti_sched) { current_sf_idx = tti_sched->get_sf_idx(); @@ -37,6 +42,10 @@ void sched::bc_sched_t::dl_sched(sched::tti_sched_t* tti_sched) /* Allocate DCIs and RBGs for each SIB */ alloc_sibs(tti_sched); + + /* Allocate Paging */ + // NOTE: It blocks + alloc_paging(tti_sched); } void sched::bc_sched_t::update_si_windows(tti_sched_t* tti_sched) @@ -99,6 +108,17 @@ void sched::bc_sched_t::alloc_sibs(tti_sched_t* tti_sched) } } +void sched::bc_sched_t::alloc_paging(srsenb::sched::tti_sched_t* tti_sched) +{ + /* Allocate DCIs and RBGs for paging */ + if (rrc != nullptr) { + uint32_t paging_payload = 0; + if (rrc->is_paging_opportunity(current_tti, &paging_payload) and paging_payload > 0) { + tti_sched->alloc_paging(bc_aggr_level, paging_payload); + } + } +} + void sched::bc_sched_t::reset() { for (auto& sib : pending_sibs) { diff --git a/srsenb/src/stack/rrc/rrc.cc b/srsenb/src/stack/rrc/rrc.cc index ad18ca663..870742a4a 100644 --- a/srsenb/src/stack/rrc/rrc.cc +++ b/srsenb/src/stack/rrc/rrc.cc @@ -556,8 +556,8 @@ bool rrc::is_paging_opportunity(uint32_t tti, uint32_t *payload_len) } } - for (uint32_t i = 0; i < ue_to_remove.size(); i++) { - pending_paging.erase(ue_to_remove[i]); + for (unsigned int i : ue_to_remove) { + pending_paging.erase(i); } pthread_mutex_unlock(&paging_mutex); diff --git a/srsenb/test/mac/scheduler_test_rand.cc b/srsenb/test/mac/scheduler_test_rand.cc index 9123b8f53..307422912 100644 --- a/srsenb/test/mac/scheduler_test_rand.cc +++ b/srsenb/test/mac/scheduler_test_rand.cc @@ -426,7 +426,7 @@ void sched_tester::test_ra() if (tti_data.tti_tx_dl >= window[0]) { for (uint32_t i = 0; i < tti_data.sched_result_dl.nof_rar_elems; ++i) { for (uint32_t j = 0; j < tti_data.sched_result_dl.rar[i].nof_grants; ++j) { - if (tti_data.sched_result_dl.rar[i].msg3_grant[j].data.prach_tti == tti_data.tti_tx_dl) { + if 
(tti_data.sched_result_dl.rar[i].msg3_grant[j].data.prach_tti + TX_DELAY == tti_data.tti_tx_dl) { userinfo.rar_tti = tti_data.tti_tx_dl; } } @@ -701,17 +701,17 @@ void sched_tester::test_harqs() to_ul_ack.insert(std::make_pair(ack_data.tti_tx_ul, ack_data)); } - // // Check whether some pids got old - // for (auto& user : ue_db) { - // for (int i = 0; i < 2 * FDD_HARQ_DELAY_MS; i++) { - // if (not(user.second.get_dl_harq(i)->is_empty(0) and user.second.get_dl_harq(1))) { - // if (srslte_tti_interval(tti_data.tti_tx_dl, user.second.get_dl_harq(i)->get_tti()) > 49) { - // TestError("[TESTER] The pid=%d for rnti=0x%x got old.\n", user.second.get_dl_harq(i)->get_id(), - // user.first); - // } - // } - // } - // } +// // Check whether some pids got old +// for (auto& user : ue_db) { +// for (int i = 0; i < 2 * FDD_HARQ_DELAY_MS; i++) { +// if (not(user.second.get_dl_harq(i)->is_empty(0) and user.second.get_dl_harq(1))) { +// if (srslte_tti_interval(tti_data.tti_tx_dl, user.second.get_dl_harq(i)->get_tti()) > 49) { +// TestError("[TESTER] The pid=%d for rnti=0x%x got old.\n", user.second.get_dl_harq(i)->get_id(), +// user.first); +// } +// } +// } +// } } void sched_tester::test_sibs() From e0b8be61e5ae4ad0ca9df1e39b0d4261a8cb7b26 Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Wed, 16 Oct 2019 14:08:14 +0100 Subject: [PATCH 5/8] provided a general way to access ue_db from the scheduler --- srsenb/hdr/stack/mac/scheduler.h | 4 + srsenb/src/stack/mac/scheduler.cc | 159 +++++++----------------------- 2 files changed, 37 insertions(+), 126 deletions(-) diff --git a/srsenb/hdr/stack/mac/scheduler.h b/srsenb/hdr/stack/mac/scheduler.h index 92d6c2851..d7f6b6f19 100644 --- a/srsenb/hdr/stack/mac/scheduler.h +++ b/srsenb/hdr/stack/mac/scheduler.h @@ -265,6 +265,10 @@ protected: void dl_sched_data(tti_sched_t* tti_sched); void ul_sched_msg3(tti_sched_t* tti_sched); + // Helper methods + template + int ue_db_access(uint16_t rnti, Func); + std::map ue_db; typedef struct { diff --git a/srsenb/src/stack/mac/scheduler.cc b/srsenb/src/stack/mac/scheduler.cc index 7b9ce8c5d..0752d285b 100644 --- a/srsenb/src/stack/mac/scheduler.cc +++ b/srsenb/src/stack/mac/scheduler.cc @@ -755,10 +755,7 @@ int sched::ue_rem(uint16_t rnti) bool sched::ue_exists(uint16_t rnti) { - pthread_rwlock_rdlock(&rwlock); - bool ret = (ue_db.count(rnti) == 1); - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [](sched_ue& ue) {}) >= 0; } void sched::ue_needs_ta_cmd(uint16_t rnti, uint32_t nof_ta_cmd) { @@ -773,179 +770,72 @@ void sched::ue_needs_ta_cmd(uint16_t rnti, uint32_t nof_ta_cmd) { void sched::phy_config_enabled(uint16_t rnti, bool enabled) { - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].phy_config_enabled(current_tti, enabled); - } else { - Error("User rnti=0x%x not found\n", rnti); - } - pthread_rwlock_unlock(&rwlock); + ue_db_access(rnti, [this, enabled](sched_ue& ue) { ue.phy_config_enabled(current_tti, enabled); }); } int sched::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg_) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].set_bearer_cfg(lc_id, cfg_); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [lc_id, cfg_](sched_ue& ue) { ue.set_bearer_cfg(lc_id, cfg_); }); } int sched::bearer_ue_rem(uint16_t rnti, uint32_t lc_id) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if 
(ue_db.count(rnti) > 0) { - ue_db[rnti].rem_bearer(lc_id); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [lc_id](sched_ue& ue) { ue.rem_bearer(lc_id); }); } uint32_t sched::get_dl_buffer(uint16_t rnti) { uint32_t ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ret = ue_db[rnti].get_pending_dl_new_data(current_tti); - } else { - Error("User rnti=0x%x not found\n", rnti); - } - pthread_rwlock_unlock(&rwlock); + ue_db_access(rnti, [this, &ret](sched_ue& ue) { ret = ue.get_pending_dl_new_data(current_tti); }); return ret; } uint32_t sched::get_ul_buffer(uint16_t rnti) { uint32_t ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ret = ue_db[rnti].get_pending_ul_new_data(current_tti); - } else { - Error("User rnti=0x%x not found\n", rnti); - } - pthread_rwlock_unlock(&rwlock); + ue_db_access(rnti, [this, &ret](sched_ue& ue) { ret = ue.get_pending_ul_new_data(current_tti); }); return ret; } int sched::dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t retx_queue) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].dl_buffer_state(lc_id, tx_queue, retx_queue); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, + [lc_id, tx_queue, retx_queue](sched_ue& ue) { ue.dl_buffer_state(lc_id, tx_queue, retx_queue); }); } int sched::dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].mac_buffer_state(ce_code); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [ce_code](sched_ue& ue) { ue.mac_buffer_state(ce_code); }); } int sched::dl_ant_info(uint16_t rnti, asn1::rrc::phys_cfg_ded_s::ant_info_c_* dl_ant_info) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].set_dl_ant_info(dl_ant_info); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [dl_ant_info](sched_ue& ue) { ue.set_dl_ant_info(dl_ant_info); }); } int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ret = ue_db[rnti].set_ack_info(tti, tb_idx, ack); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [tti, tb_idx, ack](sched_ue& ue) { ue.set_ack_info(tti, tb_idx, ack); }); } int sched::ul_crc_info(uint32_t tti, uint16_t rnti, bool crc) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].set_ul_crc(tti, crc); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [tti, crc](sched_ue& ue) { ue.set_ul_crc(tti, crc); }); } -int sched::dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value) +int sched::dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].set_dl_ri(tti, cqi_value); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - 
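// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch.] The refactor in this hunk repeats
// below for every per-UE setter/getter: each one collapses into a call to the
// generic, read-locked ue_db_access(rnti, lambda) helper. A minimal
// self-contained version of the pattern, using std::shared_mutex instead of
// the pthread rwlock in scheduler.cc; toy_ue/toy_sched are illustrative names.
#include <cstdint>
#include <cstdio>
#include <map>
#include <shared_mutex>

struct toy_ue {
  uint32_t pending_dl = 0;
  void     set_cqi(uint32_t /*cqi*/) {}
};

class toy_sched
{
public:
  // Mirrors sched::ue_db_access(): runs f under a shared (read) lock and
  // returns -1 if the RNTI is unknown.
  template <typename Func>
  int ue_db_access(uint16_t rnti, Func f)
  {
    std::shared_lock<std::shared_mutex> lock(rwlock);
    auto                                it = ue_db.find(rnti);
    if (it == ue_db.end()) {
      std::printf("User rnti=0x%x not found\n", unsigned(rnti));
      return -1;
    }
    f(it->second);
    return 0;
  }

  // A setter becomes a one-liner ...
  int dl_cqi_info(uint16_t rnti, uint32_t cqi)
  {
    return ue_db_access(rnti, [cqi](toy_ue& ue) { ue.set_cqi(cqi); });
  }
  // ... and a getter returns its value through a reference capture.
  uint32_t get_dl_buffer(uint16_t rnti)
  {
    uint32_t ret = 0;
    ue_db_access(rnti, [&ret](toy_ue& ue) { ret = ue.pending_dl; });
    return ret;
  }

private:
  std::map<uint16_t, toy_ue> ue_db;
  std::shared_mutex          rwlock;
};
// ---------------------------------------------------------------------------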
pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [tti, ri_value](sched_ue& ue) { ue.set_dl_ri(tti, ri_value); }); } int sched::dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].set_dl_pmi(tti, pmi_value); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [tti, pmi_value](sched_ue& ue) { ue.set_dl_pmi(tti, pmi_value); }); } int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].set_dl_cqi(tti, cqi_value); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [tti, cqi_value](sched_ue& ue) { ue.set_dl_cqi(tti, cqi_value); }); } int sched::dl_rach_info(dl_sched_rar_info_t rar_info) @@ -1360,4 +1250,21 @@ void sched::generate_cce_location( } } +// Common way to access ue_db elements in a read locking way +template +int sched::ue_db_access(uint16_t rnti, Func f) +{ + int ret = 0; + pthread_rwlock_rdlock(&rwlock); + auto it = ue_db.find(rnti); + if (it != ue_db.end()) { + f(it->second); + } else { + Error("User rnti=0x%x not found\n", rnti); + ret = -1; + } + pthread_rwlock_unlock(&rwlock); + return ret; +} + } // namespace srsenb From 388776d1fa13585e09a1056762321f956394c395 Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Wed, 16 Oct 2019 14:50:34 +0100 Subject: [PATCH 6/8] provided a general way to access ue_db from the scheduler - part2 --- srsenb/src/stack/mac/scheduler.cc | 81 +++++-------------------------- 1 file changed, 13 insertions(+), 68 deletions(-) diff --git a/srsenb/src/stack/mac/scheduler.cc b/srsenb/src/stack/mac/scheduler.cc index 0752d285b..e3ba7fc90 100644 --- a/srsenb/src/stack/mac/scheduler.cc +++ b/srsenb/src/stack/mac/scheduler.cc @@ -815,7 +815,9 @@ int sched::dl_ant_info(uint16_t rnti, asn1::rrc::phys_cfg_ded_s::ant_info_c_* dl int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack) { - return ue_db_access(rnti, [tti, tb_idx, ack](sched_ue& ue) { ue.set_ack_info(tti, tb_idx, ack); }); + int ret = -1; + ue_db_access(rnti, [tti, tb_idx, ack, &ret](sched_ue& ue) { ret = ue.set_ack_info(tti, tb_idx, ack); }); + return ret; } int sched::ul_crc_info(uint32_t tti, uint16_t rnti, bool crc) @@ -848,72 +850,27 @@ int sched::dl_rach_info(dl_sched_rar_info_t rar_info) int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].set_ul_cqi(tti, cqi, ul_ch_code); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [tti, cqi, ul_ch_code](sched_ue& ue) { ue.set_ul_cqi(tti, cqi, ul_ch_code); }); } int sched::ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].ul_buffer_state(lcid, bsr, set_value); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [lcid, bsr, set_value](sched_ue& ue) { ue.ul_buffer_state(lcid, bsr, set_value); }); } int sched::ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len) { - int ret 
= 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].ul_recv_len(lcid, len); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [lcid, len](sched_ue& ue) { ue.ul_recv_len(lcid, len); }); } int sched::ul_phr(uint16_t rnti, int phr) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].ul_phr(phr); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [phr](sched_ue& ue) { ue.ul_phr(phr); }); } int sched::ul_sr_info(uint32_t tti, uint16_t rnti) { - int ret = 0; - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].set_sr(); - } else { - Error("User rnti=0x%x not found\n", rnti); - ret = -1; - } - pthread_rwlock_unlock(&rwlock); - return ret; + return ue_db_access(rnti, [](sched_ue& ue) { ue.set_sr(); }); } void sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) @@ -923,30 +880,18 @@ void sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) void sched::tpc_inc(uint16_t rnti) { - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].tpc_inc(); - } else { - Error("User rnti=0x%x not found\n", rnti); - } - pthread_rwlock_unlock(&rwlock); + ue_db_access(rnti, [](sched_ue& ue) { ue.tpc_inc(); }); } void sched::tpc_dec(uint16_t rnti) { - pthread_rwlock_rdlock(&rwlock); - if (ue_db.count(rnti) > 0) { - ue_db[rnti].tpc_dec(); - } else { - Error("User rnti=0x%x not found\n", rnti); - } - pthread_rwlock_unlock(&rwlock); + ue_db_access(rnti, [](sched_ue& ue) { ue.tpc_dec(); }); } /******************************************************* - * - * Main sched functions - * + * + * Main sched functions + * *******************************************************/ sched::tti_sched_t* sched::new_tti(uint32_t tti_rx) From c19e0d94234b248a67429cde7b86d05925ff522a Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Wed, 16 Oct 2019 16:55:37 +0100 Subject: [PATCH 7/8] moved the RAR/Msg3 scheduling to a separate class --- srsenb/hdr/stack/mac/scheduler.h | 55 +++--- srsenb/hdr/stack/mac/scheduler_ctrl.h | 47 ++++- srsenb/src/stack/mac/scheduler.cc | 226 +++++++------------------ srsenb/src/stack/mac/scheduler_ctrl.cc | 145 +++++++++++++++- srsenb/test/mac/scheduler_test_rand.cc | 19 ++- 5 files changed, 280 insertions(+), 212 deletions(-) diff --git a/srsenb/hdr/stack/mac/scheduler.h b/srsenb/hdr/stack/mac/scheduler.h index d7f6b6f19..4d34f082b 100644 --- a/srsenb/hdr/stack/mac/scheduler.h +++ b/srsenb/hdr/stack/mac/scheduler.h @@ -35,6 +35,20 @@ namespace srsenb { +namespace sched_utils { + +inline bool is_in_tti_interval(uint32_t tti, uint32_t tti1, uint32_t tti2) +{ + tti %= 10240; + tti1 %= 10240; + tti2 %= 10240; + if (tti1 <= tti2) { + return tti >= tti1 and tti <= tti2; + } + return tti >= tti1 or tti <= tti2; +} + +} // namespace sched_utils /* Caution: User addition (ue_cfg) and removal (ue_rem) are not thread-safe * Rest of operations are thread-safe @@ -149,8 +163,9 @@ protected: srslte_regs_t regs; class bc_sched_t; + class ra_sched_t; - class tti_sched_t : public dl_tti_sched_t, public ul_tti_sched_t + class tti_sched_result_t : public dl_tti_sched_t, public ul_tti_sched_t { public: struct ctrl_alloc_t { @@ -227,10 +242,10 @@ protected: bool is_dl_alloc(sched_ue* user) const final; bool is_ul_alloc(sched_ue* user) const final; ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t 
tbs_bytes, uint16_t rnti); - alloc_outcome_t alloc_ul(sched_ue* user, - ul_harq_proc::ul_alloc_t alloc, - tti_sched_t::ul_alloc_t::type_t alloc_type, - uint32_t msg3 = 0); + alloc_outcome_t alloc_ul(sched_ue* user, + ul_harq_proc::ul_alloc_t alloc, + tti_sched_result_t::ul_alloc_t::type_t alloc_type, + uint32_t msg3 = 0); int generate_format1a( uint32_t rb_start, uint32_t l_crb, uint32_t tbs, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci); void set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result); @@ -253,17 +268,15 @@ protected: }; const static uint32_t nof_sched_ttis = 10; - tti_sched_t tti_scheds[nof_sched_ttis]; - tti_sched_t* get_tti_sched(uint32_t tti_rx) { return &tti_scheds[tti_rx % nof_sched_ttis]; } + tti_sched_result_t tti_scheds[nof_sched_ttis]; + tti_sched_result_t* get_tti_sched(uint32_t tti_rx) { return &tti_scheds[tti_rx % nof_sched_ttis]; } std::vector tti_dl_mask; - tti_sched_t* new_tti(uint32_t tti_rx); - void generate_phich(tti_sched_t* tti_sched); - int generate_dl_sched(tti_sched_t* tti_sched); - int generate_ul_sched(tti_sched_t* tti_sched); - void dl_sched_rar(tti_sched_t* tti_sched); - void dl_sched_data(tti_sched_t* tti_sched); - void ul_sched_msg3(tti_sched_t* tti_sched); + tti_sched_result_t* new_tti(uint32_t tti_rx); + void generate_phich(tti_sched_result_t* tti_sched); + int generate_dl_sched(tti_sched_result_t* tti_sched); + int generate_ul_sched(tti_sched_result_t* tti_sched); + void dl_sched_data(tti_sched_result_t* tti_sched); // Helper methods template @@ -271,17 +284,6 @@ protected: std::map ue_db; - typedef struct { - bool enabled; - uint16_t rnti; - uint32_t L; - uint32_t n_prb; - uint32_t mcs; - } pending_msg3_t; - - std::queue pending_rars; - pending_msg3_t pending_msg3[TTIMOD_SZ]; - // Allowed DCI locations for SIB and RAR per CFI sched_ue::sched_dci_cce_t common_locations[3]; sched_ue::sched_dci_cce_t rar_locations[3][10]; @@ -294,9 +296,8 @@ protected: prbmask_t prach_mask; prbmask_t pucch_mask; - uint32_t bc_aggr_level; - uint32_t rar_aggr_level; std::unique_ptr bc_sched; + std::unique_ptr rar_sched; uint32_t pdsch_re[10]; uint32_t current_tti; diff --git a/srsenb/hdr/stack/mac/scheduler_ctrl.h b/srsenb/hdr/stack/mac/scheduler_ctrl.h index 5890d2078..b7281462c 100644 --- a/srsenb/hdr/stack/mac/scheduler_ctrl.h +++ b/srsenb/hdr/stack/mac/scheduler_ctrl.h @@ -32,7 +32,7 @@ public: bc_sched_t(cell_cfg_t* cfg_); void init(rrc_interface_mac* rrc_); - void dl_sched(tti_sched_t* tti_sched); + void dl_sched(tti_sched_result_t* tti_sched); void reset(); private: @@ -42,9 +42,9 @@ private: uint32_t n_tx = 0; }; - void update_si_windows(tti_sched_t* tti_sched); - void alloc_sibs(tti_sched_t* tti_sched); - void alloc_paging(tti_sched_t* tti_sched); + void update_si_windows(tti_sched_result_t* tti_sched); + void alloc_sibs(tti_sched_result_t* tti_sched); + void alloc_paging(tti_sched_result_t* tti_sched); // args cell_cfg_t* cfg; @@ -58,6 +58,45 @@ private: uint32_t bc_aggr_level = 2; }; +class sched::ra_sched_t +{ +public: + struct pending_msg3_t { + bool enabled = false; + uint16_t rnti = 0; + uint32_t L = 0; + uint32_t n_prb = 0; + uint32_t mcs = 0; + }; + + explicit ra_sched_t(cell_cfg_t* cfg_); + void init(srslte::log* log_, std::map& ue_db_); + void dl_sched(tti_sched_result_t* tti_sched); + void ul_sched(tti_sched_result_t* tti_sched); + int dl_rach_info(dl_sched_rar_info_t rar_info); + void reset(); + const pending_msg3_t& find_pending_msg3(uint32_t tti); + +private: + struct sched_rar_t { + int buf_rar = 0; + uint16_t rnti = 0; + 
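// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch.] find_pending_msg3() above works
// because pending Msg3 grants live in a small ring indexed by tti % TTIMOD_SZ:
// the DL scheduler writes the slot for (tti_tx_dl + MSG3_DELAY_MS + TX_DELAY)
// when it allocates a RAR, and the UL scheduler later reads the slot for its
// tti_tx_ul. Stand-in below with placeholder constants (the real TTIMOD_SZ,
// MSG3_DELAY_MS and TX_DELAY are defined elsewhere in srsLTE).
#include <array>
#include <cstdint>

constexpr uint32_t TTIMOD_SZ_EX     = 8; // placeholder ring size
constexpr uint32_t MSG3_DELAY_MS_EX = 4; // placeholder delays
constexpr uint32_t TX_DELAY_EX      = 4;

struct msg3_slot_t {
  bool     enabled = false;
  uint16_t rnti    = 0;
};

struct msg3_ring_t {
  std::array<msg3_slot_t, TTIMOD_SZ_EX> slots{};
  msg3_slot_t&       at(uint32_t tti) { return slots[tti % TTIMOD_SZ_EX]; }
  const msg3_slot_t& at(uint32_t tti) const { return slots[tti % TTIMOD_SZ_EX]; }
};

int main()
{
  msg3_ring_t ring;
  uint32_t    tti_tx_dl = 100;
  // RAR allocated at tti_tx_dl: reserve the Msg3 slot 8 subframes later.
  msg3_slot_t& slot = ring.at(tti_tx_dl + MSG3_DELAY_MS_EX + TX_DELAY_EX);
  slot.enabled      = true;
  slot.rnti         = 0x46;
  // The UL scheduler running with tti_tx_ul == 108 finds the same slot.
  return ring.at(108).enabled ? 0 : 1;
}
// ---------------------------------------------------------------------------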
uint32_t ra_id = 0; + uint32_t rar_tti = 0; + }; + + // args + srslte::log* log_h = nullptr; + cell_cfg_t* cfg; + std::map* ue_db = nullptr; + + + std::queue pending_rars; + std::array pending_msg3; + uint32_t tti_tx_dl = 0; + uint32_t rar_aggr_level = 2; +}; + } // namespace srsenb #endif // SRSLTE_SCHEDULER_CTRL_H diff --git a/srsenb/src/stack/mac/scheduler.cc b/srsenb/src/stack/mac/scheduler.cc index e3ba7fc90..930581956 100644 --- a/srsenb/src/stack/mac/scheduler.cc +++ b/srsenb/src/stack/mac/scheduler.cc @@ -38,7 +38,7 @@ namespace srsenb { * TTI resource Scheduling Methods *******************************************************/ -void sched::tti_sched_t::init(sched* parent_) +void sched::tti_sched_result_t::init(sched* parent_) { parent = parent_; log_h = parent->log_h; @@ -50,7 +50,7 @@ void sched::tti_sched_t::init(sched* parent_) tti_alloc.init(log_h, &parent->cfg, pdcch_alloc); } -void sched::tti_sched_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi) +void sched::tti_sched_result_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi) { tti_alloc.new_tti(tti_rx_, start_cfi); @@ -67,7 +67,7 @@ void sched::tti_sched_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi) bzero(&ul_sched_result, sizeof(ul_sched_result)); } -bool sched::tti_sched_t::is_dl_alloc(sched_ue* user) const +bool sched::tti_sched_result_t::is_dl_alloc(sched_ue* user) const { for (const auto& a : data_allocs) { if (a.user_ptr == user) { @@ -77,7 +77,7 @@ bool sched::tti_sched_t::is_dl_alloc(sched_ue* user) const return false; } -bool sched::tti_sched_t::is_ul_alloc(sched_ue* user) const +bool sched::tti_sched_result_t::is_ul_alloc(sched_ue* user) const { for (const auto& a : ul_data_allocs) { if (a.user_ptr == user) { @@ -87,7 +87,8 @@ bool sched::tti_sched_t::is_ul_alloc(sched_ue* user) const return false; } -sched::tti_sched_t::ctrl_code_t sched::tti_sched_t::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti) +sched::tti_sched_result_t::ctrl_code_t +sched::tti_sched_result_t::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti) { ctrl_alloc_t ctrl_alloc{}; @@ -115,7 +116,7 @@ sched::tti_sched_t::ctrl_code_t sched::tti_sched_t::alloc_dl_ctrl(uint32_t aggr_ return {ret.first, ctrl_alloc}; } -alloc_outcome_t sched::tti_sched_t::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx) +alloc_outcome_t sched::tti_sched_result_t::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx) { uint32_t sib_len = sibs_cfg[sib_idx].len; uint32_t rv = get_rvidx(sib_ntx); @@ -138,7 +139,7 @@ alloc_outcome_t sched::tti_sched_t::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx return ret.first; } -alloc_outcome_t sched::tti_sched_t::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload) +alloc_outcome_t sched::tti_sched_result_t::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload) { ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, paging_payload, SRSLTE_PRNTI); if (not ret.first) { @@ -154,8 +155,10 @@ alloc_outcome_t sched::tti_sched_t::alloc_paging(uint32_t aggr_lvl, uint32_t pag return ret.first; } -sched::tti_sched_t::rar_code_t -sched::tti_sched_t::alloc_rar(uint32_t aggr_lvl, const dl_sched_rar_t& rar_grant, uint32_t prach_tti, uint32_t buf_rar) +sched::tti_sched_result_t::rar_code_t sched::tti_sched_result_t::alloc_rar(uint32_t aggr_lvl, + const dl_sched_rar_t& rar_grant, + uint32_t prach_tti, + uint32_t buf_rar) { // RA-RNTI = 1 + t_id + f_id // t_id = index of first subframe specified by PRACH (0<=t_id<10) @@ -176,7 +179,7 @@ sched::tti_sched_t::alloc_rar(uint32_t aggr_lvl, const 
dl_sched_rar_t& rar_grant return {ret.first, &rar_allocs.back()}; } -alloc_outcome_t sched::tti_sched_t::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) +alloc_outcome_t sched::tti_sched_result_t::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) { if (is_dl_alloc(user)) { log_h->warning("SCHED: Attempt to assign multiple harq pids to the same user rnti=0x%x\n", user->get_rnti()); @@ -200,10 +203,10 @@ alloc_outcome_t sched::tti_sched_t::alloc_dl_user(sched_ue* user, const rbgmask_ return alloc_outcome_t::SUCCESS; } -alloc_outcome_t sched::tti_sched_t::alloc_ul(sched_ue* user, - ul_harq_proc::ul_alloc_t alloc, - tti_sched_t::ul_alloc_t::type_t alloc_type, - uint32_t mcs) +alloc_outcome_t sched::tti_sched_result_t::alloc_ul(sched_ue* user, + ul_harq_proc::ul_alloc_t alloc, + tti_sched_result_t::ul_alloc_t::type_t alloc_type, + uint32_t mcs) { // Check whether user was already allocated if (is_ul_alloc(user)) { @@ -229,12 +232,12 @@ alloc_outcome_t sched::tti_sched_t::alloc_ul(sched_ue* use return alloc_outcome_t::SUCCESS; } -alloc_outcome_t sched::tti_sched_t::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) +alloc_outcome_t sched::tti_sched_result_t::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) { // check whether adaptive/non-adaptive retx/newtx - tti_sched_t::ul_alloc_t::type_t alloc_type; - ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul()); - bool has_retx = h->has_pending_retx(); + tti_sched_result_t::ul_alloc_t::type_t alloc_type; + ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul()); + bool has_retx = h->has_pending_retx(); if (has_retx) { ul_harq_proc::ul_alloc_t prev_alloc = h->get_alloc(); if (prev_alloc.L == alloc.L and prev_alloc.RB_start == prev_alloc.L) { @@ -249,12 +252,12 @@ alloc_outcome_t sched::tti_sched_t::alloc_ul_user(sched_ue* user, ul_harq_proc:: return alloc_ul(user, alloc, alloc_type); } -alloc_outcome_t sched::tti_sched_t::alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs) +alloc_outcome_t sched::tti_sched_result_t::alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs) { return alloc_ul(user, alloc, ul_alloc_t::MSG3, mcs); } -void sched::tti_sched_t::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result) +void sched::tti_sched_result_t::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result) { for (const auto& bc_alloc : bc_allocs) { sched_interface::dl_sched_bc_t* bc = &dl_sched_result.bc[dl_sched_result.nof_bc_elems]; @@ -323,7 +326,7 @@ void sched::tti_sched_t::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& } } -void sched::tti_sched_t::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result) +void sched::tti_sched_result_t::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result) { for (const auto& rar_alloc : rar_allocs) { sched_interface::dl_sched_rar_t* rar = &dl_sched_result.rar[dl_sched_result.nof_rar_elems]; @@ -353,8 +356,7 @@ void sched::tti_sched_t::set_rar_sched_result(const pdcch_grid_t::alloc_result_t // Print RAR allocation result for (uint32_t i = 0; i < rar->nof_grants; ++i) { const auto& msg3_grant = rar->msg3_grant[i]; - uint32_t pending_tti = (get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY) % TTIMOD_SZ; - uint16_t expected_rnti = parent->pending_msg3[pending_tti].rnti; // FIXME + uint16_t expected_rnti = parent->rar_sched->find_pending_msg3(get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY).rnti; log_h->info("SCHED: RAR, temp_crnti=0x%x, ra-rnti=%d, rbgs=(%d,%d), 
dci=(%d,%d), rar_grant_rba=%d, " "rar_grant_mcs=%d\n", expected_rnti, @@ -371,7 +373,7 @@ void sched::tti_sched_t::set_rar_sched_result(const pdcch_grid_t::alloc_result_t } } -void sched::tti_sched_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result) +void sched::tti_sched_result_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result) { for (const auto& data_alloc : data_allocs) { sched_interface::dl_sched_data_t* data = &dl_sched_result.data[dl_sched_result.nof_data_elems]; @@ -429,7 +431,7 @@ void sched::tti_sched_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_resu } } -void sched::tti_sched_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result) +void sched::tti_sched_result_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result) { /* Set UL data DCI locs and format */ for (const auto& ul_alloc : ul_data_allocs) { @@ -492,7 +494,7 @@ void sched::tti_sched_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& } } -void sched::tti_sched_t::generate_dcis() +void sched::tti_sched_result_t::generate_dcis() { /* Pick one of the possible DCI masks */ pdcch_grid_t::alloc_result_t dci_result; @@ -512,12 +514,12 @@ void sched::tti_sched_t::generate_dcis() set_ul_sched_result(dci_result); } -uint32_t sched::tti_sched_t::get_nof_ctrl_symbols() const +uint32_t sched::tti_sched_result_t::get_nof_ctrl_symbols() const { return tti_alloc.get_cfi() + ((parent->cfg.cell.nof_prb <= 10) ? 1 : 0); } -int sched::tti_sched_t::generate_format1a( +int sched::tti_sched_result_t::generate_format1a( uint32_t rb_start, uint32_t l_crb, uint32_t tbs_bytes, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci) { /* Calculate I_tbs for this TBS */ @@ -568,13 +570,12 @@ int sched::tti_sched_t::generate_format1a( * *******************************************************/ sched::sched() : - bc_aggr_level(0), - rar_aggr_level(0), P(0), si_n_rbg(0), rar_n_rbg(0), nof_rbg(0), - bc_sched(new bc_sched_t{&cfg}) + bc_sched(new bc_sched_t{&cfg}), + rar_sched(new ra_sched_t{&cfg}) { current_tti = 0; log_h = nullptr; @@ -618,17 +619,18 @@ void sched::init(rrc_interface_mac* rrc_, srslte::log* log) rrc = rrc_; bc_sched->init(rrc); + rar_sched->init(log_h, ue_db); reset(); } int sched::reset() { - bzero(pending_msg3, sizeof(pending_msg3_t) * TTIMOD_SZ); - while (not pending_rars.empty()) { - pending_rars.pop(); - } - configured = false; + { + std::lock_guard lock(sched_mutex); + rar_sched->reset(); + bc_sched->reset(); + } pthread_rwlock_wrlock(&rwlock); ue_db.clear(); pthread_rwlock_unlock(&rwlock); @@ -691,10 +693,8 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg) } // Initiate the tti_scheduler for each TTI - for (uint32_t i = 0; i < nof_sched_ttis; ++i) { - pdcch_grid_t pdcch_alloc; - pdcch_alloc.init(log_h, ®s, common_locations, rar_locations); - tti_scheds[i].init(this); + for (tti_sched_result_t& tti_sched : tti_scheds) { + tti_sched.init(this); } configured = true; @@ -842,10 +842,8 @@ int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value) int sched::dl_rach_info(dl_sched_rar_info_t rar_info) { - Info("SCHED: New RAR tti=%d, preamble=%d, temp_crnti=0x%x, ta_cmd=%d, msg3_size=%d\n", - rar_info.prach_tti, rar_info.preamble_idx, rar_info.temp_crnti, rar_info.ta_cmd, rar_info.msg3_size); - pending_rars.push(rar_info); - return 0; + std::lock_guard lock(sched_mutex); + return rar_sched->dl_rach_info(rar_info); } int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code) @@ -894,9 +892,9 @@ void 
sched::tpc_dec(uint16_t rnti) * *******************************************************/ -sched::tti_sched_t* sched::new_tti(uint32_t tti_rx) +sched::tti_sched_result_t* sched::new_tti(uint32_t tti_rx) { - tti_sched_t* tti_sched = get_tti_sched(tti_rx); + tti_sched_result_t* tti_sched = get_tti_sched(tti_rx); // if it is the first time tti is run, reset vars if (tti_rx != tti_sched->get_tti_rx()) { @@ -932,96 +930,13 @@ sched::tti_sched_t* sched::new_tti(uint32_t tti_rx) return tti_sched; } -bool is_in_tti_interval(uint32_t tti, uint32_t tti1, uint32_t tti2) -{ - tti %= 10240; - tti1 %= 10240; - tti2 %= 10240; - if (tti1 <= tti2) { - return tti >= tti1 and tti <= tti2; - } - return tti >= tti1 or tti <= tti2; -} - -// Schedules RAR -// On every call to this function, we schedule the oldest RAR which is still within the window. If outside the window we discard it. -void sched::dl_sched_rar(tti_sched_t* tti_sched) -{ - // Discard all RARs out of the window. The first one inside the window is scheduled, if we can't we exit - while (!pending_rars.empty()) { - dl_sched_rar_info_t rar = pending_rars.front(); - if (not is_in_tti_interval(tti_sched->get_tti_tx_dl(), - rar.prach_tti + 3, - rar.prach_tti + 3 + cfg.prach_rar_window)) - { - if (tti_sched->get_tti_tx_dl() >= rar.prach_tti + 3 + cfg.prach_rar_window) { - log_h->console("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n", - rar.prach_tti, - cfg.prach_rar_window, - current_tti); - log_h->error("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n", - rar.prach_tti, - cfg.prach_rar_window, - current_tti); - // Remove from pending queue and get next one if window has passed already - pending_rars.pop(); - continue; - } - // If window not yet started do not look for more pending RARs - return; - } - - /* Since we do a fixed Msg3 scheduling for all RAR, we can only allocate 1 RAR per TTI. 
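// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch.] The RAR-window check above
// (prach_tti + 3 .. prach_tti + 3 + prach_rar_window) relies on the
// wrap-around interval test that this series hoists into
// sched_utils::is_in_tti_interval(). A self-contained copy with a worked
// example; the window length of 5 is chosen only for the example.
#include <cassert>
#include <cstdint>

inline bool in_tti_interval(uint32_t tti, uint32_t tti1, uint32_t tti2)
{
  tti %= 10240;
  tti1 %= 10240;
  tti2 %= 10240;
  if (tti1 <= tti2) {
    return tti >= tti1 and tti <= tti2;
  }
  return tti >= tti1 or tti <= tti2; // interval wraps past TTI 10239 back to 0
}

int main()
{
  // PRACH at TTI 10235: the window [10238, 10243] wraps to [10238, 3].
  assert(in_tti_interval(10239, 10235 + 3, 10235 + 3 + 5));   // inside, before the wrap
  assert(in_tti_interval(2, 10235 + 3, 10235 + 3 + 5));       // inside, after the wrap
  assert(not in_tti_interval(100, 10235 + 3, 10235 + 3 + 5)); // outside the window
  return 0;
}
// ---------------------------------------------------------------------------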
- * If we have enough space in the window, every call to this function we'll allocate 1 pending RAR and associate a - * Msg3 transmission - */ - dl_sched_rar_t rar_grant; - uint32_t L_prb = 3; - uint32_t n_prb = cfg.nrb_pucch>0?cfg.nrb_pucch:2; - bzero(&rar_grant, sizeof(rar_grant)); - uint32_t rba = srslte_ra_type2_to_riv(L_prb, n_prb, cfg.cell.nof_prb); - - dl_sched_rar_grant_t *grant = &rar_grant.msg3_grant[0]; - grant->grant.tpc_pusch = 3; - grant->grant.trunc_mcs = 0; - grant->grant.rba = rba; - grant->data = rar; - rar_grant.nof_grants++; - - // Try to schedule DCI + RBGs for RAR Grant - tti_sched_t::rar_code_t ret = tti_sched->alloc_rar(rar_aggr_level, - rar_grant, - rar.prach_tti, - 7 * rar_grant.nof_grants); //fixme: check RAR size - - // If we can allocate, schedule Msg3 and remove from pending - if (!ret.first) { - return; - } - - // Schedule Msg3 only if there is a requirement for Msg3 data - uint32_t pending_tti = (tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY) % TTIMOD_SZ; - pending_msg3[pending_tti].enabled = true; - pending_msg3[pending_tti].rnti = rar.temp_crnti; // FIXME - pending_msg3[pending_tti].L = L_prb; - pending_msg3[pending_tti].n_prb = n_prb; - dl_sched_rar_grant_t *last_msg3 = &rar_grant.msg3_grant[rar_grant.nof_grants - 1]; - pending_msg3[pending_tti].mcs = last_msg3->grant.trunc_mcs; - Info("SCHED: Allocating Msg3 for rnti=%d at tti=%d\n", rar.temp_crnti, tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY); - - // Remove pending RAR and exit - pending_rars.pop(); - return; - } -} - -void sched::dl_sched_data(tti_sched_t* tti_sched) +void sched::dl_sched_data(tti_sched_result_t* tti_sched) { // NOTE: In case of 6 PRBs, do not transmit if there is going to be a PRACH in the UL to avoid collisions - uint32_t tti_rx_ack = TTI_RX_ACK(tti_sched->get_tti_rx()); - uint32_t pending_tti = tti_rx_ack % TTIMOD_SZ; - if (cfg.cell.nof_prb == 6 and (srslte_prach_tti_opportunity_config_fdd(cfg.prach_config, tti_rx_ack, -1) or - pending_msg3[pending_tti].enabled)) { + uint32_t tti_rx_ack = TTI_RX_ACK(tti_sched->get_tti_rx()); + bool msg3_enabled = rar_sched->find_pending_msg3(tti_rx_ack).enabled; + if (cfg.cell.nof_prb == 6 and + (srslte_prach_tti_opportunity_config_fdd(cfg.prach_config, tti_rx_ack, -1) or msg3_enabled)) { tti_sched->get_dl_mask().fill(0, tti_sched->get_dl_mask().size()); } @@ -1030,18 +945,16 @@ void sched::dl_sched_data(tti_sched_t* tti_sched) } // Compute DL scheduler result -int sched::generate_dl_sched(tti_sched_t* tti_sched) +int sched::generate_dl_sched(tti_sched_result_t* tti_sched) { /* Initialize variables */ - current_tti = tti_sched->get_tti_tx_dl(); - bc_aggr_level = 2; - rar_aggr_level = 2; + current_tti = tti_sched->get_tti_tx_dl(); /* Schedule Broadcast data (SIB and paging) */ bc_sched->dl_sched(tti_sched); /* Schedule RAR */ - dl_sched_rar(tti_sched); + rar_sched->dl_sched(tti_sched); /* Schedule pending RLC data */ dl_sched_data(tti_sched); @@ -1049,7 +962,7 @@ int sched::generate_dl_sched(tti_sched_t* tti_sched) return 0; } -void sched::generate_phich(tti_sched_t* tti_sched) +void sched::generate_phich(tti_sched_result_t* tti_sched) { // Allocate user PHICHs uint32_t nof_phich_elems = 0; @@ -1075,31 +988,8 @@ void sched::generate_phich(tti_sched_t* tti_sched) tti_sched->ul_sched_result.nof_phich_elems = nof_phich_elems; } -void sched::ul_sched_msg3(tti_sched_t* tti_sched) -{ - uint32_t pending_tti = tti_sched->get_tti_tx_ul() % TTIMOD_SZ; - if (not pending_msg3[pending_tti].enabled) { - return; - } - - uint16_t rnti = 
pending_msg3[pending_tti].rnti; - if (ue_db.count(rnti) == 0) { - log_h->warning("SCHED: Msg3 allocated for user rnti=0x%x that no longer exists\n", rnti); - return; - } - sched_ue* user = &ue_db[rnti]; - - /* Allocate RBGs and HARQ for Msg3 */ - ul_harq_proc::ul_alloc_t msg3 = {pending_msg3[pending_tti].n_prb, pending_msg3[pending_tti].L}; - if (not tti_sched->alloc_ul_msg3(user, msg3, pending_msg3[pending_tti].mcs)) { - log_h->warning("SCHED: Could not allocate msg3 within (%d,%d)\n", msg3.RB_start, msg3.RB_start + msg3.L); - return; - } - pending_msg3[pending_tti].enabled = false; -} - // Compute UL scheduler result -int sched::generate_ul_sched(tti_sched_t* tti_sched) +int sched::generate_ul_sched(tti_sched_result_t* tti_sched) { /* Initialize variables */ current_tti = tti_sched->get_tti_tx_ul(); @@ -1112,7 +1002,7 @@ int sched::generate_ul_sched(tti_sched_t* tti_sched) } // Update available allocation if there's a pending RAR - ul_sched_msg3(tti_sched); + rar_sched->ul_sched(tti_sched); // reserve PRBs for PUCCH if (cfg.cell.nof_prb != 6 and (ul_mask & pucch_mask).any()) { @@ -1143,7 +1033,7 @@ int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result) uint32_t tti_rx = TTI_SUB(tti, TX_DELAY); // Compute scheduling Result for tti_rx - tti_sched_t* tti_sched = new_tti(tti_rx); + tti_sched_result_t* tti_sched = new_tti(tti_rx); // copy result *sched_result = tti_sched->dl_sched_result; @@ -1159,8 +1049,8 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched } // Compute scheduling Result for tti_rx - uint32_t tti_rx = (tti + 10240 - 2 * FDD_HARQ_DELAY_MS) % 10240; - tti_sched_t* tti_sched = new_tti(tti_rx); + uint32_t tti_rx = (tti + 10240 - 2 * FDD_HARQ_DELAY_MS) % 10240; + tti_sched_result_t* tti_sched = new_tti(tti_rx); // Copy results *sched_result = tti_sched->ul_sched_result; diff --git a/srsenb/src/stack/mac/scheduler_ctrl.cc b/srsenb/src/stack/mac/scheduler_ctrl.cc index 0c6aa6626..b0bc19112 100644 --- a/srsenb/src/stack/mac/scheduler_ctrl.cc +++ b/srsenb/src/stack/mac/scheduler_ctrl.cc @@ -30,7 +30,7 @@ void sched::bc_sched_t::init(srsenb::rrc_interface_mac* rrc_) rrc = rrc_; } -void sched::bc_sched_t::dl_sched(sched::tti_sched_t* tti_sched) +void sched::bc_sched_t::dl_sched(sched::tti_sched_result_t* tti_sched) { current_sf_idx = tti_sched->get_sf_idx(); current_sfn = tti_sched->get_sfn(); @@ -48,7 +48,7 @@ void sched::bc_sched_t::dl_sched(sched::tti_sched_t* tti_sched) alloc_paging(tti_sched); } -void sched::bc_sched_t::update_si_windows(tti_sched_t* tti_sched) +void sched::bc_sched_t::update_si_windows(tti_sched_result_t* tti_sched) { uint32_t tti_tx_dl = tti_sched->get_tti_tx_dl(); @@ -86,7 +86,7 @@ void sched::bc_sched_t::update_si_windows(tti_sched_t* tti_sched) } } -void sched::bc_sched_t::alloc_sibs(tti_sched_t* tti_sched) +void sched::bc_sched_t::alloc_sibs(tti_sched_result_t* tti_sched) { for (uint32_t i = 0; i < pending_sibs.size(); i++) { if (cfg->sibs[i].len > 0 and pending_sibs[i].is_in_window and pending_sibs[i].n_tx < 4) { @@ -108,7 +108,7 @@ void sched::bc_sched_t::alloc_sibs(tti_sched_t* tti_sched) } } -void sched::bc_sched_t::alloc_paging(srsenb::sched::tti_sched_t* tti_sched) +void sched::bc_sched_t::alloc_paging(srsenb::sched::tti_sched_result_t* tti_sched) { /* Allocate DCIs and RBGs for paging */ if (rrc != nullptr) { @@ -126,4 +126,141 @@ void sched::bc_sched_t::reset() } } +/******************************************************* + * RAR scheduling + 
*******************************************************/ + +sched::ra_sched_t::ra_sched_t(cell_cfg_t* cfg_) : cfg(cfg_) {} + +void sched::ra_sched_t::init(srslte::log* log_, std::map& ue_db_) +{ + log_h = log_; + ue_db = &ue_db_; +} + +// Schedules RAR +// On every call to this function, we schedule the oldest RAR which is still within the window. If outside the window we discard it. +void sched::ra_sched_t::dl_sched(srsenb::sched::tti_sched_result_t* tti_sched) +{ + tti_tx_dl = tti_sched->get_tti_tx_dl(); + rar_aggr_level = 2; + + // Discard all RARs out of the window. The first one inside the window is scheduled, if we can't we exit + while (!pending_rars.empty()) { + dl_sched_rar_info_t rar = pending_rars.front(); + if (not sched_utils::is_in_tti_interval(tti_tx_dl, + rar.prach_tti + 3, + rar.prach_tti + 3 + cfg->prach_rar_window)) + { + if (tti_tx_dl >= rar.prach_tti + 3 + cfg->prach_rar_window) { + log_h->console("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n", + rar.prach_tti, + cfg->prach_rar_window, + tti_tx_dl); + log_h->error("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n", + rar.prach_tti, + cfg->prach_rar_window, + tti_tx_dl); + // Remove from pending queue and get next one if window has passed already + pending_rars.pop(); + continue; + } + // If window not yet started do not look for more pending RARs + return; + } + + /* Since we do a fixed Msg3 scheduling for all RAR, we can only allocate 1 RAR per TTI. + * If we have enough space in the window, every call to this function we'll allocate 1 pending RAR and associate a + * Msg3 transmission + */ + dl_sched_rar_t rar_grant; + uint32_t L_prb = 3; + uint32_t n_prb = cfg->nrb_pucch>0?cfg->nrb_pucch:2; + bzero(&rar_grant, sizeof(rar_grant)); + uint32_t rba = srslte_ra_type2_to_riv(L_prb, n_prb, cfg->cell.nof_prb); + + dl_sched_rar_grant_t *grant = &rar_grant.msg3_grant[0]; + grant->grant.tpc_pusch = 3; + grant->grant.trunc_mcs = 0; + grant->grant.rba = rba; + grant->data = rar; + rar_grant.nof_grants++; + + // Try to schedule DCI + RBGs for RAR Grant + tti_sched_result_t::rar_code_t ret = tti_sched->alloc_rar(rar_aggr_level, + rar_grant, + rar.prach_tti, + 7 * rar_grant.nof_grants); //fixme: check RAR size + + // If we can allocate, schedule Msg3 and remove from pending + if (!ret.first) { + return; + } + + // Schedule Msg3 only if there is a requirement for Msg3 data + uint32_t pending_tti = (tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY) % TTIMOD_SZ; + pending_msg3[pending_tti].enabled = true; + pending_msg3[pending_tti].rnti = rar.temp_crnti; // FIXME + pending_msg3[pending_tti].L = L_prb; + pending_msg3[pending_tti].n_prb = n_prb; + dl_sched_rar_grant_t *last_msg3 = &rar_grant.msg3_grant[rar_grant.nof_grants - 1]; + pending_msg3[pending_tti].mcs = last_msg3->grant.trunc_mcs; + log_h->info("SCHED: Allocating Msg3 for rnti=%d at tti=%d\n", rar.temp_crnti, tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY); + + // Remove pending RAR and exit + pending_rars.pop(); + return; + } +} + +// Schedules Msg3 +void sched::ra_sched_t::ul_sched(srsenb::sched::tti_sched_result_t* tti_sched) +{ + uint32_t pending_tti = tti_sched->get_tti_tx_ul() % TTIMOD_SZ; + + // check if there is a Msg3 to allocate + if (not pending_msg3[pending_tti].enabled) { + return; + } + + uint16_t rnti = pending_msg3[pending_tti].rnti; + auto user_it = ue_db->find(rnti); + if (user_it == ue_db->end()) { + log_h->warning("SCHED: Msg3 allocated for user rnti=0x%x that no longer 
exists\n", rnti); + return; + } + + /* Allocate RBGs and HARQ for Msg3 */ + ul_harq_proc::ul_alloc_t msg3 = {pending_msg3[pending_tti].n_prb, pending_msg3[pending_tti].L}; + if (not tti_sched->alloc_ul_msg3(&user_it->second, msg3, pending_msg3[pending_tti].mcs)) { + log_h->warning("SCHED: Could not allocate msg3 within (%d,%d)\n", msg3.RB_start, msg3.RB_start + msg3.L); + return; + } + pending_msg3[pending_tti].enabled = false; +} + +int sched::ra_sched_t::dl_rach_info(dl_sched_rar_info_t rar_info) +{ + log_h->info("SCHED: New RAR tti=%d, preamble=%d, temp_crnti=0x%x, ta_cmd=%d, msg3_size=%d\n", + rar_info.prach_tti, rar_info.preamble_idx, rar_info.temp_crnti, rar_info.ta_cmd, rar_info.msg3_size); + pending_rars.push(rar_info); + return 0; +} + +void sched::ra_sched_t::reset() +{ + for (auto& msg3 : pending_msg3) { + msg3 = {}; + } + while (not pending_rars.empty()) { + pending_rars.pop(); + } +} + +const sched::ra_sched_t::pending_msg3_t& sched::ra_sched_t::find_pending_msg3(uint32_t tti) +{ + uint32_t pending_tti = tti % TTIMOD_SZ; + return pending_msg3[pending_tti]; +} + } // namespace srsenb diff --git a/srsenb/test/mac/scheduler_test_rand.cc b/srsenb/test/mac/scheduler_test_rand.cc index 307422912..6d1fd3381 100644 --- a/srsenb/test/mac/scheduler_test_rand.cc +++ b/srsenb/test/mac/scheduler_test_rand.cc @@ -20,6 +20,7 @@ */ #include "srsenb/hdr/stack/mac/scheduler.h" +#include "srsenb/hdr/stack/mac/scheduler_ctrl.h" #include "srsenb/hdr/stack/mac/scheduler_ue.h" #include #include @@ -177,7 +178,7 @@ struct sched_tester : public srsenb::sched { uint32_t tti_tx_dl; uint32_t tti_tx_ul; uint32_t current_cfi; - pending_msg3_t ul_pending_msg3; + ra_sched_t::pending_msg3_t ul_pending_msg3; srslte::bounded_bitset<128, true> used_cce; // std::vector used_cce; std::map ue_data; ///< stores buffer state of each user @@ -280,7 +281,7 @@ void sched_tester::new_test_tti(uint32_t tti_) } else { tti_data.ul_sf_idx = (tti_data.tti_tx_ul + 10240 - FDD_HARQ_DELAY_MS) % 10; } - tti_data.ul_pending_msg3 = pending_msg3[tti_data.tti_tx_ul % TTIMOD_SZ]; + tti_data.ul_pending_msg3 = rar_sched->find_pending_msg3(tti_data.tti_tx_ul); tti_data.current_cfi = sched_cfg.nof_ctrl_symbols; tti_data.used_cce.resize(srslte_regs_pdcch_ncce(®s, tti_data.current_cfi)); tti_data.used_cce.reset(); @@ -493,7 +494,7 @@ void sched_tester::assert_no_empty_allocs() */ void sched_tester::test_tti_result() { - tti_sched_t* tti_sched = get_tti_sched(tti_data.tti_rx); + tti_sched_result_t* tti_sched = get_tti_sched(tti_data.tti_rx); // Helper Function: checks if there is any collision. 
If not, fills the mask auto try_cce_fill = [&](const srslte_dci_location_t& dci_loc, const char* ch) { @@ -543,11 +544,11 @@ void sched_tester::test_tti_result() try_cce_fill(rar.dci.location, "DL RAR"); CondError(rar.tbs == 0, "Allocated RAR process with invalid TBS=%d\n", rar.tbs); for (uint32_t j = 0; j < rar.nof_grants; ++j) { - const auto& msg3_grant = rar.msg3_grant[j]; - uint32_t pending_tti = (tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY) % TTIMOD_SZ; - CondError(not pending_msg3[pending_tti].enabled, "Pending Msg3 should have been set\n"); - uint32_t rba = - srslte_ra_type2_to_riv(pending_msg3[pending_tti].L, pending_msg3[pending_tti].n_prb, cfg.cell.nof_prb); + const auto& msg3_grant = rar.msg3_grant[j]; + const ra_sched_t::pending_msg3_t& p = + rar_sched->find_pending_msg3(tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY); + CondError(not p.enabled, "Pending Msg3 should have been set\n"); + uint32_t rba = srslte_ra_type2_to_riv(p.L, p.n_prb, cfg.cell.nof_prb); CondError(msg3_grant.grant.rba != rba, "Pending Msg3 RBA is not valid\n"); } } @@ -750,7 +751,7 @@ void sched_tester::test_sibs() void sched_tester::test_collisions() { - tti_sched_t* tti_sched = get_tti_sched(tti_data.tti_rx); + tti_sched_result_t* tti_sched = get_tti_sched(tti_data.tti_rx); srsenb::prbmask_t ul_allocs(cfg.cell.nof_prb); From 6aec98e140db1a8de15bd69378e48d62d19eec47 Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Wed, 16 Oct 2019 18:11:38 +0100 Subject: [PATCH 8/8] cleanup of some type names, and other minor changes --- srsenb/hdr/stack/mac/scheduler.h | 94 +++++++++++++------------- srsenb/hdr/stack/mac/scheduler_ctrl.h | 2 +- srsenb/hdr/stack/mac/scheduler_grid.h | 21 +++--- srsenb/src/stack/mac/scheduler.cc | 34 ++++------ srsenb/src/stack/mac/scheduler_ctrl.cc | 1 + srsenb/src/stack/mac/scheduler_grid.cc | 4 +- 6 files changed, 76 insertions(+), 80 deletions(-) diff --git a/srsenb/hdr/stack/mac/scheduler.h b/srsenb/hdr/stack/mac/scheduler.h index 4d34f082b..07eb9de62 100644 --- a/srsenb/hdr/stack/mac/scheduler.h +++ b/srsenb/hdr/stack/mac/scheduler.h @@ -48,6 +48,11 @@ inline bool is_in_tti_interval(uint32_t tti, uint32_t tti1, uint32_t tti2) return tti >= tti1 or tti <= tti2; } +inline uint32_t tti_subtract(uint32_t tti1, uint32_t tti2) +{ + return (tti1 + 10240 - tti2) % 10240; +} + } // namespace sched_utils /* Caution: User addition (ue_cfg) and removal (ue_rem) are not thread-safe @@ -68,29 +73,27 @@ public: class metric_dl { - public: - + public: /* Virtual methods for user metric calculation */ virtual void set_log(srslte::log* log_) = 0; virtual void sched_users(std::map& ue_db, dl_tti_sched_t* tti_sched) = 0; }; - class metric_ul { - public: + public: /* Virtual methods for user metric calculation */ virtual void set_log(srslte::log* log_) = 0; virtual void sched_users(std::map& ue_db, ul_tti_sched_t* tti_sched) = 0; }; /************************************************************* - * - * FAPI-like Interface - * + * + * FAPI-like Interface + * ************************************************************/ - - sched(); + + sched(); ~sched(); void init(rrc_interface_mac* rrc, srslte::log* log); @@ -135,29 +138,30 @@ public: /* Custom functions */ void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) final; - void tpc_inc(uint16_t rnti); + void tpc_inc(uint16_t rnti); void tpc_dec(uint16_t rnti); // Static Methods - static uint32_t get_rvidx(uint32_t retx_idx) { - const static int rv_idx[4] = {0, 2, 3, 1}; - return rv_idx[retx_idx%4]; + static uint32_t get_rvidx(uint32_t 
retx_idx) + { + const static int rv_idx[4] = {0, 2, 3, 1}; + return rv_idx[retx_idx % 4]; } static void generate_cce_location( srslte_regs_t* regs, sched_ue::sched_dci_cce_t* location, uint32_t cfi, uint32_t sf_idx = 0, uint16_t rnti = 0); static uint32_t aggr_level(uint32_t aggr_idx) { return 1u << aggr_idx; } protected: - metric_dl *dl_metric; - metric_ul *ul_metric; - srslte::log *log_h; - rrc_interface_mac *rrc; + metric_dl* dl_metric; + metric_ul* ul_metric; + srslte::log* log_h; + rrc_interface_mac* rrc; pthread_rwlock_t rwlock; std::mutex sched_mutex; - cell_cfg_t cfg; - sched_args_t sched_cfg; + cell_cfg_t cfg; + sched_args_t sched_cfg; // This is for computing DCI locations srslte_regs_t regs; @@ -169,8 +173,8 @@ protected: { public: struct ctrl_alloc_t { - size_t dci_idx; - rbg_range_t rbg_range; + size_t dci_idx; + rbg_range_t rbg_range; uint16_t rnti; uint32_t req_bytes; alloc_type_t alloc_type; @@ -181,27 +185,27 @@ protected: explicit rar_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {} }; struct bc_alloc_t : public ctrl_alloc_t { - uint32_t rv = 0; - uint32_t sib_idx = 0; - bc_alloc_t() = default; + uint32_t rv = 0; + uint32_t sib_idx = 0; + bc_alloc_t() = default; explicit bc_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {} }; struct dl_alloc_t { - size_t dci_idx; - sched_ue* user_ptr; - rbgmask_t user_mask; - uint32_t pid; + size_t dci_idx; + sched_ue* user_ptr; + rbgmask_t user_mask; + uint32_t pid; }; struct ul_alloc_t { enum type_t { NEWTX, NOADAPT_RETX, ADAPT_RETX, MSG3 }; - size_t dci_idx; - type_t type; - sched_ue* user_ptr; + size_t dci_idx; + type_t type; + sched_ue* user_ptr; ul_harq_proc::ul_alloc_t alloc; uint32_t mcs = 0; - bool is_retx() const { return type == NOADAPT_RETX or type == ADAPT_RETX; } - bool is_msg3() const { return type == MSG3; } - bool needs_pdcch() const { return type == NEWTX or type == ADAPT_RETX; } + bool is_retx() const { return type == NOADAPT_RETX or type == ADAPT_RETX; } + bool is_msg3() const { return type == MSG3; } + bool needs_pdcch() const { return type == NEWTX or type == ADAPT_RETX; } }; typedef std::pair rar_code_t; typedef std::pair ctrl_code_t; @@ -254,10 +258,10 @@ protected: void set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result); // consts - sched* parent = NULL; - srslte::log* log_h = NULL; + sched* parent = nullptr; + srslte::log* log_h = nullptr; uint32_t P; - cell_cfg_sib_t* sibs_cfg = NULL; + cell_cfg_sib_t* sibs_cfg = nullptr; // internal state tti_grid_t tti_alloc; @@ -267,10 +271,9 @@ protected: std::vector ul_data_allocs; }; - const static uint32_t nof_sched_ttis = 10; - tti_sched_result_t tti_scheds[nof_sched_ttis]; - tti_sched_result_t* get_tti_sched(uint32_t tti_rx) { return &tti_scheds[tti_rx % nof_sched_ttis]; } - std::vector tti_dl_mask; + std::array tti_scheds; + tti_sched_result_t* get_tti_sched(uint32_t tti_rx) { return &tti_scheds[tti_rx % tti_scheds.size()]; } + std::vector tti_dl_mask; tti_sched_result_t* new_tti(uint32_t tti_rx); void generate_phich(tti_sched_result_t* tti_sched); @@ -290,8 +293,6 @@ protected: // derived from args uint32_t P; - uint32_t si_n_rbg; - uint32_t rar_n_rbg; uint32_t nof_rbg; prbmask_t prach_mask; prbmask_t pucch_mask; @@ -299,13 +300,12 @@ protected: std::unique_ptr bc_sched; std::unique_ptr rar_sched; - uint32_t pdsch_re[10]; - uint32_t current_tti; + std::array pdsch_re; + uint32_t current_tti; bool configured; - }; -} +} // namespace srsenb #endif // SRSENB_SCHEDULER_H diff --git a/srsenb/hdr/stack/mac/scheduler_ctrl.h 
b/srsenb/hdr/stack/mac/scheduler_ctrl.h index b7281462c..4505cea72 100644 --- a/srsenb/hdr/stack/mac/scheduler_ctrl.h +++ b/srsenb/hdr/stack/mac/scheduler_ctrl.h @@ -29,7 +29,7 @@ namespace srsenb { class sched::bc_sched_t { public: - bc_sched_t(cell_cfg_t* cfg_); + explicit bc_sched_t(cell_cfg_t* cfg_); void init(rrc_interface_mac* rrc_); void dl_sched(tti_sched_result_t* tti_sched); diff --git a/srsenb/hdr/stack/mac/scheduler_grid.h b/srsenb/hdr/stack/mac/scheduler_grid.h index 8156058e8..3ef181a17 100644 --- a/srsenb/hdr/stack/mac/scheduler_grid.h +++ b/srsenb/hdr/stack/mac/scheduler_grid.h @@ -49,7 +49,7 @@ class pdcch_grid_t { public: struct alloc_t { - uint16_t rnti; + uint16_t rnti = 0; srslte_dci_location_t dci_pos = {0, 0}; pdcch_mask_t current_mask; pdcch_mask_t total_mask; @@ -74,8 +74,8 @@ public: uint32_t get_sf_idx() const { return sf_idx; } private: - const static uint32_t nof_cfis = 3; - typedef std::pair tree_node_t; + const static uint32_t nof_cfis = 3; + using tree_node_t = std::pair; ///< First represents the parent node idx, and second the alloc tree node void reset(); const sched_ue::sched_dci_cce_t* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user) const; @@ -86,10 +86,10 @@ private: const sched_ue::sched_dci_cce_t* dci_locs); // consts - srslte::log* log_h = nullptr; - sched_ue::sched_dci_cce_t* common_locations = nullptr; - sched_ue::sched_dci_cce_t* rar_locations[10] = {nullptr}; - uint32_t cce_size_array[nof_cfis] = {0}; + srslte::log* log_h = nullptr; + sched_ue::sched_dci_cce_t* common_locations = nullptr; + sched_ue::sched_dci_cce_t* rar_locations[10] = {nullptr}; + std::array cce_size_array{}; // tti vars uint32_t tti_rx = 0; @@ -104,11 +104,14 @@ private: class tti_grid_t { public: - using ctrl_alloc_t = std::pair; + struct dl_ctrl_alloc_t { + alloc_outcome_t outcome; + rbg_range_t rbg_range; + }; void init(srslte::log* log_, sched_interface::cell_cfg_t* cell_, const pdcch_grid_t& pdcch_grid); void new_tti(uint32_t tti_rx_, uint32_t start_cfi); - ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type); + dl_ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type); alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask); alloc_outcome_t alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, bool needs_pdcch); diff --git a/srsenb/src/stack/mac/scheduler.cc b/srsenb/src/stack/mac/scheduler.cc index 930581956..8a6fd2811 100644 --- a/srsenb/src/stack/mac/scheduler.cc +++ b/srsenb/src/stack/mac/scheduler.cc @@ -101,19 +101,19 @@ sched::tti_sched_result_t::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, } /* Allocate space in the DL RBG and PDCCH grids */ - tti_grid_t::ctrl_alloc_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, alloc_type); - if (not ret.first) { - return {ret.first, ctrl_alloc}; + tti_grid_t::dl_ctrl_alloc_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, alloc_type); + if (not ret.outcome) { + return {ret.outcome, ctrl_alloc}; } // Allocation Successful - ctrl_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; - ctrl_alloc.rbg_range = ret.second; + ctrl_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1; + ctrl_alloc.rbg_range = ret.rbg_range; ctrl_alloc.rnti = rnti; ctrl_alloc.req_bytes = tbs_bytes; ctrl_alloc.alloc_type = alloc_type; - return {ret.first, ctrl_alloc}; + return {ret.outcome, ctrl_alloc}; } alloc_outcome_t sched::tti_sched_result_t::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx) @@ -188,7 +188,7 @@ alloc_outcome_t 
 
   // Try to allocate RBGs and DCI
   alloc_outcome_t ret = tti_alloc.alloc_dl_data(user, user_mask);
-  if (not ret) {
+  if (ret != alloc_outcome_t::SUCCESS) {
     return ret;
   }
 
@@ -217,7 +217,7 @@ alloc_outcome_t sched::tti_sched_result_t::alloc_ul(sched_ue*
   // Allocate RBGs and DCI space
   bool needs_pdcch = alloc_type == ul_alloc_t::ADAPT_RETX or alloc_type == ul_alloc_t::NEWTX;
   alloc_outcome_t ret = tti_alloc.alloc_ul_data(user, alloc, needs_pdcch);
-  if (not ret) {
+  if (ret != alloc_outcome_t::SUCCESS) {
     return ret;
   }
 
@@ -569,13 +569,7 @@ int sched::tti_sched_result_t::generate_format1a(
  * Initialization and sched configuration functions
  *
  *******************************************************/
-sched::sched() :
-  P(0),
-  si_n_rbg(0),
-  rar_n_rbg(0),
-  nof_rbg(0),
-  bc_sched(new bc_sched_t{&cfg}),
-  rar_sched(new ra_sched_t{&cfg})
+sched::sched() : P(0), nof_rbg(0), bc_sched(new bc_sched_t{&cfg}), rar_sched(new ra_sched_t{&cfg})
 {
   current_tti = 0;
   log_h = nullptr;
@@ -668,10 +662,8 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
     return SRSLTE_ERROR;
   }
 
-  P = srslte_ra_type0_P(cfg.cell.nof_prb);
-  si_n_rbg = srslte::ceil_div(4, P);
-  rar_n_rbg = srslte::ceil_div(3, P);
-  nof_rbg = srslte::ceil_div(cfg.cell.nof_prb, P);
+  P       = srslte_ra_type0_P(cfg.cell.nof_prb);
+  nof_rbg = srslte::ceil_div(cfg.cell.nof_prb, P);
   pucch_mask.resize(cfg.cell.nof_prb);
   if (cfg.nrb_pucch > 0) {
     pucch_mask.fill(0, (uint32_t)cfg.nrb_pucch);
@@ -1030,7 +1022,7 @@ int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result)
     return 0;
   }
 
-  uint32_t tti_rx = TTI_SUB(tti, TX_DELAY);
+  uint32_t tti_rx = sched_utils::tti_subtract(tti, TX_DELAY);
 
   // Compute scheduling Result for tti_rx
   tti_sched_result_t* tti_sched = new_tti(tti_rx);
@@ -1049,7 +1041,7 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
   }
 
   // Compute scheduling Result for tti_rx
-  uint32_t tti_rx = (tti + 10240 - 2 * FDD_HARQ_DELAY_MS) % 10240;
+  uint32_t tti_rx = sched_utils::tti_subtract(tti, 2 * FDD_HARQ_DELAY_MS);
   tti_sched_result_t* tti_sched = new_tti(tti_rx);
 
   // Copy results
diff --git a/srsenb/src/stack/mac/scheduler_ctrl.cc b/srsenb/src/stack/mac/scheduler_ctrl.cc
index b0bc19112..b26195fec 100644
--- a/srsenb/src/stack/mac/scheduler_ctrl.cc
+++ b/srsenb/src/stack/mac/scheduler_ctrl.cc
@@ -249,6 +249,7 @@ int sched::ra_sched_t::dl_rach_info(dl_sched_rar_info_t rar_info)
 
 void sched::ra_sched_t::reset()
 {
+  tti_tx_dl = 0;
   for (auto& msg3 : pending_msg3) {
     msg3 = {};
   }
diff --git a/srsenb/src/stack/mac/scheduler_grid.cc b/srsenb/src/stack/mac/scheduler_grid.cc
index 4d62e5d4d..80db35fb5 100644
--- a/srsenb/src/stack/mac/scheduler_grid.cc
+++ b/srsenb/src/stack/mac/scheduler_grid.cc
@@ -56,7 +56,7 @@ void pdcch_grid_t::init(srslte::log* log_,
   }
 
   // precompute nof_cces
-  for (uint32_t cfix = 0; cfix < nof_cfis; ++cfix) {
+  for (uint32_t cfix = 0; cfix < cce_size_array.size(); ++cfix) {
     int ret = srslte_regs_pdcch_ncce(regs, cfix + 1);
     if (ret < 0) {
       log_h->error("SCHED: Failed to calculate the number of CCEs in the PDCCH\n");
@@ -315,7 +315,7 @@ alloc_outcome_t tti_grid_t::alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type,
 }
 
 //! Allocates CCEs and RBs for control allocs. It allocates RBs in a contiguous manner.
-tti_grid_t::ctrl_alloc_t tti_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type)
+tti_grid_t::dl_ctrl_alloc_t tti_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type)
 {
   rbg_range_t range;
   range.rbg_start = nof_rbgs - avail_rbg;