diff --git a/srsenb/hdr/stack/mac/scheduler_grid.h b/srsenb/hdr/stack/mac/scheduler_grid.h
index 28cdd0036..106c7f54e 100644
--- a/srsenb/hdr/stack/mac/scheduler_grid.h
+++ b/srsenb/hdr/stack/mac/scheduler_grid.h
@@ -72,7 +72,6 @@ public:
   void new_tti(const tti_params_t& tti_params_);
   bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr);
   bool set_cfi(uint32_t cfi);
-  void set_max_cfi(uint32_t cfi) { max_user_cfi = cfi; }
 
   // getters
   uint32_t get_cfi() const { return current_cfix + 1; }
@@ -121,7 +120,7 @@ private:
 
   // tti vars
   const tti_params_t* tti_params = nullptr;
-  uint32_t current_cfix = 0, max_user_cfi = MAX_CFI;
+  uint32_t current_cfix = 0;
   std::vector<alloc_tree_t>   alloc_trees;     ///< List of PDCCH alloc trees, where index is the cfi index
   std::vector<alloc_record_t> dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far
 };
diff --git a/srsenb/hdr/stack/mac/scheduler_ue.h b/srsenb/hdr/stack/mac/scheduler_ue.h
index 96643b8e3..4aae0a10d 100644
--- a/srsenb/hdr/stack/mac/scheduler_ue.h
+++ b/srsenb/hdr/stack/mac/scheduler_ue.h
@@ -133,7 +133,7 @@ public:
 
   uint32_t get_required_prb_ul(uint32_t cc_idx, uint32_t req_bytes);
 
-  rbg_range_t get_required_dl_rbgs(uint32_t ue_cc_idx, uint32_t nof_ctrl_symbols);
+  rbg_range_t get_required_dl_rbgs(uint32_t ue_cc_idx);
   std::pair<uint32_t, uint32_t> get_requested_dl_bytes(uint32_t ue_cc_idx);
   uint32_t get_pending_dl_new_data();
   uint32_t get_pending_ul_new_data(uint32_t tti);
diff --git a/srsenb/src/stack/mac/scheduler_grid.cc b/srsenb/src/stack/mac/scheduler_grid.cc
index d417264e8..96b1d0451 100644
--- a/srsenb/src/stack/mac/scheduler_grid.cc
+++ b/srsenb/src/stack/mac/scheduler_grid.cc
@@ -90,7 +90,6 @@ void pdcch_grid_t::new_tti(const tti_params_t& tti_params_)
   }
   dci_record_list.clear();
   current_cfix = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1;
-  max_user_cfi = cc_cfg->sched_cfg->max_nof_ctrl_symbols;
 }
 
 const sched_dci_cce_t* pdcch_grid_t::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const
@@ -117,16 +116,18 @@ bool pdcch_grid_t::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_u
   // TODO: Make the alloc tree update lazy
   alloc_record_t record{.user = user, .aggr_idx = aggr_idx, .alloc_type = alloc_type};
 
-  // Try to allocate user in PDCCH for given CFI. If it fails, increment CFI
+  // Try to allocate user in PDCCH for given CFI. If it fails, increment CFI.
   uint32_t first_cfi = get_cfi();
-  bool     success   = alloc_dci_record(record, get_cfi() - 1);
-  while (not success and get_cfi() < max_user_cfi) {
-    set_cfi(get_cfi() + 1);
+  bool     success;
+  do {
     success = alloc_dci_record(record, get_cfi() - 1);
-  }
+  } while (not success and get_cfi() < cc_cfg->sched_cfg->max_nof_ctrl_symbols and set_cfi(get_cfi() + 1));
+
   if (not success) {
     // DCI allocation failed. go back to original CFI
-    set_cfi(first_cfi);
+    if (get_cfi() != first_cfi and not set_cfi(first_cfi)) {
+      log_h->error("SCHED: Failed to return back to original PDCCH state\n");
+    }
     return false;
   }
 
@@ -227,7 +228,7 @@ bool pdcch_grid_t::set_cfi(uint32_t cfi)
 {
   if (cfi < cc_cfg->sched_cfg->min_nof_ctrl_symbols or cfi > cc_cfg->sched_cfg->max_nof_ctrl_symbols) {
     srslte::logmap::get("MAC")->error("Invalid CFI value. Defaulting to current CFI.\n");
-    return true;
+    return false;
   }
 
   uint32_t new_cfix = cfi - 1;
@@ -408,22 +409,11 @@ alloc_outcome_t sf_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_m
 {
   // Check if allocation would cause segmentation
   uint32_t ue_cc_idx = user->get_cell_index(cc_cfg->enb_cc_idx).second;
-  rbg_range_t r = user->get_required_dl_rbgs(ue_cc_idx, pdcch_alloc.get_cfi());
+  rbg_range_t r = user->get_required_dl_rbgs(ue_cc_idx);
   if (r.rbg_min > user_mask.count()) {
     log_h->error("The number of RBGs allocated will force segmentation\n");
     return alloc_outcome_t::NOF_RB_INVALID;
   }
-  // Place an upper bound in CFI if necessary, to avoid segmentation
-  if (pdcch_alloc.get_cfi() < cc_cfg->sched_cfg->max_nof_ctrl_symbols) {
-    for (uint32_t cfi = cc_cfg->sched_cfg->max_nof_ctrl_symbols; cfi >= pdcch_alloc.get_cfi() + 1; --cfi) {
-      r = user->get_required_dl_rbgs(ue_cc_idx, cfi);
-      if (r.rbg_min <= user_mask.count()) {
-        break;
-      }
-      // decrease max CFI
-      pdcch_alloc.set_max_cfi(cfi);
-    }
-  }
 
   srslte_dci_format_t dci_format = user->get_dci_format();
   uint32_t nof_bits = srslte_dci_format_sizeof(&cc_cfg->cfg.cell, nullptr, nullptr, dci_format);
diff --git a/srsenb/src/stack/mac/scheduler_metric.cc b/srsenb/src/stack/mac/scheduler_metric.cc
index fec3a9617..ffdc50bbc 100644
--- a/srsenb/src/stack/mac/scheduler_metric.cc
+++ b/srsenb/src/stack/mac/scheduler_metric.cc
@@ -133,7 +133,7 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
     h = user->get_empty_dl_harq(tti_dl, cell_idx);
     if (h != nullptr) {
       // Allocate resources based on pending data
-      rbg_range_t req_rbgs = user->get_required_dl_rbgs(cell_idx, tti_alloc->get_nof_ctrl_symbols());
+      rbg_range_t req_rbgs = user->get_required_dl_rbgs(cell_idx);
       if (req_rbgs.rbg_min > 0) {
         rbgmask_t newtx_mask(tti_alloc->get_dl_mask().size());
         if (find_allocation(req_rbgs.rbg_min, req_rbgs.rbg_max, &newtx_mask)) {
diff --git a/srsenb/src/stack/mac/scheduler_ue.cc b/srsenb/src/stack/mac/scheduler_ue.cc
index 950fe613b..b0ea69fa8 100644
--- a/srsenb/src/stack/mac/scheduler_ue.cc
+++ b/srsenb/src/stack/mac/scheduler_ue.cc
@@ -813,14 +813,20 @@ uint32_t sched_ue::get_pending_dl_new_data_total()
   return req_bytes;
 }
 
-rbg_range_t sched_ue::get_required_dl_rbgs(uint32_t ue_cc_idx, uint32_t nof_ctrl_symbols)
+/**
+ * Compute the range of RBGs that avoids segmentation of TM and MAC subheader data. Always computed for highest CFI
+ * @param ue_cc_idx carrier of the UE
+ * @return range of number of RBGs that a UE can allocate in a given subframe
+ */
+rbg_range_t sched_ue::get_required_dl_rbgs(uint32_t ue_cc_idx)
 {
   std::pair<uint32_t, uint32_t> req_bytes = get_requested_dl_bytes(ue_cc_idx);
   if (req_bytes.first == 0 and req_bytes.second == 0) {
     return {0, 0};
   }
-  const auto* cellparams = carriers[ue_cc_idx].get_cell_cfg();
-  int pending_prbs = carriers[ue_cc_idx].get_required_prb_dl(req_bytes.first, nof_ctrl_symbols);
+  const auto* cellparams = carriers[ue_cc_idx].get_cell_cfg();
+  int         pending_prbs =
+      carriers[ue_cc_idx].get_required_prb_dl(req_bytes.first, cellparams->sched_cfg->max_nof_ctrl_symbols);
   if (pending_prbs < 0) {
     // Cannot fit allocation in given PRBs
     log_h->error("SCHED: DL CQI=%d does now allow fitting %d non-segmentable DL tx bytes into the cell bandwidth. "
@@ -830,8 +836,8 @@ rbg_range_t sched_ue::get_required_dl_rbgs(uint32_t ue_cc_idx, uint32_t nof_ctrl
     return {cellparams->nof_prb(), cellparams->nof_prb()};
   }
   uint32_t min_pending_rbg = cellparams->prb_to_rbg(pending_prbs);
-  pending_prbs = carriers[ue_cc_idx].get_required_prb_dl(req_bytes.second, nof_ctrl_symbols);
-  pending_prbs = (pending_prbs < 0) ? cellparams->nof_prb() : pending_prbs;
+  pending_prbs = carriers[ue_cc_idx].get_required_prb_dl(req_bytes.second, cellparams->sched_cfg->max_nof_ctrl_symbols);
+  pending_prbs = (pending_prbs < 0) ? cellparams->nof_prb() : pending_prbs;
   uint32_t max_pending_rbg = cellparams->prb_to_rbg(pending_prbs);
   return {min_pending_rbg, max_pending_rbg};
 }