From f8b6eae2bf88fd89a6b92e0888937f9a7ec65a9e Mon Sep 17 00:00:00 2001 From: Francisco Date: Wed, 12 May 2021 19:30:42 +0100 Subject: [PATCH] use of min DL cqi across all PRBS to compute required DL PRBs --- srsenb/hdr/stack/mac/sched_ue.h | 2 +- .../stack/mac/sched_ue_ctrl/sched_dl_cqi.h | 24 ++++++++++++------- .../stack/mac/sched_ue_ctrl/sched_ue_cell.h | 2 +- srsenb/src/stack/mac/sched_ue.cc | 11 ++++----- .../stack/mac/sched_ue_ctrl/sched_dl_cqi.cc | 12 +++++++--- .../stack/mac/sched_ue_ctrl/sched_ue_cell.cc | 18 +++++++------- 6 files changed, 42 insertions(+), 27 deletions(-) diff --git a/srsenb/hdr/stack/mac/sched_ue.h b/srsenb/hdr/stack/mac/sched_ue.h index 91b31e551..55b6e66bc 100644 --- a/srsenb/hdr/stack/mac/sched_ue.h +++ b/srsenb/hdr/stack/mac/sched_ue.h @@ -153,7 +153,7 @@ private: tbs_info compute_mcs_and_tbs(uint32_t enb_cc_idx, tti_point tti_tx_dl, - uint32_t nof_alloc_prbs, + const rbgmask_t& rbgs, uint32_t cfi, const srsran_dci_dl_t& dci); diff --git a/srsenb/hdr/stack/mac/sched_ue_ctrl/sched_dl_cqi.h b/srsenb/hdr/stack/mac/sched_ue_ctrl/sched_dl_cqi.h index 1d54e0c7d..f071f4c50 100644 --- a/srsenb/hdr/stack/mac/sched_ue_ctrl/sched_dl_cqi.h +++ b/srsenb/hdr/stack/mac/sched_ue_ctrl/sched_dl_cqi.h @@ -41,12 +41,14 @@ public: srsran_assert(K <= 4, "K=%d outside of {0, 4}", K); } + /// Set K value from upper layers. See TS 36.331, CQI-ReportPeriodic void set_K(uint32_t K_) { srsran_assert(K <= 4, "K=%d outside of {0, 4}", K); K = K_; } + /// Update wideband CQI void cqi_wb_info(tti_point tti, uint32_t cqi_value) { if (cqi_value > 0) { @@ -57,6 +59,7 @@ public: wb_cqi_avg = static_cast(cqi_value); } + /// Update subband CQI for subband "sb_index" void cqi_sb_info(tti_point tti, uint32_t sb_index, uint32_t cqi_value) { if (cqi_value > 0) { @@ -95,7 +98,7 @@ public: return static_cast(wb_cqi_avg); } uint32_t sb_idx = rbg_to_sb_index(rbg); - return bp_list[get_bp_index(sb_idx)].last_feedback_tti.is_valid() ? 
subband_cqi[sb_idx] : wb_cqi_avg; + return get_subband_cqi_(sb_idx); } /// Get average CQI in given RBG interval @@ -107,7 +110,7 @@ public: float cqi = 0; uint32_t sbstart = rbg_to_sb_index(interv.start()), sbend = rbg_to_sb_index(interv.stop() - 1) + 1; for (uint32_t sb = sbstart; sb < sbend; ++sb) { - cqi += bp_list[get_bp_index(sb)].last_feedback_tti.is_valid() ? subband_cqi[sb] : wb_cqi_avg; + cqi += get_subband_cqi_(sb); } return static_cast(cqi / (sbend - sbstart)); } @@ -128,20 +131,20 @@ public: uint32_t count = 0; for (int rbg = mask.find_lowest(0, mask.size()); rbg != -1; rbg = mask.find_lowest(rbg, mask.size())) { uint32_t sb = rbg_to_sb_index(rbg); - cqi += bp_list[get_bp_index(sb)].last_feedback_tti.is_valid() ? subband_cqi[sb] : wb_cqi_avg; + cqi += get_subband_cqi_(sb); count++; rbg = static_cast(((sb + 1U) * cell_nof_rbg + N() - 1U) / N()); // skip to next subband index } return static_cast(cqi / count); } - /// Get CQI-optimal RBG mask - rbgmask_t get_optim_rbgmask(uint32_t req_rbgs) const + /// Get CQI-optimal RBG mask with at most "req_rbgs" RBGs + rbgmask_t get_optim_rbgmask(uint32_t req_rbgs, bool max_min_flag = true) const { rbgmask_t rbgmask(cell_nof_rbg); - return get_optim_rbgmask(rbgmask, req_rbgs); + return get_optim_rbgmask(rbgmask, req_rbgs, max_min_flag); } - rbgmask_t get_optim_rbgmask(const rbgmask_t& dl_mask, uint32_t req_rbgs) const; + rbgmask_t get_optim_rbgmask(const rbgmask_t& dl_mask, uint32_t req_rbgs, bool max_min_flag = true) const; /// TS 36.321, 7.2.2 - Parameter N uint32_t nof_subbands() const { return subband_cqi.size(); } @@ -172,7 +175,7 @@ private: uint32_t get_bp_index(uint32_t sb_index) const { return sb_index * J() / N(); } - uint32_t get_sb_index(uint32_t prb_index) const { return prb_index * N() / cell_nof_prb; } + uint32_t prb_to_sb_index(uint32_t prb_index) const { return prb_index * N() / cell_nof_prb; } uint32_t rbg_to_sb_index(uint32_t rbg_index) const { return rbg_index * N() / cell_nof_rbg; } @@ -181,6 
+184,11 @@ private: return srsran::interval{bp_idx * N() / J(), (bp_idx + 1) * N() / J()}; } + float get_subband_cqi_(uint32_t sb_idx) const + { + return bp_list[get_bp_index(sb_idx)].last_feedback_tti.is_valid() ? subband_cqi[sb_idx] : wb_cqi_avg; + } + uint32_t cell_nof_prb; uint32_t cell_nof_rbg; uint32_t K; ///< set in RRC diff --git a/srsenb/hdr/stack/mac/sched_ue_ctrl/sched_ue_cell.h b/srsenb/hdr/stack/mac/sched_ue_ctrl/sched_ue_cell.h index 4567480e7..cbcdd9fe2 100644 --- a/srsenb/hdr/stack/mac/sched_ue_ctrl/sched_ue_cell.h +++ b/srsenb/hdr/stack/mac/sched_ue_ctrl/sched_ue_cell.h @@ -90,7 +90,7 @@ private: /// Compute DL grant optimal TBS and MCS given UE cell context and DL grant parameters tbs_info cqi_to_tbs_dl(const sched_ue_cell& cell, - uint32_t nof_prb, + const rbgmask_t& rbgs, uint32_t nof_re, srsran_dci_format_t dci_format, int req_bytes = -1); diff --git a/srsenb/src/stack/mac/sched_ue.cc b/srsenb/src/stack/mac/sched_ue.cc index 395b4185c..0ae5d9fea 100644 --- a/srsenb/src/stack/mac/sched_ue.cc +++ b/srsenb/src/stack/mac/sched_ue.cc @@ -322,8 +322,7 @@ tbs_info sched_ue::allocate_new_dl_mac_pdu(sched::dl_sched_data_t* data, uint32_t tb) { srsran_dci_dl_t* dci = &data->dci; - uint32_t nof_prb = count_prb_per_tb(user_mask); - tbs_info tb_info = compute_mcs_and_tbs(enb_cc_idx, tti_tx_dl, nof_prb, cfi, *dci); + tbs_info tb_info = compute_mcs_and_tbs(enb_cc_idx, tti_tx_dl, user_mask, cfi, *dci); // Allocate MAC PDU (subheaders, CEs, and SDUS) int rem_tbs = tb_info.tbs_bytes; @@ -470,25 +469,25 @@ int sched_ue::generate_format1(uint32_t pid, * Based on the amount of tx data, allocated PRBs, DCI params, etc. 
compute a valid MCS and resulting TBS * @param enb_cc_idx user carrier index * @param tti_tx_dl tti when the tx will occur - * @param nof_alloc_prbs number of PRBs that were allocated + * @param rbg_mask mask of the RBGs allocated for the grant * @param cfi Number of control symbols in Subframe * @param dci contains the RBG mask, and alloc type * @return pair with MCS and TBS (in bytes) */ tbs_info sched_ue::compute_mcs_and_tbs(uint32_t enb_cc_idx, tti_point tti_tx_dl, - uint32_t nof_alloc_prbs, + const rbgmask_t& rbg_mask, uint32_t cfi, const srsran_dci_dl_t& dci) { - assert(cells[enb_cc_idx].configured()); + srsran_assert(cells[enb_cc_idx].configured(), "computation of MCS/TBS called for non-configured CC"); srsran::interval req_bytes = get_requested_dl_bytes(enb_cc_idx); // Calculate exact number of RE for this PRB allocation uint32_t nof_re = cells[enb_cc_idx].cell_cfg->get_dl_nof_res(tti_tx_dl, dci, cfi); // Compute MCS+TBS - tbs_info tb = cqi_to_tbs_dl(cells[enb_cc_idx], nof_alloc_prbs, nof_re, dci.format, req_bytes.stop()); + tbs_info tb = cqi_to_tbs_dl(cells[enb_cc_idx], rbg_mask, nof_re, dci.format, req_bytes.stop()); if (tb.tbs_bytes > 0 and tb.tbs_bytes < (int)req_bytes.start()) { logger.info("SCHED: Could not get PRB allocation that avoids MAC CE or RLC SRB0 PDU segmentation"); diff --git a/srsenb/src/stack/mac/sched_ue_ctrl/sched_dl_cqi.cc b/srsenb/src/stack/mac/sched_ue_ctrl/sched_dl_cqi.cc index f3702b332..8ca0e6fd1 100644 --- a/srsenb/src/stack/mac/sched_ue_ctrl/sched_dl_cqi.cc +++ b/srsenb/src/stack/mac/sched_ue_ctrl/sched_dl_cqi.cc @@ -15,7 +15,7 @@ using namespace srsenb; -rbgmask_t sched_dl_cqi::get_optim_rbgmask(const rbgmask_t& dl_mask, uint32_t req_rbgs) const +rbgmask_t sched_dl_cqi::get_optim_rbgmask(const rbgmask_t& dl_mask, uint32_t req_rbgs, bool max_flag) const { req_rbgs = std::min(req_rbgs, cell_nof_rbg); if (not subband_cqi_enabled()) { @@ -38,8 +38,14 @@ rbgmask_t sched_dl_cqi::get_optim_rbgmask(const rbgmask_t& dl_mask, uint32_t req
std::stable_sort(sorted_cqi_pos.begin(), sorted_cqi_pos.end(), [&sorted_cqis](uint32_t lhs, uint32_t rhs) { return sorted_cqis[lhs] > sorted_cqis[rhs]; }); - for (size_t i = req_rbgs; i < sorted_cqi_pos.size(); ++i) { - emptymask.set(sorted_cqi_pos[i], false); + if (max_flag) { + for (size_t i = req_rbgs; i < sorted_cqi_pos.size(); ++i) { + emptymask.set(sorted_cqi_pos[i], false); + } + } else { + for (size_t i = 0; i < sorted_cqi_pos.size() - req_rbgs; ++i) { + emptymask.set(sorted_cqi_pos[i], false); + } } return emptymask; diff --git a/srsenb/src/stack/mac/sched_ue_ctrl/sched_ue_cell.cc b/srsenb/src/stack/mac/sched_ue_ctrl/sched_ue_cell.cc index 3bfc5e2c7..de82796be 100644 --- a/srsenb/src/stack/mac/sched_ue_ctrl/sched_ue_cell.cc +++ b/srsenb/src/stack/mac/sched_ue_ctrl/sched_ue_cell.cc @@ -231,31 +231,32 @@ std::tuple false_position_method(int x1, int x2, YType y } tbs_info cqi_to_tbs_dl(const sched_ue_cell& cell, - uint32_t nof_prb, + const rbgmask_t& rbgs, uint32_t nof_re, srsran_dci_format_t dci_format, int req_bytes) { - bool use_tbs_index_alt = cell.get_ue_cfg()->use_tbs_index_alt and dci_format != SRSRAN_DCI_FORMAT1A; + bool use_tbs_index_alt = cell.get_ue_cfg()->use_tbs_index_alt and dci_format != SRSRAN_DCI_FORMAT1A; + uint32_t nof_prbs = count_prb_per_tb(rbgs); tbs_info ret; if (cell.fixed_mcs_dl < 0 or not cell.dl_cqi().is_cqi_info_received()) { // Dynamic MCS configured or first Tx - uint32_t dl_cqi_avg = cell.dl_cqi().get_grant_avg_cqi(prb_interval(0, nof_prb)); + uint32_t dl_cqi_avg = cell.dl_cqi().get_grant_avg_cqi(rbgs); ret = compute_min_mcs_and_tbs_from_required_bytes( - nof_prb, nof_re, dl_cqi_avg, cell.max_mcs_dl, req_bytes, false, false, use_tbs_index_alt); + nof_prbs, nof_re, dl_cqi_avg, cell.max_mcs_dl, req_bytes, false, false, use_tbs_index_alt); // If coderate > SRSRAN_MIN(max_coderate, 0.932 * Qm) we should set TBS=0. 
We don't because it's not correctly // handled by the scheduler, but we might be scheduling undecodable codewords at very low SNR if (ret.tbs_bytes < 0) { ret.mcs = 0; - ret.tbs_bytes = get_tbs_bytes((uint32_t)ret.mcs, nof_prb, use_tbs_index_alt, false); + ret.tbs_bytes = get_tbs_bytes((uint32_t)ret.mcs, nof_prbs, use_tbs_index_alt, false); } } else { // Fixed MCS configured ret.mcs = cell.fixed_mcs_dl; - ret.tbs_bytes = get_tbs_bytes((uint32_t)cell.fixed_mcs_dl, nof_prb, use_tbs_index_alt, false); + ret.tbs_bytes = get_tbs_bytes((uint32_t)cell.fixed_mcs_dl, nof_prbs, use_tbs_index_alt, false); } return ret; } @@ -293,8 +294,9 @@ int get_required_prb_dl(const sched_ue_cell& cell, uint32_t req_bytes) { auto compute_tbs_approx = [tti_tx_dl, &cell, dci_format](uint32_t nof_prb) { - uint32_t nof_re = cell.cell_cfg->get_dl_lb_nof_re(tti_tx_dl, nof_prb); - tbs_info tb = cqi_to_tbs_dl(cell, nof_prb, nof_re, dci_format, -1); + uint32_t nof_re = cell.cell_cfg->get_dl_lb_nof_re(tti_tx_dl, nof_prb); + rbgmask_t min_cqi_rbgs = cell.dl_cqi().get_optim_rbgmask(nof_prb, false); + tbs_info tb = cqi_to_tbs_dl(cell, min_cqi_rbgs, nof_re, dci_format, -1); return tb.tbs_bytes; };