diff --git a/lib/include/srslte/interfaces/sched_interface.h b/lib/include/srslte/interfaces/sched_interface.h
index 2aca48296..994727182 100644
--- a/lib/include/srslte/interfaces/sched_interface.h
+++ b/lib/include/srslte/interfaces/sched_interface.h
@@ -60,7 +60,6 @@ public:
   uint32_t min_nof_ctrl_symbols = 1;
   uint32_t max_nof_ctrl_symbols = 3;
   int max_aggr_level = 3;
-  int uci_mcs_dec = 3;
 };

 struct cell_cfg_t {
@@ -140,7 +139,7 @@ public:
   /* ue capabilities, etc */
   uint32_t maxharq_tx = 5;
   bool continuous_pusch = false;
-  srslte_uci_offset_cfg_t uci_offset = {};
+  srslte_uci_offset_cfg_t uci_offset = {15, 12, 10};
   srslte_pucch_cfg_t pucch_cfg = {};
   std::array ue_bearers = {};
   std::vector supported_cc_list; ///< list of UE supported CCs. First index for PCell
diff --git a/lib/include/srslte/phy/phch/sch.h b/lib/include/srslte/phy/phch/sch.h
index 858906837..aff76f7bd 100644
--- a/lib/include/srslte/phy/phch/sch.h
+++ b/lib/include/srslte/phy/phch/sch.h
@@ -116,6 +116,8 @@ SRSLTE_API int srslte_ulsch_decode(srslte_sch_t* q,
 SRSLTE_API float srslte_sch_beta_cqi(uint32_t I_cqi);

+SRSLTE_API float srslte_sch_beta_ack(uint32_t I_harq);
+
 SRSLTE_API uint32_t srslte_sch_find_Ioffset_ack(float beta);

 SRSLTE_API uint32_t srslte_sch_find_Ioffset_cqi(float beta);
diff --git a/lib/include/srslte/phy/phch/uci.h b/lib/include/srslte/phy/phch/uci.h
index ea4544195..ab9c366f7 100644
--- a/lib/include/srslte/phy/phch/uci.h
+++ b/lib/include/srslte/phy/phch/uci.h
@@ -150,6 +150,17 @@ SRSLTE_API int srslte_uci_decode_ack_ri(srslte_pusch_cfg_t* cfg,
                                         uint32_t nof_bits,
                                         bool is_ri);

+/**
+ * Calculates the maximum number of coded symbols used by CQI-UCI over PUSCH
+ */
+SRSLTE_API uint32_t srslte_qprime_cqi_ext(uint32_t L_prb, uint32_t nof_symbols, uint32_t tbs, float beta);
+
+/**
+ * Calculates the maximum number of coded symbols used by ACK/RI over PUSCH
+ */
+SRSLTE_API uint32_t
+srslte_qprime_ack_ext(uint32_t L_prb, uint32_t nof_symbols, uint32_t tbs, uint32_t nof_ack, float beta);
+
 /**
  * Calculates the number of acknowledgements carried by the Uplink Control Information (UCI) deduced from the number of
  * transport blocks indicated in the UCI's configuration.
diff --git a/lib/src/phy/common/phy_common.c b/lib/src/phy/common/phy_common.c
index 5fe1505bd..148abbef9 100644
--- a/lib/src/phy/common/phy_common.c
+++ b/lib/src/phy/common/phy_common.c
@@ -303,7 +303,7 @@ uint32_t srslte_N_ta_new(uint32_t N_ta_old, uint32_t ta)

 float srslte_coderate(uint32_t tbs, uint32_t nof_re)
 {
-  return (float)(tbs + 24) / (nof_re);
+  return (float)tbs / nof_re;
 }

 /* Returns the new time advance as indicated by the random access response
diff --git a/lib/src/phy/phch/sch.c b/lib/src/phy/phch/sch.c
index 6ca588ffe..868a0a35f 100644
--- a/lib/src/phy/phch/sch.c
+++ b/lib/src/phy/phch/sch.c
@@ -95,6 +95,15 @@ float srslte_sch_beta_cqi(uint32_t I_cqi)
   }
 }

+float srslte_sch_beta_ack(uint32_t I_harq)
+{
+  if (I_harq < 16) {
+    return get_beta_harq_offset(I_harq);
+  } else {
+    return 0;
+  }
+}
+
 uint32_t srslte_sch_find_Ioffset_ack(float beta)
 {
   for (int i = 0; i < 16; i++) {
diff --git a/lib/src/phy/phch/uci.c b/lib/src/phy/phch/uci.c
index 8c9ceefad..1238b18ff 100644
--- a/lib/src/phy/phch/uci.c
+++ b/lib/src/phy/phch/uci.c
@@ -344,6 +344,16 @@ static uint32_t Q_prime_cqi(srslte_pusch_cfg_t* cfg, uint32_t O, float beta, uin
   return Q_prime;
 }

+uint32_t srslte_qprime_cqi_ext(uint32_t L_prb, uint32_t nof_symbols, uint32_t tbs, float beta)
+{
+  srslte_pusch_cfg_t cfg = {};
+  cfg.grant.L_prb = L_prb;
+  cfg.grant.nof_symb = nof_symbols;
+  cfg.K_segm = tbs;
+  // O is the number of CQI + CRC len (8). See 5.2.2.6
+  return Q_prime_cqi(&cfg, SRSLTE_UCI_CQI_CODED_PUCCH_B + 8, beta, 0);
+}
+
 /* Encode UCI CQI/PMI for payloads equal or lower to 11 bits (Sec 5.2.2.6.4) */
 int encode_cqi_short(srslte_uci_cqi_pusch_t* q, uint8_t* data, uint32_t nof_bits, uint8_t* q_bits, uint32_t Q)
@@ -620,6 +630,15 @@ static uint32_t Q_prime_ri_ack(srslte_pusch_cfg_t* cfg, uint32_t O, uint32_t O_c
   return Q_prime;
 }

+uint32_t srslte_qprime_ack_ext(uint32_t L_prb, uint32_t nof_symbols, uint32_t tbs, uint32_t nof_ack, float beta)
+{
+  srslte_pusch_cfg_t cfg = {};
+  cfg.grant.L_prb = L_prb;
+  cfg.grant.nof_symb = nof_symbols;
+  cfg.K_segm = tbs;
+  return Q_prime_ri_ack(&cfg, nof_ack, 0, beta);
+}
+
 static uint32_t encode_ri_ack(const uint8_t data[2], uint32_t O_ack, uint8_t Qm, srslte_uci_bit_t* q_encoded_bits)
 {
   uint32_t i = 0;
diff --git a/srsenb/hdr/stack/mac/scheduler_grid.h b/srsenb/hdr/stack/mac/scheduler_grid.h
index 90d81b8fd..fb2878871 100644
--- a/srsenb/hdr/stack/mac/scheduler_grid.h
+++ b/srsenb/hdr/stack/mac/scheduler_grid.h
@@ -257,7 +257,7 @@ public:
     type_t type;
     sched_ue* user_ptr;
     prb_interval alloc;
-    uint32_t mcs = 0;
+    int msg3_mcs = -1;
     bool is_retx() const { return type == NOADAPT_RETX or type == ADAPT_RETX; }
     bool is_msg3() const { return type == MSG3; }
     bool needs_pdcch() const { return type == NEWTX or type == ADAPT_RETX; }
@@ -290,7 +290,7 @@ public:
   // UL alloc methods
   alloc_outcome_t alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant);
-  alloc_outcome_t alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, uint32_t mcs = 0);
+  alloc_outcome_t alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, int msg3_mcs = -1);
   bool reserve_ul_prbs(const prbmask_t& ulmask, bool strict) { return tti_alloc.reserve_ul_prbs(ulmask, strict); }
   bool alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_sf_result);
diff --git a/srsenb/hdr/stack/mac/scheduler_ue.h b/srsenb/hdr/stack/mac/scheduler_ue.h
index aa82c6207..f30de2a1b 100644
--- a/srsenb/hdr/stack/mac/scheduler_ue.h
+++ b/srsenb/hdr/stack/mac/scheduler_ue.h
@@ -33,6 +33,8 @@ namespace srsenb {

+typedef enum { UCI_PUSCH_NONE = 0, UCI_PUSCH_CQI, UCI_PUSCH_ACK, UCI_PUSCH_ACK_CQI } uci_pusch_t;
+
 struct cc_sched_ue {
   const static int SCHED_MAX_HARQ_PROC = FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS;
@@ -52,6 +54,7 @@ struct cc_sched_ue {
   const sched_cell_params_t* get_cell_cfg() const { return cell_params; }
   bool is_active() const { return active; }
   void set_dl_cqi(uint32_t tti_tx_dl, uint32_t dl_cqi);
+  int cqi_to_tbs(uint32_t nof_prb, uint32_t nof_re, bool use_tbs_index_alt, bool is_ul, uint32_t* mcs);

   harq_entity harq_ent;
@@ -65,6 +68,9 @@ struct cc_sched_ue {
   uint32_t ul_cqi_tti = 0;
   bool dl_cqi_rx = false;

+  // Enables or disables uplink 64QAM. Not yet functional.
+  bool ul_64qam_enabled = false;
+
   uint32_t max_mcs_dl = 28, max_mcs_dl_alt = 27, max_mcs_ul = 28;
   uint32_t max_aggr_level = 3;
   int fixed_mcs_ul = 0, fixed_mcs_dl = 0;
@@ -209,7 +215,7 @@ public:
                        bool needs_pdcch,
                        srslte_dci_location_t cce_range,
                        int explicit_mcs = -1,
-                       bool carriers_uci = false);
+                       uci_pusch_t uci_type = UCI_PUSCH_NONE);
   srslte_dci_format_t get_dci_format();
   sched_dci_cce_t* get_locations(uint32_t enb_cc_idx, uint32_t current_cfi, uint32_t sf_idx);
@@ -220,15 +226,6 @@ public:

   bool pucch_sr_collision(uint32_t current_tti, uint32_t n_cce);

-  static int cqi_to_tbs(uint32_t cqi,
-                        uint32_t nof_prb,
-                        uint32_t nof_re,
-                        uint32_t max_mcs,
-                        uint32_t max_Qm,
-                        bool use_tbs_index_alt,
-                        bool is_ul,
-                        uint32_t* mcs);
-
 private:
   bool is_sr_triggered();
diff --git a/srsenb/src/main.cc b/srsenb/src/main.cc
index 18d839062..2d03d9045 100644
--- a/srsenb/src/main.cc
+++ b/srsenb/src/main.cc
@@ -139,7 +139,6 @@ void parse_args(all_args_t* args, int argc, char* argv[])
     ("scheduler.max_aggr_level", bpo::value(&args->stack.mac.sched.max_aggr_level)->default_value(-1), "Optional maximum aggregation level index (l=log2(L)) ")
     ("scheduler.max_nof_ctrl_symbols", bpo::value(&args->stack.mac.sched.max_nof_ctrl_symbols)->default_value(3), "Number of control symbols")
     ("scheduler.min_nof_ctrl_symbols", bpo::value(&args->stack.mac.sched.min_nof_ctrl_symbols)->default_value(1), "Minimum number of control symbols")
-    ("scheduler.mcs_uci_dec", bpo::value(&args->stack.mac.sched.uci_mcs_dec)->default_value(3), "Decrement of MCS in case UL grant carries UCI.")

     /* Downlink Channel emulator section */
     ("channel.dl.enable", bpo::value(&args->phy.dl_channel_args.enable)->default_value(false), "Enable/Disable internal Downlink channel emulator")
diff --git a/srsenb/src/stack/mac/scheduler_grid.cc b/srsenb/src/stack/mac/scheduler_grid.cc
index f8ddb170b..87ba51990 100644
--- a/srsenb/src/stack/mac/scheduler_grid.cc
+++ b/srsenb/src/stack/mac/scheduler_grid.cc
@@ -796,7 +796,7 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
   return alloc_outcome_t::SUCCESS;
 }

-alloc_outcome_t sf_sched::alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, uint32_t mcs)
+alloc_outcome_t sf_sched::alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, int msg3_mcs)
 {
   // Check whether user was already allocated
   if (is_ul_alloc(user)) {
@@ -816,7 +816,7 @@ alloc_outcome_t sf_sched::alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_
   ul_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
   ul_alloc.user_ptr = user;
   ul_alloc.alloc = alloc;
-  ul_alloc.mcs = mcs;
+  ul_alloc.msg3_mcs = msg3_mcs;
   ul_data_allocs.push_back(ul_alloc);

   return alloc_outcome_t::SUCCESS;
@@ -1031,19 +1031,21 @@ void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_
 }

 //! Finds eNB CC Idex that currently holds UCI
-int get_enb_cc_idx_with_uci(const sf_sched* sf_sched,
+uci_pusch_t is_uci_included(const sf_sched* sf_sched,
                             const sf_sched_result& other_cc_results,
                             const sched_ue* user,
                             uint32_t current_enb_cc_idx)
 {
+  uci_pusch_t uci_alloc = UCI_PUSCH_NONE;
+
   if (not user->get_cell_index(current_enb_cc_idx).first) {
-    return -1;
+    return UCI_PUSCH_NONE;
   }

   // Check if UCI needs to be allocated
-  bool needs_uci = false;
   const sched_interface::ue_cfg_t& ue_cfg = user->get_ue_cfg();
-  for (uint32_t enbccidx = 0; enbccidx < other_cc_results.enb_cc_list.size() and not needs_uci; ++enbccidx) {
+  for (uint32_t enbccidx = 0; enbccidx < other_cc_results.enb_cc_list.size() and uci_alloc != UCI_PUSCH_ACK_CQI;
+       ++enbccidx) {
     auto p = user->get_cell_index(enbccidx);
     if (not p.first) {
       continue;
     }
@@ -1052,26 +1054,38 @@ int get_enb_cc_idx_with_uci(const sf_sched* sf_sched,

     // Check if CQI is pending for this CC
     const srslte_cqi_report_cfg_t& cqi_report = ue_cfg.supported_cc_list[ueccidx].dl_cfg.cqi_report;
-    needs_uci = srslte_cqi_periodic_send(&cqi_report, sf_sched->get_tti_tx_ul(), SRSLTE_FDD);
-    if (needs_uci) {
-      break;
+    if (srslte_cqi_periodic_send(&cqi_report, sf_sched->get_tti_tx_ul(), SRSLTE_FDD)) {
+      if (uci_alloc == UCI_PUSCH_ACK) {
+        uci_alloc = UCI_PUSCH_ACK_CQI;
+      } else {
+        uci_alloc = UCI_PUSCH_CQI;
+      }
     }

     // Check if DL alloc is pending
+    bool needs_ack_uci = false;
     if (enbccidx == current_enb_cc_idx) {
-      needs_uci = sf_sched->is_dl_alloc(user);
+      needs_ack_uci = sf_sched->is_dl_alloc(user);
     } else {
       auto& dl_result = other_cc_results.enb_cc_list[enbccidx].dl_sched_result;
       for (uint32_t j = 0; j < dl_result.nof_data_elems; ++j) {
         if (dl_result.data[j].dci.rnti == user->get_rnti()) {
-          needs_uci = true;
+          needs_ack_uci = true;
           break;
         }
       }
     }
+    if (needs_ack_uci) {
+      if (uci_alloc == UCI_PUSCH_CQI) {
+        // Once we include ACK and CQI, stop the search
+        uci_alloc = UCI_PUSCH_ACK_CQI;
+      } else {
+        uci_alloc = UCI_PUSCH_ACK;
+      }
+    }
   }
-  if (not needs_uci) {
-    return -1;
+  if (uci_alloc == UCI_PUSCH_NONE) {
+    return uci_alloc;
   }

   // If UL grant allocated in current carrier
@@ -1095,7 +1109,11 @@ int get_enb_cc_idx_with_uci(const sf_sched* sf_sched,
       }
     }
   }
-  return sel_enb_cc_idx;
+  if (sel_enb_cc_idx == (int)current_enb_cc_idx) {
+    return uci_alloc;
+  } else {
+    return UCI_PUSCH_NONE;
+  }
 }

 void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result,
@@ -1113,20 +1131,23 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
     cce_range = dci_result[ul_alloc.dci_idx]->dci_pos;
   }

-  // Set fixed mcs if specified
-  int fixed_mcs = (ul_alloc.type == ul_alloc_t::MSG3) ? ul_alloc.mcs : -1;
-  // If UCI is encoded in the current carrier
-  int uci_enb_cc_idx = get_enb_cc_idx_with_uci(this, *cc_results, user, cc_cfg->enb_cc_idx);
-  bool carries_uci = uci_enb_cc_idx == (int)cc_cfg->enb_cc_idx;
+  uci_pusch_t uci_type = is_uci_included(this, *cc_results, user, cc_cfg->enb_cc_idx);

   /* Generate DCI Format1A */
   uint32_t pending_data_before = user->get_pending_ul_new_data(get_tti_tx_ul());
-  int tbs = user->generate_format0(
-      pusch, get_tti_tx_ul(), cell_index, ul_alloc.alloc, ul_alloc.needs_pdcch(), cce_range, fixed_mcs, carries_uci);
+  int tbs = user->generate_format0(pusch,
+                                   get_tti_tx_ul(),
+                                   cell_index,
+                                   ul_alloc.alloc,
+                                   ul_alloc.needs_pdcch(),
+                                   cce_range,
+                                   ul_alloc.msg3_mcs,
+                                   uci_type);

   ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cell_index);
-  if (tbs <= 0) {
+  // Allow TBS=0 in case of UCI-only PUSCH
+  if (tbs < 0 || (tbs == 0 && pusch->dci.tb.mcs_idx != 29)) {
     log_h->warning("SCHED: Error %s %s rnti=0x%x, pid=%d, dci=(%d,%d), prb=%s, bsr=%d\n",
                    ul_alloc.type == ul_alloc_t::MSG3 ? "Msg3" : "UL",
                    ul_alloc.is_retx() ? "retx" : "tx",
diff --git a/srsenb/src/stack/mac/scheduler_ue.cc b/srsenb/src/stack/mac/scheduler_ue.cc
index ee68b01f7..f1008050b 100644
--- a/srsenb/src/stack/mac/scheduler_ue.cc
+++ b/srsenb/src/stack/mac/scheduler_ue.cc
@@ -670,7 +670,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
                                bool needs_pdcch,
                                srslte_dci_location_t dci_pos,
                                int explicit_mcs,
-                               bool carries_uci)
+                               uci_pusch_t uci_type)
 {
   ul_harq_proc* h = get_ul_harq(tti, cc_idx);
   srslte_dci_ul_t* dci = &data->dci;
@@ -697,43 +697,81 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
       // dynamic mcs
       uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti);
       uint32_t N_srs = 0;
-      uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs) * alloc.length() * SRSLTE_NRE;
+      uint32_t nof_symb = 2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs;
+      uint32_t nof_re = nof_symb * alloc.length() * SRSLTE_NRE;
       tbs = carriers[cc_idx].alloc_tbs_ul(alloc.length(), nof_re, req_bytes, &mcs);
-      if (carries_uci) {
-        // Reduce MCS to fit UCI
-        mcs -= std::min(main_cc_params->sched_cfg->uci_mcs_dec, mcs);
-        tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, false, true), alloc.length()) / 8;
+      // Reduce MCS to fit UCI if transmitted in this grant
+      if (uci_type != UCI_PUSCH_NONE) {
+        // Calculate an approximation of the number of RE used by UCI
+        uint32_t nof_uci_re = 0;
+        // Add the RE for ACK
+        if (uci_type == UCI_PUSCH_ACK || uci_type == UCI_PUSCH_ACK_CQI) {
+          float beta = srslte_sch_beta_ack(cfg.uci_offset.I_offset_ack);
+          nof_uci_re += srslte_qprime_ack_ext(alloc.length(), nof_symb, 8 * tbs, carriers.size(), beta);
+        }
+        // Add the RE for CQI report (RI reports are transmitted on CQI slots. We do a conservative estimate here)
+        if (uci_type == UCI_PUSCH_CQI || uci_type == UCI_PUSCH_ACK_CQI || cqi_request) {
+          float beta = srslte_sch_beta_cqi(cfg.uci_offset.I_offset_cqi);
+          nof_uci_re += srslte_qprime_cqi_ext(alloc.length(), nof_symb, 8 * tbs, beta);
+        }
+        // Recompute the MCS and TBS with the new spectral efficiency (based on the available RE for data)
+        if (nof_re >= nof_uci_re) {
+          tbs = carriers[cc_idx].alloc_tbs_ul(alloc.length(), nof_re - nof_uci_re, req_bytes, &mcs);
+        } else {
+          tbs = 0;
+        }
       }
     }
     h->new_tx(tti, mcs, tbs, alloc, nof_retx);
-
-    // Un-trigger SR
-    unset_sr();
+    // Un-trigger the SR if data is allocated
+    if (tbs > 0) {
+      unset_sr();
+    }
   } else {
     // retx
     h->new_retx(0, tti, &mcs, nullptr, alloc);
     tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, false, true), alloc.length()) / 8;
   }

-  data->tbs = tbs;
-
-  if (tbs > 0) {
-    dci->rnti = rnti;
-    dci->format = SRSLTE_DCI_FORMAT0;
-    dci->ue_cc_idx = cc_idx;
-    dci->type2_alloc.riv = srslte_ra_type2_to_riv(alloc.length(), alloc.start(), cell.nof_prb);
-    dci->tb.rv = sched_utils::get_rvidx(h->nof_retx(0));
-    if (!is_newtx && h->is_adaptive_retx()) {
-      dci->tb.mcs_idx = 28 + dci->tb.rv;
-    } else {
-      dci->tb.mcs_idx = mcs;
-    }
+  if (tbs >= 0) {
+    data->tbs = tbs;
+    dci->rnti = rnti;
+    dci->format = SRSLTE_DCI_FORMAT0;
+    dci->ue_cc_idx = cc_idx;
     dci->tb.ndi = h->get_ndi(0);
     dci->cqi_request = cqi_request;
     dci->freq_hop_fl = srslte_dci_ul_t::SRSLTE_RA_PUSCH_HOP_DISABLED;
     dci->tpc_pusch = next_tpc_pusch;
     next_tpc_pusch = 1;
+
+    dci->type2_alloc.riv = srslte_ra_type2_to_riv(alloc.length(), alloc.start(), cell.nof_prb);
+
+    // If there are no RE available for ULSCH but there is UCI to transmit, allocate PUSCH because
+    // resources have been reserved already and in CA it will be used to ACK other carriers
+    if (tbs == 0 && (cqi_request || uci_type != UCI_PUSCH_NONE)) {
+      // 8.6.1 and 8.6.2 36.213 second paragraph
+      dci->cqi_request = true;
+      dci->tb.mcs_idx = 29;
+      dci->tb.rv = 0; // No data is being transmitted
+
+      // Empty TBS PUSCH only accepts a maximum of 4 PRB. Resize the grant. This doesn't affect the MCS selection
+      // because there is no TB in this grant
+      if (alloc.length() > 4) {
+        alloc.set(alloc.start(), alloc.start() + 4);
+      }
+    } else if (tbs > 0) {
+      dci->tb.rv = sched_utils::get_rvidx(h->nof_retx(0));
+      if (!is_newtx && h->is_adaptive_retx()) {
+        dci->tb.mcs_idx = 28 + dci->tb.rv;
+      } else {
+        dci->tb.mcs_idx = mcs;
+      }
+    } else if (tbs == 0) {
+      log_h->warning("SCHED: No space for ULSCH while allocating format0. Discarding grant.\n");
+    } else {
+      log_h->error("SCHED: Unknown error while allocating format0\n");
+    }
   }

   return tbs;
@@ -1096,21 +1134,19 @@ cc_sched_ue* sched_ue::get_ue_carrier(uint32_t enb_cc_idx)
   return &carriers[p.second];
 }

-int sched_ue::cqi_to_tbs(uint32_t cqi,
-                         uint32_t nof_prb,
-                         uint32_t nof_re,
-                         uint32_t max_mcs,
-                         uint32_t max_Qm,
-                         bool use_tbs_index_alt,
-                         bool is_ul,
-                         uint32_t* mcs)
+int cc_sched_ue::cqi_to_tbs(uint32_t nof_prb, uint32_t nof_re, bool use_tbs_index_alt, bool is_ul, uint32_t* mcs)
 {
-  float max_coderate = srslte_cqi_to_coderate(cqi);
+
+  uint32_t cqi = is_ul ? ul_cqi : dl_cqi;
+  uint32_t max_mcs = is_ul ? max_mcs_ul : (cfg->use_tbs_index_alt) ? max_mcs_dl_alt : max_mcs_dl;
+  uint32_t max_Qm = is_ul and not ul_64qam_enabled ? 4 : 6; //< TODO: Allow PUSCH 64QAM
+
+  // Take the upper bound code-rate
+  float max_coderate = srslte_cqi_to_coderate(cqi < 15 ? cqi + 1 : 15);
   int sel_mcs = max_mcs + 1;
   float coderate = 99;
-  float eff_coderate = 99;
-  uint32_t Qm = 1;
   int tbs = 0;
+  uint32_t Qm = 0;

   do {
     sel_mcs--;
@@ -1119,15 +1155,18 @@ int sched_ue::cqi_to_tbs(uint32_t cqi,
     coderate = srslte_coderate(tbs, nof_re);
     srslte_mod_t mod =
         (is_ul) ? srslte_ra_ul_mod_from_mcs(sel_mcs) : srslte_ra_dl_mod_from_mcs(sel_mcs, use_tbs_index_alt);
-    Qm = SRSLTE_MIN(max_Qm, srslte_mod_bits_x_symbol(mod));
-    eff_coderate = coderate / Qm;
-  } while ((sel_mcs > 0 && coderate > max_coderate) || eff_coderate > 0.930);
+    Qm = SRSLTE_MIN(max_Qm, srslte_mod_bits_x_symbol(mod));
+  } while (sel_mcs > 0 && coderate > SRSLTE_MIN(max_coderate, 0.930 * Qm));

   if (mcs != nullptr) {
     *mcs = (uint32_t)sel_mcs;
   }
-  return tbs;
+  if (coderate <= SRSLTE_MIN(max_coderate, 0.930 * Qm)) {
+    return tbs;
+  } else {
+    return 0;
+  }
 }

 /************************************************************************************************
@@ -1223,13 +1262,8 @@ int cc_sched_ue::alloc_tbs(uint32_t nof_prb, uint32_t nof_re, uint32_t req_bytes
 {
   uint32_t sel_mcs = 0;

-  uint32_t cqi = is_ul ? ul_cqi : dl_cqi;
-  uint32_t max_mcs = is_ul ? max_mcs_ul : (cfg->use_tbs_index_alt) ? max_mcs_dl_alt : max_mcs_dl;
-  uint32_t max_Qm = is_ul ? 4 : 6; // Allow 16-QAM in PUSCH Only
-  // TODO: Compute real spectral efficiency based on PUSCH-UCI configuration
-  int tbs_bytes =
-      sched_ue::cqi_to_tbs(cqi, nof_prb, nof_re, max_mcs, max_Qm, cfg->use_tbs_index_alt, is_ul, &sel_mcs) / 8;
+  int tbs_bytes = cqi_to_tbs(nof_prb, nof_re, cfg->use_tbs_index_alt, is_ul, &sel_mcs) / 8;

   /* If less bytes are requested, lower the MCS */
   if (tbs_bytes > (int)req_bytes && req_bytes > 0) {
@@ -1301,6 +1335,7 @@ uint32_t cc_sched_ue::get_required_prb_ul(uint32_t req_bytes)
     return 0;
   }

+  uint32_t last_valid_n = 0;
   for (n = 1; n < cell_params->nof_prb() && nbytes < req_bytes + 4; n++) {
     uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell_params->cfg.cell.cp) - 1) - N_srs) * n * SRSLTE_NRE;
     int tbs = 0;
@@ -1310,15 +1345,24 @@ uint32_t cc_sched_ue::get_required_prb_ul(uint32_t req_bytes)
       tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_ul, false, true), n) / 8;
     }
     if (tbs > 0) {
-      nbytes = tbs;
+      nbytes = tbs;
+      last_valid_n = n;
     }
   }

-  while (!srslte_dft_precoding_valid_prb(n) && n <= cell_params->nof_prb()) {
-    n++;
+  if (last_valid_n > 0) {
+    if (n != last_valid_n) {
+      n = last_valid_n;
+    }
+    while (!srslte_dft_precoding_valid_prb(n) && n <= cell_params->nof_prb()) {
+      n++;
+    }
+    return n;
+  } else {
+    // This should never happen. Just in case, return 0 PRB and handle it later
+    log_h->error("SCHED: Could not obtain any valid number of PRB for an uplink allocation\n");
+    return 0;
   }
-
-  return n;
 }

 void cc_sched_ue::set_dl_cqi(uint32_t tti_tx_dl, uint32_t dl_cqi_)
diff --git a/srsenb/src/stack/rrc/mac_controller.cc b/srsenb/src/stack/rrc/mac_controller.cc
index 719b065ea..b3398ef8f 100644
--- a/srsenb/src/stack/rrc/mac_controller.cc
+++ b/srsenb/src/stack/rrc/mac_controller.cc
@@ -107,6 +107,11 @@ int rrc::ue::mac_controller::apply_basic_conn_cfg(const asn1::rrc::rr_cfg_ded_s&
   current_sched_ue_cfg.pucch_cfg.n_rb_2 = sib2.rr_cfg_common.pucch_cfg_common.nrb_cqi;
   current_sched_ue_cfg.pucch_cfg.N_pucch_1 = sib2.rr_cfg_common.pucch_cfg_common.n1_pucch_an;

+  // PUSCH UCI configuration
+  current_sched_ue_cfg.uci_offset.I_offset_cqi = rrc_cfg->pusch_cfg.beta_offset_cqi_idx;
+  current_sched_ue_cfg.uci_offset.I_offset_ack = rrc_cfg->pusch_cfg.beta_offset_ack_idx;
+  current_sched_ue_cfg.uci_offset.I_offset_ri = rrc_cfg->pusch_cfg.beta_offset_ri_idx;
+
   // Configure MAC
   // In case of RRC Connection Setup/Reest message (Msg4), we need to resolve the contention by sending a ConRes CE
   mac->phy_config_enabled(rrc_ue->rnti, false);
@@ -145,6 +150,8 @@ void rrc::ue::mac_controller::handle_con_reconf(const asn1::rrc::rrc_conn_recfg_
     handle_con_reconf_with_mobility();
   }

+  // Assume the rest of the parameters in current_sched_ue_cfg do not change in a Reconfig
+
   // Apply changes to MAC scheduler
   mac->ue_cfg(rrc_ue->rnti, &current_sched_ue_cfg);
   mac->phy_config_enabled(rrc_ue->rnti, false);
diff --git a/srsenb/test/mac/scheduler_ca_test.cc b/srsenb/test/mac/scheduler_ca_test.cc
index 8ae327782..0dc2971fb 100644
--- a/srsenb/test/mac/scheduler_ca_test.cc
+++ b/srsenb/test/mac/scheduler_ca_test.cc
@@ -115,7 +115,7 @@ int test_scell_activation(test_scell_activation_params params)
   sim_args.default_ue_sim_cfg.ue_cfg.supported_cc_list[0].active = true;
   sim_args.default_ue_sim_cfg.ue_cfg.supported_cc_list[0].enb_cc_idx = cc_idxs[0];
   sim_args.default_ue_sim_cfg.ue_cfg.supported_cc_list[0].dl_cfg.cqi_report.periodic_configured = true;
-  sim_args.default_ue_sim_cfg.ue_cfg.supported_cc_list[0].dl_cfg.cqi_report.pmi_idx = 0;
+  sim_args.default_ue_sim_cfg.ue_cfg.supported_cc_list[0].dl_cfg.cqi_report.pmi_idx = 37;

   /* Simulation Objects Setup */
   sched_sim_event_generator generator;
@@ -225,11 +225,16 @@ int test_scell_activation(test_scell_activation_params params)
   }
   generate_data(10, 1.0, 1.0, 1.0);
   tester.test_next_ttis(generator.tti_events);
+  uint64_t tot_dl_sched_data = 0;
+  uint64_t tot_ul_sched_data = 0;
   for (const auto& c : cc_idxs) {
-    TESTASSERT(tester.sched_stats->users[rnti1].tot_dl_sched_data[c] > 0);
-    TESTASSERT(tester.sched_stats->users[rnti1].tot_ul_sched_data[c] > 0);
+    tot_dl_sched_data += tester.sched_stats->users[rnti1].tot_dl_sched_data[c];
+    tot_ul_sched_data += tester.sched_stats->users[rnti1].tot_ul_sched_data[c];
   }
+  TESTASSERT(tot_dl_sched_data > 0);
+  TESTASSERT(tot_ul_sched_data > 0);
+
   log_global->info("[TESTER] Sim1 finished successfully\n");
   return SRSLTE_SUCCESS;
 }
diff --git a/srsenb/test/mac/scheduler_test_common.cc b/srsenb/test/mac/scheduler_test_common.cc
index 49daa823a..7b95a3d20 100644
--- a/srsenb/test/mac/scheduler_test_common.cc
+++ b/srsenb/test/mac/scheduler_test_common.cc
@@ -247,7 +247,7 @@ int output_sched_tester::test_dci_values_consistency(const sched_interface::dl_s
 {
   for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) {
     const auto& pusch = ul_result.pusch[i];
-    CONDERROR(pusch.tbs == 0, "Allocated RAR process with invalid TBS=%d\n", pusch.tbs);
+    CONDERROR(pusch.tbs == 0, "Allocated PUSCH with invalid TBS=%d\n", pusch.tbs);
     // CONDERROR(ue_db.count(pusch.dci.rnti) == 0, "The allocated rnti=0x%x does not exist\n", pusch.dci.rnti);
     if (not pusch.needs_pdcch) {
       // In case of non-adaptive retx or Msg3
@@ -643,7 +643,9 @@ int ue_ctxt_test::test_harqs(cc_result result)
       // non-adaptive retx
       CONDERROR(pusch.dci.type2_alloc.riv != h.riv, "Non-adaptive retx must keep the same riv\n");
     }
-    CONDERROR(sched_utils::get_rvidx(h.nof_retxs + 1) != (uint32_t)pusch.dci.tb.rv, "Invalid rv index for retx\n");
+    if (pusch.tbs > 0) {
+      CONDERROR(sched_utils::get_rvidx(h.nof_retxs + 1) != (uint32_t)pusch.dci.tb.rv, "Invalid rv index for retx\n");
+    }
     CONDERROR(h.ndi != pusch.dci.tb.ndi, "Invalid ndi for retx\n");
     CONDERROR(not h.active, "retx for inactive UL harq pid=%d\n", h.pid);
     CONDERROR(h.tti_tx > current_tti_rx, "UL harq pid=%d was reused too soon\n", h.pid);
diff --git a/srsenb/test/mac/scheduler_test_rand.cc b/srsenb/test/mac/scheduler_test_rand.cc
index 65b2fe9ec..2d693a58c 100644
--- a/srsenb/test/mac/scheduler_test_rand.cc
+++ b/srsenb/test/mac/scheduler_test_rand.cc
@@ -261,7 +261,6 @@ int sched_tester::test_pdcch_collisions()
     std::string mask_str = cc_result->pdcch_mask.to_string();
     TESTERROR("The used_cce do not match: (%s!=%s)\n", mask_str.c_str(), used_cce.to_string().c_str());
   }
-  // TODO: Check postponed retxs
   // typedef std::map::iterator it_t;
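
For context, the PHY helpers exported by this patch (srslte_sch_beta_ack, srslte_sch_beta_cqi, srslte_qprime_ack_ext, srslte_qprime_cqi_ext) can be combined outside the MAC to reproduce the UCI-overhead estimate that the new branch in sched_ue::generate_format0() performs. The standalone C sketch below shows one way to do that; the wrapper name estimate_ulsch_re, the zero-SRS assumption and the argument values in the comments are illustrative only and are not part of the patch.

#include <stdbool.h>
#include <stdint.h>

#include "srslte/phy/common/phy_common.h"
#include "srslte/phy/phch/sch.h"
#include "srslte/phy/phch/uci.h"

// Illustrative wrapper (not part of the patch): estimate how many REs of a PUSCH
// grant remain for UL-SCH data once the UCI carried in the same grant is accounted for.
static uint32_t estimate_ulsch_re(uint32_t L_prb,     // grant size in PRBs
                                  srslte_cp_t cp,     // cyclic prefix of the cell
                                  uint32_t tbs_bytes, // tentative TBS before UCI
                                  uint32_t nof_ack,   // number of HARQ-ACK bits (e.g. one per CC)
                                  uint32_t I_offset_ack,
                                  uint32_t I_offset_cqi,
                                  bool carries_ack,
                                  bool carries_cqi)
{
  uint32_t N_srs = 0; // assume no SRS transmission in this subframe
  uint32_t nof_symb = 2 * (SRSLTE_CP_NSYMB(cp) - 1) - N_srs;
  uint32_t nof_re = nof_symb * L_prb * SRSLTE_NRE;

  uint32_t nof_uci_re = 0;
  if (carries_ack) {
    // Q' for ACK/RI, scaled by the ACK beta offset
    nof_uci_re += srslte_qprime_ack_ext(L_prb, nof_symb, 8 * tbs_bytes, nof_ack, srslte_sch_beta_ack(I_offset_ack));
  }
  if (carries_cqi) {
    // Q' for the CQI report, scaled by the CQI beta offset
    nof_uci_re += srslte_qprime_cqi_ext(L_prb, nof_symb, 8 * tbs_bytes, srslte_sch_beta_cqi(I_offset_cqi));
  }
  return (nof_re > nof_uci_re) ? (nof_re - nof_uci_re) : 0;
}

A caller would feed it the grant length and the UE's configured beta-offset indices; the returned RE count can then be passed to the TBS/MCS selection in place of the full nof_re, which is what the new code path in generate_format0() does before calling alloc_tbs_ul() again.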