From bcbb08ebae9940cdcc407d2ec03cee6a0f27bdd0 Mon Sep 17 00:00:00 2001
From: Francisco Paisana
Date: Tue, 10 Mar 2020 15:00:03 +0000
Subject: [PATCH] use initial_dl_cqi for first DL tx.

---
 srsenb/hdr/stack/mac/scheduler_ue.h      |  5 +--
 srsenb/src/stack/mac/scheduler_metric.cc |  2 +-
 srsenb/src/stack/mac/scheduler_ue.cc     | 43 +++++++++++++++---------
 srsenb/test/mac/scheduler_ca_test.cc     | 12 +++----
 4 files changed, 37 insertions(+), 25 deletions(-)

diff --git a/srsenb/hdr/stack/mac/scheduler_ue.h b/srsenb/hdr/stack/mac/scheduler_ue.h
index 3485c1bab..d5e2fe03c 100644
--- a/srsenb/hdr/stack/mac/scheduler_ue.h
+++ b/srsenb/hdr/stack/mac/scheduler_ue.h
@@ -50,7 +50,7 @@ struct sched_ue_carrier {
   uint32_t get_required_prb_ul(uint32_t req_bytes);
   const sched_cell_params_t* get_cell_cfg() const { return cell_params; }
   bool is_active() const { return active; }
-  void update_cell_activity();
+  void set_dl_cqi(uint32_t tti_tx_dl, uint32_t dl_cqi);
 
   harq_entity harq_ent;
 
@@ -62,6 +62,7 @@ struct sched_ue_carrier {
   uint32_t dl_cqi_tti = 0;
   uint32_t ul_cqi = 1;
   uint32_t ul_cqi_tti = 0;
+  bool dl_cqi_rx = false;
   int max_mcs_dl = 28, max_mcs_ul = 28;
   uint32_t max_aggr_level = 3;
 
@@ -131,7 +132,7 @@ public:
 
   uint32_t get_required_prb_ul(uint32_t cc_idx, uint32_t req_bytes);
 
-  std::pair<uint32_t, uint32_t> get_requested_dl_rbgs(uint32_t ue_cc_idx, uint32_t nof_ctrl_symbols);
+  std::pair<uint32_t, uint32_t> get_required_dl_rbgs(uint32_t ue_cc_idx, uint32_t nof_ctrl_symbols);
   std::pair<uint32_t, uint32_t> get_requested_dl_bytes(uint32_t ue_cc_idx);
   uint32_t get_pending_dl_new_data();
   uint32_t get_pending_ul_new_data(uint32_t tti);
diff --git a/srsenb/src/stack/mac/scheduler_metric.cc b/srsenb/src/stack/mac/scheduler_metric.cc
index 92d63e7be..efc5f8406 100644
--- a/srsenb/src/stack/mac/scheduler_metric.cc
+++ b/srsenb/src/stack/mac/scheduler_metric.cc
@@ -133,7 +133,7 @@ dl_harq_proc* dl_metric_rr::allocate_user(sched_ue* user)
     h = user->get_empty_dl_harq(tti_dl, cell_idx);
     if (h != nullptr) {
       // Allocate resources based on pending data
-      std::pair<uint32_t, uint32_t> req_rbgs = user->get_requested_dl_rbgs(cell_idx, tti_alloc->get_nof_ctrl_symbols());
+      std::pair<uint32_t, uint32_t> req_rbgs = user->get_required_dl_rbgs(cell_idx, tti_alloc->get_nof_ctrl_symbols());
      if (req_rbgs.first > 0) {
        rbgmask_t newtx_mask(tti_alloc->get_dl_mask().size());
        if (find_allocation(req_rbgs.first, req_rbgs.second, &newtx_mask)) {
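
Note: the pair returned by the renamed get_required_dl_rbgs() is {minimum RBGs needed to fit the non-segmentable pending bytes, maximum RBGs worth allocating}. Below is a condensed, hypothetical sketch of how an allocator consumes such a pair; grant_rbgs() is a made-up stand-in for the find_allocation() step in dl_metric_rr::allocate_user() above:

    #include <algorithm>
    #include <cstdint>
    #include <utility>

    // Hypothetical stand-in for the allocation step in dl_metric_rr::allocate_user().
    // Grants nothing unless the minimum request fits, then caps the grant at the
    // maximum number of RBGs the UE can actually use.
    uint32_t grant_rbgs(std::pair<uint32_t, uint32_t> req_rbgs, uint32_t free_rbgs)
    {
      if (req_rbgs.first == 0 or free_rbgs < req_rbgs.first) {
        return 0; // nothing pending, or the non-segmentable minimum does not fit
      }
      return std::min(free_rbgs, req_rbgs.second);
    }

The real find_allocation() additionally has to respect the current DL RBG mask, which the sketch leaves out.
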
diff --git a/srsenb/src/stack/mac/scheduler_ue.cc b/srsenb/src/stack/mac/scheduler_ue.cc
index b6a134de7..8d578b69c 100644
--- a/srsenb/src/stack/mac/scheduler_ue.cc
+++ b/srsenb/src/stack/mac/scheduler_ue.cc
@@ -44,7 +44,8 @@ namespace srsenb {
 
 namespace sched_utils {
 
-constexpr uint32_t conres_ce_size = 6;
+const uint32_t initial_dl_cqi = 5;
+const uint32_t conres_ce_size = 6;
 
 //! Obtains TB size *in bytes* for a given MCS and N_{PRB}
 uint32_t get_tbs_bytes(uint32_t mcs, uint32_t nof_alloc_prb, bool is_ul)
@@ -329,9 +330,7 @@ void sched_ue::set_dl_cqi(uint32_t tti, uint32_t enb_cc_idx, uint32_t cqi)
 {
   auto p = get_cell_index(enb_cc_idx);
   if (p.second != std::numeric_limits<uint32_t>::max()) {
-    carriers[p.second].dl_cqi     = cqi;
-    carriers[p.second].dl_cqi_tti = tti;
-    carriers[p.second].update_cell_activity();
+    carriers[p.second].set_dl_cqi(tti, cqi);
   } else {
     log_h->warning("Received DL CQI for invalid cell index %d\n", enb_cc_idx);
   }
@@ -534,14 +533,13 @@ std::pair<int, int> sched_ue::compute_mcs_and_tbs(uint32_t ue_cc_i
   uint32_t nof_re = srslte_ra_dl_grant_nof_re(&carriers[ue_cc_idx].get_cell_cfg()->cfg.cell, &dl_sf, &grant);
 
   // Compute MCS+TBS
-  mcs = carriers[ue_cc_idx].fixed_mcs_dl;
   // Use a higher MCS for the Msg4 to fit in the 6 PRB case
-  if (mcs < 0) {
+  if (carriers[ue_cc_idx].fixed_mcs_dl < 0 or not carriers[ue_cc_idx].dl_cqi_rx) {
     // Dynamic MCS
     tbs_bytes = carriers[ue_cc_idx].alloc_tbs_dl(nof_alloc_prbs, nof_re, req_bytes.second, &mcs);
   } else {
     // Fixed MCS
-    tbs_bytes = sched_utils::get_tbs_bytes((uint32_t)mcs, nof_alloc_prbs, false);
+    tbs_bytes = sched_utils::get_tbs_bytes((uint32_t)carriers[ue_cc_idx].fixed_mcs_dl, nof_alloc_prbs, false);
   }
 
   // If the number of prbs is not sufficient to fit minimum required bytes, increase the mcs
@@ -818,15 +816,24 @@ uint32_t sched_ue::get_pending_dl_new_data_total()
   return req_bytes;
 }
 
-std::pair<uint32_t, uint32_t> sched_ue::get_requested_dl_rbgs(uint32_t ue_cc_idx, uint32_t nof_ctrl_symbols)
+std::pair<uint32_t, uint32_t> sched_ue::get_required_dl_rbgs(uint32_t ue_cc_idx, uint32_t nof_ctrl_symbols)
 {
   std::pair<uint32_t, uint32_t> req_bytes = get_requested_dl_bytes(ue_cc_idx);
   if (req_bytes.first == 0 and req_bytes.second == 0) {
     return {0, 0};
   }
-  uint32_t pending_prbs = carriers[ue_cc_idx].get_required_prb_dl(req_bytes.first, nof_ctrl_symbols);
+  uint32_t pending_prbs = carriers[ue_cc_idx].get_required_prb_dl(req_bytes.first, nof_ctrl_symbols);
+  if (pending_prbs > carriers[ue_cc_idx].get_cell_cfg()->nof_prb()) {
+    // Cannot fit allocation in given PRBs
+    log_h->error("SCHED: DL CQI=%d does not allow fitting %d non-segmentable DL tx bytes into the cell bandwidth. "
+                 "Consider increasing initial CQI value.\n",
+                 carriers[ue_cc_idx].dl_cqi,
+                 req_bytes.first);
+    return {pending_prbs, pending_prbs};
+  }
   uint32_t min_pending_rbg = (*cell_params_list)[cfg.supported_cc_list[ue_cc_idx].enb_cc_idx].prb_to_rbg(pending_prbs);
   pending_prbs = carriers[ue_cc_idx].get_required_prb_dl(req_bytes.second, nof_ctrl_symbols);
+  pending_prbs = std::min(pending_prbs, carriers[ue_cc_idx].get_cell_cfg()->nof_prb());
   uint32_t max_pending_rbg = (*cell_params_list)[cfg.supported_cc_list[ue_cc_idx].enb_cc_idx].prb_to_rbg(pending_prbs);
   return {min_pending_rbg, max_pending_rbg};
 }
@@ -873,7 +880,7 @@ std::pair<uint32_t, uint32_t> sched_ue::get_requested_dl_bytes(uint32_t ue_cc_id
     srb0_data = compute_sdu_total_bytes(0, lch[0].buf_retx);
     srb0_data += compute_sdu_total_bytes(0, lch[0].buf_tx);
     if (conres_ce_pending) {
-      sum_ce_data = sched_utils::conres_ce_size + 1;
+      sum_ce_data = sched_utils::conres_ce_size + ce_subheader_size;
     }
   }
   // Add pending CEs
@@ -1184,8 +1191,9 @@ sched_ue_carrier::sched_ue_carrier(const sched_interface::ue_cfg_t& cfg_,
   harq_ent(SCHED_MAX_HARQ_PROC, SCHED_MAX_HARQ_PROC)
 {
   // only PCell starts active. Remaining ones wait for valid CQI
-  active = ue_cc_idx == 0;
-  dl_cqi = (ue_cc_idx == 0) ? 1 : 0;
+  active    = ue_cc_idx == 0;
+  dl_cqi_rx = false;
+  dl_cqi    = (ue_cc_idx == 0) ? sched_utils::initial_dl_cqi : 0;
 
   // set max mcs
   max_mcs_ul = cell_params->sched_cfg->pusch_max_mcs >= 0 ? cell_params->sched_cfg->pusch_max_mcs : 28;
@@ -1314,7 +1322,7 @@ uint32_t sched_ue_carrier::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_
   uint32_t n;
   for (n = 0; n < cell_params->nof_prb() and nbytes < req_bytes; ++n) {
     nof_re = srslte_ra_dl_approx_nof_re(&cell_params->cfg.cell, n + 1, nof_ctrl_symbols);
-    if (fixed_mcs_dl < 0) {
+    if (fixed_mcs_dl < 0 or not dl_cqi_rx) {
       tbs = alloc_tbs_dl(n + 1, nof_re, 0, &mcs);
     } else {
       tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_dl, false), n + 1) / 8;
@@ -1326,7 +1334,7 @@ uint32_t sched_ue_carrier::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_
     }
   }
 
-  return n;
+  return (nbytes >= req_bytes) ? n : std::numeric_limits<uint32_t>::max();
 }
 
 uint32_t sched_ue_carrier::get_required_prb_ul(uint32_t req_bytes)
@@ -1360,10 +1368,13 @@ uint32_t sched_ue_carrier::get_required_prb_ul(uint32_t req_bytes)
   return n;
 }
 
-void sched_ue_carrier::update_cell_activity()
+void sched_ue_carrier::set_dl_cqi(uint32_t tti_tx_dl, uint32_t dl_cqi_)
 {
+  dl_cqi     = dl_cqi_;
+  dl_cqi_tti = tti_tx_dl;
+  dl_cqi_rx  = dl_cqi_rx or dl_cqi > 0;
   if (ue_cc_idx > 0 and active != cfg->supported_cc_list[ue_cc_idx].active) {
-    if (dl_cqi > 0) {
+    if (dl_cqi_rx) {
       active = cfg->supported_cc_list[ue_cc_idx].active;
       log_h->info("SCell index=%d is now %s\n", ue_cc_idx, active ? "active" : "inactive");
     }
diff --git a/srsenb/test/mac/scheduler_ca_test.cc b/srsenb/test/mac/scheduler_ca_test.cc
index a901f66be..be40fe958 100644
--- a/srsenb/test/mac/scheduler_ca_test.cc
+++ b/srsenb/test/mac/scheduler_ca_test.cc
@@ -142,21 +142,21 @@ int test_scell_activation(test_scell_activation_params params)
   tester.test_next_ttis(generator.tti_events);
 
   // Event (20 TTIs): Data back and forth
-  auto generate_data = [&](uint32_t nof_ttis, float prob_dl, float prob_ul) {
+  auto generate_data = [&](uint32_t nof_ttis, float prob_dl, float prob_ul, float rand_exp) {
     for (uint32_t i = 0; i < nof_ttis; ++i) {
       generator.step_tti();
       bool ul_flag = randf() < prob_ul, dl_flag = randf() < prob_dl;
       if (dl_flag) {
-        float exp = dl_data_exps[0] + randf() * (dl_data_exps[1] - dl_data_exps[0]);
+        float exp = dl_data_exps[0] + rand_exp * (dl_data_exps[1] - dl_data_exps[0]);
         generator.add_dl_data(rnti1, pow(10, exp));
       }
       if (ul_flag) {
-        float exp = ul_sr_exps[0] + randf() * (ul_sr_exps[1] - ul_sr_exps[0]);
+        float exp = ul_sr_exps[0] + rand_exp * (ul_sr_exps[1] - ul_sr_exps[0]);
         generator.add_ul_data(rnti1, pow(10, exp));
       }
     }
   };
-  generate_data(20, P_dl, P_ul_sr);
+  generate_data(20, P_dl, P_ul_sr, randf());
   tester.test_next_ttis(generator.tti_events);
 
   // Event: Reconf Complete. Activate SCells. Check if CE correctly transmitted
@@ -196,7 +196,7 @@ int test_scell_activation(test_scell_activation_params params)
 
   // Event: Generate a bit more data, it should *not* go through SCells until we send a CQI
   tester.dl_cqi_info(tester.tti_info.tti_params.tti_rx, rnti1, 1, cqi);
-  generate_data(5, P_dl, P_ul_sr);
+  generate_data(5, P_dl, P_ul_sr, randf());
   tester.test_next_ttis(generator.tti_events);
   TESTASSERT(tester.sched_stats->users[rnti1].tot_dl_sched_data[params.pcell_idx] > 0);
   TESTASSERT(tester.sched_stats->users[rnti1].tot_ul_sched_data[params.pcell_idx] > 0);
@@ -210,7 +210,7 @@
   for (uint32_t i = 1; i < cc_idxs.size(); ++i) {
     tester.dl_cqi_info(tester.tti_info.tti_params.tti_rx, rnti1, cc_idxs[i], cqi);
   }
-  generate_data(10, 1.0, 1.0);
+  generate_data(10, 1.0, 1.0, 1.0);
   tester.test_next_ttis(generator.tti_events);
   for (const auto& c : cc_idxs) {
     TESTASSERT(tester.sched_stats->users[rnti1].tot_dl_sched_data[c] > 0);
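
Note: the behavioral change in this patch is that a configured fixed DL MCS is bypassed until the first CQI report is received, with dl_cqi seeded to the conservative sched_utils::initial_dl_cqi (5) on the PCell. A minimal, self-contained sketch of that rule follows; the type and function names (carrier_state, use_dynamic_mcs) are hypothetical, and in the patch the actual check lives in sched_ue::compute_mcs_and_tbs() and sched_ue_carrier::get_required_prb_dl():

    #include <cstdint>
    #include <cstdio>

    struct carrier_state {
      int      fixed_mcs_dl = -1;    // < 0 means "always derive MCS from CQI"
      uint32_t dl_cqi       = 5;     // seeded with initial_dl_cqi on the PCell
      bool     dl_cqi_rx    = false; // latched once a CQI report arrives
    };

    // A fixed MCS is honored only once the first CQI report has been received;
    // before that, the MCS is derived from the conservative initial CQI, so the
    // first DL tx (e.g. Msg4 with the ConRes CE) is sized from initial_dl_cqi
    // rather than from the configured MCS.
    bool use_dynamic_mcs(const carrier_state& cc)
    {
      return cc.fixed_mcs_dl < 0 or not cc.dl_cqi_rx;
    }

    int main()
    {
      carrier_state cc;
      cc.fixed_mcs_dl = 20; // operator-configured fixed DL MCS
      std::printf("before first CQI: dynamic MCS? %d\n", use_dynamic_mcs(cc)); // 1
      cc.dl_cqi    = 12;    // first CQI report received
      cc.dl_cqi_rx = true;
      std::printf("after first CQI:  dynamic MCS? %d\n", use_dynamic_mcs(cc)); // 0
      return 0;
    }

If even the CQI-derived allocation cannot fit the non-segmentable bytes into the cell bandwidth, get_required_dl_rbgs() now logs an error suggesting a higher initial CQI value, as shown in the scheduler_ue.cc hunk above.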