sched,bug: change tbs/mcs derivation functions to forbid the use of alt cqi table for DCI format1A

Authored by Francisco on 2021-03-05 17:08:57 +00:00, committed by Francisco Paisana
parent 83f24fbf77
commit 77ac69796d
4 changed files with 54 additions and 55 deletions
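Summary of the change: when a DL grant is signalled with DCI format 1A, the TBS must not be derived from the alternative (256QAM) TBS index table, even for UEs configured to use it. The rule added in the last file of this diff reduces to the following self-contained sketch (the enum and struct below are simplified stand-ins, not the srsLTE types):

// Simplified stand-ins for srslte_dci_format_t and the UE configuration flag;
// only the two formats relevant to the rule are modelled here.
enum class dci_format { format1, format1A };

struct ue_cfg {
  bool use_tbs_index_alt; // UE configured with the alternative (256QAM) TBS table
};

// Core rule introduced by this commit: a grant carried by DCI format 1A never
// uses the alternative TBS index table, regardless of the UE configuration.
inline bool use_alt_tbs_table(const ue_cfg& cfg, dci_format fmt)
{
  return cfg.use_tbs_index_alt && fmt != dci_format::format1A;
}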


@@ -89,11 +89,18 @@ private:
* TBS/MCS derivation
************************************************************/
tbs_info alloc_tbs_dl(const sched_ue_cell& cell, uint32_t nof_prb, uint32_t nof_re, int req_bytes = -1);
tbs_info cqi_to_tbs_dl(const sched_ue_cell& cell,
uint32_t nof_prb,
uint32_t nof_re,
srslte_dci_format_t dci_format,
int req_bytes = -1);
tbs_info
alloc_tbs_ul(const sched_ue_cell& cell, uint32_t nof_prb, uint32_t nof_re, int req_bytes = -1, int explicit_mcs = -1);
cqi_to_tbs_ul(const sched_ue_cell& cell, uint32_t nof_prb, uint32_t nof_re, int req_bytes = -1, int explicit_mcs = -1);
int get_required_prb_dl(const sched_ue_cell& cell, tti_point tti_tx_dl, uint32_t req_bytes);
int get_required_prb_dl(const sched_ue_cell& cell,
tti_point tti_tx_dl,
srslte_dci_format_t dci_format,
uint32_t req_bytes);
uint32_t get_required_prb_ul(const sched_ue_cell& cell, uint32_t req_bytes);
} // namespace srsenb
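The DL helpers now take the DCI format explicitly, so each call site decides whether the alternative table may apply (see the sched_ue hunks further down). A minimal usage sketch, assuming the declarations above are in scope; the PRB, RE and byte counts are made-up values for illustration only:

void dl_alloc_example(const srsenb::sched_ue_cell& cell)
{
  const uint32_t nof_prb   = 25;
  const uint32_t nof_re    = 3000;
  const int      req_bytes = 320;

  // Fallback grant on DCI format 1A: the alternative TBS table is never applied.
  srsenb::tbs_info tb_1a = srsenb::cqi_to_tbs_dl(cell, nof_prb, nof_re, SRSLTE_DCI_FORMAT1A, req_bytes);

  // Regular grant: the alternative table applies only if the UE is configured for it.
  srsenb::tbs_info tb_1 = srsenb::cqi_to_tbs_dl(cell, nof_prb, nof_re, SRSLTE_DCI_FORMAT1, req_bytes);

  (void)tb_1a;
  (void)tb_1;
}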


@@ -130,11 +130,9 @@ tbs_info compute_min_mcs_and_tbs_from_required_bytes(uint32_t nof_prb,
bool ulqam64_enabled,
bool use_tbs_index_alt)
{
assert(req_bytes > 0);
// get max MCS/TBS that meets max coderate requirements
tbs_info tb_max = compute_mcs_and_tbs(nof_prb, nof_re, cqi, max_mcs, is_ul, ulqam64_enabled, use_tbs_index_alt);
if (tb_max.tbs_bytes + 8 <= (int)req_bytes or tb_max.mcs == 0) {
if (tb_max.tbs_bytes + 8 <= (int)req_bytes or tb_max.mcs == 0 or req_bytes <= 0) {
// if mcs cannot be lowered or a decrease in TBS index won't meet req_bytes requirement
return tb_max;
}
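Context for the dropped assert: after this commit both cqi_to_tbs_dl and cqi_to_tbs_ul call this helper unconditionally (the separate compute_mcs_and_tbs path for req_bytes <= 0 is gone), so a non-positive req_bytes has to return the capacity-limited maximum instead of aborting. A standalone sketch of the new guard (tbs_info mirrors the diff, the rest is illustrative):

struct tbs_info {
  int mcs       = 0;
  int tbs_bytes = -1;
};

tbs_info clamp_to_requirement(tbs_info tb_max, int req_bytes)
{
  if (tb_max.tbs_bytes + 8 <= req_bytes or tb_max.mcs == 0 or req_bytes <= 0) {
    // MCS cannot be lowered, lowering it would not satisfy req_bytes, or no
    // explicit requirement was given: return the maximum as-is.
    return tb_max;
  }
  // ... otherwise the real helper searches for the smallest MCS that still fits
  return tb_max;
}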


@@ -481,7 +481,7 @@ tbs_info sched_ue::compute_mcs_and_tbs(uint32_t enb_cc_idx,
uint32_t nof_re = cells[enb_cc_idx].cell_cfg->get_dl_nof_res(tti_tx_dl, dci, cfi);
// Compute MCS+TBS
tbs_info tb = alloc_tbs_dl(cells[enb_cc_idx], nof_alloc_prbs, nof_re, req_bytes.stop());
tbs_info tb = cqi_to_tbs_dl(cells[enb_cc_idx], nof_alloc_prbs, nof_re, dci.format, req_bytes.stop());
if (tb.tbs_bytes > 0 and tb.tbs_bytes < (int)req_bytes.start()) {
logger.info("SCHED: Could not get PRB allocation that avoids MAC CE or RLC SRB0 PDU segmentation");
@@ -613,7 +613,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
uint32_t N_srs = 0;
uint32_t nof_symb = 2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs;
uint32_t nof_re = nof_symb * alloc.length() * SRSLTE_NRE;
tbinfo = alloc_tbs_ul(cells[enb_cc_idx], alloc.length(), nof_re, req_bytes);
tbinfo = cqi_to_tbs_ul(cells[enb_cc_idx], alloc.length(), nof_re, req_bytes);
// Reduce MCS to fit UCI if transmitted in this grant
if (uci_type != UCI_PUSCH_NONE) {
@@ -632,7 +632,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
}
// Recompute again the MCS and TBS with the new spectral efficiency (based on the available RE for data)
if (nof_re >= nof_uci_re) {
tbinfo = alloc_tbs_ul(cells[enb_cc_idx], alloc.length(), nof_re - nof_uci_re, req_bytes);
tbinfo = cqi_to_tbs_ul(cells[enb_cc_idx], alloc.length(), nof_re - nof_uci_re, req_bytes);
}
// NOTE: if (nof_re < nof_uci_re) we should set TBS=0
}
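For reference, the RE budget feeding the cqi_to_tbs_ul calls above works out as follows under normal CP (SRSLTE_CP_NSYMB = 7, 12 subcarriers per PRB); the grant size and UCI RE count below are made-up figures:

#include <cstdint>
#include <cstdio>

int main()
{
  const uint32_t N_srs      = 0;
  const uint32_t nof_symb   = 2 * (7 - 1) - N_srs;      // 12 SC-FDMA data symbols per subframe
  const uint32_t alloc_len  = 10;                       // hypothetical PRBs in the UL grant
  const uint32_t nof_re     = nof_symb * alloc_len * 12; // 1440 REs in total
  const uint32_t nof_uci_re = 144;                      // hypothetical REs consumed by UCI

  // As in the diff: the TBS is only recomputed if some REs remain for data.
  uint32_t data_re = (nof_re >= nof_uci_re) ? nof_re - nof_uci_re : 0; // 1296 REs for data
  printf("REs for data: %u of %u\n", data_re, nof_re);
  return 0;
}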
@@ -738,7 +738,7 @@ rbg_interval sched_ue::get_required_dl_rbgs(uint32_t enb_cc_idx)
if (req_bytes == srslte::interval<uint32_t>{0, 0}) {
return {0, 0};
}
int pending_prbs = get_required_prb_dl(cells[enb_cc_idx], to_tx_dl(current_tti), req_bytes.start());
int pending_prbs = get_required_prb_dl(cells[enb_cc_idx], to_tx_dl(current_tti), get_dci_format(), req_bytes.start());
if (pending_prbs < 0) {
// Cannot fit allocation in given PRBs
logger.error("SCHED: DL CQI=%d does not allow fitting %d non-segmentable DL tx bytes into the cell bandwidth. "
@@ -748,8 +748,8 @@ rbg_interval sched_ue::get_required_dl_rbgs(uint32_t enb_cc_idx)
return {cellparams->nof_prb(), cellparams->nof_prb()};
}
uint32_t min_pending_rbg = cellparams->nof_prbs_to_rbgs(pending_prbs);
pending_prbs = get_required_prb_dl(cells[enb_cc_idx], to_tx_dl(current_tti), req_bytes.stop());
pending_prbs = (pending_prbs < 0) ? cellparams->nof_prb() : pending_prbs;
pending_prbs = get_required_prb_dl(cells[enb_cc_idx], to_tx_dl(current_tti), get_dci_format(), req_bytes.stop());
pending_prbs = (pending_prbs < 0) ? cellparams->nof_prb() : pending_prbs;
uint32_t max_pending_rbg = cellparams->nof_prbs_to_rbgs(pending_prbs);
return {min_pending_rbg, max_pending_rbg};
}
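The conversion above from pending bytes to an RBG interval can be summarised as: lower bound from the non-segmentable bytes, upper bound from all pending bytes, and a negative PRB count (allocation does not fit) saturating at the whole cell bandwidth. A compact sketch with a hypothetical PRB-to-RBG conversion:

#include <cstdint>
#include <utility>

// Hypothetical PRB->RBG conversion (RBG size of 2 PRBs, rounding up), only
// here to keep the sketch runnable; the real conversion lives in the cell parameters.
static uint32_t prbs_to_rbgs(uint32_t nof_prbs) { return (nof_prbs + 1) / 2; }

std::pair<uint32_t, uint32_t> required_rbg_interval(int min_prbs, int max_prbs, uint32_t cell_nof_prb)
{
  if (min_prbs < 0) {
    // even the non-segmentable bytes do not fit: ask for the whole bandwidth
    return {prbs_to_rbgs(cell_nof_prb), prbs_to_rbgs(cell_nof_prb)};
  }
  uint32_t lo = prbs_to_rbgs((uint32_t)min_prbs);
  uint32_t hi = prbs_to_rbgs(max_prbs < 0 ? cell_nof_prb : (uint32_t)max_prbs);
  return {lo, hi};
}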


@@ -221,61 +221,52 @@ std::tuple<int, YType, int, YType> false_position_method(int x1, int x2, YType y
return false_position_method(x1, x2, y0, f, [](int x) { return false; });
}
/**
* In this scheduler we tend to use all the available bandwidth and select the MCS
* that approximates the minimum between the capacity and the requested rate
*/
tbs_info cqi_to_tbs(const sched_ue_cell& cell, uint32_t nof_prb, uint32_t nof_re, int req_bytes, bool is_ul)
tbs_info cqi_to_tbs_dl(const sched_ue_cell& cell,
uint32_t nof_prb,
uint32_t nof_re,
srslte_dci_format_t dci_format,
int req_bytes)
{
// TODO: Compute real spectral efficiency based on PUSCH-UCI configuration
using ul64qam_cap = sched_interface::ue_cfg_t::ul64qam_cap;
uint32_t cqi = (is_ul) ? cell.ul_cqi : cell.dl_cqi;
uint32_t max_mcs = (is_ul) ? cell.max_mcs_ul : cell.max_mcs_dl;
bool ulqam64_enabled = is_ul and cell.get_ue_cfg()->support_ul64qam == ul64qam_cap::enabled;
bool use_tbs_index_alt = not is_ul and cell.get_ue_cfg()->use_tbs_index_alt;
bool use_tbs_index_alt = cell.get_ue_cfg()->use_tbs_index_alt and dci_format != SRSLTE_DCI_FORMAT1A;
tbs_info tb;
if (req_bytes <= 0) {
tb = compute_mcs_and_tbs(nof_prb, nof_re, cqi, max_mcs, is_ul, ulqam64_enabled, use_tbs_index_alt);
} else {
tb = compute_min_mcs_and_tbs_from_required_bytes(
nof_prb, nof_re, cqi, max_mcs, req_bytes, is_ul, ulqam64_enabled, use_tbs_index_alt);
}
// If coderate > SRSLTE_MIN(max_coderate, 0.930 * Qm) we should set TBS=0. We don't because it's not correctly
// handled by the scheduler, but we might be scheduling undecodable codewords at very low SNR
if (tb.tbs_bytes < 0) {
tb.mcs = 0;
tb.tbs_bytes = get_tbs_bytes((uint32_t)tb.mcs, nof_prb, use_tbs_index_alt, is_ul);
}
return tb;
}
tbs_info alloc_tbs_dl(const sched_ue_cell& cell, uint32_t nof_prb, uint32_t nof_re, int req_bytes)
{
tbs_info ret;
// Use a higher MCS for the Msg4 to fit in the 6 PRB case
if (cell.fixed_mcs_dl < 0 or not cell.dl_cqi_rx) {
// Dynamic MCS
ret = cqi_to_tbs(cell, nof_prb, nof_re, req_bytes, false);
ret = compute_min_mcs_and_tbs_from_required_bytes(
nof_prb, nof_re, cell.dl_cqi, cell.max_mcs_dl, req_bytes, false, false, use_tbs_index_alt);
// If coderate > SRSLTE_MIN(max_coderate, 0.930 * Qm) we should set TBS=0. We don't because it's not correctly
// handled by the scheduler, but we might be scheduling undecodable codewords at very low SNR
if (ret.tbs_bytes < 0) {
ret.mcs = 0;
ret.tbs_bytes = get_tbs_bytes((uint32_t)ret.mcs, nof_prb, use_tbs_index_alt, false);
}
} else {
// Fixed MCS
ret.mcs = cell.fixed_mcs_dl;
ret.tbs_bytes = get_tbs_bytes((uint32_t)cell.fixed_mcs_dl, nof_prb, cell.get_ue_cfg()->use_tbs_index_alt, false);
ret.tbs_bytes = get_tbs_bytes((uint32_t)cell.fixed_mcs_dl, nof_prb, use_tbs_index_alt, false);
}
return ret;
}
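About the comment on the coderate limit: when no MCS satisfies SRSLTE_MIN(max_coderate, 0.930 * Qm) the helpers report a negative TBS, and both branches above clamp that result to MCS 0 rather than skipping the grant. A self-contained sketch of that clamp (get_tbs_bytes_stub is a made-up stand-in for the real get_tbs_bytes lookup):

#include <cstdint>

struct tbs_info {
  int mcs       = 0;
  int tbs_bytes = -1;
};

static uint32_t get_tbs_bytes_stub(uint32_t mcs, uint32_t nof_prb) { return (mcs + 1) * nof_prb; } // made-up mapping

// When the reported TBS is negative, fall back to MCS 0 and recompute the TBS,
// mirroring the (ret.tbs_bytes < 0) handling in the diff.
tbs_info clamp_negative_tbs(tbs_info tb, uint32_t nof_prb)
{
  if (tb.tbs_bytes < 0) {
    tb.mcs       = 0;
    tb.tbs_bytes = (int)get_tbs_bytes_stub(0, nof_prb);
  }
  return tb;
}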
tbs_info alloc_tbs_ul(const sched_ue_cell& cell, uint32_t nof_prb, uint32_t nof_re, int req_bytes, int explicit_mcs)
tbs_info cqi_to_tbs_ul(const sched_ue_cell& cell, uint32_t nof_prb, uint32_t nof_re, int req_bytes, int explicit_mcs)
{
tbs_info ret;
int mcs = explicit_mcs >= 0 ? explicit_mcs : cell.fixed_mcs_ul;
using ul64qam_cap = sched_interface::ue_cfg_t::ul64qam_cap;
int mcs = explicit_mcs >= 0 ? explicit_mcs : cell.fixed_mcs_ul;
bool ulqam64_enabled = cell.get_ue_cfg()->support_ul64qam == ul64qam_cap::enabled;
tbs_info ret;
if (mcs < 0) {
// Dynamic MCS
ret = cqi_to_tbs(cell, nof_prb, nof_re, req_bytes, true);
ret = compute_min_mcs_and_tbs_from_required_bytes(
nof_prb, nof_re, cell.ul_cqi, cell.max_mcs_ul, req_bytes, true, ulqam64_enabled, false);
// If coderate > SRSLTE_MIN(max_coderate, 0.930 * Qm) we should set TBS=0. We don't because it's not correctly
// handled by the scheduler, but we might be scheduling undecodable codewords at very low SNR
if (ret.tbs_bytes < 0) {
ret.mcs = 0;
ret.tbs_bytes = get_tbs_bytes((uint32_t)ret.mcs, nof_prb, false, true);
}
} else {
// Fixed MCS
ret.mcs = mcs;
@@ -285,11 +276,14 @@ tbs_info alloc_tbs_ul(const sched_ue_cell& cell, uint32_t nof_prb, uint32_t nof_
return ret;
}
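The UL variant resolves the MCS in a fixed order before touching the CQI, which the following small sketch summarises (negative values mean "unset", as in the diff):

// Precedence in cqi_to_tbs_ul: an explicit MCS from the caller wins, then the
// per-cell fixed UL MCS, and only otherwise is the MCS derived from the UL CQI.
int select_ul_mcs(int explicit_mcs, int fixed_mcs_ul, int mcs_from_cqi)
{
  const int mcs = explicit_mcs >= 0 ? explicit_mcs : fixed_mcs_ul;
  return mcs >= 0 ? mcs : mcs_from_cqi;
}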
int get_required_prb_dl(const sched_ue_cell& cell, tti_point tti_tx_dl, uint32_t req_bytes)
int get_required_prb_dl(const sched_ue_cell& cell,
tti_point tti_tx_dl,
srslte_dci_format_t dci_format,
uint32_t req_bytes)
{
auto compute_tbs_approx = [tti_tx_dl, &cell](uint32_t nof_prb) {
auto compute_tbs_approx = [tti_tx_dl, &cell, dci_format](uint32_t nof_prb) {
uint32_t nof_re = cell.cell_cfg->get_dl_lb_nof_re(tti_tx_dl, nof_prb);
tbs_info tb = alloc_tbs_dl(cell, nof_prb, nof_re, -1);
tbs_info tb = cqi_to_tbs_dl(cell, nof_prb, nof_re, dci_format, -1);
return tb.tbs_bytes;
};
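get_required_prb_dl and get_required_prb_ul both wrap a TBS approximation that grows with the PRB count in a search for the smallest count whose TBS covers req_bytes (the real code uses false_position_method, defined earlier in this file). A toy linear-scan version of the same idea:

#include <cstdint>
#include <functional>

int required_prbs(const std::function<int(uint32_t)>& compute_tbs_approx, uint32_t max_prb, uint32_t req_bytes)
{
  for (uint32_t n = 1; n <= max_prb; ++n) {
    if (compute_tbs_approx(n) >= (int)req_bytes) {
      return (int)n; // smallest PRB count whose approximate TBS fits req_bytes
    }
  }
  return -1; // the requested bytes do not fit in the cell bandwidth
}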
@@ -308,7 +302,7 @@ uint32_t get_required_prb_ul(const sched_ue_cell& cell, uint32_t req_bytes)
auto compute_tbs_approx = [&cell](uint32_t nof_prb) {
const uint32_t N_srs = 0;
uint32_t nof_re = (2 * (SRSLTE_CP_NSYMB(cell.cell_cfg->cfg.cell.cp) - 1) - N_srs) * nof_prb * SRSLTE_NRE;
return alloc_tbs_ul(cell, nof_prb, nof_re, -1).tbs_bytes;
return cqi_to_tbs_ul(cell, nof_prb, nof_re, -1).tbs_bytes;
};
// find nof prbs that lead to a tbs just above req_bytes