collapse the 2-stage DCI generation process for SIB/Paging/RAR into a single stage in the scheduler.

Francisco 2021-03-14 23:39:52 +00:00 committed by Francisco Paisana
parent 3a4ae3d69d
commit 76103065f7
8 changed files with 308 additions and 211 deletions
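
In broad strokes: before this commit, alloc_bc/alloc_paging/alloc_rar only recorded metadata (rnti, req_bytes, sib_idx, rv) and the Format 1A DCI was built later in set_bc_sched_result/set_rar_sched_result; now the full grant, DCI included, is built at allocation time by the new generate_sib_dci/generate_paging_dci/generate_rar_dci helpers, while the set_*_sched_result stage only copies the pre-built grant, assigns the PDCCH position and logs. A minimal sketch of the new single-stage flow, simplified from the hunks below (error handling elided, cfi stands for tti_alloc.get_cfi()):

// allocation time: build the complete grant, including the Format 1A DCI
bc_alloc_t bc_alloc(ret.second);
if (not generate_sib_dci(bc_alloc.bc_grant, get_tti_tx_dl(), sib_idx, sib_ntx, rbg_range, *cc_cfg, cfi)) {
  return alloc_outcome_t::ERROR;
}
bc_allocs.push_back(bc_alloc);

// result packing: copy the pre-built grant, fill in the PDCCH location, log
*bc              = bc_alloc.bc_grant;
bc->dci.location = dci_result[bc_alloc.dci_idx]->dci_pos;
log_broadcast_allocation(*bc, bc_alloc.rbg_range, *cc_cfg);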

View File

@ -119,7 +119,7 @@ private:
const sched_cell_params_t* cc_cfg = nullptr;
sched_ue_list* ue_db = nullptr;
std::deque<sf_sched::pending_rar_t> pending_rars;
std::deque<pending_rar_t> pending_rars;
uint32_t rar_aggr_level = 2;
static const uint32_t PRACH_RAR_OFFSET = 3; // TS 36.321 Sec. 5.1.4
};

View File

@ -150,9 +150,7 @@ public:
struct ctrl_alloc_t {
size_t dci_idx;
rbg_interval rbg_range;
uint16_t rnti;
uint32_t req_bytes;
alloc_type_t alloc_type;
};
struct rar_alloc_t {
sf_sched::ctrl_alloc_t alloc_data;
@ -161,8 +159,7 @@ public:
{}
};
struct bc_alloc_t : public ctrl_alloc_t {
uint32_t rv = 0;
uint32_t sib_idx = 0;
sched_interface::dl_sched_bc_t bc_grant;
bc_alloc_t() = default;
explicit bc_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {}
};
@ -189,12 +186,6 @@ public:
uint32_t n_prb = 0;
uint32_t mcs = 0;
};
struct pending_rar_t {
uint16_t ra_rnti = 0;
tti_point prach_tti{};
uint32_t nof_grants = 0;
sched_interface::dl_sched_rar_info_t msg3_grant[sched_interface::MAX_RAR_LIST] = {};
};
typedef std::pair<alloc_outcome_t, const ctrl_alloc_t> ctrl_code_t;
// Control/Configuration Methods
@ -236,7 +227,6 @@ public:
private:
ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti);
int generate_format1a(prb_interval prb_range, uint32_t tbs, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci);
void set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result);
void set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,

View File

@ -13,7 +13,8 @@
#ifndef SRSLTE_SCHED_DCI_H
#define SRSLTE_SCHED_DCI_H
#include <cstdint>
#include "../sched_common.h"
#include "srslte/adt/bounded_vector.h"
namespace srsenb {
@ -59,6 +60,46 @@ tbs_info compute_min_mcs_and_tbs_from_required_bytes(uint32_t nof_prb,
bool ulqam64_enabled,
bool use_tbs_index_alt);
struct pending_rar_t {
uint16_t ra_rnti = 0;
tti_point prach_tti{};
srslte::bounded_vector<sched_interface::dl_sched_rar_info_t, sched_interface::MAX_RAR_LIST> msg3_grant = {};
};
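
With the move to sched_dci.h, pending_rar_t drops the raw msg3_grant array plus nof_grants counter in favour of srslte::bounded_vector, a fixed-capacity alternative to std::vector exposing the operations used in this commit (push_back, size, resize, iterators). Illustrative usage, assuming only the operations visible in the hunks below:

pending_rar_t p;
p.ra_rnti   = ra_rnti;
p.prach_tti = tti_point{rar_info.prach_tti};
p.msg3_grant.push_back(rar_info);          // capacity bounded by MAX_RAR_LIST
uint32_t nof_grants = p.msg3_grant.size(); // replaces the old nof_grants field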
bool generate_sib_dci(sched_interface::dl_sched_bc_t& bc,
tti_point tti_tx_dl,
uint32_t sib_idx,
uint32_t sib_ntx,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params,
uint32_t current_cfi);
bool generate_paging_dci(sched_interface::dl_sched_bc_t& bc,
tti_point tti_tx_dl,
uint32_t req_bytes,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params,
uint32_t current_cfi);
bool generate_rar_dci(sched_interface::dl_sched_rar_t& rar,
tti_point tti_tx_dl,
const pending_rar_t& pending_rar,
rbg_interval rbg_range,
uint32_t nof_grants,
uint32_t start_msg3_prb,
const sched_cell_params_t& cell_params,
uint32_t current_cfi);
void log_broadcast_allocation(const sched_interface::dl_sched_bc_t& bc,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params);
void log_rar_allocation(const sched_interface::dl_sched_rar_t& rar,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params);
void log_rar_allocation(const sched_interface::dl_sched_rar_t& rar, rbg_interval rbg_range);
} // namespace srsenb
#endif // SRSLTE_SCHED_DCI_H
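
A minimal usage sketch of the new one-stage interface (the sib_idx, sib_ntx and current_cfi values are illustrative; error handling mirrors sf_sched::alloc_bc further below):

sched_interface::dl_sched_bc_t bc = {};
if (generate_sib_dci(bc, tti_tx_dl, /*sib_idx=*/0, /*sib_ntx=*/0, rbg_range, cell_params, /*current_cfi=*/1)) {
  // bc.dci now carries the Format 1A payload; bc.type == BCCH, bc.index == sib_idx, bc.tbs is in bytes
}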

View File

@ -144,7 +144,7 @@ void ra_sched::dl_sched(sf_sched* tti_sched)
rar_aggr_level = 2;
while (not pending_rars.empty()) {
sf_sched::pending_rar_t& rar = pending_rars.front();
pending_rar_t& rar = pending_rars.front();
// Discard all RARs that fall out of the window. The first one inside the window is scheduled; if it cannot be scheduled, we exit
srslte::tti_interval rar_window{rar.prach_tti + PRACH_RAR_OFFSET,
@ -179,13 +179,13 @@ void ra_sched::dl_sched(sf_sched* tti_sched)
}
uint32_t nof_rar_allocs = ret.second;
if (nof_rar_allocs == rar.nof_grants) {
if (nof_rar_allocs == rar.msg3_grant.size()) {
// all RAR grants were allocated. Remove pending RAR
pending_rars.pop_front();
} else {
// keep the RAR grants that were not scheduled, so they can be scheduled in the next TTI
std::copy(&rar.msg3_grant[nof_rar_allocs], &rar.msg3_grant[rar.nof_grants], &rar.msg3_grant[0]);
rar.nof_grants -= nof_rar_allocs;
std::copy(rar.msg3_grant.begin() + nof_rar_allocs, rar.msg3_grant.end(), rar.msg3_grant.begin());
rar.msg3_grant.resize(rar.msg3_grant.size() - nof_rar_allocs);
}
}
}
@ -204,24 +204,22 @@ int ra_sched::dl_rach_info(dl_sched_rar_info_t rar_info)
uint16_t ra_rnti = 1 + (uint16_t)(rar_info.prach_tti % 10u);
// find pending rar with same RA-RNTI
for (sf_sched::pending_rar_t& r : pending_rars) {
for (pending_rar_t& r : pending_rars) {
if (r.prach_tti.to_uint() == rar_info.prach_tti and ra_rnti == r.ra_rnti) {
if (r.nof_grants >= sched_interface::MAX_RAR_LIST) {
if (r.msg3_grant.size() >= sched_interface::MAX_RAR_LIST) {
logger.warning("PRACH ignored, as the the maximum number of RAR grants per tti has been reached");
return SRSLTE_ERROR;
}
r.msg3_grant[r.nof_grants] = rar_info;
r.nof_grants++;
r.msg3_grant.push_back(rar_info);
return SRSLTE_SUCCESS;
}
}
// create new RAR
sf_sched::pending_rar_t p;
pending_rar_t p;
p.ra_rnti = ra_rnti;
p.prach_tti = tti_point{rar_info.prach_tti};
p.nof_grants = 1;
p.msg3_grant[0] = rar_info;
p.msg3_grant.push_back(rar_info);
pending_rars.push_back(p);
return SRSLTE_SUCCESS;
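
The RA-RNTI derivation above follows TS 36.321 Sec. 5.1.4, RA-RNTI = 1 + t_id + 10 * f_id, with f_id = 0 assumed for FDD. A worked example (the PRACH TTI value is illustrative):

uint32_t prach_tti = 1237;                             // PRACH received in subframe index 7
uint16_t ra_rnti   = 1 + (uint16_t)(prach_tti % 10u);  // = 8, as computed in dl_rach_info()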

View File

@ -14,8 +14,6 @@
#include "srsenb/hdr/stack/mac/sched_helpers.h"
#include "srslte/common/string_helpers.h"
using srslte::tti_point;
namespace srsenb {
const char* alloc_outcome_t::to_string() const
@ -401,9 +399,7 @@ sf_sched::ctrl_code_t sf_sched::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_by
// Allocation Successful
ctrl_alloc.dci_idx = tti_alloc.get_pdcch_grid().nof_allocs() - 1;
ctrl_alloc.rbg_range = ret.rbg_range;
ctrl_alloc.rnti = rnti;
ctrl_alloc.req_bytes = tbs_bytes;
ctrl_alloc.alloc_type = alloc_type;
return {ret.outcome, ctrl_alloc};
}
@ -424,8 +420,13 @@ alloc_outcome_t sf_sched::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t
// BC allocation successful
bc_alloc_t bc_alloc(ret.second);
bc_alloc.rv = rv;
bc_alloc.sib_idx = sib_idx;
if (not generate_sib_dci(
bc_alloc.bc_grant, get_tti_tx_dl(), sib_idx, sib_ntx, ret.second.rbg_range, *cc_cfg, tti_alloc.get_cfi())) {
logger.warning("SCHED: FAIL");
return alloc_outcome_t::ERROR;
}
bc_allocs.push_back(bc_alloc);
return ret.first;
@ -446,6 +447,13 @@ alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payloa
// Paging allocation successful
bc_alloc_t bc_alloc(ret.second);
if (not generate_paging_dci(
bc_alloc.bc_grant, get_tti_tx_dl(), paging_payload, ret.second.rbg_range, *cc_cfg, tti_alloc.get_cfi())) {
logger.warning("SCHED: FAIL");
return alloc_outcome_t::ERROR;
}
bc_allocs.push_back(bc_alloc);
return ret.first;
@ -460,7 +468,7 @@ std::pair<alloc_outcome_t, uint32_t> sf_sched::alloc_rar(uint32_t aggr_lvl, cons
return ret;
}
for (uint32_t nof_grants = rar.nof_grants; nof_grants > 0; nof_grants--) {
for (uint32_t nof_grants = rar.msg3_grant.size(); nof_grants > 0; nof_grants--) {
uint32_t buf_rar = 7 * nof_grants + 1; // 1+6 bytes per RAR subheader+body and 1 byte for Backoff
uint32_t total_msg3_size = msg3_grant_size * nof_grants;
@ -475,35 +483,30 @@ std::pair<alloc_outcome_t, uint32_t> sf_sched::alloc_rar(uint32_t aggr_lvl, cons
ret.first = ret2.first.result;
ret.second = nof_grants;
// if there was no space for the RAR, try again
if (ret.first == alloc_outcome_t::RB_COLLISION) {
continue;
if (ret.first == alloc_outcome_t::SUCCESS) {
sched_interface::dl_sched_rar_t rar_grant;
if (generate_rar_dci(rar_grant,
get_tti_tx_dl(),
rar,
ret2.second.rbg_range,
nof_grants,
last_msg3_prb,
*cc_cfg,
tti_alloc.get_cfi())) {
// RAR allocation successful
rar_allocs.emplace_back(ret2.second, rar_grant);
last_msg3_prb += msg3_grant_size * nof_grants;
return ret;
}
// if any other error, return
if (ret.first != alloc_outcome_t::SUCCESS) {
} else if (ret.first != alloc_outcome_t::RB_COLLISION) {
logger.warning("SCHED: Could not allocate RAR for L=%d, cause=%s", aggr_lvl, ret.first.to_string());
return ret;
}
// RAR allocation successful
sched_interface::dl_sched_rar_t rar_grant = {};
rar_grant.msg3_grant.resize(nof_grants);
for (uint32_t i = 0; i < nof_grants; ++i) {
rar_grant.msg3_grant[i].data = rar.msg3_grant[i];
rar_grant.msg3_grant[i].grant.tpc_pusch = 3;
rar_grant.msg3_grant[i].grant.trunc_mcs = 0;
uint32_t rba = srslte_ra_type2_to_riv(msg3_grant_size, last_msg3_prb, cc_cfg->cfg.cell.nof_prb);
rar_grant.msg3_grant[i].grant.rba = rba;
last_msg3_prb += msg3_grant_size;
// if there was no space for the RAR, try again with a lower number of grants
}
rar_allocs.emplace_back(ret2.second, rar_grant);
break;
}
if (ret.first != alloc_outcome_t::SUCCESS) {
logger.info("SCHED: RAR allocation postponed due to lack of RBs");
}
return ret;
}
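
For reference, the sizing logic in alloc_rar(): each scheduled preamble contributes a 1-byte MAC subheader plus a 6-byte RAR body, and one extra byte covers the Backoff Indicator, hence buf_rar = 7 * nof_grants + 1. A small illustration of one loop iteration (numbers illustrative):

uint32_t nof_grants = 3;
uint32_t buf_rar    = 7 * nof_grants + 1;            // 22-byte RAR MAC PDU
uint32_t total_msg3 = msg3_grant_size * nof_grants;  // UL PRBs to reserve for Msg3
// On RB_COLLISION the loop retries with nof_grants - 1; on success last_msg3_prb
// advances by msg3_grant_size * nof_grants so later RARs use fresh UL PRBs for Msg3.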
@ -652,7 +655,7 @@ alloc_outcome_t sf_sched::alloc_ul_user(sched_ue* user, prb_interval alloc)
bool has_retx = h->has_pending_retx();
if (not has_retx) {
alloc_type = ul_alloc_t::NEWTX;
} else if (h->retx_requires_pdcch(tti_point{get_tti_tx_ul()}, alloc)) {
} else if (h->retx_requires_pdcch(get_tti_tx_ul(), alloc)) {
alloc_type = ul_alloc_t::ADAPT_RETX;
} else {
alloc_type = ul_alloc_t::NOADAPT_RETX;
@ -694,68 +697,12 @@ void sf_sched::set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_r
for (const auto& bc_alloc : bc_allocs) {
sched_interface::dl_sched_bc_t* bc = &dl_result->bc[dl_result->nof_bc_elems];
*bc = bc_alloc.bc_grant;
// assign NCCE/L
bc->dci.location = dci_result[bc_alloc.dci_idx]->dci_pos;
/* Generate DCI format1A */
prb_interval prb_range = prb_interval::rbgs_to_prbs(bc_alloc.rbg_range, cc_cfg->nof_prb());
int tbs = generate_format1a(prb_range, bc_alloc.req_bytes, bc_alloc.rv, bc_alloc.rnti, &bc->dci);
// Setup BC/Paging processes
if (bc_alloc.alloc_type == alloc_type_t::DL_BC) {
if (tbs <= (int)bc_alloc.req_bytes) {
logger.warning("SCHED: Error SIB%d, rbgs=(%d,%d), dci=(%d,%d), len=%d",
bc_alloc.sib_idx + 1,
bc_alloc.rbg_range.start(),
bc_alloc.rbg_range.stop(),
bc->dci.location.L,
bc->dci.location.ncce,
bc_alloc.req_bytes);
continue;
}
// Setup BC process
bc->index = bc_alloc.sib_idx;
bc->type = sched_interface::dl_sched_bc_t::BCCH;
bc->tbs = (uint32_t)bc_alloc.req_bytes;
logger.debug("SCHED: SIB%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d",
bc_alloc.sib_idx + 1,
bc_alloc.rbg_range.start(),
bc_alloc.rbg_range.stop(),
bc->dci.location.L,
bc->dci.location.ncce,
bc_alloc.rv,
bc_alloc.req_bytes,
cc_cfg->cfg.sibs[bc_alloc.sib_idx].period_rf,
bc->dci.tb[0].mcs_idx);
} else {
// Paging
if (tbs <= 0) {
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer, "{}", bc_alloc.rbg_range);
logger.warning("SCHED: Error Paging, rbgs=%s, dci=(%d,%d)",
srslte::to_c_str(str_buffer),
bc->dci.location.L,
bc->dci.location.ncce);
continue;
}
// Setup Paging process
bc->type = sched_interface::dl_sched_bc_t::PCCH;
bc->tbs = (uint32_t)tbs;
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer, "{}", bc_alloc.rbg_range);
logger.info("SCHED: PCH, rbgs=%s, dci=(%d,%d), tbs=%d, mcs=%d",
srslte::to_c_str(str_buffer),
bc->dci.location.L,
bc->dci.location.ncce,
tbs,
bc->dci.tb[0].mcs_idx);
}
dl_result->nof_bc_elems++;
log_broadcast_allocation(*bc, bc_alloc.rbg_range, *cc_cfg);
}
}
@ -765,45 +712,13 @@ void sf_sched::set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_
for (const auto& rar_alloc : rar_allocs) {
sched_interface::dl_sched_rar_t* rar = &dl_result->rar[dl_result->nof_rar_elems];
// Setup RAR process
*rar = rar_alloc.rar_grant;
// Assign NCCE/L
rar->dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos;
/* Generate DCI format1A */
prb_interval prb_range = prb_interval::rbgs_to_prbs(rar_alloc.alloc_data.rbg_range, cc_cfg->nof_prb());
int tbs = generate_format1a(prb_range, rar_alloc.alloc_data.req_bytes, 0, rar_alloc.alloc_data.rnti, &rar->dci);
if (tbs <= 0) {
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer, "{}", rar_alloc.alloc_data.rbg_range);
logger.warning("SCHED: Error RAR, ra_rnti_idx=%d, rbgs=%s, dci=(%d,%d)",
rar_alloc.alloc_data.rnti,
srslte::to_c_str(str_buffer),
rar->dci.location.L,
rar->dci.location.ncce);
continue;
}
// Setup RAR process
rar->tbs = rar_alloc.alloc_data.req_bytes;
rar->msg3_grant = rar_alloc.rar_grant.msg3_grant;
// Print RAR allocation result
for (uint32_t i = 0; i < rar->msg3_grant.size(); ++i) {
const auto& msg3_grant = rar->msg3_grant[i];
uint16_t expected_rnti = msg3_grant.data.temp_crnti;
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer, "{}", rar_alloc.alloc_data.rbg_range);
logger.info("SCHED: RAR, temp_crnti=0x%x, ra-rnti=%d, rbgs=%s, dci=(%d,%d), rar_grant_rba=%d, "
"rar_grant_mcs=%d",
expected_rnti,
rar_alloc.alloc_data.rnti,
srslte::to_c_str(str_buffer),
rar->dci.location.L,
rar->dci.location.ncce,
msg3_grant.grant.rba,
msg3_grant.grant.trunc_mcs);
}
dl_result->nof_rar_elems++;
log_rar_allocation(*rar, rar_alloc.alloc_data.rbg_range);
}
}
@ -1093,53 +1008,4 @@ uint32_t sf_sched::get_nof_ctrl_symbols() const
return tti_alloc.get_cfi() + ((cc_cfg->cfg.cell.nof_prb <= 10) ? 1 : 0);
}
int sf_sched::generate_format1a(prb_interval prb_range,
uint32_t tbs_bytes,
uint32_t rv,
uint16_t rnti,
srslte_dci_dl_t* dci)
{
/* Calculate I_tbs for this TBS */
int tbs = tbs_bytes * 8;
int i;
int mcs = -1;
for (i = 0; i < 27; i++) {
if (srslte_ra_tbs_from_idx(i, 2) >= tbs) {
dci->type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_2;
mcs = i;
tbs = srslte_ra_tbs_from_idx(i, 2);
break;
}
if (srslte_ra_tbs_from_idx(i, 3) >= tbs) {
dci->type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_3;
mcs = i;
tbs = srslte_ra_tbs_from_idx(i, 3);
break;
}
}
if (i == 28) {
logger.error("Can't allocate Format 1A for TBS=%d", tbs);
return -1;
}
logger.debug("ra_tbs=%d/%d, tbs_bytes=%d, tbs=%d, mcs=%d",
srslte_ra_tbs_from_idx(mcs, 2),
srslte_ra_tbs_from_idx(mcs, 3),
tbs_bytes,
tbs,
mcs);
dci->alloc_type = SRSLTE_RA_ALLOC_TYPE2;
dci->type2_alloc.mode = srslte_ra_type2_t::SRSLTE_RA_TYPE2_LOC;
dci->type2_alloc.riv = srslte_ra_type2_to_riv(prb_range.length(), prb_range.start(), cc_cfg->cfg.cell.nof_prb);
dci->pid = 0;
dci->tb[0].mcs_idx = mcs;
dci->tb[0].rv = rv;
dci->format = SRSLTE_DCI_FORMAT1A;
dci->rnti = rnti;
dci->ue_cc_idx = std::numeric_limits<uint32_t>::max();
return tbs;
}
} // namespace srsenb

View File

@ -383,8 +383,6 @@ sched_cell_params_t::get_dl_nof_res(srslte::tti_point tti_tx_dl, const srslte_dc
}
}
// sanity check
assert(nof_re == srslte_ra_dl_grant_nof_re(&cfg.cell, &dl_sf, &grant));
return nof_re;
}

View File

@ -12,11 +12,16 @@
#include "srsenb/hdr/stack/mac/sched_phy_ch/sched_dci.h"
#include "srsenb/hdr/stack/mac/sched_common.h"
#include "srsenb/hdr/stack/mac/sched_helpers.h"
#include "srslte/common/string_helpers.h"
#include <cmath>
#include <cstdint>
namespace srsenb {
static srslog::basic_logger& logger = srslog::fetch_basic_logger("MAC");
/// Compute max TBS based on max coderate
int coderate_to_tbs(float max_coderate, uint32_t nof_re)
{
@ -162,4 +167,203 @@ tbs_info compute_min_mcs_and_tbs_from_required_bytes(uint32_t nof_prb,
return tb_max;
}
int generate_ra_bc_dci_format1a_common(srslte_dci_dl_t& dci,
uint16_t rnti,
tti_point tti_tx_dl,
uint32_t req_bytes,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params,
uint32_t current_cfi)
{
static const uint32_t Qm = 2;
// Calculate I_tbs for this TBS
int tbs = static_cast<int>(req_bytes) * 8;
int mcs = -1;
for (uint32_t i = 0; i < 27; i++) {
if (srslte_ra_tbs_from_idx(i, 2) >= tbs) {
dci.type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_2;
mcs = i;
tbs = srslte_ra_tbs_from_idx(i, 2);
break;
}
if (srslte_ra_tbs_from_idx(i, 3) >= tbs) {
dci.type2_alloc.n_prb1a = srslte_ra_type2_t::SRSLTE_RA_TYPE2_NPRB1A_3;
mcs = i;
tbs = srslte_ra_tbs_from_idx(i, 3);
break;
}
}
if (mcs < 0) {
// logger.error("Can't allocate Format 1A for TBS=%d", tbs);
return -1;
}
// Generate remaining DCI Format1A content
dci.alloc_type = SRSLTE_RA_ALLOC_TYPE2;
dci.type2_alloc.mode = srslte_ra_type2_t::SRSLTE_RA_TYPE2_LOC;
prb_interval prb_range = prb_interval::rbgs_to_prbs(rbg_range, cell_params.nof_prb());
dci.type2_alloc.riv = srslte_ra_type2_to_riv(prb_range.length(), prb_range.start(), cell_params.nof_prb());
dci.pid = 0;
dci.tb[0].mcs_idx = mcs;
dci.tb[0].rv = 0; // used for SIBs
dci.format = SRSLTE_DCI_FORMAT1A;
dci.rnti = rnti;
dci.ue_cc_idx = std::numeric_limits<uint32_t>::max();
// Compute effective code rate and verify it doesn't exceed max code rate
uint32_t nof_re = cell_params.get_dl_nof_res(tti_tx_dl, dci, current_cfi);
if (srslte_coderate(tbs, nof_re) >= 0.93F * Qm) {
return -1;
}
logger.debug("ra_tbs=%d/%d, tbs_bytes=%d, tbs=%d, mcs=%d",
srslte_ra_tbs_from_idx(mcs, 2),
srslte_ra_tbs_from_idx(mcs, 3),
req_bytes,
tbs,
mcs);
return tbs;
}
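
The final check in generate_ra_bc_dci_format1a_common() guards the effective code rate: with QPSK (Qm = 2) the allocation is rejected once it exceeds roughly 0.93 * 2 = 1.86 bits per resource element. A rough, illustrative calculation (assuming srslte_coderate() approximates transport-block bits per RE; it may additionally account for CRC bits):

uint32_t tbs_bits = 328;                       // candidate TBS in bits
uint32_t nof_re   = 200;                       // REs available in the allocation
float    rate     = (float)tbs_bits / nof_re;  // ~1.64 < 0.93f * 2 -> allocation accepted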
bool generate_sib_dci(sched_interface::dl_sched_bc_t& bc,
tti_point tti_tx_dl,
uint32_t sib_idx,
uint32_t sib_ntx,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params,
uint32_t current_cfi)
{
bc = {};
int tbs_bits = generate_ra_bc_dci_format1a_common(
bc.dci, SRSLTE_SIRNTI, tti_tx_dl, cell_params.cfg.sibs[sib_idx].len, rbg_range, cell_params, current_cfi);
if (tbs_bits < 0) {
return false;
}
// generate SIB-specific fields
bc.index = sib_idx;
bc.type = sched_interface::dl_sched_bc_t::BCCH;
// bc.tbs = sib_len;
bc.tbs = tbs_bits / 8;
bc.dci.tb[0].rv = get_rvidx(sib_ntx);
return true;
}
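
The redundancy version applied to SIB retransmissions comes from get_rvidx(sib_ntx); it is assumed here to implement the standard system-information RV cycling RV_k = ceil(3/2 * k) mod 4, i.e. the sequence 0, 2, 3, 1. A stand-in sketch under that assumption (the real helper lives in the scheduler headers):

// hypothetical equivalent of get_rvidx(), for illustration only
inline uint32_t rv_for_sib_tx(uint32_t sib_ntx)
{
  static const uint32_t rv[4] = {0, 2, 3, 1}; // ceil(3/2 * k) mod 4, k = 0..3
  return rv[sib_ntx % 4];
}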
bool generate_paging_dci(sched_interface::dl_sched_bc_t& bc,
tti_point tti_tx_dl,
uint32_t req_bytes,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params,
uint32_t current_cfi)
{
bc = {};
int tbs_bits = generate_ra_bc_dci_format1a_common(
bc.dci, SRSLTE_PRNTI, tti_tx_dl, req_bytes, rbg_range, cell_params, current_cfi);
if (tbs_bits < 0) {
return false;
}
// generate Paging-specific fields
bc.type = sched_interface::dl_sched_bc_t::PCCH;
bc.tbs = tbs_bits / 8;
return true;
}
bool generate_rar_dci(sched_interface::dl_sched_rar_t& rar,
tti_point tti_tx_dl,
const pending_rar_t& pending_rar,
rbg_interval rbg_range,
uint32_t nof_grants,
uint32_t start_msg3_prb,
const sched_cell_params_t& cell_params,
uint32_t current_cfi)
{
const uint32_t msg3_Lcrb = 3;
uint32_t req_bytes = 7 * nof_grants + 1; // 1+6 bytes per RAR subheader+body and 1 byte for Backoff
rar = {};
int tbs_bits = generate_ra_bc_dci_format1a_common(
rar.dci, pending_rar.ra_rnti, tti_tx_dl, req_bytes, rbg_range, cell_params, current_cfi);
if (tbs_bits < 0) {
return false;
}
rar.msg3_grant.resize(nof_grants);
for (uint32_t i = 0; i < nof_grants; ++i) {
rar.msg3_grant[i].data = pending_rar.msg3_grant[i];
rar.msg3_grant[i].grant.tpc_pusch = 3;
rar.msg3_grant[i].grant.trunc_mcs = 0;
rar.msg3_grant[i].grant.rba = srslte_ra_type2_to_riv(msg3_Lcrb, start_msg3_prb, cell_params.nof_prb());
start_msg3_prb += msg3_Lcrb;
}
// rar.tbs = tbs_bits / 8;
rar.tbs = req_bytes;
return true;
}
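
Each Msg3 grant packs its UL PRB assignment as a resource-indication value. For a contiguous allocation with (L_CRBs - 1) <= floor(N_prb / 2), the RIV is N_prb * (L_CRBs - 1) + RB_start (TS 36.213). A worked example with the fixed msg3_Lcrb = 3 used above (cell bandwidth illustrative):

uint32_t nof_prb        = 50;  // 10 MHz cell, for illustration
uint32_t start_msg3_prb = 2;
uint32_t rba            = nof_prb * (3 - 1) + start_msg3_prb;  // = 102, what srslte_ra_type2_to_riv(3, 2, 50) should yield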
void log_broadcast_allocation(const sched_interface::dl_sched_bc_t& bc,
rbg_interval rbg_range,
const sched_cell_params_t& cell_params)
{
if (not logger.info.enabled()) {
return;
}
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer, "{}", rbg_range);
if (bc.type == sched_interface::dl_sched_bc_t::bc_type::BCCH) {
logger.debug("SCHED: SIB%d, rbgs=(%d,%d), dci=(%d,%d), rv=%d, len=%d, period=%d, mcs=%d",
bc.index + 1,
rbg_range.start(),
rbg_range.stop(),
bc.dci.location.L,
bc.dci.location.ncce,
bc.dci.tb[0].rv,
cell_params.cfg.sibs[bc.index].len,
cell_params.cfg.sibs[bc.index].period_rf,
bc.dci.tb[0].mcs_idx);
} else {
logger.info("SCHED: PCH, rbgs=%s, dci=(%d,%d), tbs=%d, mcs=%d",
srslte::to_c_str(str_buffer),
bc.dci.location.L,
bc.dci.location.ncce,
bc.tbs,
bc.dci.tb[0].mcs_idx);
}
}
void log_rar_allocation(const sched_interface::dl_sched_rar_t& rar, rbg_interval rbg_range)
{
if (not logger.info.enabled()) {
return;
}
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer, "{}", rbg_range);
fmt::memory_buffer str_buffer2;
for (size_t i = 0; i < rar.msg3_grant.size(); ++i) {
fmt::format_to(str_buffer2,
"{}{{c-rnti=0x{:x}, rba={}, mcs={}}}",
i > 0 ? ", " : "",
rar.msg3_grant[i].data.temp_crnti,
rar.msg3_grant[i].grant.rba,
rar.msg3_grant[i].grant.trunc_mcs);
}
logger.info("SCHED: RAR, ra-rnti=%d, rbgs=%s, dci=(%d,%d), msg3 grants=[%s]",
rar.dci.rnti,
srslte::to_c_str(str_buffer),
rar.dci.location.L,
rar.dci.location.ncce,
srslte::to_c_str(str_buffer2));
}
} // namespace srsenb

View File

@ -321,7 +321,7 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx)
};
for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) {
auto& bc = dl_result.bc[i];
const sched_interface::dl_sched_bc_t& bc = dl_result.bc[i];
if (bc.type == sched_interface::dl_sched_bc_t::BCCH) {
CONDERROR(bc.tbs < cell_params.cfg.sibs[bc.index].len,
"Allocated BC process with TBS=%d < sib_len=%d",