sched optimization - swap C-arrays for bounded_vector in sched_interface to reduce the time spent on bzero/memcpy in the scheduler

Francisco 2021-03-17 15:39:03 +00:00 committed by Francisco Paisana
parent c0a90c5aa8
commit 0d91802495
12 changed files with 97 additions and 119 deletions

View File

@ -214,27 +214,22 @@ public:
} dl_sched_bc_t;
typedef struct {
uint32_t cfi;
uint32_t nof_data_elems;
uint32_t nof_rar_elems;
uint32_t nof_bc_elems;
dl_sched_data_t data[MAX_DATA_LIST];
dl_sched_rar_t rar[MAX_RAR_LIST];
dl_sched_bc_t bc[MAX_BC_LIST];
} dl_sched_res_t;
struct dl_sched_res_t {
uint32_t cfi;
srslte::bounded_vector<dl_sched_data_t, MAX_DATA_LIST> data;
srslte::bounded_vector<dl_sched_rar_t, MAX_RAR_LIST> rar;
srslte::bounded_vector<dl_sched_bc_t, MAX_BC_LIST> bc;
};
typedef struct {
uint16_t rnti;
enum phich_elem { ACK, NACK } phich;
} ul_sched_phich_t;
typedef struct {
uint32_t nof_dci_elems;
uint32_t nof_phich_elems;
ul_sched_data_t pusch[MAX_DATA_LIST];
ul_sched_phich_t phich[MAX_PHICH_LIST];
} ul_sched_res_t;
struct ul_sched_res_t {
srslte::bounded_vector<ul_sched_data_t, MAX_DATA_LIST> pusch;
srslte::bounded_vector<ul_sched_phich_t, MAX_PHICH_LIST> phich;
};
/******************* Scheduler Control ****************************/
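
The struct change above is where the bzero/memcpy savings come from: instead of MAX-sized C-arrays plus nof_*_elems counters that get zeroed and copied in full every TTI, the results now hold fixed-capacity vectors that track their own size, so clearing resets a count and copying only touches the live elements. Below is a minimal sketch of the idea, assuming std::array backing storage; the real srslte::bounded_vector in the adt headers uses uninitialized storage and a richer interface, but the members this commit relies on (size(), empty(), full(), back(), emplace_back(), pop_back(), begin()/end()) are the ones shown.

```cpp
#include <array>
#include <cassert>
#include <cstddef>
#include <utility>

// Minimal sketch of a fixed-capacity vector, NOT the actual srslte implementation.
// Only the first size_ slots are live: clear() is O(1) and a copy touches size()
// elements rather than the full MAX_*_LIST capacity.
template <typename T, std::size_t N>
class bounded_vector_sketch
{
public:
  using iterator       = T*;
  using const_iterator = const T*;

  std::size_t size() const { return size_; }
  bool        empty() const { return size_ == 0; }
  bool        full() const { return size_ == N; }

  T&       operator[](std::size_t i) { return buffer_[i]; }
  const T& operator[](std::size_t i) const { return buffer_[i]; }
  T&       back() { return buffer_[size_ - 1]; }

  iterator       begin() { return buffer_.data(); }
  iterator       end() { return buffer_.data() + size_; }
  const_iterator begin() const { return buffer_.data(); }
  const_iterator end() const { return buffer_.data() + size_; }

  template <typename... Args>
  void emplace_back(Args&&... args)
  {
    assert(not full());
    buffer_[size_++] = T{std::forward<Args>(args)...};
  }
  // Dropping elements never touches the storage; slots past size_ are simply ignored.
  void pop_back() { --size_; }
  void clear() { size_ = 0; } // no bzero of the whole buffer

private:
  std::array<T, N> buffer_; // elements past size_ are never read, so no upfront zeroing
  std::size_t      size_ = 0;
};
```

With dl_sched_res_t and ul_sched_res_t built on such containers, the nof_data_elems / nof_rar_elems / nof_bc_elems / nof_dci_elems / nof_phich_elems counters disappear, and the remaining hunks simply switch every producer and consumer over to size(), range-based for, or the container iterators.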

View File

@ -584,7 +584,7 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list)
srslte::rwlock_read_guard lock(rwlock);
// Copy data grants
for (uint32_t i = 0; i < sched_result.nof_data_elems; i++) {
for (uint32_t i = 0; i < sched_result.data.size(); i++) {
uint32_t tb_count = 0;
// Get UE
@ -645,7 +645,7 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list)
}
// Copy RAR grants
for (uint32_t i = 0; i < sched_result.nof_rar_elems; i++) {
for (uint32_t i = 0; i < sched_result.rar.size(); i++) {
// Copy dci info
dl_sched_res->pdsch[n].dci = sched_result.rar[i].dci;
@ -680,7 +680,7 @@ int mac::get_dl_sched(uint32_t tti_tx_dl, dl_sched_list_t& dl_sched_res_list)
}
// Copy SI and Paging grants
for (uint32_t i = 0; i < sched_result.nof_bc_elems; i++) {
for (uint32_t i = 0; i < sched_result.bc.size(); i++) {
// Copy dci info
dl_sched_res->pdsch[n].dci = sched_result.bc[i].dci;
@ -900,7 +900,7 @@ int mac::get_ul_sched(uint32_t tti_tx_ul, ul_sched_list_t& ul_sched_res_list)
// Copy DCI grants
phy_ul_sched_res->nof_grants = 0;
int n = 0;
for (uint32_t i = 0; i < sched_result.nof_dci_elems; i++) {
for (uint32_t i = 0; i < sched_result.pusch.size(); i++) {
if (sched_result.pusch[i].tbs > 0) {
// Get UE
uint16_t rnti = sched_result.pusch[i].dci.rnti;
@ -943,11 +943,11 @@ int mac::get_ul_sched(uint32_t tti_tx_ul, ul_sched_list_t& ul_sched_res_list)
}
// Copy PHICH actions
for (uint32_t i = 0; i < sched_result.nof_phich_elems; i++) {
for (uint32_t i = 0; i < sched_result.phich.size(); i++) {
phy_ul_sched_res->phich[i].ack = sched_result.phich[i].phich == sched_interface::ul_sched_phich_t::ACK;
phy_ul_sched_res->phich[i].rnti = sched_result.phich[i].rnti;
}
phy_ul_sched_res->nof_phich = sched_result.nof_phich_elems;
phy_ul_sched_res->nof_phich = sched_result.phich.size();
}
// clear old buffers from all users
for (auto& u : ue_db) {

View File

@ -387,7 +387,7 @@ const cc_sched_result& sched::carrier_sched::generate_tti_result(tti_point tti_r
/* Schedule PHICH */
for (auto& ue_pair : *ue_db) {
if (cc_result->ul_sched_result.nof_phich_elems >= MAX_PHICH_LIST) {
if (cc_result->ul_sched_result.phich.size() >= MAX_PHICH_LIST) {
break;
}
tti_sched->alloc_phich(ue_pair.second.get());

View File

@ -63,7 +63,7 @@ void sf_sched_result::new_tti(tti_point tti_rx_)
bool sf_sched_result::is_ul_alloc(uint16_t rnti) const
{
for (const auto& cc : enb_cc_list) {
for (uint32_t j = 0; j < cc.ul_sched_result.nof_dci_elems; ++j) {
for (uint32_t j = 0; j < cc.ul_sched_result.pusch.size(); ++j) {
if (cc.ul_sched_result.pusch[j].dci.rnti == rnti) {
return true;
}
@ -74,7 +74,7 @@ bool sf_sched_result::is_ul_alloc(uint16_t rnti) const
bool sf_sched_result::is_dl_alloc(uint16_t rnti) const
{
for (const auto& cc : enb_cc_list) {
for (uint32_t j = 0; j < cc.dl_sched_result.nof_data_elems; ++j) {
for (uint32_t j = 0; j < cc.dl_sched_result.data.size(); ++j) {
if (cc.dl_sched_result.data[j].dci.rnti == rnti) {
return true;
}
@ -622,11 +622,10 @@ bool sf_sched::alloc_phich(sched_ue* user)
using phich_t = sched_interface::ul_sched_phich_t;
auto* ul_sf_result = &cc_results->get_cc(cc_cfg->enb_cc_idx)->ul_sched_result;
if (ul_sf_result->nof_phich_elems >= sched_interface::MAX_PHICH_LIST) {
if (ul_sf_result->phich.full()) {
logger.warning("SCHED: Maximum number of PHICH allocations has been reached");
return false;
}
phich_t& phich_item = ul_sf_result->phich[ul_sf_result->nof_phich_elems];
auto p = user->get_active_cell_index(cc_cfg->enb_cc_idx);
if (not p.first) {
@ -638,9 +637,9 @@ bool sf_sched::alloc_phich(sched_ue* user)
/* Indicate PHICH acknowledgment if needed */
if (h->has_pending_phich()) {
phich_item.phich = h->pop_pending_phich() ? phich_t::ACK : phich_t::NACK;
phich_item.rnti = user->get_rnti();
ul_sf_result->nof_phich_elems++;
ul_sf_result->phich.emplace_back();
ul_sf_result->phich.back().rnti = user->get_rnti();
ul_sf_result->phich.back().phich = h->pop_pending_phich() ? phich_t::ACK : phich_t::NACK;
return true;
}
return false;
@ -650,14 +649,9 @@ void sf_sched::set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_r
sched_interface::dl_sched_res_t* dl_result)
{
for (const auto& bc_alloc : bc_allocs) {
sched_interface::dl_sched_bc_t* bc = &dl_result->bc[dl_result->nof_bc_elems];
*bc = bc_alloc.bc_grant;
// assign NCCE/L
bc->dci.location = dci_result[bc_alloc.dci_idx]->dci_pos;
dl_result->nof_bc_elems++;
log_broadcast_allocation(*bc, bc_alloc.rbg_range, *cc_cfg);
dl_result->bc.emplace_back(bc_alloc.bc_grant);
dl_result->bc.back().dci.location = dci_result[bc_alloc.dci_idx]->dci_pos;
log_broadcast_allocation(dl_result->bc.back(), bc_alloc.rbg_range, *cc_cfg);
}
}
@ -665,15 +659,9 @@ void sf_sched::set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_
sched_interface::dl_sched_res_t* dl_result)
{
for (const auto& rar_alloc : rar_allocs) {
sched_interface::dl_sched_rar_t* rar = &dl_result->rar[dl_result->nof_rar_elems];
// Setup RAR process
*rar = rar_alloc.rar_grant;
// Assign NCCE/L
rar->dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos;
dl_result->nof_rar_elems++;
log_rar_allocation(*rar, rar_alloc.alloc_data.rbg_range);
dl_result->rar.emplace_back(rar_alloc.rar_grant);
dl_result->rar.back().dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos;
log_rar_allocation(dl_result->rar.back(), rar_alloc.alloc_data.rbg_range);
}
}
@ -682,7 +670,8 @@ void sf_sched::set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t&
sched_ue_list& ue_list)
{
for (const auto& data_alloc : data_allocs) {
sched_interface::dl_sched_data_t* data = &dl_result->data[dl_result->nof_data_elems];
dl_result->data.emplace_back();
sched_interface::dl_sched_data_t* data = &dl_result->data.back();
// Assign NCCE/L
data->dci.location = dci_result[data_alloc.dci_idx]->dci_pos;
@ -730,8 +719,6 @@ void sf_sched::set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t&
data_before,
user->get_requested_dl_bytes(cc_cfg->enb_cc_idx).stop());
logger.info("%s", srslte::to_c_str(str_buffer));
dl_result->nof_data_elems++;
}
}
@ -773,7 +760,7 @@ uci_pusch_t is_uci_included(const sf_sched* sf_sched,
needs_ack_uci = sf_sched->is_dl_alloc(user->get_rnti());
} else {
auto& dl_result = other_cc_results.enb_cc_list[enbccidx].dl_sched_result;
for (uint32_t j = 0; j < dl_result.nof_data_elems; ++j) {
for (uint32_t j = 0; j < dl_result.data.size(); ++j) {
if (dl_result.data[j].dci.rnti == user->get_rnti()) {
needs_ack_uci = true;
break;
@ -802,7 +789,7 @@ uci_pusch_t is_uci_included(const sf_sched* sf_sched,
}
for (uint32_t enbccidx = 0; enbccidx < other_cc_results.enb_cc_list.size(); ++enbccidx) {
for (uint32_t j = 0; j < other_cc_results.enb_cc_list[enbccidx].ul_sched_result.nof_dci_elems; ++j) {
for (uint32_t j = 0; j < other_cc_results.enb_cc_list[enbccidx].ul_sched_result.pusch.size(); ++j) {
// Checks all the UL grants already allocated for the given rnti
if (other_cc_results.enb_cc_list[enbccidx].ul_sched_result.pusch[j].dci.rnti == user->get_rnti()) {
auto p = user->get_active_cell_index(enbccidx);
@ -827,8 +814,6 @@ void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r
{
/* Set UL data DCI locs and format */
for (const auto& ul_alloc : ul_data_allocs) {
sched_interface::ul_sched_data_t* pusch = &ul_result->pusch[ul_result->nof_dci_elems];
auto ue_it = ue_list.find(ul_alloc.rnti);
if (ue_it == ue_list.end()) {
continue;
@ -844,8 +829,10 @@ void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r
uci_pusch_t uci_type = is_uci_included(this, *cc_results, user, cc_cfg->enb_cc_idx);
/* Generate DCI Format1A */
uint32_t total_data_before = user->get_pending_ul_data_total(get_tti_tx_ul(), cc_cfg->enb_cc_idx);
int tbs = user->generate_format0(pusch,
ul_result->pusch.emplace_back();
sched_interface::ul_sched_data_t& pusch = ul_result->pusch.back();
uint32_t total_data_before = user->get_pending_ul_data_total(get_tti_tx_ul(), cc_cfg->enb_cc_idx);
int tbs = user->generate_format0(&pusch,
get_tti_tx_ul(),
cc_cfg->enb_cc_idx,
ul_alloc.alloc,
@ -857,7 +844,7 @@ void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r
ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cc_cfg->enb_cc_idx);
uint32_t new_pending_bytes = user->get_pending_ul_new_data(get_tti_tx_ul(), cc_cfg->enb_cc_idx);
// Allow TBS=0 in case of UCI-only PUSCH
if (tbs < 0 || (tbs == 0 && pusch->dci.tb.mcs_idx != 29)) {
if (tbs < 0 || (tbs == 0 && pusch.dci.tb.mcs_idx != 29)) {
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer,
"SCHED: Error {} {} rnti=0x{:x}, pid={}, dci=({},{}), prb={}, bsr={}",
@ -865,11 +852,12 @@ void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r
ul_alloc.is_retx() ? "retx" : "tx",
user->get_rnti(),
h->get_id(),
pusch->dci.location.L,
pusch->dci.location.ncce,
pusch.dci.location.L,
pusch.dci.location.ncce,
ul_alloc.alloc,
new_pending_bytes);
logger.warning("%s", srslte::to_c_str(str_buffer));
ul_result->pusch.pop_back();
continue;
}
@ -884,8 +872,8 @@ void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r
user->get_rnti(),
cc_cfg->enb_cc_idx,
h->get_id(),
pusch->dci.location.L,
pusch->dci.location.ncce,
pusch.dci.location.L,
pusch.dci.location.ncce,
ul_alloc.alloc,
h->nof_retx(0),
tbs,
@ -895,9 +883,7 @@ void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_r
logger.info("%s", srslte::to_c_str(str_buffer));
}
pusch->current_tx_nb = h->nof_retx(0);
ul_result->nof_dci_elems++;
pusch.current_tx_nb = h->nof_retx(0);
}
}
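
The UL grant path above also shows the rollback half of the new idiom: emplace_back() claims the next slot, back() is filled in place, and pop_back() drops the slot again when the grant turns out to be invalid (the tbs < 0 case), replacing the old pattern of writing into pusch[nof_dci_elems] and bumping the counter only on success. A self-contained sketch of that pattern follows; grant_t and compute_tbs() are placeholders rather than srsLTE APIs, and std::vector stands in for the bounded_vector used in the scheduler.

```cpp
#include <cstdio>
#include <vector>

// Placeholder grant type standing in for sched_interface::ul_sched_data_t.
struct grant_t {
  int tbs = 0;
};

// Stand-in for the real DCI/TBS generation (sched_ue::generate_format0 in the diff).
int compute_tbs(bool grant_ok) { return grant_ok ? 100 : -1; }

template <typename Vec>
bool alloc_grant(Vec& out, bool grant_ok)
{
  out.emplace_back();         // reserve the next slot; no separate nof_*_elems counter
  grant_t& g = out.back();    // fill the slot in place
  g.tbs      = compute_tbs(grant_ok);
  if (g.tbs < 0) {
    out.pop_back();           // roll the slot back instead of leaving a half-filled entry
    return false;
  }
  return true;
}

int main()
{
  std::vector<grant_t> res;   // srslte::bounded_vector in the scheduler
  alloc_grant(res, true);     // valid grant -> stays in the result
  alloc_grant(res, false);    // invalid grant -> rolled back
  std::printf("allocated %zu grants\n", res.size());
}
```

The same append-then-fill shape is used earlier in this file for the BC, RAR, DL data and PHICH results.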
@ -922,7 +908,7 @@ void sf_sched::generate_sched_results(sched_ue_list& ue_db)
/* Resume UL HARQs with pending retxs that did not get allocated */
using phich_t = sched_interface::ul_sched_phich_t;
auto& phich_list = cc_result->ul_sched_result.phich;
for (uint32_t i = 0; i < cc_result->ul_sched_result.nof_phich_elems; ++i) {
for (uint32_t i = 0; i < cc_result->ul_sched_result.phich.size(); ++i) {
auto& phich = phich_list[i];
if (phich.phich == phich_t::NACK) {
auto& ue = *ue_db[phich.rnti];

View File

@ -116,8 +116,7 @@ void log_dl_cc_results(srslog::basic_logger& logger, uint32_t enb_cc_idx, const
}
custom_mem_buffer strbuf;
for (uint32_t i = 0; i < result.nof_data_elems; ++i) {
const dl_sched_data_t& data = result.data[i];
for (const auto& data : result.data) {
if (logger.debug.enabled()) {
fill_dl_cc_result_debug(strbuf, data);
} else {
@ -142,7 +141,7 @@ void log_phich_cc_results(srslog::basic_logger& logger,
return;
}
custom_mem_buffer strbuf;
for (uint32_t i = 0; i < result.nof_phich_elems; ++i) {
for (uint32_t i = 0; i < result.phich.size(); ++i) {
const phich_t& phich = result.phich[i];
const char* prefix = strbuf.size() > 0 ? " | " : "";
const char* val = phich.phich == phich_t::ACK ? "ACK" : "NACK";

View File

@ -126,21 +126,21 @@ public:
{
for (uint32_t cc = 0; cc < get_cell_params().size(); ++cc) {
uint32_t dl_tbs = 0, ul_tbs = 0, dl_mcs = 0, ul_mcs = 0;
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_data_elems; ++i) {
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].data.size(); ++i) {
dl_tbs += sf_out.dl_cc_result[cc].data[i].tbs[0];
dl_tbs += sf_out.dl_cc_result[cc].data[i].tbs[1];
dl_mcs = std::max(dl_mcs, sf_out.dl_cc_result[cc].data[i].dci.tb[0].mcs_idx);
}
total_stats.mean_dl_tbs.push(dl_tbs);
if (sf_out.dl_cc_result[cc].nof_data_elems > 0) {
if (sf_out.dl_cc_result[cc].data.size() > 0) {
total_stats.avg_dl_mcs.push(dl_mcs);
}
for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].nof_dci_elems; ++i) {
for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].pusch.size(); ++i) {
ul_tbs += sf_out.ul_cc_result[cc].pusch[i].tbs;
ul_mcs = std::max(ul_mcs, sf_out.ul_cc_result[cc].pusch[i].dci.tb.mcs_idx);
}
total_stats.mean_ul_tbs.push(ul_tbs);
if (sf_out.ul_cc_result[cc].nof_dci_elems) {
if (not sf_out.ul_cc_result[cc].pusch.empty()) {
total_stats.avg_ul_mcs.push(ul_mcs);
}
}

View File

@ -177,7 +177,7 @@ int test_scell_activation(uint32_t sim_number, test_scell_activation_params para
// TEST: When a DL newtx takes place, it should also encode the CE
for (uint32_t i = 0; i < 100; ++i) {
if (tester.tti_info.dl_sched_result[params.pcell_idx].nof_data_elems > 0) {
if (not tester.tti_info.dl_sched_result[params.pcell_idx].data.empty()) {
// DL data was allocated
if (tester.tti_info.dl_sched_result[params.pcell_idx].data[0].nof_pdu_elems[0] > 0) {
// it is a new DL tx

View File

@ -55,7 +55,7 @@ int test_pusch_collisions(const sf_output_res_t& sf_out, uint32_t enb_cc_idx, co
try_ul_fill({cell_params.cfg.cell.nof_prb - pucch_nrb, (uint32_t)cell_params.cfg.cell.nof_prb}, "PUCCH", strict);
/* TEST: check collisions in the UL PUSCH */
for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_result.pusch.size(); ++i) {
uint32_t L, RBstart;
srslte_ra_type2_from_riv(ul_result.pusch[i].dci.type2_alloc.riv, &L, &RBstart, nof_prb, nof_prb);
strict = ul_result.pusch[i].needs_pdcch or nof_prb != 6; // Msg3 may collide with PUCCH at PRB==6
@ -113,12 +113,12 @@ int test_pdsch_collisions(const sf_output_res_t& sf_out, uint32_t enb_cc_idx, co
};
// Decode BC allocations, check collisions, and fill cumulative mask
for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) {
for (uint32_t i = 0; i < dl_result.bc.size(); ++i) {
TESTASSERT(try_dl_mask_fill(dl_result.bc[i].dci, "BC") == SRSLTE_SUCCESS);
}
// Decode RAR allocations, check collisions, and fill cumulative mask
for (uint32_t i = 0; i < dl_result.nof_rar_elems; ++i) {
for (uint32_t i = 0; i < dl_result.rar.size(); ++i) {
TESTASSERT(try_dl_mask_fill(dl_result.rar[i].dci, "RAR") == SRSLTE_SUCCESS);
}
@ -131,7 +131,7 @@ int test_pdsch_collisions(const sf_output_res_t& sf_out, uint32_t enb_cc_idx, co
}
// Decode Data allocations, check collisions and fill cumulative mask
for (uint32_t i = 0; i < dl_result.nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_result.data.size(); ++i) {
TESTASSERT(try_dl_mask_fill(dl_result.data[i].dci, "data") == SRSLTE_SUCCESS);
}
@ -170,8 +170,8 @@ int test_sib_scheduling(const sf_output_res_t& sf_out, uint32_t enb_cc_idx)
bool sib1_expected = ((sfn % 2) == 0) and sf_idx == 5;
using bc_elem = const sched_interface::dl_sched_bc_t;
bc_elem* bc_begin = &dl_result.bc[0];
bc_elem* bc_end = &dl_result.bc[dl_result.nof_bc_elems];
bc_elem* bc_begin = dl_result.bc.begin();
bc_elem* bc_end = dl_result.bc.end();
/* Test if SIB1 was correctly scheduled */
auto it = std::find_if(bc_begin, bc_end, [](bc_elem& elem) { return elem.index == 0; });
@ -229,7 +229,7 @@ int test_pdcch_collisions(const sf_output_res_t& sf_out,
};
/* TEST: verify there are no dci collisions for UL, DL data, BC, RAR */
for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_result.pusch.size(); ++i) {
const auto& pusch = ul_result.pusch[i];
if (not pusch.needs_pdcch) {
// In case of non-adaptive retx or Msg3
@ -237,13 +237,13 @@ int test_pdcch_collisions(const sf_output_res_t& sf_out,
}
try_cce_fill(pusch.dci.location, "UL");
}
for (uint32_t i = 0; i < dl_result.nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_result.data.size(); ++i) {
try_cce_fill(dl_result.data[i].dci.location, "DL data");
}
for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) {
for (uint32_t i = 0; i < dl_result.bc.size(); ++i) {
try_cce_fill(dl_result.bc[i].dci.location, "DL BC");
}
for (uint32_t i = 0; i < dl_result.nof_rar_elems; ++i) {
for (uint32_t i = 0; i < dl_result.rar.size(); ++i) {
try_cce_fill(dl_result.rar[i].dci.location, "DL RAR");
}
@ -262,7 +262,7 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx)
const auto& ul_result = sf_out.ul_cc_result[enb_cc_idx];
std::set<uint16_t> alloc_rntis;
for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_result.pusch.size(); ++i) {
const auto& pusch = ul_result.pusch[i];
uint16_t rnti = pusch.dci.rnti;
CONDERROR(pusch.tbs == 0, "Allocated PUSCH with invalid TBS=%d", pusch.tbs);
@ -281,7 +281,7 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx)
}
alloc_rntis.clear();
for (uint32_t i = 0; i < dl_result.nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_result.data.size(); ++i) {
auto& data = dl_result.data[i];
uint16_t rnti = data.dci.rnti;
CONDERROR(data.tbs[0] == 0 and data.tbs[1] == 0, "Allocated DL data has empty TBS");
@ -321,7 +321,7 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx)
return SRSLTE_SUCCESS;
};
for (uint32_t i = 0; i < dl_result.nof_bc_elems; ++i) {
for (uint32_t i = 0; i < dl_result.bc.size(); ++i) {
const sched_interface::dl_sched_bc_t& bc = dl_result.bc[i];
if (bc.type == sched_interface::dl_sched_bc_t::BCCH) {
CONDERROR(bc.tbs < cell_params.cfg.sibs[bc.index].len,
@ -337,7 +337,7 @@ int test_dci_content_common(const sf_output_res_t& sf_out, uint32_t enb_cc_idx)
TESTASSERT(test_ra_bc_coderate(bc.tbs, bc.dci) == SRSLTE_SUCCESS);
}
for (uint32_t i = 0; i < dl_result.nof_rar_elems; ++i) {
for (uint32_t i = 0; i < dl_result.rar.size(); ++i) {
const auto& rar = dl_result.rar[i];
CONDERROR(rar.tbs == 0, "Allocated RAR process with invalid TBS=%d", rar.tbs);

View File

@ -78,7 +78,7 @@ int ue_sim::update(const sf_output_res_t& sf_out)
void ue_sim::update_dl_harqs(const sf_output_res_t& sf_out)
{
for (uint32_t cc = 0; cc < sf_out.cc_params.size(); ++cc) {
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_data_elems; ++i) {
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].data.size(); ++i) {
const auto& data = sf_out.dl_cc_result[cc].data[i];
if (data.dci.rnti != ctxt.rnti) {
continue;
@ -107,7 +107,7 @@ void ue_sim::update_ul_harqs(const sf_output_res_t& sf_out)
uint32_t pid = to_tx_ul(sf_out.tti_rx).to_uint() % (FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS);
for (uint32_t cc = 0; cc < sf_out.cc_params.size(); ++cc) {
// Update UL harqs with PHICH info
for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].nof_phich_elems; ++i) {
for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].phich.size(); ++i) {
const auto& phich = sf_out.ul_cc_result[cc].phich[i];
if (phich.rnti != ctxt.rnti) {
continue;
@ -128,7 +128,7 @@ void ue_sim::update_ul_harqs(const sf_output_res_t& sf_out)
}
// Update UL harqs with PUSCH grants
for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].nof_dci_elems; ++i) {
for (uint32_t i = 0; i < sf_out.ul_cc_result[cc].pusch.size(); ++i) {
const auto& data = sf_out.ul_cc_result[cc].pusch[i];
if (data.dci.rnti != ctxt.rnti) {
continue;
@ -171,7 +171,7 @@ void ue_sim::update_conn_state(const sf_output_res_t& sf_out)
srslte::tti_interval rar_window{ctxt.prach_tti_rx + 3, ctxt.prach_tti_rx + 3 + rar_win_size};
if (rar_window.contains(tti_tx_dl)) {
for (uint32_t i = 0; i < dl_cc_result.nof_rar_elems; ++i) {
for (uint32_t i = 0; i < dl_cc_result.rar.size(); ++i) {
for (uint32_t j = 0; j < dl_cc_result.rar[i].msg3_grant.size(); ++j) {
const auto& data = dl_cc_result.rar[i].msg3_grant[j].data;
if (data.prach_tti == (uint32_t)ctxt.prach_tti_rx.to_uint() and data.preamble_idx == ctxt.preamble_idx) {
@ -188,7 +188,7 @@ void ue_sim::update_conn_state(const sf_output_res_t& sf_out)
srslte::tti_point expected_msg3_tti_rx = ctxt.rar_tti_rx + MSG3_DELAY_MS;
if (expected_msg3_tti_rx == sf_out.tti_rx) {
// Msg3 should exist
for (uint32_t i = 0; i < ul_cc_result.nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_cc_result.pusch.size(); ++i) {
if (ul_cc_result.pusch[i].dci.rnti == ctxt.rnti) {
ctxt.msg3_tti_rx = sf_out.tti_rx;
}
@ -198,7 +198,7 @@ void ue_sim::update_conn_state(const sf_output_res_t& sf_out)
if (ctxt.msg3_tti_rx.is_valid() and not ctxt.msg4_tti_rx.is_valid()) {
// Msg3 scheduled, but Msg4 not yet scheduled
for (uint32_t i = 0; i < dl_cc_result.nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_cc_result.data.size(); ++i) {
if (dl_cc_result.data[i].dci.rnti == ctxt.rnti) {
for (uint32_t j = 0; j < dl_cc_result.data[i].nof_pdu_elems[0]; ++j) {
if (dl_cc_result.data[i].pdu[0][j].lcid == (uint32_t)srslte::dl_sch_lcid::CON_RES_ID) {

View File

@ -89,12 +89,12 @@ void sched_result_stats::process_results(tti_point
const std::vector<sched_interface::ul_sched_res_t>& ul_result)
{
for (uint32_t ccidx = 0; ccidx < dl_result.size(); ++ccidx) {
for (uint32_t i = 0; i < dl_result[ccidx].nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_result[ccidx].data.size(); ++i) {
user_stats* user = get_user(dl_result[ccidx].data[i].dci.rnti);
user->tot_dl_sched_data[ccidx] += dl_result[ccidx].data[i].tbs[0];
user->tot_dl_sched_data[ccidx] += dl_result[ccidx].data[i].tbs[1];
}
for (uint32_t i = 0; i < ul_result[ccidx].nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_result[ccidx].pusch.size(); ++i) {
user_stats* user = get_user(ul_result[ccidx].pusch[i].dci.rnti);
user->tot_ul_sched_data[ccidx] += ul_result[ccidx].pusch[i].tbs;
}

View File

@ -158,7 +158,7 @@ int sched_tester::process_results()
int sched_tester::test_harqs()
{
/* check consistency of DL harq procedures and allocations */
for (uint32_t i = 0; i < tti_info.dl_sched_result[CARRIER_IDX].nof_data_elems; ++i) {
for (uint32_t i = 0; i < tti_info.dl_sched_result[CARRIER_IDX].data.size(); ++i) {
const auto& data = tti_info.dl_sched_result[CARRIER_IDX].data[i];
uint32_t h_id = data.dci.pid;
uint16_t rnti = data.dci.rnti;
@ -171,7 +171,7 @@ int sched_tester::test_harqs()
}
/* Check PHICH allocations */
for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].nof_phich_elems; ++i) {
for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].phich.size(); ++i) {
const auto& phich = tti_info.ul_sched_result[CARRIER_IDX].phich[i];
const auto& hprev = tti_data.ue_data[phich.rnti].ul_harq;
const auto* h = ue_db[phich.rnti]->get_ul_harq(srsenb::to_tx_ul(tti_rx), CARRIER_IDX);
@ -182,7 +182,7 @@ int sched_tester::test_harqs()
if (not hprev.is_empty()) {
// In case it was resumed
CONDERROR(h == nullptr or h->is_empty(), "Cannot resume empty UL harq");
for (uint32_t j = 0; j < tti_info.ul_sched_result[CARRIER_IDX].nof_dci_elems; ++j) {
for (uint32_t j = 0; j < tti_info.ul_sched_result[CARRIER_IDX].pusch.size(); ++j) {
auto& pusch = tti_info.ul_sched_result[CARRIER_IDX].pusch[j];
CONDERROR(pusch.dci.rnti == phich.rnti, "Cannot send PHICH::ACK for same harq that got UL grant.");
}
@ -198,7 +198,7 @@ int sched_tester::test_harqs()
int sched_tester::update_ue_stats()
{
// update ue stats with number of allocated UL PRBs
for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].nof_dci_elems; ++i) {
for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].pusch.size(); ++i) {
const auto& pusch = tti_info.ul_sched_result[CARRIER_IDX].pusch[i];
uint32_t L, RBstart;
srslte_ra_type2_from_riv(pusch.dci.type2_alloc.riv,
@ -214,7 +214,7 @@ int sched_tester::update_ue_stats()
// update ue stats with number of DL RB allocations
srslte::bounded_bitset<100, true> alloc_mask(sched_cell_params[CARRIER_IDX].cfg.cell.nof_prb);
for (uint32_t i = 0; i < tti_info.dl_sched_result[CARRIER_IDX].nof_data_elems; ++i) {
for (uint32_t i = 0; i < tti_info.dl_sched_result[CARRIER_IDX].data.size(); ++i) {
auto& data = tti_info.dl_sched_result[CARRIER_IDX].data[i];
TESTASSERT(srsenb::extract_dl_prbmask(sched_cell_params[CARRIER_IDX].cfg.cell,
tti_info.dl_sched_result[CARRIER_IDX].data[i].dci,

View File

@ -43,18 +43,16 @@ int sim_ue_ctxt_t::enb_to_ue_cc_idx(uint32_t enb_cc_idx) const
const pusch_t* find_pusch_grant(uint16_t rnti, const sched_interface::ul_sched_res_t& ul_cc_res)
{
const pusch_t* ptr = std::find_if(&ul_cc_res.pusch[0],
&ul_cc_res.pusch[ul_cc_res.nof_dci_elems],
[rnti](const pusch_t& pusch) { return pusch.dci.rnti == rnti; });
return ptr == &ul_cc_res.pusch[ul_cc_res.nof_dci_elems] ? nullptr : ptr;
const pusch_t* ptr = std::find_if(
ul_cc_res.pusch.begin(), ul_cc_res.pusch.end(), [rnti](const pusch_t& pusch) { return pusch.dci.rnti == rnti; });
return ptr == ul_cc_res.pusch.end() ? nullptr : ptr;
}
const pdsch_t* find_pdsch_grant(uint16_t rnti, const sched_interface::dl_sched_res_t& dl_cc_res)
{
const pdsch_t* ptr = std::find_if(&dl_cc_res.data[0],
&dl_cc_res.data[dl_cc_res.nof_data_elems],
[rnti](const pdsch_t& pdsch) { return pdsch.dci.rnti == rnti; });
return ptr == &dl_cc_res.data[dl_cc_res.nof_data_elems] ? nullptr : ptr;
const pdsch_t* ptr = std::find_if(
dl_cc_res.data.begin(), dl_cc_res.data.end(), [rnti](const pdsch_t& pdsch) { return pdsch.dci.rnti == rnti; });
return ptr == dl_cc_res.data.end() ? nullptr : ptr;
}
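
Since the new containers expose begin()/end(), the raw pointer ranges previously built from nof_dci_elems and nof_data_elems can be handed straight to the standard algorithms, as find_pusch_grant and find_pdsch_grant above now do. A small standalone illustration, using a placeholder pusch_stub_t and a plain std::array in place of the bounded_vector:

```cpp
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdio>

// Placeholder type; the real lookup runs over sched_interface::ul_sched_data_t.
struct pusch_stub_t {
  uint16_t rnti = 0;
};

// Return the grant for the given RNTI, or nullptr if none was allocated.
const pusch_stub_t* find_grant(const pusch_stub_t* begin, const pusch_stub_t* end, uint16_t rnti)
{
  const pusch_stub_t* it =
      std::find_if(begin, end, [rnti](const pusch_stub_t& p) { return p.rnti == rnti; });
  return it == end ? nullptr : it;
}

int main()
{
  std::array<pusch_stub_t, 4> pusch{};
  for (uint16_t i = 0; i < pusch.size(); ++i) {
    pusch[i].rnti = static_cast<uint16_t>(0x46 + i);
  }
  const pusch_stub_t* hit = find_grant(pusch.data(), pusch.data() + pusch.size(), 0x47);
  std::printf("found=%d\n", hit != nullptr);
}
```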
int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt,
@ -121,7 +119,7 @@ int test_pdsch_grant(const sim_enb_ctxt_t& enb_ctxt,
int test_dl_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
{
for (uint32_t cc = 0; cc < enb_ctxt.cell_params.size(); ++cc) {
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_data_elems; ++i) {
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].data.size(); ++i) {
const sched_interface::dl_sched_data_t& data = sf_out.dl_cc_result[cc].data[i];
CONDERROR(
enb_ctxt.ue_db.count(data.dci.rnti) == 0, "Allocated DL grant for non-existent rnti=0x%x", data.dci.rnti);
@ -136,10 +134,10 @@ int test_ul_sched_result(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t&
uint32_t pid = to_tx_ul(sf_out.tti_rx).to_uint() % (FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS);
for (size_t cc = 0; cc < enb_ctxt.cell_params.size(); ++cc) {
const auto* phich_begin = &sf_out.ul_cc_result[cc].phich[0];
const auto* phich_end = &sf_out.ul_cc_result[cc].phich[sf_out.ul_cc_result[cc].nof_phich_elems];
const auto* pusch_begin = &sf_out.ul_cc_result[cc].pusch[0];
const auto* pusch_end = &sf_out.ul_cc_result[cc].pusch[sf_out.ul_cc_result[cc].nof_dci_elems];
const auto* phich_begin = sf_out.ul_cc_result[cc].phich.begin();
const auto* phich_end = sf_out.ul_cc_result[cc].phich.end();
const auto* pusch_begin = sf_out.ul_cc_result[cc].pusch.begin();
const auto* pusch_end = sf_out.ul_cc_result[cc].pusch.end();
// TEST: rnti must exist for all PHICH
CONDERROR(std::any_of(phich_begin,
@ -250,14 +248,14 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
CONDERROR(not ue.rar_tti_rx.is_valid() and tti_tx_dl > rar_window.stop(),
"rnti=0x%x RAR not scheduled within the RAR Window",
rnti);
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].nof_rar_elems; ++i) {
for (uint32_t i = 0; i < sf_out.dl_cc_result[cc].rar.size(); ++i) {
CONDERROR(sf_out.dl_cc_result[cc].rar[i].dci.rnti == rnti,
"No RAR allocations allowed outside of user RAR window");
}
} else {
// Inside RAR window
uint32_t nof_rars = ue.rar_tti_rx.is_valid() ? 1 : 0;
for (uint32_t i = 0; i < dl_cc_res.nof_rar_elems; ++i) {
for (uint32_t i = 0; i < dl_cc_res.rar.size(); ++i) {
for (const auto& grant : dl_cc_res.rar[i].msg3_grant) {
const auto& data = grant.data;
if (data.prach_tti == (uint32_t)ue.prach_tti_rx.to_uint() and data.preamble_idx == ue.preamble_idx) {
@ -278,7 +276,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
if (expected_msg3_tti_rx == sf_out.tti_rx) {
// Msg3 should exist
uint32_t msg3_count = 0;
for (uint32_t i = 0; i < ul_cc_res.nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_cc_res.pusch.size(); ++i) {
if (ul_cc_res.pusch[i].dci.rnti == rnti) {
msg3_count++;
CONDERROR(ul_cc_res.pusch[i].needs_pdcch, "Msg3 allocations do not require PDCCH");
@ -295,7 +293,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
if (ue.msg3_tti_rx.is_valid() and not ue.msg4_tti_rx.is_valid()) {
// Msg3 scheduled, but Msg4 not yet scheduled
uint32_t msg4_count = 0;
for (uint32_t i = 0; i < dl_cc_res.nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_cc_res.data.size(); ++i) {
if (dl_cc_res.data[i].dci.rnti == rnti) {
CONDERROR(to_tx_dl(sf_out.tti_rx) < to_tx_ul(ue.msg3_tti_rx),
"Msg4 cannot be scheduled without Msg3 being tx");
@ -316,7 +314,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
if (not ue.msg4_tti_rx.is_valid()) {
// TEST: No UL allocs except for Msg3 before Msg4
for (uint32_t i = 0; i < ul_cc_res.nof_dci_elems; ++i) {
for (uint32_t i = 0; i < ul_cc_res.pusch.size(); ++i) {
if (ul_cc_res.pusch[i].dci.rnti == rnti) {
CONDERROR(not ue.rar_tti_rx.is_valid(), "No UL allocs before RAR allowed");
srslte::tti_point expected_msg3_tti = ue.rar_tti_rx + MSG3_DELAY_MS;
@ -331,7 +329,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
// TEST: No DL allocs before Msg3
if (not ue.msg3_tti_rx.is_valid()) {
for (uint32_t i = 0; i < dl_cc_res.nof_data_elems; ++i) {
for (uint32_t i = 0; i < dl_cc_res.data.size(); ++i) {
CONDERROR(dl_cc_res.data[i].dci.rnti == rnti, "No DL data allocs allowed before Msg3 is scheduled");
}
}
@ -339,7 +337,7 @@ int test_ra(const sim_enb_ctxt_t& enb_ctxt, const sf_output_res_t& sf_out)
}
// TEST: Ensure there are no spurious RARs that do not belong to any user
for (uint32_t i = 0; i < dl_cc_res.nof_rar_elems; ++i) {
for (uint32_t i = 0; i < dl_cc_res.rar.size(); ++i) {
for (uint32_t j = 0; j < dl_cc_res.rar[i].msg3_grant.size(); ++j) {
uint32_t prach_tti = dl_cc_res.rar[i].msg3_grant[j].data.prach_tti;
uint32_t preamble_idx = dl_cc_res.rar[i].msg3_grant[j].data.preamble_idx;