fix msg3 delayed allocation and update test

Francisco Paisana 2020-03-12 18:09:45 +00:00 committed by Xavier Arteaga
parent e8b8c9922e
commit 4217dba7e0
5 changed files with 101 additions and 88 deletions


@@ -116,10 +116,9 @@ public:
   explicit ra_sched(const sched_cell_params_t& cfg_, std::map<uint16_t, sched_ue>& ue_db_);
   void dl_sched(sf_sched* tti_sched);
-  void ul_sched(sf_sched* tti_sched);
+  void ul_sched(sf_sched* sf_dl_sched, sf_sched* sf_msg3_sched);
   int  dl_rach_info(dl_sched_rar_info_t rar_info);
   void reset();
-  void sched_msg3(sf_sched* sf_msg3_sched, const sched_interface::dl_sched_res_t& dl_sched_result);
 private:
   // args


@@ -235,9 +235,10 @@ public:
   alloc_outcome_t alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload);
   std::pair<alloc_outcome_t, uint32_t> alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant);
   bool reserve_dl_rbgs(uint32_t rbg_start, uint32_t rbg_end) { return tti_alloc.reserve_dl_rbgs(rbg_start, rbg_end); }
+  const std::vector<rar_alloc_t>& get_allocated_rars() const { return rar_allocs; }
   // UL alloc methods
-  alloc_outcome_t alloc_msg3(sched_ue* user, const pending_msg3_t& msg3);
+  alloc_outcome_t alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant);
   alloc_outcome_t
   alloc_ul(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, sf_sched::ul_alloc_t::type_t alloc_type, uint32_t mcs = 0);
   bool reserve_ul_prbs(const prbmask_t& ulmask, bool strict) { return tti_alloc.reserve_ul_prbs(ulmask, strict); }


@@ -149,7 +149,7 @@ ra_sched::ra_sched(const sched_cell_params_t& cfg_, std::map<uint16_t, sched_ue>
 // Schedules RAR
 // On every call to this function, we schedule the oldest RAR which is still within the window. If outside the window we
 // discard it.
-void ra_sched::dl_sched(srsenb::sf_sched* tti_sched)
+void ra_sched::dl_sched(sf_sched* tti_sched)
 {
   uint32_t tti_tx_dl = tti_sched->get_tti_tx_dl();
   rar_aggr_level     = 2;
@@ -179,22 +179,24 @@ void ra_sched::dl_sched(srsenb::sf_sched* tti_sched)
     // Try to schedule DCI + RBGs for RAR Grant
     std::pair<alloc_outcome_t, uint32_t> ret = tti_sched->alloc_rar(rar_aggr_level, rar);
-    if (ret.first == alloc_outcome_t::SUCCESS) {
-      // if all RAR grant allocations were successful
-      if (ret.second == rar.nof_grants) {
-        // Remove pending RAR
-        pending_rars.pop_front();
-      } else if (ret.second > 0) {
-        // keep the RAR grants that were not scheduled, so we can schedule in next TTI
-        std::copy(&rar.msg3_grant[ret.second], &rar.msg3_grant[rar.nof_grants], &rar.msg3_grant[0]);
-        rar.nof_grants -= ret.second;
-      }
-    } else if (ret.first == alloc_outcome_t::RB_COLLISION) {
+    if (ret.first == alloc_outcome_t::RB_COLLISION) {
       // there are not enough RBs for RAR or Msg3 allocation. We can skip this TTI
       return;
     }
-    // try to scheduler next RAR with different RA-RNTI
+    if (ret.first != alloc_outcome_t::SUCCESS) {
+      // try to scheduler next RAR with different RA-RNTI
+      continue;
+    }
+
+    uint32_t nof_rar_allocs = ret.second;
+    if (nof_rar_allocs == rar.nof_grants) {
+      // all RAR grants were allocated. Remove pending RAR
+      pending_rars.pop_front();
+    } else {
+      // keep the RAR grants that were not scheduled, so we can schedule in next TTI
+      std::copy(&rar.msg3_grant[nof_rar_allocs], &rar.msg3_grant[rar.nof_grants], &rar.msg3_grant[0]);
+      rar.nof_grants -= nof_rar_allocs;
+    }
   }
 }
@@ -231,34 +233,31 @@ int ra_sched::dl_rach_info(dl_sched_rar_info_t rar_info)
   return SRSLTE_SUCCESS;
 }

-void ra_sched::reset()
+//! Schedule Msg3 grants in UL based on allocated RARs
+void ra_sched::ul_sched(sf_sched* sf_dl_sched, sf_sched* sf_msg3_sched)
 {
-  pending_rars.clear();
-}
-
-void ra_sched::sched_msg3(sf_sched* sf_msg3_sched, const sched_interface::dl_sched_res_t& dl_sched_result)
-{
-  // Go through all scheduled RARs, and pre-allocate Msg3s in UL channel accordingly
-  for (uint32_t i = 0; i < dl_sched_result.nof_rar_elems; ++i) {
-    for (uint32_t j = 0; j < dl_sched_result.rar[i].nof_grants; ++j) {
-      auto& grant = dl_sched_result.rar[i].msg3_grant[j];
-
-      sf_sched::pending_msg3_t msg3;
-      srslte_ra_type2_from_riv(grant.grant.rba, &msg3.L, &msg3.n_prb, cc_cfg->nof_prb(), cc_cfg->nof_prb());
-      msg3.mcs  = grant.grant.trunc_mcs;
-      msg3.rnti = grant.data.temp_crnti;
-
-      auto it = ue_db->find(msg3.rnti);
-      if (it == ue_db->end() or not sf_msg3_sched->alloc_msg3(&it->second, msg3)) {
-        log_h->error(
-            "SCHED: Failed to allocate Msg3 for rnti=0x%x at tti=%d\n", msg3.rnti, sf_msg3_sched->get_tti_tx_ul());
+  const std::vector<sf_sched::rar_alloc_t>& alloc_rars = sf_dl_sched->get_allocated_rars();
+
+  for (const auto& rar : alloc_rars) {
+    for (uint32_t j = 0; j < rar.rar_grant.nof_grants; ++j) {
+      const auto& msg3grant = rar.rar_grant.msg3_grant[j];
+
+      uint16_t crnti   = msg3grant.data.temp_crnti;
+      auto     user_it = ue_db->find(crnti);
+      if (user_it == ue_db->end() or not sf_msg3_sched->alloc_msg3(&user_it->second, msg3grant)) {
+        log_h->error("SCHED: Failed to allocate Msg3 for rnti=0x%x at tti=%d\n", crnti, sf_msg3_sched->get_tti_tx_ul());
       } else {
-        log_h->debug("SCHED: Queueing Msg3 for rnti=0x%x at tti=%d\n", msg3.rnti, sf_msg3_sched->get_tti_tx_ul());
+        log_h->debug("SCHED: Queueing Msg3 for rnti=0x%x at tti=%d\n", crnti, sf_msg3_sched->get_tti_tx_ul());
      }
    }
  }
 }

+void ra_sched::reset()
+{
+  pending_rars.clear();
+}
+
 /*******************************************************
  *                 Carrier scheduling
  *******************************************************/
@@ -338,6 +337,10 @@ const sf_sched_result& sched::carrier_sched::generate_tti_result(uint32_t tti_rx
     /* Schedule RAR */
     ra_sched_ptr->dl_sched(tti_sched);
+
+    /* Schedule Msg3 */
+    sf_sched* sf_msg3_sched = get_sf_sched(tti_rx + MSG3_DELAY_MS);
+    ra_sched_ptr->ul_sched(tti_sched, sf_msg3_sched);
   }

   /* Prioritize PDCCH scheduling for DL and UL data in a RoundRobin fashion */
@@ -355,12 +358,6 @@ const sf_sched_result& sched::carrier_sched::generate_tti_result(uint32_t tti_rx
   /* Select the winner DCI allocation combination, store all the scheduling results */
   tti_sched->generate_sched_results(sf_result);

-  /* Enqueue Msg3s derived from allocated RARs */
-  if (dl_active) {
-    sf_sched* sf_msg3_sched = get_sf_sched(tti_rx + MSG3_DELAY_MS);
-    ra_sched_ptr->sched_msg3(sf_msg3_sched, sf_result->dl_sched_result);
-  }
-
   /* Reset ue harq pending ack state, clean-up blocked pids */
   for (auto& user : *ue_db) {
     user.second.finish_tti(sf_result->tti_params, enb_cc_idx);
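
For reference, a minimal standalone sketch of the pattern the new call relies on: get_sf_sched(tti_rx + MSG3_DELAY_MS) hands back the subframe scheduler that will run MSG3_DELAY_MS TTIs later, so the Msg3 grant can be reserved there at the same point where the RAR itself is allocated, instead of after the DL results are generated. Everything below (ring size, struct and helper names, MSG3_DELAY_MS = 2) is an illustrative assumption, not code from this commit.

// Standalone sketch (not srsENB code) of a TTI-indexed pool of per-subframe schedulers.
#include <array>
#include <cstdint>
#include <cstdio>
#include <vector>

static const uint32_t TTI_PERIOD    = 10240; // 1024 frames x 10 subframes
static const uint32_t MSG3_DELAY_MS = 2;     // assumed value of the srsENB constant

struct subframe_sched {
  uint32_t              tti_rx = 0;
  std::vector<uint16_t> msg3_rntis; // stand-in for the real pending Msg3 allocations
};

// Small pool of per-subframe schedulers; one slot is reused every 8 TTIs.
static std::array<subframe_sched, 8> sf_pool;

subframe_sched& get_sf_sched(uint32_t tti_rx)
{
  subframe_sched& sf = sf_pool[tti_rx % sf_pool.size()];
  if (sf.tti_rx != tti_rx) { // slot was last used for an older TTI: recycle it
    sf        = {};
    sf.tti_rx = tti_rx;
  }
  return sf;
}

int main()
{
  uint32_t tti_rx = 10239; // right before the TTI counter wraps
  // DL side schedules the RAR for tti_rx; UL side queues Msg3 MSG3_DELAY_MS TTIs ahead.
  subframe_sched& msg3_sf = get_sf_sched((tti_rx + MSG3_DELAY_MS) % TTI_PERIOD);
  msg3_sf.msg3_rntis.push_back(0x46);
  std::printf("Msg3 queued in the sf_sched for tti_rx=%u\n", msg3_sf.tti_rx); // prints tti_rx=1
  return 0;
}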


@@ -558,8 +558,8 @@ std::pair<alloc_outcome_t, uint32_t> sf_sched::alloc_rar(uint32_t aggr_lvl, cons
       last_msg3_prb += msg3_grant_size;
     }
     rar_allocs.emplace_back(ret2.second, rar_grant);
     break;
   }

   if (ret.first != alloc_outcome_t::SUCCESS) {
@@ -898,13 +898,17 @@ void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
   }
 }

-alloc_outcome_t sf_sched::alloc_msg3(sched_ue* user, const pending_msg3_t& msg3)
+alloc_outcome_t sf_sched::alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant)
 {
-  // Allocate RBGs and HARQ for pending Msg3
-  ul_harq_proc::ul_alloc_t msg3_alloc = {msg3.n_prb, msg3.L};
-  alloc_outcome_t          ret        = alloc_ul(user, msg3_alloc, sf_sched::ul_alloc_t::MSG3, msg3.mcs);
+  // Derive PRBs from allocated RAR grants
+  ul_harq_proc::ul_alloc_t msg3_alloc = {};
+  srslte_ra_type2_from_riv(
+      rargrant.grant.rba, &msg3_alloc.L, &msg3_alloc.RB_start, cc_cfg->nof_prb(), cc_cfg->nof_prb());
+
+  alloc_outcome_t ret = alloc_ul(user, msg3_alloc, sf_sched::ul_alloc_t::MSG3, rargrant.grant.trunc_mcs);
   if (not ret) {
-    log_h->warning("SCHED: Could not allocate msg3 within (%d,%d)\n", msg3.n_prb, msg3.n_prb + msg3.L);
+    log_h->warning(
+        "SCHED: Could not allocate msg3 within (%d,%d)\n", msg3_alloc.RB_start, msg3_alloc.RB_start + msg3_alloc.L);
   }
   return ret;
 }


@@ -388,40 +388,26 @@ int user_state_sched_tester::test_ra(uint32_t enb_
     uint16_t  rnti     = iter.first;
     ue_state& userinfo = iter.second;

-    // No UL allocations before Msg3
-    for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) {
-      if (ul_result.pusch[i].dci.rnti == rnti) {
-        CONDERROR(not userinfo.rar_tic.is_valid(), "No UL allocs allowed before RAR\n");
-        CONDERROR(ul_result.pusch[i].needs_pdcch and not userinfo.msg3_tic.is_valid() and
-                      userinfo.msg3_tic.tti_rx() > tic.tti_rx(),
-                  "No UL newtxs allocs allowed before Msg3 Rx\n");
-        tti_counter msg3_tic = userinfo.rar_tic + FDD_HARQ_DELAY_DL_MS + MSG3_DELAY_MS;
-        CONDERROR(msg3_tic > tic.tic_tx_ul(), "No UL allocs allowed before Msg3 alloc\n");
-      }
-    }
-
-    // No DL data allocations before Msg3 is received
-    for (uint32_t i = 0; i < dl_result.nof_data_elems; ++i) {
-      if (dl_result.data[i].dci.rnti == rnti) {
-        CONDERROR(not userinfo.msg3_tic.is_valid(), "No DL data alloc allowed before Msg3 alloc\n");
-        CONDERROR(tic + FDD_HARQ_DELAY_DL_MS < userinfo.msg3_tic, "Msg4 cannot be tx without Msg3 being acked\n");
-      }
-    }
-
     uint32_t primary_cc_idx = userinfo.user_cfg.supported_cc_list[0].enb_cc_idx;
     if (enb_cc_idx != primary_cc_idx) {
       // only check for RAR/Msg3 presence for a UE's PCell
       continue;
     }

-    // No RAR allocations outside of rar_window
+    /* TEST: RAR allocation */
     std::array<tti_counter, 2> rar_window = {
         userinfo.prach_tic + 3, userinfo.prach_tic + 3 + (int)cell_params[primary_cc_idx].prach_rar_window};
+    tti_counter tic_tx_dl        = tic.tic_tx_dl();
+    tti_counter tic_tx_ul        = tic.tic_tx_ul();
+    bool        is_in_rar_window = tic_tx_dl >= rar_window[0] and tic_tx_dl <= rar_window[1];

-    tti_counter tic_tx_dl = tic.tic_tx_dl();
-    CONDERROR(not userinfo.rar_tic.is_valid() and tic.tic_tx_dl() > rar_window[1],
-              "RAR not scheduled within the RAR Window\n");
-    if (tic_tx_dl <= rar_window[1] and tic_tx_dl >= rar_window[0]) {
+    if (not is_in_rar_window) {
+      CONDERROR(not userinfo.rar_tic.is_valid() and tic_tx_dl > rar_window[1],
+                "RAR not scheduled within the RAR Window\n");
+      for (uint32_t i = 0; i < dl_result.nof_rar_elems; ++i) {
+        CONDERROR(dl_result.rar[i].dci.rnti == rnti, "No RAR allocations allowed outside of user RAR window\n");
+      }
+    } else {
       // Inside RAR window
       for (uint32_t i = 0; i < dl_result.nof_rar_elems; ++i) {
         for (uint32_t j = 0; j < dl_result.rar[i].nof_grants; ++j) {
@@ -437,36 +423,62 @@ int user_state_sched_tester::test_ra(uint32_t enb_
     }

     /* TEST: Check Msg3 */
-    if (userinfo.rar_tic.is_valid()) {
+    if (userinfo.rar_tic.is_valid() and not userinfo.msg3_tic.is_valid()) {
+      // RAR scheduled, Msg3 not yet scheduled
       tti_counter expected_msg3_tti = userinfo.rar_tic + FDD_HARQ_DELAY_DL_MS + MSG3_DELAY_MS;
-      if (expected_msg3_tti == tic.tic_tx_ul()) {
+      CONDERROR(expected_msg3_tti < tic_tx_ul and not userinfo.msg3_tic.is_valid(), "No UL msg3 alloc was made\n");
+      if (expected_msg3_tti == tic_tx_ul) {
+        // Msg3 should exist
         for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) {
           if (ul_result.pusch[i].dci.rnti == rnti) {
             CONDERROR(userinfo.msg3_tic.is_valid(), "Only one Msg3 allowed per user\n");
             CONDERROR(ul_result.pusch[i].needs_pdcch, "Msg3 allocations do not require PDCCH\n");
             CONDERROR(userinfo.msg3_riv != ul_result.pusch[i].dci.type2_alloc.riv,
                       "The Msg3 was not allocated in the expected PRBs.\n");
-            userinfo.msg3_tic = tic.tic_tx_ul();
+            userinfo.msg3_tic = tic_tx_ul;
             msg3_count++;
           }
         }
-      } else if (expected_msg3_tti < tic.tic_tx_ul()) {
-        CONDERROR(not userinfo.msg3_tic.is_valid(), "No UL msg3 allocation was made\n");
       }
     }

-    // Find any Msg4 Allocation
-    for (uint32_t i = 0; i < dl_result.nof_data_elems; ++i) {
-      if (dl_result.data[i].dci.rnti == rnti) {
-        for (uint32_t j = 0; j < dl_result.data[i].nof_pdu_elems[0]; ++j) {
-          if (dl_result.data[i].pdu[0][j].lcid == srslte::sch_subh::CON_RES_ID) {
-            // ConRes found
-            CONDERROR(dl_result.data[i].dci.format != SRSLTE_DCI_FORMAT1, "ConRes must be format1\n");
-            CONDERROR(userinfo.msg4_tic.is_valid(), "Duplicate ConRes CE for the same rnti\n");
-            userinfo.msg4_tic = tic.tic_tx_dl();
-          }
-        }
-        CONDERROR(not userinfo.msg4_tic.is_valid(), "Data allocs are not allowed without first receiving ConRes\n");
-      }
-    }
+    /* TEST: Check Msg4 */
+    if (userinfo.msg3_tic.is_valid() and not userinfo.msg4_tic.is_valid()) {
+      // Msg3 scheduled, but Msg4 not yet scheduled
+      for (uint32_t i = 0; i < dl_result.nof_data_elems; ++i) {
+        if (dl_result.data[i].dci.rnti == rnti) {
+          CONDERROR(tic < userinfo.msg3_tic, "Msg4 cannot be scheduled without Msg3 being tx\n");
+          for (uint32_t j = 0; j < dl_result.data[i].nof_pdu_elems[0]; ++j) {
+            if (dl_result.data[i].pdu[0][j].lcid == srslte::sch_subh::CON_RES_ID) {
+              // ConRes found
+              CONDERROR(dl_result.data[i].dci.format != SRSLTE_DCI_FORMAT1, "ConRes must be format1\n");
+              CONDERROR(userinfo.msg4_tic.is_valid(), "Duplicate ConRes CE for the same rnti\n");
+              userinfo.msg4_tic = tic_tx_dl;
+            }
+          }
+        }
+      }
+    }
+
+    /* TEST: Txs out of place */
+    if (not userinfo.msg4_tic.is_valid()) {
+      // Msg4 not yet received by user
+      for (uint32_t i = 0; i < dl_result.nof_data_elems; ++i) {
+        CONDERROR(dl_result.data[i].dci.rnti == rnti, "No DL data allocs allowed before Msg4 is scheduled\n");
+      }
+      if (userinfo.msg3_tic.is_valid() and userinfo.msg3_tic != tic_tx_ul) {
+        // Msg3 scheduled. No UL alloc allowed unless it is a newtx (the Msg3 itself)
+        for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) {
+          // Needs PDCCH - filters out UL retxs
+          CONDERROR(ul_result.pusch[i].needs_pdcch and ul_result.pusch[i].dci.rnti == rnti,
+                    "No UL newtxs allowed before user received Msg4\n");
+        }
+      } else if (not userinfo.msg3_tic.is_valid()) {
+        // Not Msg3 sched TTI
+        for (uint32_t i = 0; i < ul_result.nof_dci_elems; ++i) {
+          CONDERROR(ul_result.pusch[i].dci.rnti == rnti, "No UL newtxs allowed before user received Msg4\n");
+        }
+      }
+    }
   }
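
Taken together, the reworked checks pin down a strict per-UE ordering of the random-access messages: RAR inside the RAR window after PRACH, Msg3 exactly FDD_HARQ_DELAY_DL_MS + MSG3_DELAY_MS TTIs after the RAR, and Msg4/ConRes (and any other new transmissions) only afterwards. A compact standalone sketch of that ordering follows; the constant values (4 and 2) and the validate() helper are illustrative, the real test tracks these events through tti_counter/ue_state.

// Standalone sketch: expected RA timeline for one UE, in TTIs (wrap-around ignored for brevity).
#include <cstdint>
#include <cstdio>

struct ra_timeline {
  uint32_t prach, rar, msg3, msg4;
};

// PRACH at t_prach; RAR within [t_prach + 3, t_prach + 3 + rar_window];
// Msg3 at rar + harq_delay + msg3_delay; Msg4 (ConRes) only after Msg3.
bool validate(const ra_timeline& t, uint32_t rar_window, uint32_t harq_delay = 4, uint32_t msg3_delay = 2)
{
  bool rar_ok  = t.rar >= t.prach + 3 and t.rar <= t.prach + 3 + rar_window;
  bool msg3_ok = t.msg3 == t.rar + harq_delay + msg3_delay;
  bool msg4_ok = t.msg4 > t.msg3;
  return rar_ok and msg3_ok and msg4_ok;
}

int main()
{
  ra_timeline t{/*prach=*/100, /*rar=*/105, /*msg3=*/111, /*msg4=*/119};
  std::printf("timeline %s\n", validate(t, /*rar_window=*/10) ? "ok" : "violates RA ordering");
  return 0;
}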