sched: added checks for localized RBG assignment in case of DCI format1A

This commit is contained in:
Francisco 2021-02-15 19:10:14 +00:00 committed by Francisco Paisana
parent 34d4dc3974
commit a7f4dfab2c
9 changed files with 173 additions and 91 deletions

View File

@ -88,13 +88,14 @@ using prbmask_t = srslte::bounded_bitset<100, true>;
struct prb_interval;
//! Struct to express a {min,...,max} range of RBGs (resource block groups)
struct rbg_interval : public srslte::interval<uint32_t> {
using interval::interval;
rbg_interval(srslte::interval<uint32_t> i) : interval(i) {}
// Converts an RBG bitmask into an RBG interval (presumably the span covering the set bits;
// see is_contiguous(), which compares this interval's length against the mask popcount)
static rbg_interval rbgmask_to_rbgs(const rbgmask_t& mask);
};
//! Struct to express a {min,...,max} range of PRBs
struct prb_interval : public srslte::interval<uint32_t> {
using interval::interval;
prb_interval(srslte::interval<uint32_t> i) : interval(i) {}
// Converts an interval of RBGs into the corresponding PRB interval for a cell with cell_nof_prb PRBs
static prb_interval rbgs_to_prbs(const rbg_interval& rbgs, uint32_t cell_nof_prb);
// Decodes a DCI RIV (Resource Indication Value) into a PRB interval.
// NOTE(review): nof_vrbs < 0 presumably means "derive from nof_prbs" - confirm in the definition.
static prb_interval riv_to_prbs(uint32_t riv, uint32_t nof_prbs, int nof_vrbs = -1);
};

View File

@ -36,7 +36,10 @@ struct alloc_outcome_t {
NOF_RB_INVALID,
PUCCH_COLLISION,
MEASGAP_COLLISION,
ALREADY_ALLOC
ALREADY_ALLOC,
NO_DATA,
INVALID_PRBMASK,
INVALID_CARRIER
};
result_enum result = ERROR;
alloc_outcome_t() = default;

View File

@ -142,6 +142,12 @@ uint32_t get_aggr_level(uint32_t nof_bits,
uint32_t cell_nof_prb,
bool use_tbs_index_alt);
/*******************************************************
* RB mask helper functions
*******************************************************/
bool is_contiguous(const rbgmask_t& mask);
/*******************************************************
* sched_interface helper functions
*******************************************************/

View File

@ -35,12 +35,13 @@ protected:
/**************** Helper methods ****************/
/**
* Finds a bitmask of available RBG resources
* @param L Size of the requested DL RBGs
* @param current_mask input RBG bitmask where to search for available RBGs
* @return bitmask of found RBGs
* Finds a bitmask of available RBG resources for a given UE in a greedy fashion
* @param ue UE being allocated
* @param enb_cc_idx carrier index
* @param current_mask bitmask of occupied RBGs, where to search for available RBGs
* @return bitmask of found RBGs. If a valid mask wasn't found, bitmask::size() == 0
*/
rbgmask_t find_available_dl_rbgs(uint32_t L, const rbgmask_t& current_mask);
rbgmask_t compute_user_rbgmask_greedy(sched_ue& ue, uint32_t enb_cc_idx, const rbgmask_t& current_mask);
/**
* Finds a range of L contiguous PRBs that are empty
@ -57,6 +58,8 @@ const ul_harq_proc* get_ul_newtx_harq(sched_ue& user, sf_sched* tti_sched);
/// Helper methods to allocate resources in subframe
alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h);
alloc_outcome_t
try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask = nullptr);
alloc_outcome_t try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h);
} // namespace srsenb

View File

@ -38,6 +38,14 @@ const char* alloc_outcome_t::to_string() const
return "measgap_collision";
case ALREADY_ALLOC:
return "already allocated";
case NO_DATA:
return "no pending data to allocate";
case INVALID_PRBMASK:
return "invalid rbg mask";
case INVALID_CARRIER:
return "invalid eNB carrier";
default:
break;
}
return "unknown error";
}
@ -770,7 +778,7 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
auto* cc = user->find_ue_carrier(cc_cfg->enb_cc_idx);
if (cc == nullptr or cc->cc_state() != cc_st::active) {
return alloc_outcome_t::ERROR;
return alloc_outcome_t::INVALID_CARRIER;
}
if (not user->pdsch_enabled(srslte::tti_point{get_tti_rx()}, cc_cfg->enb_cc_idx)) {
return alloc_outcome_t::MEASGAP_COLLISION;
@ -782,11 +790,17 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
// It is newTx
rbg_interval r = user->get_required_dl_rbgs(cc_cfg->enb_cc_idx);
if (r.start() > user_mask.count()) {
logger.warning("The number of RBGs allocated to rnti=0x%x will force segmentation", user->get_rnti());
logger.warning("SCHED: The number of RBGs allocated to rnti=0x%x will force segmentation", user->get_rnti());
return alloc_outcome_t::NOF_RB_INVALID;
}
}
srslte_dci_format_t dci_format = user->get_dci_format();
if (dci_format == SRSLTE_DCI_FORMAT1A and not is_contiguous(user_mask)) {
logger.warning("SCHED: Can't use distributed RBGs for DCI format 1A");
return alloc_outcome_t::INVALID_PRBMASK;
}
// Check if there is space in the PUCCH for HARQ ACKs
const sched_interface::ue_cfg_t& ue_cfg = user->get_ue_cfg();
std::bitset<SRSLTE_MAX_CARRIERS> scells = user->scell_activation_mask();

View File

@ -182,6 +182,11 @@ prb_interval prb_interval::riv_to_prbs(uint32_t riv, uint32_t nof_prbs, int nof_
return {rb_start, rb_start + l_crb};
}
bool is_contiguous(const rbgmask_t& mask)
{
return rbg_interval::rbgmask_to_rbgs(mask).length() == mask.count();
}
/*******************************************************
* Sched Params
*******************************************************/

View File

@ -14,16 +14,42 @@
namespace srsenb {
rbgmask_t find_available_dl_rbgs(uint32_t max_rbgs, const rbgmask_t& current_mask)
/*********************************
* Common UL/DL Helper methods
********************************/
/**
 * Generic helper shared by UL (PRB) and DL (RBG) searches: finds the longest
 * run of zero (free) bits in the mask, capped at max_size positions.
 * @param in_mask bitmask of occupied resources (set bit = occupied)
 * @param max_size maximum length of the returned interval
 * @return the best (longest, up to max_size) interval of free positions found;
 *         empty interval if the mask has no free positions
 */
template <size_t N>
srslte::interval<uint32_t> find_contiguous_interval(const srslte::bounded_bitset<N, true>& in_mask, uint32_t max_size)
{
// NOTE(review): the next two lines reference max_rbgs/current_mask, which are not parameters of this
// function - they look like residue of the removed find_available_dl_rbgs(); confirm against the commit.
if (max_rbgs == 0 or current_mask.all()) {
return rbgmask_t{};
srslte::interval<uint32_t> interv, max_interv;
// Single pass: grow the current run of free bits; on hitting an occupied bit,
// keep the longest run seen so far and restart
for (uint32_t n = 0; n < in_mask.size() and interv.length() < max_size; n++) {
if (not in_mask.test(n) and interv.empty()) {
// new interval
interv.set(n, n + 1);
} else if (not in_mask.test(n)) {
// extend current interval
interv.resize_by(1);
} else if (not interv.empty()) {
// reset interval
max_interv = interv.length() > max_interv.length() ? interv : max_interv;
interv = {};
}
}
// 1's for free rbgs
rbgmask_t localmask = ~(current_mask);
// NOTE(review): the line above also appears to be pre-refactor residue (belongs to find_available_rb_mask).
return interv.length() > max_interv.length() ? interv : max_interv;
}
/****************************
* DL Helper methods
***************************/
rbgmask_t find_available_rb_mask(const rbgmask_t& in_mask, uint32_t max_size)
{
// 1's for free RBs
rbgmask_t localmask = ~(in_mask);
uint32_t i = 0, nof_alloc = 0;
for (; i < localmask.size() and nof_alloc < max_rbgs; ++i) {
for (; i < localmask.size() and nof_alloc < max_size; ++i) {
if (localmask.test(i)) {
nof_alloc++;
}
@ -32,42 +58,17 @@ rbgmask_t find_available_dl_rbgs(uint32_t max_rbgs, const rbgmask_t& current_mas
return localmask;
}
prb_interval find_contiguous_ul_prbs(uint32_t L, const prbmask_t& current_mask)
/**
 * Computes an RBG bitmask for a user allocation in a greedy fashion.
 * @param max_nof_rbgs maximum number of RBGs to allocate
 * @param is_contiguous whether the allocation must be one contiguous run of RBGs
 *                      (required for DCI format 1A, per the check in alloc_dl_user)
 * @param current_mask bitmask of RBGs already occupied in the subframe
 * @return mask of RBGs chosen for the user
 */
rbgmask_t compute_user_rbgmask_greedy(uint32_t max_nof_rbgs, bool is_contiguous, const rbgmask_t& current_mask)
{
// NOTE(review): several lines below (prb_interv*, the "L"-bounded loop, the SC-FDMA validity
// adjustments) reference names that are not in scope in this function - they look like residue
// of the removed old find_contiguous_ul_prbs() interleaved by the diff rendering; the actual
// new-version body is the "Allocate enough RBs..." section. Confirm against the commit.
prb_interval prb_interv, prb_interv2;
for (uint32_t n = 0; n < current_mask.size() and prb_interv.length() < L; n++) {
if (not current_mask.test(n) and prb_interv.length() == 0) {
// new interval
prb_interv.set(n, n + 1);
} else if (not current_mask.test(n)) {
// extend current interval
prb_interv.resize_by(1);
} else if (prb_interv.length() > 0) {
// reset interval
prb_interv2 = prb_interv.length() > prb_interv2.length() ? prb_interv : prb_interv2;
prb_interv = {};
}
// Allocate enough RBs that accommodate pending data
rbgmask_t newtx_mask(current_mask.size());
if (is_contiguous) {
// Contiguous allocation: pick the best run of free RBGs and fill that span
rbg_interval interv = find_contiguous_interval(current_mask, max_nof_rbgs);
newtx_mask.fill(interv.start(), interv.stop());
} else {
// Distributed allocation: take free RBGs greedily starting from the lowest index
newtx_mask = find_available_rb_mask(current_mask, max_nof_rbgs);
}
prb_interv = prb_interv2.length() > prb_interv.length() ? prb_interv2 : prb_interv;
if (prb_interv.empty()) {
return prb_interv;
}
// Make sure L is allowed by SC-FDMA modulation
prb_interv2 = prb_interv;
while (not srslte_dft_precoding_valid_prb(prb_interv.length()) and prb_interv.stop() < current_mask.size() and
not current_mask.test(prb_interv.stop())) {
prb_interv.resize_by(1);
}
if (not srslte_dft_precoding_valid_prb(prb_interv.length())) {
// if length increase failed, try to decrease
prb_interv = prb_interv2;
prb_interv.resize_by(-1);
while (not srslte_dft_precoding_valid_prb(prb_interv.length()) and not prb_interv.empty()) {
prb_interv.resize_by(-1);
}
}
return prb_interv;
return newtx_mask;
}
int get_ue_cc_idx_if_pdsch_enabled(const sched_ue& user, sf_sched* tti_sched)
@ -104,6 +105,87 @@ const dl_harq_proc* get_dl_newtx_harq(sched_ue& user, sf_sched* tti_sched)
return user.get_empty_dl_harq(tti_sched->get_tti_tx_dl(), tti_sched->get_enb_cc_idx());
}
/**
 * Attempts to allocate a DL HARQ retransmission in the given subframe scheduler.
 * First tries to reuse the exact RBG mask of the previous transmission; if that
 * placement fails, searches for an alternative mask with the same number of RBGs.
 * @param tti_sched subframe scheduler where the allocation is attempted
 * @param ue UE that owns the HARQ process
 * @param h DL HARQ process to retransmit
 * @return allocation outcome (SUCCESS, DCI_COLLISION, RB_COLLISION, ...)
 */
alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h)
{
// Try to reuse the same mask
rbgmask_t retx_mask = h.get_rbgmask();
alloc_outcome_t code = tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
if (code == alloc_outcome_t::SUCCESS or code == alloc_outcome_t::DCI_COLLISION) {
// DCI_COLLISION is also propagated so the caller can account for PDCCH exhaustion
return code;
}
// If previous mask does not fit, find another with exact same number of rbgs
size_t nof_rbg = retx_mask.count();
// DCI format 1A only allows contiguous (localized) RBG allocations
bool is_contiguous_alloc = ue.get_dci_format() == SRSLTE_DCI_FORMAT1A;
retx_mask = compute_user_rbgmask_greedy(nof_rbg, is_contiguous_alloc, tti_sched.get_dl_mask());
if (retx_mask.count() == nof_rbg) {
// Found a replacement mask of identical size - retry the allocation with it
return tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
}
return alloc_outcome_t::RB_COLLISION;
}
/**
 * Attempts to allocate a DL new transmission for a UE, choosing the RBGs greedily.
 * @param tti_sched subframe scheduler where the allocation is attempted
 * @param ue UE being scheduled
 * @param h empty DL HARQ process to use for the newtx
 * @param result_mask if not nullptr, set to the allocated RBG mask on SUCCESS
 *                    (and cleared on entry, so it is empty on any failure path)
 * @return allocation outcome (SUCCESS, NO_DATA, RB_COLLISION, DCI_COLLISION, ...)
 */
alloc_outcome_t
try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h, rbgmask_t* result_mask)
{
if (result_mask != nullptr) {
*result_mask = {};
}
// If all RBGs are occupied, the next steps can be shortcut
const rbgmask_t& current_mask = tti_sched.get_dl_mask();
if (current_mask.all()) {
return alloc_outcome_t::RB_COLLISION;
}
// If there is no data to transmit, no need to allocate
rbg_interval req_rbgs = ue.get_required_dl_rbgs(tti_sched.get_enb_cc_idx());
if (req_rbgs.stop() == 0) {
return alloc_outcome_t::NO_DATA;
}
// Find RBG mask that accommodates pending data
// DCI format 1A only allows contiguous (localized) RBG allocations
bool is_contiguous_alloc = ue.get_dci_format() == SRSLTE_DCI_FORMAT1A;
rbgmask_t newtxmask = compute_user_rbgmask_greedy(req_rbgs.stop(), is_contiguous_alloc, current_mask);
if (newtxmask.none() or newtxmask.count() < req_rbgs.start()) {
// Fewer free RBGs than the required minimum (req_rbgs.start()) -> fail the allocation
return alloc_outcome_t::RB_COLLISION;
}
// empty RBGs were found. Attempt allocation
alloc_outcome_t ret = tti_sched.alloc_dl_user(&ue, newtxmask, h.get_id());
if (ret == alloc_outcome_t::SUCCESS and result_mask != nullptr) {
*result_mask = newtxmask;
}
return ret;
}
/*****************
* UL Helpers
****************/
/**
 * Finds a range of up to L contiguous free PRBs, then adjusts the range length
 * so that it is a valid SC-FDMA (DFT precoding) allocation size.
 * @param L maximum number of UL PRBs requested
 * @param current_mask bitmask of occupied PRBs (set bit = occupied)
 * @return interval of free PRBs; empty interval if none could be found
 */
prb_interval find_contiguous_ul_prbs(uint32_t L, const prbmask_t& current_mask)
{
prb_interval prb_interv = find_contiguous_interval(current_mask, L);
if (prb_interv.empty()) {
return prb_interv;
}
// Make sure L is allowed by SC-FDMA modulation
// First try to grow the interval into adjacent free PRBs until the length is valid,
// keeping a copy (prb_interv2) to fall back on if growing does not reach a valid size
prb_interval prb_interv2 = prb_interv;
while (not srslte_dft_precoding_valid_prb(prb_interv.length()) and prb_interv.stop() < current_mask.size() and
not current_mask.test(prb_interv.stop())) {
prb_interv.resize_by(1);
}
if (not srslte_dft_precoding_valid_prb(prb_interv.length())) {
// if length increase failed, try to decrease
prb_interv = prb_interv2;
prb_interv.resize_by(-1);
// Shrink until valid; may end up empty, which the caller treats as "not found"
while (not srslte_dft_precoding_valid_prb(prb_interv.length()) and not prb_interv.empty()) {
prb_interv.resize_by(-1);
}
}
return prb_interv;
}
int get_ue_cc_idx_if_pusch_enabled(const sched_ue& user, sf_sched* tti_sched, bool needs_pdcch)
{
// Do not allocate a user multiple times in the same tti
@ -139,24 +221,6 @@ const ul_harq_proc* get_ul_newtx_harq(sched_ue& user, sf_sched* tti_sched)
return h->is_empty() ? h : nullptr;
}
alloc_outcome_t try_dl_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const dl_harq_proc& h)
{
// Try to reuse the same mask
rbgmask_t retx_mask = h.get_rbgmask();
alloc_outcome_t code = tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
if (code == alloc_outcome_t::SUCCESS or code == alloc_outcome_t::DCI_COLLISION) {
return code;
}
// If previous mask does not fit, find another with exact same number of rbgs
size_t nof_rbg = retx_mask.count();
retx_mask = find_available_dl_rbgs(nof_rbg, tti_sched.get_dl_mask());
if (retx_mask.count() == nof_rbg) {
return tti_sched.alloc_dl_user(&ue, retx_mask, h.get_id());
}
return alloc_outcome_t::RB_COLLISION;
}
alloc_outcome_t try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_proc& h)
{
// If can schedule the same mask, do it

View File

@ -78,20 +78,13 @@ uint32_t sched_time_pf::try_dl_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* t
return ue_ctxt.dl_retx_h->get_tbs(0) + ue_ctxt.dl_retx_h->get_tbs(1);
}
}
// There is space in PDCCH and an available DL HARQ
if (code != alloc_outcome_t::DCI_COLLISION and ue_ctxt.dl_newtx_h != nullptr) {
rbg_interval req_rbgs = ue.get_required_dl_rbgs(cc_cfg->enb_cc_idx);
// Check if there is an empty harq for the newtx
if (req_rbgs.stop() == 0) {
return 0;
}
// Allocate resources based on pending data
rbgmask_t newtx_mask = find_available_dl_rbgs(req_rbgs.stop(), tti_sched->get_dl_mask());
if (newtx_mask.count() >= req_rbgs.start()) {
// empty RBGs were found
code = tti_sched->alloc_dl_user(&ue, newtx_mask, ue_ctxt.dl_newtx_h->get_id());
if (code == alloc_outcome_t::SUCCESS) {
return ue.get_expected_dl_bitrate(cc_cfg->enb_cc_idx, newtx_mask.count()) * tti_duration_ms / 8;
}
rbgmask_t alloc_mask;
code = try_dl_newtx_alloc_greedy(*tti_sched, ue, *ue_ctxt.dl_newtx_h, &alloc_mask);
if (code == alloc_outcome_t::SUCCESS) {
return ue.get_expected_dl_bitrate(cc_cfg->enb_cc_idx, alloc_mask.count()) * tti_duration_ms / 8;
}
}
if (code == alloc_outcome_t::DCI_COLLISION) {

View File

@ -69,20 +69,13 @@ void sched_time_rr::sched_dl_newtxs(std::map<uint16_t, sched_ue>& ue_db, sf_sche
if (user.enb_to_ue_cc_idx(cc_cfg->enb_cc_idx) < 0) {
continue;
}
const dl_harq_proc* h = get_dl_newtx_harq(user, tti_sched);
rbg_interval req_rbgs = user.get_required_dl_rbgs(cc_cfg->enb_cc_idx);
const dl_harq_proc* h = get_dl_newtx_harq(user, tti_sched);
// Check if there is an empty harq for the newtx
if (h == nullptr or req_rbgs.stop() == 0) {
if (h == nullptr) {
continue;
}
// Allocate resources based on pending data
rbgmask_t newtx_mask = find_available_dl_rbgs(req_rbgs.stop(), tti_sched->get_dl_mask());
if (newtx_mask.count() >= req_rbgs.start()) {
// empty RBGs were found
alloc_outcome_t code = tti_sched->alloc_dl_user(&user, newtx_mask, h->get_id());
if (code == alloc_outcome_t::DCI_COLLISION) {
logger.info("SCHED: Couldn't find space in PDCCH for DL tx for rnti=0x%x", user.get_rnti());
}
if (try_dl_newtx_alloc_greedy(*tti_sched, user, *h) == alloc_outcome_t::DCI_COLLISION) {
logger.info("SCHED: Couldn't find space in PDCCH for DL tx for rnti=0x%x", user.get_rnti());
}
}
}