moved the RAR/Msg3 scheduling to a separate class

Francisco Paisana 2019-10-16 16:55:37 +01:00
parent 388776d1fa
commit c19e0d9423
5 changed files with 280 additions and 212 deletions

View File

@@ -35,6 +35,20 @@
namespace srsenb {
namespace sched_utils {
inline bool is_in_tti_interval(uint32_t tti, uint32_t tti1, uint32_t tti2)
{
tti %= 10240;
tti1 %= 10240;
tti2 %= 10240;
if (tti1 <= tti2) {
return tti >= tti1 and tti <= tti2;
}
return tti >= tti1 or tti <= tti2;
}
} // namespace sched_utils
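The helper above is an inclusive TTI-range test that keeps working across the 10240-TTI wrap-around, which matters because a RAR window can straddle the boundary. A minimal standalone sketch of its semantics (the helper is copied locally so the snippet compiles on its own):

#include <cassert>
#include <cstdint>

// Local copy of sched_utils::is_in_tti_interval, for illustration only.
static bool is_in_tti_interval(uint32_t tti, uint32_t tti1, uint32_t tti2)
{
  tti %= 10240;
  tti1 %= 10240;
  tti2 %= 10240;
  if (tti1 <= tti2) {
    return tti >= tti1 and tti <= tti2;
  }
  return tti >= tti1 or tti <= tti2;
}

int main()
{
  assert(is_in_tti_interval(100, 50, 200));           // plain interval, no wrap
  assert(is_in_tti_interval(10235, 10230, 10245));    // interval wraps: [10230, 5]
  assert(is_in_tti_interval(3, 10230, 10245));        // 10245 % 10240 == 5, so 3 is inside
  assert(not is_in_tti_interval(5000, 10230, 10245)); // well outside the wrapped interval
  return 0;
}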
/* Caution: User addition (ue_cfg) and removal (ue_rem) are not thread-safe.
* The remaining operations are thread-safe.
@@ -149,8 +163,9 @@ protected:
srslte_regs_t regs;
class bc_sched_t;
class ra_sched_t;
class tti_sched_t : public dl_tti_sched_t, public ul_tti_sched_t
class tti_sched_result_t : public dl_tti_sched_t, public ul_tti_sched_t
{
public:
struct ctrl_alloc_t {
@@ -227,10 +242,10 @@ protected:
bool is_dl_alloc(sched_ue* user) const final;
bool is_ul_alloc(sched_ue* user) const final;
ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti);
alloc_outcome_t alloc_ul(sched_ue* user,
ul_harq_proc::ul_alloc_t alloc,
tti_sched_t::ul_alloc_t::type_t alloc_type,
uint32_t msg3 = 0);
alloc_outcome_t alloc_ul(sched_ue* user,
ul_harq_proc::ul_alloc_t alloc,
tti_sched_result_t::ul_alloc_t::type_t alloc_type,
uint32_t msg3 = 0);
int generate_format1a(
uint32_t rb_start, uint32_t l_crb, uint32_t tbs, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci);
void set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
@@ -253,17 +268,15 @@ protected:
};
const static uint32_t nof_sched_ttis = 10;
tti_sched_t tti_scheds[nof_sched_ttis];
tti_sched_t* get_tti_sched(uint32_t tti_rx) { return &tti_scheds[tti_rx % nof_sched_ttis]; }
tti_sched_result_t tti_scheds[nof_sched_ttis];
tti_sched_result_t* get_tti_sched(uint32_t tti_rx) { return &tti_scheds[tti_rx % nof_sched_ttis]; }
std::vector<uint8_t> tti_dl_mask;
tti_sched_t* new_tti(uint32_t tti_rx);
void generate_phich(tti_sched_t* tti_sched);
int generate_dl_sched(tti_sched_t* tti_sched);
int generate_ul_sched(tti_sched_t* tti_sched);
void dl_sched_rar(tti_sched_t* tti_sched);
void dl_sched_data(tti_sched_t* tti_sched);
void ul_sched_msg3(tti_sched_t* tti_sched);
tti_sched_result_t* new_tti(uint32_t tti_rx);
void generate_phich(tti_sched_result_t* tti_sched);
int generate_dl_sched(tti_sched_result_t* tti_sched);
int generate_ul_sched(tti_sched_result_t* tti_sched);
void dl_sched_data(tti_sched_result_t* tti_sched);
// Helper methods
template <typename Func>
@@ -271,17 +284,6 @@ protected:
std::map<uint16_t, sched_ue> ue_db;
typedef struct {
bool enabled;
uint16_t rnti;
uint32_t L;
uint32_t n_prb;
uint32_t mcs;
} pending_msg3_t;
std::queue<dl_sched_rar_info_t> pending_rars;
pending_msg3_t pending_msg3[TTIMOD_SZ];
// Allowed DCI locations for SIB and RAR per CFI
sched_ue::sched_dci_cce_t common_locations[3];
sched_ue::sched_dci_cce_t rar_locations[3][10];
@@ -294,9 +296,8 @@ protected:
prbmask_t prach_mask;
prbmask_t pucch_mask;
uint32_t bc_aggr_level;
uint32_t rar_aggr_level;
std::unique_ptr<bc_sched_t> bc_sched;
std::unique_ptr<ra_sched_t> rar_sched;
uint32_t pdsch_re[10];
uint32_t current_tti;

View File

@@ -32,7 +32,7 @@ public:
bc_sched_t(cell_cfg_t* cfg_);
void init(rrc_interface_mac* rrc_);
void dl_sched(tti_sched_t* tti_sched);
void dl_sched(tti_sched_result_t* tti_sched);
void reset();
private:
@@ -42,9 +42,9 @@ private:
uint32_t n_tx = 0;
};
void update_si_windows(tti_sched_t* tti_sched);
void alloc_sibs(tti_sched_t* tti_sched);
void alloc_paging(tti_sched_t* tti_sched);
void update_si_windows(tti_sched_result_t* tti_sched);
void alloc_sibs(tti_sched_result_t* tti_sched);
void alloc_paging(tti_sched_result_t* tti_sched);
// args
cell_cfg_t* cfg;
@@ -58,6 +58,45 @@ private:
uint32_t bc_aggr_level = 2;
};
class sched::ra_sched_t
{
public:
struct pending_msg3_t {
bool enabled = false;
uint16_t rnti = 0;
uint32_t L = 0;
uint32_t n_prb = 0;
uint32_t mcs = 0;
};
explicit ra_sched_t(cell_cfg_t* cfg_);
void init(srslte::log* log_, std::map<uint16_t, sched_ue>& ue_db_);
void dl_sched(tti_sched_result_t* tti_sched);
void ul_sched(tti_sched_result_t* tti_sched);
int dl_rach_info(dl_sched_rar_info_t rar_info);
void reset();
const pending_msg3_t& find_pending_msg3(uint32_t tti);
private:
struct sched_rar_t {
int buf_rar = 0;
uint16_t rnti = 0;
uint32_t ra_id = 0;
uint32_t rar_tti = 0;
};
// args
srslte::log* log_h = nullptr;
cell_cfg_t* cfg;
std::map<uint16_t, sched_ue>* ue_db = nullptr;
std::queue<dl_sched_rar_info_t> pending_rars;
std::array<pending_msg3_t, TTIMOD_SZ> pending_msg3;
uint32_t tti_tx_dl = 0;
uint32_t rar_aggr_level = 2;
};
} // namespace srsenb
#endif // SRSLTE_SCHEDULER_CTRL_H
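The header now captures the point of the commit: RAR/Msg3 handling lives in its own ra_sched_t, owned by sched alongside bc_sched_t. A rough standalone sketch of how sched drives the two subschedulers per TTI; the `_ex` types are illustrative stand-ins, not the project's API (the real methods take a tti_sched_result_t*):

#include <cstdio>
#include <memory>

// Illustrative stand-ins for bc_sched_t and ra_sched_t.
struct bc_sched_ex {
  void dl_sched() { std::puts("alloc SIBs/paging"); }
};
struct ra_sched_ex {
  void dl_sched() { std::puts("alloc RAR + record pending Msg3"); }
  void ul_sched() { std::puts("alloc recorded Msg3 on PUSCH"); }
};

struct sched_ex {
  std::unique_ptr<bc_sched_ex> bc_sched{new bc_sched_ex};
  std::unique_ptr<ra_sched_ex> rar_sched{new ra_sched_ex};

  void generate_dl_sched()
  {
    bc_sched->dl_sched();  // broadcast first, as in sched::generate_dl_sched
    rar_sched->dl_sched(); // then RAR
    // then pending UE data (dl_sched_data in the real code)
  }
  void generate_ul_sched()
  {
    rar_sched->ul_sched(); // Msg3 gets its reserved PRBs before UE PUSCH
  }
};

int main()
{
  sched_ex s;
  s.generate_dl_sched();
  s.generate_ul_sched();
  return 0;
}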

View File

@@ -38,7 +38,7 @@ namespace srsenb {
* TTI resource Scheduling Methods
*******************************************************/
void sched::tti_sched_t::init(sched* parent_)
void sched::tti_sched_result_t::init(sched* parent_)
{
parent = parent_;
log_h = parent->log_h;
@@ -50,7 +50,7 @@ void sched::tti_sched_t::init(sched* parent_)
tti_alloc.init(log_h, &parent->cfg, pdcch_alloc);
}
void sched::tti_sched_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
void sched::tti_sched_result_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
{
tti_alloc.new_tti(tti_rx_, start_cfi);
@@ -67,7 +67,7 @@ void sched::tti_sched_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
bzero(&ul_sched_result, sizeof(ul_sched_result));
}
bool sched::tti_sched_t::is_dl_alloc(sched_ue* user) const
bool sched::tti_sched_result_t::is_dl_alloc(sched_ue* user) const
{
for (const auto& a : data_allocs) {
if (a.user_ptr == user) {
@@ -77,7 +77,7 @@ bool sched::tti_sched_t::is_dl_alloc(sched_ue* user) const
return false;
}
bool sched::tti_sched_t::is_ul_alloc(sched_ue* user) const
bool sched::tti_sched_result_t::is_ul_alloc(sched_ue* user) const
{
for (const auto& a : ul_data_allocs) {
if (a.user_ptr == user) {
@@ -87,7 +87,8 @@ bool sched::tti_sched_t::is_ul_alloc(sched_ue* user) const
return false;
}
sched::tti_sched_t::ctrl_code_t sched::tti_sched_t::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti)
sched::tti_sched_result_t::ctrl_code_t
sched::tti_sched_result_t::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti)
{
ctrl_alloc_t ctrl_alloc{};
@@ -115,7 +116,7 @@ sched::tti_sched_t::ctrl_code_t sched::tti_sched_t::alloc_dl_ctrl(uint32_t aggr_
return {ret.first, ctrl_alloc};
}
alloc_outcome_t sched::tti_sched_t::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx)
alloc_outcome_t sched::tti_sched_result_t::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx)
{
uint32_t sib_len = sibs_cfg[sib_idx].len;
uint32_t rv = get_rvidx(sib_ntx);
@@ -138,7 +139,7 @@ alloc_outcome_t sched::tti_sched_t::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx
return ret.first;
}
alloc_outcome_t sched::tti_sched_t::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload)
alloc_outcome_t sched::tti_sched_result_t::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload)
{
ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, paging_payload, SRSLTE_PRNTI);
if (not ret.first) {
@@ -154,8 +155,10 @@ alloc_outcome_t sched::tti_sched_t::alloc_paging(uint32_t aggr_lvl, uint32_t pag
return ret.first;
}
sched::tti_sched_t::rar_code_t
sched::tti_sched_t::alloc_rar(uint32_t aggr_lvl, const dl_sched_rar_t& rar_grant, uint32_t prach_tti, uint32_t buf_rar)
sched::tti_sched_result_t::rar_code_t sched::tti_sched_result_t::alloc_rar(uint32_t aggr_lvl,
const dl_sched_rar_t& rar_grant,
uint32_t prach_tti,
uint32_t buf_rar)
{
// RA-RNTI = 1 + t_id + f_id
// t_id = index of first subframe specified by PRACH (0<=t_id<10)
@@ -176,7 +179,7 @@ sched::tti_sched_t::alloc_rar(uint32_t aggr_lvl, const dl_sched_rar_t& rar_grant
return {ret.first, &rar_allocs.back()};
}
alloc_outcome_t sched::tti_sched_t::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid)
alloc_outcome_t sched::tti_sched_result_t::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid)
{
if (is_dl_alloc(user)) {
log_h->warning("SCHED: Attempt to assign multiple harq pids to the same user rnti=0x%x\n", user->get_rnti());
@@ -200,10 +203,10 @@ alloc_outcome_t sched::tti_sched_t::alloc_dl_user(sched_ue* user, const rbgmask_
return alloc_outcome_t::SUCCESS;
}
alloc_outcome_t sched::tti_sched_t::alloc_ul(sched_ue* user,
ul_harq_proc::ul_alloc_t alloc,
tti_sched_t::ul_alloc_t::type_t alloc_type,
uint32_t mcs)
alloc_outcome_t sched::tti_sched_result_t::alloc_ul(sched_ue* user,
ul_harq_proc::ul_alloc_t alloc,
tti_sched_result_t::ul_alloc_t::type_t alloc_type,
uint32_t mcs)
{
// Check whether user was already allocated
if (is_ul_alloc(user)) {
@@ -229,12 +232,12 @@ alloc_outcome_t sched::tti_sched_t::alloc_ul(sched_ue* use
return alloc_outcome_t::SUCCESS;
}
alloc_outcome_t sched::tti_sched_t::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc)
alloc_outcome_t sched::tti_sched_result_t::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc)
{
// check whether adaptive/non-adaptive retx/newtx
tti_sched_t::ul_alloc_t::type_t alloc_type;
ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul());
bool has_retx = h->has_pending_retx();
tti_sched_result_t::ul_alloc_t::type_t alloc_type;
ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul());
bool has_retx = h->has_pending_retx();
if (has_retx) {
ul_harq_proc::ul_alloc_t prev_alloc = h->get_alloc();
if (prev_alloc.L == alloc.L and prev_alloc.RB_start == alloc.RB_start) {
@@ -249,12 +252,12 @@ alloc_outcome_t sched::tti_sched_t::alloc_ul_user(sched_ue* user, ul_harq_proc::
return alloc_ul(user, alloc, alloc_type);
}
alloc_outcome_t sched::tti_sched_t::alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs)
alloc_outcome_t sched::tti_sched_result_t::alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs)
{
return alloc_ul(user, alloc, ul_alloc_t::MSG3, mcs);
}
void sched::tti_sched_t::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
void sched::tti_sched_result_t::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
{
for (const auto& bc_alloc : bc_allocs) {
sched_interface::dl_sched_bc_t* bc = &dl_sched_result.bc[dl_sched_result.nof_bc_elems];
@@ -323,7 +326,7 @@ void sched::tti_sched_t::set_bc_sched_result(const pdcch_grid_t::alloc_result_t&
}
}
void sched::tti_sched_t::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
void sched::tti_sched_result_t::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
{
for (const auto& rar_alloc : rar_allocs) {
sched_interface::dl_sched_rar_t* rar = &dl_sched_result.rar[dl_sched_result.nof_rar_elems];
@@ -353,8 +356,7 @@ void sched::tti_sched_t::set_rar_sched_result(const pdcch_grid_t::alloc_result_t
// Print RAR allocation result
for (uint32_t i = 0; i < rar->nof_grants; ++i) {
const auto& msg3_grant = rar->msg3_grant[i];
uint32_t pending_tti = (get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY) % TTIMOD_SZ;
uint16_t expected_rnti = parent->pending_msg3[pending_tti].rnti; // FIXME
uint16_t expected_rnti = parent->rar_sched->find_pending_msg3(get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY).rnti;
log_h->info("SCHED: RAR, temp_crnti=0x%x, ra-rnti=%d, rbgs=(%d,%d), dci=(%d,%d), rar_grant_rba=%d, "
"rar_grant_mcs=%d\n",
expected_rnti,
@@ -371,7 +373,7 @@ void sched::tti_sched_t::set_rar_sched_result(const pdcch_grid_t::alloc_result_t
}
}
void sched::tti_sched_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
void sched::tti_sched_result_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
{
for (const auto& data_alloc : data_allocs) {
sched_interface::dl_sched_data_t* data = &dl_sched_result.data[dl_sched_result.nof_data_elems];
@@ -429,7 +431,7 @@ void sched::tti_sched_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_resu
}
}
void sched::tti_sched_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
void sched::tti_sched_result_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
{
/* Set UL data DCI locs and format */
for (const auto& ul_alloc : ul_data_allocs) {
@@ -492,7 +494,7 @@ void sched::tti_sched_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t&
}
}
void sched::tti_sched_t::generate_dcis()
void sched::tti_sched_result_t::generate_dcis()
{
/* Pick one of the possible DCI masks */
pdcch_grid_t::alloc_result_t dci_result;
@@ -512,12 +514,12 @@ void sched::tti_sched_t::generate_dcis()
set_ul_sched_result(dci_result);
}
uint32_t sched::tti_sched_t::get_nof_ctrl_symbols() const
uint32_t sched::tti_sched_result_t::get_nof_ctrl_symbols() const
{
return tti_alloc.get_cfi() + ((parent->cfg.cell.nof_prb <= 10) ? 1 : 0);
}
int sched::tti_sched_t::generate_format1a(
int sched::tti_sched_result_t::generate_format1a(
uint32_t rb_start, uint32_t l_crb, uint32_t tbs_bytes, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci)
{
/* Calculate I_tbs for this TBS */
@@ -568,13 +570,12 @@ int sched::tti_sched_t::generate_format1a(
*
*******************************************************/
sched::sched() :
bc_aggr_level(0),
rar_aggr_level(0),
P(0),
si_n_rbg(0),
rar_n_rbg(0),
nof_rbg(0),
bc_sched(new bc_sched_t{&cfg})
bc_sched(new bc_sched_t{&cfg}),
rar_sched(new ra_sched_t{&cfg})
{
current_tti = 0;
log_h = nullptr;
@@ -618,17 +619,18 @@ void sched::init(rrc_interface_mac* rrc_, srslte::log* log)
rrc = rrc_;
bc_sched->init(rrc);
rar_sched->init(log_h, ue_db);
reset();
}
int sched::reset()
{
bzero(pending_msg3, sizeof(pending_msg3_t) * TTIMOD_SZ);
while (not pending_rars.empty()) {
pending_rars.pop();
}
configured = false;
{
std::lock_guard<std::mutex> lock(sched_mutex);
rar_sched->reset();
bc_sched->reset();
}
pthread_rwlock_wrlock(&rwlock);
ue_db.clear();
pthread_rwlock_unlock(&rwlock);
@@ -691,10 +693,8 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
}
// Initiate the tti_scheduler for each TTI
for (uint32_t i = 0; i < nof_sched_ttis; ++i) {
pdcch_grid_t pdcch_alloc;
pdcch_alloc.init(log_h, &regs, common_locations, rar_locations);
tti_scheds[i].init(this);
for (tti_sched_result_t& tti_sched : tti_scheds) {
tti_sched.init(this);
}
configured = true;
@@ -842,10 +842,8 @@ int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
int sched::dl_rach_info(dl_sched_rar_info_t rar_info)
{
Info("SCHED: New RAR tti=%d, preamble=%d, temp_crnti=0x%x, ta_cmd=%d, msg3_size=%d\n",
rar_info.prach_tti, rar_info.preamble_idx, rar_info.temp_crnti, rar_info.ta_cmd, rar_info.msg3_size);
pending_rars.push(rar_info);
return 0;
std::lock_guard<std::mutex> lock(sched_mutex);
return rar_sched->dl_rach_info(rar_info);
}
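dl_rach_info can be invoked concurrently with the TTI processing path, which is why the forward to rar_sched is now wrapped in sched_mutex. A generic standalone sketch of that guarded-queue pattern (all names are illustrative, not srsenb's):

#include <cstdint>
#include <mutex>
#include <queue>

struct rar_info_ex { // illustrative stand-in for dl_sched_rar_info_t
  uint32_t prach_tti  = 0;
  uint16_t temp_crnti = 0;
};

class rar_queue_ex {
public:
  // Producer side, e.g. a RACH indication arriving from another thread.
  void push(const rar_info_ex& info)
  {
    std::lock_guard<std::mutex> lock(mutex); // same pattern as the lock in sched::dl_rach_info
    pending.push(info);
  }
  // Consumer side, e.g. the TTI processing loop draining pending RARs.
  bool pop(rar_info_ex& out)
  {
    std::lock_guard<std::mutex> lock(mutex);
    if (pending.empty()) {
      return false;
    }
    out = pending.front();
    pending.pop();
    return true;
  }

private:
  std::mutex              mutex;
  std::queue<rar_info_ex> pending;
};

int main()
{
  rar_queue_ex q;
  q.push({100, 0x46});
  rar_info_ex info;
  return q.pop(info) ? 0 : 1;
}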
int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code)
@@ -894,9 +892,9 @@ void sched::tpc_dec(uint16_t rnti)
*
*******************************************************/
sched::tti_sched_t* sched::new_tti(uint32_t tti_rx)
sched::tti_sched_result_t* sched::new_tti(uint32_t tti_rx)
{
tti_sched_t* tti_sched = get_tti_sched(tti_rx);
tti_sched_result_t* tti_sched = get_tti_sched(tti_rx);
// if it is the first time tti is run, reset vars
if (tti_rx != tti_sched->get_tti_rx()) {
@@ -932,96 +930,13 @@ sched::tti_sched_t* sched::new_tti(uint32_t tti_rx)
return tti_sched;
}
bool is_in_tti_interval(uint32_t tti, uint32_t tti1, uint32_t tti2)
{
tti %= 10240;
tti1 %= 10240;
tti2 %= 10240;
if (tti1 <= tti2) {
return tti >= tti1 and tti <= tti2;
}
return tti >= tti1 or tti <= tti2;
}
// Schedules RAR
// On each call, we schedule the oldest RAR that is still within its response window;
// RARs whose window has already passed are discarded.
void sched::dl_sched_rar(tti_sched_t* tti_sched)
{
// Discard all RARs whose window has already passed. The first one still inside its window is scheduled; if that allocation fails, we exit.
while (!pending_rars.empty()) {
dl_sched_rar_info_t rar = pending_rars.front();
if (not is_in_tti_interval(tti_sched->get_tti_tx_dl(),
rar.prach_tti + 3,
rar.prach_tti + 3 + cfg.prach_rar_window))
{
if (tti_sched->get_tti_tx_dl() >= rar.prach_tti + 3 + cfg.prach_rar_window) {
log_h->console("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n",
rar.prach_tti,
cfg.prach_rar_window,
current_tti);
log_h->error("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n",
rar.prach_tti,
cfg.prach_rar_window,
current_tti);
// Remove from pending queue and get next one if window has passed already
pending_rars.pop();
continue;
}
// If the window has not started yet, do not look for more pending RARs
return;
}
/* Since we do fixed Msg3 scheduling for all RARs, we can only allocate one RAR per TTI.
* While there is room in the window, each call to this function allocates at most one pending RAR and
* associates a Msg3 transmission with it.
*/
dl_sched_rar_t rar_grant;
uint32_t L_prb = 3;
uint32_t n_prb = cfg.nrb_pucch > 0 ? cfg.nrb_pucch : 2;
bzero(&rar_grant, sizeof(rar_grant));
uint32_t rba = srslte_ra_type2_to_riv(L_prb, n_prb, cfg.cell.nof_prb);
dl_sched_rar_grant_t* grant = &rar_grant.msg3_grant[0];
grant->grant.tpc_pusch = 3;
grant->grant.trunc_mcs = 0;
grant->grant.rba = rba;
grant->data = rar;
rar_grant.nof_grants++;
// Try to schedule DCI + RBGs for RAR Grant
tti_sched_t::rar_code_t ret = tti_sched->alloc_rar(rar_aggr_level,
rar_grant,
rar.prach_tti,
7 * rar_grant.nof_grants); // FIXME: check RAR size
// If we can allocate, schedule Msg3 and remove from pending
if (!ret.first) {
return;
}
// Schedule Msg3 only if there is a requirement for Msg3 data
uint32_t pending_tti = (tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY) % TTIMOD_SZ;
pending_msg3[pending_tti].enabled = true;
pending_msg3[pending_tti].rnti = rar.temp_crnti; // FIXME
pending_msg3[pending_tti].L = L_prb;
pending_msg3[pending_tti].n_prb = n_prb;
dl_sched_rar_grant_t* last_msg3 = &rar_grant.msg3_grant[rar_grant.nof_grants - 1];
pending_msg3[pending_tti].mcs = last_msg3->grant.trunc_mcs;
Info("SCHED: Allocating Msg3 for rnti=%d at tti=%d\n", rar.temp_crnti, tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY);
// Remove pending RAR and exit
pending_rars.pop();
return;
}
}
void sched::dl_sched_data(tti_sched_t* tti_sched)
void sched::dl_sched_data(tti_sched_result_t* tti_sched)
{
// NOTE: In case of 6 PRBs, do not transmit if there is going to be a PRACH in the UL to avoid collisions
uint32_t tti_rx_ack = TTI_RX_ACK(tti_sched->get_tti_rx());
uint32_t pending_tti = tti_rx_ack % TTIMOD_SZ;
if (cfg.cell.nof_prb == 6 and (srslte_prach_tti_opportunity_config_fdd(cfg.prach_config, tti_rx_ack, -1) or
pending_msg3[pending_tti].enabled)) {
uint32_t tti_rx_ack = TTI_RX_ACK(tti_sched->get_tti_rx());
bool msg3_enabled = rar_sched->find_pending_msg3(tti_rx_ack).enabled;
if (cfg.cell.nof_prb == 6 and
(srslte_prach_tti_opportunity_config_fdd(cfg.prach_config, tti_rx_ack, -1) or msg3_enabled)) {
tti_sched->get_dl_mask().fill(0, tti_sched->get_dl_mask().size());
}
@@ -1030,18 +945,16 @@ void sched::dl_sched_data(tti_sched_t* tti_sched)
}
// Compute DL scheduler result
int sched::generate_dl_sched(tti_sched_t* tti_sched)
int sched::generate_dl_sched(tti_sched_result_t* tti_sched)
{
/* Initialize variables */
current_tti = tti_sched->get_tti_tx_dl();
bc_aggr_level = 2;
rar_aggr_level = 2;
current_tti = tti_sched->get_tti_tx_dl();
/* Schedule Broadcast data (SIB and paging) */
bc_sched->dl_sched(tti_sched);
/* Schedule RAR */
dl_sched_rar(tti_sched);
rar_sched->dl_sched(tti_sched);
/* Schedule pending RLC data */
dl_sched_data(tti_sched);
@@ -1049,7 +962,7 @@ int sched::generate_dl_sched(tti_sched_t* tti_sched)
return 0;
}
void sched::generate_phich(tti_sched_t* tti_sched)
void sched::generate_phich(tti_sched_result_t* tti_sched)
{
// Allocate user PHICHs
uint32_t nof_phich_elems = 0;
@@ -1075,31 +988,8 @@ void sched::generate_phich(tti_sched_t* tti_sched)
tti_sched->ul_sched_result.nof_phich_elems = nof_phich_elems;
}
void sched::ul_sched_msg3(tti_sched_t* tti_sched)
{
uint32_t pending_tti = tti_sched->get_tti_tx_ul() % TTIMOD_SZ;
if (not pending_msg3[pending_tti].enabled) {
return;
}
uint16_t rnti = pending_msg3[pending_tti].rnti;
if (ue_db.count(rnti) == 0) {
log_h->warning("SCHED: Msg3 allocated for user rnti=0x%x that no longer exists\n", rnti);
return;
}
sched_ue* user = &ue_db[rnti];
/* Allocate RBGs and HARQ for Msg3 */
ul_harq_proc::ul_alloc_t msg3 = {pending_msg3[pending_tti].n_prb, pending_msg3[pending_tti].L};
if (not tti_sched->alloc_ul_msg3(user, msg3, pending_msg3[pending_tti].mcs)) {
log_h->warning("SCHED: Could not allocate msg3 within (%d,%d)\n", msg3.RB_start, msg3.RB_start + msg3.L);
return;
}
pending_msg3[pending_tti].enabled = false;
}
// Compute UL scheduler result
int sched::generate_ul_sched(tti_sched_t* tti_sched)
int sched::generate_ul_sched(tti_sched_result_t* tti_sched)
{
/* Initialize variables */
current_tti = tti_sched->get_tti_tx_ul();
@@ -1112,7 +1002,7 @@ int sched::generate_ul_sched(tti_sched_t* tti_sched)
}
// Update available allocation if there's a pending RAR
ul_sched_msg3(tti_sched);
rar_sched->ul_sched(tti_sched);
// reserve PRBs for PUCCH
if (cfg.cell.nof_prb != 6 and (ul_mask & pucch_mask).any()) {
@@ -1143,7 +1033,7 @@ int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result)
uint32_t tti_rx = TTI_SUB(tti, TX_DELAY);
// Compute scheduling Result for tti_rx
tti_sched_t* tti_sched = new_tti(tti_rx);
tti_sched_result_t* tti_sched = new_tti(tti_rx);
// copy result
*sched_result = tti_sched->dl_sched_result;
@@ -1159,8 +1049,8 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
}
// Compute scheduling Result for tti_rx
uint32_t tti_rx = (tti + 10240 - 2 * FDD_HARQ_DELAY_MS) % 10240;
tti_sched_t* tti_sched = new_tti(tti_rx);
uint32_t tti_rx = (tti + 10240 - 2 * FDD_HARQ_DELAY_MS) % 10240;
tti_sched_result_t* tti_sched = new_tti(tti_rx);
// Copy results
*sched_result = tti_sched->ul_sched_result;

View File

@@ -30,7 +30,7 @@ void sched::bc_sched_t::init(srsenb::rrc_interface_mac* rrc_)
rrc = rrc_;
}
void sched::bc_sched_t::dl_sched(sched::tti_sched_t* tti_sched)
void sched::bc_sched_t::dl_sched(sched::tti_sched_result_t* tti_sched)
{
current_sf_idx = tti_sched->get_sf_idx();
current_sfn = tti_sched->get_sfn();
@@ -48,7 +48,7 @@ void sched::bc_sched_t::dl_sched(sched::tti_sched_t* tti_sched)
alloc_paging(tti_sched);
}
void sched::bc_sched_t::update_si_windows(tti_sched_t* tti_sched)
void sched::bc_sched_t::update_si_windows(tti_sched_result_t* tti_sched)
{
uint32_t tti_tx_dl = tti_sched->get_tti_tx_dl();
@@ -86,7 +86,7 @@ void sched::bc_sched_t::update_si_windows(tti_sched_t* tti_sched)
}
}
void sched::bc_sched_t::alloc_sibs(tti_sched_t* tti_sched)
void sched::bc_sched_t::alloc_sibs(tti_sched_result_t* tti_sched)
{
for (uint32_t i = 0; i < pending_sibs.size(); i++) {
if (cfg->sibs[i].len > 0 and pending_sibs[i].is_in_window and pending_sibs[i].n_tx < 4) {
@@ -108,7 +108,7 @@ void sched::bc_sched_t::alloc_sibs(tti_sched_t* tti_sched)
}
}
void sched::bc_sched_t::alloc_paging(srsenb::sched::tti_sched_t* tti_sched)
void sched::bc_sched_t::alloc_paging(srsenb::sched::tti_sched_result_t* tti_sched)
{
/* Allocate DCIs and RBGs for paging */
if (rrc != nullptr) {
@@ -126,4 +126,141 @@ void sched::bc_sched_t::reset()
}
}
/*******************************************************
* RAR scheduling
*******************************************************/
sched::ra_sched_t::ra_sched_t(cell_cfg_t* cfg_) : cfg(cfg_) {}
void sched::ra_sched_t::init(srslte::log* log_, std::map<uint16_t, sched_ue>& ue_db_)
{
log_h = log_;
ue_db = &ue_db_;
}
// Schedules RAR
// On each call, we schedule the oldest RAR that is still within its response window;
// RARs whose window has already passed are discarded.
void sched::ra_sched_t::dl_sched(srsenb::sched::tti_sched_result_t* tti_sched)
{
tti_tx_dl = tti_sched->get_tti_tx_dl();
rar_aggr_level = 2;
// Discard all RARs whose window has already passed. The first one still inside its window is scheduled; if that allocation fails, we exit.
while (!pending_rars.empty()) {
dl_sched_rar_info_t rar = pending_rars.front();
if (not sched_utils::is_in_tti_interval(tti_tx_dl,
rar.prach_tti + 3,
rar.prach_tti + 3 + cfg->prach_rar_window))
{
if (tti_tx_dl >= rar.prach_tti + 3 + cfg->prach_rar_window) {
log_h->console("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n",
rar.prach_tti,
cfg->prach_rar_window,
tti_tx_dl);
log_h->error("SCHED: Could not transmit RAR within the window (RA TTI=%d, Window=%d, Now=%d)\n",
rar.prach_tti,
cfg->prach_rar_window,
tti_tx_dl);
// Remove from pending queue and get next one if window has passed already
pending_rars.pop();
continue;
}
// If the window has not started yet, do not look for more pending RARs
return;
}
/* Since we do fixed Msg3 scheduling for all RARs, we can only allocate one RAR per TTI.
* While there is room in the window, each call to this function allocates at most one pending RAR and
* associates a Msg3 transmission with it.
*/
dl_sched_rar_t rar_grant;
uint32_t L_prb = 3;
uint32_t n_prb = cfg->nrb_pucch > 0 ? cfg->nrb_pucch : 2;
bzero(&rar_grant, sizeof(rar_grant));
uint32_t rba = srslte_ra_type2_to_riv(L_prb, n_prb, cfg->cell.nof_prb);
dl_sched_rar_grant_t* grant = &rar_grant.msg3_grant[0];
grant->grant.tpc_pusch = 3;
grant->grant.trunc_mcs = 0;
grant->grant.rba = rba;
grant->data = rar;
rar_grant.nof_grants++;
// Try to schedule DCI + RBGs for RAR Grant
tti_sched_result_t::rar_code_t ret = tti_sched->alloc_rar(rar_aggr_level,
rar_grant,
rar.prach_tti,
7 * rar_grant.nof_grants); // FIXME: check RAR size
// If we can allocate, schedule Msg3 and remove from pending
if (!ret.first) {
return;
}
// Schedule Msg3 only if there is a requirement for Msg3 data
uint32_t pending_tti = (tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY) % TTIMOD_SZ;
pending_msg3[pending_tti].enabled = true;
pending_msg3[pending_tti].rnti = rar.temp_crnti; // FIXME
pending_msg3[pending_tti].L = L_prb;
pending_msg3[pending_tti].n_prb = n_prb;
dl_sched_rar_grant_t* last_msg3 = &rar_grant.msg3_grant[rar_grant.nof_grants - 1];
pending_msg3[pending_tti].mcs = last_msg3->grant.trunc_mcs;
log_h->info("SCHED: Allocating Msg3 for rnti=%d at tti=%d\n", rar.temp_crnti, tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY);
// Remove pending RAR and exit
pending_rars.pop();
return;
}
}
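The window logic above reduces to three outcomes per pending RAR: wait (window not open yet), send (inside [prach_tti + 3, prach_tti + 3 + prach_rar_window], inclusive), or drop (window expired). A standalone sketch with example numbers, ignoring the TTI wrap-around that sched_utils::is_in_tti_interval handles in the real code:

#include <cstdint>
#include <cstdio>

enum class rar_state { WAIT, SEND, DROP };

// Mirrors the three branches of ra_sched_t::dl_sched (wrap-around omitted).
static rar_state classify(uint32_t tti_tx_dl, uint32_t prach_tti, uint32_t window)
{
  const uint32_t start = prach_tti + 3;
  const uint32_t end   = start + window; // inclusive, matching is_in_tti_interval
  if (tti_tx_dl < start) {
    return rar_state::WAIT; // window not open yet: keep the RAR pending
  }
  if (tti_tx_dl > end) {
    return rar_state::DROP; // window expired: pop the RAR and log the failure
  }
  return rar_state::SEND;   // inside the window: try to allocate DCI + RBGs
}

int main()
{
  const uint32_t prach_tti = 100, window = 5; // example values, not a real config
  std::printf("tti=102 -> %d (WAIT)\n", static_cast<int>(classify(102, prach_tti, window)));
  std::printf("tti=105 -> %d (SEND)\n", static_cast<int>(classify(105, prach_tti, window)));
  std::printf("tti=109 -> %d (DROP)\n", static_cast<int>(classify(109, prach_tti, window)));
  return 0;
}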
// Schedules Msg3
void sched::ra_sched_t::ul_sched(srsenb::sched::tti_sched_result_t* tti_sched)
{
uint32_t pending_tti = tti_sched->get_tti_tx_ul() % TTIMOD_SZ;
// check if there is a Msg3 to allocate
if (not pending_msg3[pending_tti].enabled) {
return;
}
uint16_t rnti = pending_msg3[pending_tti].rnti;
auto user_it = ue_db->find(rnti);
if (user_it == ue_db->end()) {
log_h->warning("SCHED: Msg3 allocated for user rnti=0x%x that no longer exists\n", rnti);
return;
}
/* Allocate RBGs and HARQ for Msg3 */
ul_harq_proc::ul_alloc_t msg3 = {pending_msg3[pending_tti].n_prb, pending_msg3[pending_tti].L};
if (not tti_sched->alloc_ul_msg3(&user_it->second, msg3, pending_msg3[pending_tti].mcs)) {
log_h->warning("SCHED: Could not allocate msg3 within (%d,%d)\n", msg3.RB_start, msg3.RB_start + msg3.L);
return;
}
pending_msg3[pending_tti].enabled = false;
}
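Note the lookup change in ul_sched: the old path used ue_db.count() followed by operator[], i.e. two lookups, where a forgotten count() check would silently insert a default-constructed user; map::find does a single lookup and can never insert. A generic illustration (the `_ex` names are stand-ins):

#include <cstdint>
#include <cstdio>
#include <map>

struct ue_ex { int dummy = 0; }; // illustrative stand-in for sched_ue

int main()
{
  std::map<uint16_t, ue_ex> ue_db;
  const uint16_t rnti = 0x46;

  // Old pattern: two lookups, and operator[] inserts if the count() check is forgotten.
  if (ue_db.count(rnti) != 0) {
    ue_ex* user = &ue_db[rnti];
    (void)user;
  }

  // New pattern (as in ra_sched_t::ul_sched): one lookup, no accidental insertion.
  auto user_it = ue_db.find(rnti);
  if (user_it == ue_db.end()) {
    std::printf("SCHED: Msg3 allocated for user rnti=0x%x that no longer exists\n", rnti);
    return 0;
  }
  ue_ex* user = &user_it->second;
  (void)user;
  return 0;
}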
int sched::ra_sched_t::dl_rach_info(dl_sched_rar_info_t rar_info)
{
log_h->info("SCHED: New RAR tti=%d, preamble=%d, temp_crnti=0x%x, ta_cmd=%d, msg3_size=%d\n",
rar_info.prach_tti, rar_info.preamble_idx, rar_info.temp_crnti, rar_info.ta_cmd, rar_info.msg3_size);
pending_rars.push(rar_info);
return 0;
}
void sched::ra_sched_t::reset()
{
for (auto& msg3 : pending_msg3) {
msg3 = {};
}
while (not pending_rars.empty()) {
pending_rars.pop();
}
}
const sched::ra_sched_t::pending_msg3_t& sched::ra_sched_t::find_pending_msg3(uint32_t tti)
{
uint32_t pending_tti = tti % TTIMOD_SZ;
return pending_msg3[pending_tti];
}
} // namespace srsenb
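find_pending_msg3 keys a fixed-size array purely by tti % TTIMOD_SZ, so the writer (dl_sched, keying by the future Msg3 TTI) and the reader (ul_sched, keying by tti_tx_ul) land in the same slot. A standalone sketch of that ring pattern, with an illustrative size rather than the project's TTIMOD_SZ (for the keying to stay consistent across the 10240-TTI wrap, the ring size must divide 10240):

#include <array>
#include <cassert>
#include <cstdint>

constexpr uint32_t RING_SZ_EX = 20; // illustrative; 10240 % RING_SZ_EX == 0

struct pending_msg3_ex { // mirrors pending_msg3_t's enabled/rnti fields
  bool     enabled = false;
  uint16_t rnti    = 0;
};

std::array<pending_msg3_ex, RING_SZ_EX> ring{};

pending_msg3_ex& slot(uint32_t tti) { return ring[tti % RING_SZ_EX]; }

int main()
{
  const uint32_t msg3_tti = 10238 + 6;    // may exceed 10240; the modulo keeps it valid
  slot(msg3_tti) = {true, 0x46};          // writer: dl_sched records the Msg3 grant
  assert(slot(msg3_tti % 10240).enabled); // reader keys with the wrapped TTI, same slot
  slot(msg3_tti) = {};                    // consumed: same reset idiom as ra_sched_t::reset()
  assert(not slot(msg3_tti).enabled);
  return 0;
}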

View File

@@ -20,6 +20,7 @@
*/
#include "srsenb/hdr/stack/mac/scheduler.h"
#include "srsenb/hdr/stack/mac/scheduler_ctrl.h"
#include "srsenb/hdr/stack/mac/scheduler_ue.h"
#include <algorithm>
#include <random>
@@ -177,7 +178,7 @@ struct sched_tester : public srsenb::sched {
uint32_t tti_tx_dl;
uint32_t tti_tx_ul;
uint32_t current_cfi;
pending_msg3_t ul_pending_msg3;
ra_sched_t::pending_msg3_t ul_pending_msg3;
srslte::bounded_bitset<128, true> used_cce;
// std::vector<bool> used_cce;
std::map<uint16_t, tester_user_results> ue_data; ///< stores buffer state of each user
@@ -280,7 +281,7 @@ void sched_tester::new_test_tti(uint32_t tti_)
} else {
tti_data.ul_sf_idx = (tti_data.tti_tx_ul + 10240 - FDD_HARQ_DELAY_MS) % 10;
}
tti_data.ul_pending_msg3 = pending_msg3[tti_data.tti_tx_ul % TTIMOD_SZ];
tti_data.ul_pending_msg3 = rar_sched->find_pending_msg3(tti_data.tti_tx_ul);
tti_data.current_cfi = sched_cfg.nof_ctrl_symbols;
tti_data.used_cce.resize(srslte_regs_pdcch_ncce(&regs, tti_data.current_cfi));
tti_data.used_cce.reset();
@@ -493,7 +494,7 @@
*/
void sched_tester::test_tti_result()
{
tti_sched_t* tti_sched = get_tti_sched(tti_data.tti_rx);
tti_sched_result_t* tti_sched = get_tti_sched(tti_data.tti_rx);
// Helper Function: checks if there is any collision. If not, fills the mask
auto try_cce_fill = [&](const srslte_dci_location_t& dci_loc, const char* ch) {
@@ -543,11 +544,11 @@ void sched_tester::test_tti_result()
try_cce_fill(rar.dci.location, "DL RAR");
CondError(rar.tbs == 0, "Allocated RAR process with invalid TBS=%d\n", rar.tbs);
for (uint32_t j = 0; j < rar.nof_grants; ++j) {
const auto& msg3_grant = rar.msg3_grant[j];
uint32_t pending_tti = (tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY) % TTIMOD_SZ;
CondError(not pending_msg3[pending_tti].enabled, "Pending Msg3 should have been set\n");
uint32_t rba =
srslte_ra_type2_to_riv(pending_msg3[pending_tti].L, pending_msg3[pending_tti].n_prb, cfg.cell.nof_prb);
const auto& msg3_grant = rar.msg3_grant[j];
const ra_sched_t::pending_msg3_t& p =
rar_sched->find_pending_msg3(tti_sched->get_tti_tx_dl() + MSG3_DELAY_MS + TX_DELAY);
CondError(not p.enabled, "Pending Msg3 should have been set\n");
uint32_t rba = srslte_ra_type2_to_riv(p.L, p.n_prb, cfg.cell.nof_prb);
CondError(msg3_grant.grant.rba != rba, "Pending Msg3 RBA is not valid\n");
}
}
@@ -750,7 +751,7 @@ void sched_tester::test_collisions()
void sched_tester::test_collisions()
{
tti_sched_t* tti_sched = get_tti_sched(tti_data.tti_rx);
tti_sched_result_t* tti_sched = get_tti_sched(tti_data.tti_rx);
srsenb::prbmask_t ul_allocs(cfg.cell.nof_prb);