disable SRBs UL during handover to avoid UL grants and scheduling requests before the handover is complete

Francisco Paisana 2020-10-28 09:01:31 +00:00
parent f647343f18
commit a865858db3
6 changed files with 41 additions and 26 deletions
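In short, the change keeps SRB1/SRB2 DL-only from the moment the handover command is sent until the UE shows up in the target cell with a C-RNTI CE, at which point the SRB UL direction is restored. Below is a minimal, self-contained sketch of that idea (not part of the commit); the names bearer_dir, bearer_cfg, ue_sched_cfg, freeze_srb_ul and reactivate_srb_ul are simplified stand-ins invented for illustration, not srsenb's actual sched_interface definitions.

#include <array>
#include <cstdint>

// Simplified stand-in for the per-bearer scheduler config (assumption).
enum class bearer_dir { IDLE, DL, UL, BOTH };
struct bearer_cfg {
  bearer_dir direction = bearer_dir::IDLE;
};
struct ue_sched_cfg {
  std::array<bearer_cfg, 3> ue_bearers; // index 0: SRB0, 1: SRB1, 2: SRB2
};

// On the intra-eNB HO command: make SRB1/SRB2 DL-only so the scheduler issues
// no UL grants and serves no scheduling requests until the handover completes.
void freeze_srb_ul(ue_sched_cfg& cfg)
{
  for (uint32_t i = 1; i <= 2; ++i) { // SRB1 and SRB2
    cfg.ue_bearers[i].direction = bearer_dir::DL;
  }
}

// On reception of the C-RNTI CE in the target cell: re-enable UL on the SRBs
// so the RRCConnectionReconfigurationComplete can be scheduled.
void reactivate_srb_ul(ue_sched_cfg& cfg)
{
  for (uint32_t i = 1; i <= 2; ++i) {
    cfg.ue_bearers[i].direction = bearer_dir::BOTH;
  }
}

int main()
{
  ue_sched_cfg cfg;
  freeze_srb_ul(cfg);     // HO command sent: SRB UL frozen
  reactivate_srb_ul(cfg); // C-RNTI CE received: SRB UL usable again
  return 0;
}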

View File

@@ -296,9 +296,10 @@ public:
   virtual int ul_sched(uint32_t tti, uint32_t enb_cc_idx, ul_sched_res_t& sched_result) = 0;
 
   /* Custom */
-  virtual void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) = 0;
-  virtual std::array<int, SRSLTE_MAX_CARRIERS> get_enb_ue_cc_map(uint16_t rnti) = 0;
-  virtual int ul_buffer_add(uint16_t rnti, uint32_t lcid, uint32_t bytes) = 0;
+  virtual void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) = 0;
+  virtual std::array<int, SRSLTE_MAX_CARRIERS> get_enb_ue_cc_map(uint16_t rnti) = 0;
+  virtual std::array<bool, SRSLTE_MAX_CARRIERS> get_scell_activation_mask(uint16_t rnti) = 0;
+  virtual int ul_buffer_add(uint16_t rnti, uint32_t lcid, uint32_t bytes) = 0;
 };
 
 } // namespace srsenb

View File

@@ -129,11 +129,12 @@ public:
   /* Custom functions
    */
-  void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) final;
-  void tpc_inc(uint16_t rnti);
-  void tpc_dec(uint16_t rnti);
-  std::array<int, SRSLTE_MAX_CARRIERS> get_enb_ue_cc_map(uint16_t rnti) final;
-  int ul_buffer_add(uint16_t rnti, uint32_t lcid, uint32_t bytes) final;
+  void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs) final;
+  void tpc_inc(uint16_t rnti);
+  void tpc_dec(uint16_t rnti);
+  std::array<int, SRSLTE_MAX_CARRIERS> get_enb_ue_cc_map(uint16_t rnti) final;
+  std::array<bool, SRSLTE_MAX_CARRIERS> get_scell_activation_mask(uint16_t rnti) final;
+  int ul_buffer_add(uint16_t rnti, uint32_t lcid, uint32_t bytes) final;
 
   class carrier_sched;

View File

@@ -55,6 +55,7 @@ struct cc_sched_ue {
   int get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols);
   uint32_t get_required_prb_ul(uint32_t req_bytes);
   const sched_cell_params_t* get_cell_cfg() const { return cell_params; }
+  uint32_t get_ue_cc_idx() const { return ue_cc_idx; }
   void set_dl_cqi(uint32_t tti_tx_dl, uint32_t dl_cqi);
   int cqi_to_tbs(uint32_t nof_prb, uint32_t nof_re, bool use_tbs_index_alt, bool is_ul, uint32_t* mcs);
   cc_st cc_state() const { return cc_state_; }

View File

@@ -359,13 +359,13 @@ void sched::tpc_dec(uint16_t rnti)
 std::array<int, SRSLTE_MAX_CARRIERS> sched::get_enb_ue_cc_map(uint16_t rnti)
 {
   std::array<int, SRSLTE_MAX_CARRIERS> ret{};
-  ret.fill(-1); // -1 for inactive carriers
+  ret.fill(-1); // -1 for inactive & non-existent carriers
   ue_db_access(rnti,
                [this, &ret](sched_ue& ue) {
                  for (size_t enb_cc_idx = 0; enb_cc_idx < carrier_schedulers.size(); ++enb_cc_idx) {
-                   auto p = ue.get_active_cell_index(enb_cc_idx);
-                   if (p.second < SRSLTE_MAX_CARRIERS) {
-                     ret[enb_cc_idx] = p.second;
+                   const cc_sched_ue* cc_ue = ue.find_ue_carrier(enb_cc_idx);
+                   if (cc_ue != nullptr) {
+                     ret[enb_cc_idx] = cc_ue->get_ue_cc_idx();
                    }
                  }
                },
@@ -373,6 +373,20 @@ std::array<int, SRSLTE_MAX_CARRIERS> sched::get_enb_ue_cc_map(uint16_t rnti)
   return ret;
 }
 
+std::array<bool, SRSLTE_MAX_CARRIERS> sched::get_scell_activation_mask(uint16_t rnti)
+{
+  std::array<int, SRSLTE_MAX_CARRIERS>  enb_ue_cc_map = get_enb_ue_cc_map(rnti);
+  std::array<bool, SRSLTE_MAX_CARRIERS> scell_mask    = {};
+  for (int ue_cc : enb_ue_cc_map) {
+    if (ue_cc <= 0) {
+      // inactive or PCell
+      continue;
+    }
+    scell_mask[ue_cc] = true;
+  }
+  return scell_mask;
+}
+
 /*******************************************************
  *
  * Main sched functions
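For context, a small self-contained example (not part of the diff) of the mapping the new get_scell_activation_mask performs: each entry of get_enb_ue_cc_map is a UE carrier index, or -1 when the eNB carrier is not configured for the UE, and only SCells (ue_cc_idx > 0) end up flagged in the mask, while the PCell (ue_cc_idx == 0) and unconfigured carriers are skipped. The MAX_CARRIERS constant and the sample map below are made up for illustration.

#include <array>
#include <cassert>
#include <cstddef>

int main()
{
  constexpr std::size_t MAX_CARRIERS = 5; // stand-in for SRSLTE_MAX_CARRIERS
  // enb_cc_idx -> ue_cc_idx; -1 means the carrier is not configured for this UE
  std::array<int, MAX_CARRIERS>  enb_ue_cc_map = {0, -1, 1, 2, -1};
  std::array<bool, MAX_CARRIERS> scell_mask    = {};

  for (int ue_cc : enb_ue_cc_map) {
    if (ue_cc <= 0) {
      continue; // skip unconfigured carriers (-1) and the PCell (ue_cc_idx == 0)
    }
    scell_mask[ue_cc] = true;
  }

  // Only the SCells with ue_cc_idx 1 and 2 are flagged; the PCell is not.
  assert(!scell_mask[0] && scell_mask[1] && scell_mask[2] && !scell_mask[3] && !scell_mask[4]);
  return 0;
}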

View File

@@ -450,20 +450,8 @@ void ue::allocate_ce(srslte::sch_pdu* pdu, uint32_t lcid)
       break;
     case srslte::dl_sch_lcid::SCELL_ACTIVATION:
       if (pdu->new_subh()) {
-        std::array<int, SRSLTE_MAX_CARRIERS>  enb_ue_cc_map     = sched->get_enb_ue_cc_map(rnti);
-        std::array<bool, SRSLTE_MAX_CARRIERS> active_scell_list = {};
-        size_t                                enb_cc_idx        = 0;
-        for (; enb_cc_idx < enb_ue_cc_map.size(); ++enb_cc_idx) {
-          if (enb_ue_cc_map[enb_cc_idx] >= 8) {
-            break;
-          }
-          if (enb_ue_cc_map[enb_cc_idx] <= 0) {
-            // inactive or PCell
-            continue;
-          }
-          active_scell_list[enb_ue_cc_map[enb_cc_idx]] = true;
-        }
-        if (enb_cc_idx == enb_ue_cc_map.size() and pdu->get()->set_scell_activation_cmd(active_scell_list)) {
+        std::array<bool, SRSLTE_MAX_CARRIERS> active_scell_list = sched->get_scell_activation_mask(rnti);
+        if (pdu->get()->set_scell_activation_cmd(active_scell_list)) {
           phy->set_activation_deactivation_scell(rnti, active_scell_list);
           // Allocate and initialize Rx/Tx softbuffers for new carriers (exclude PCell)
           allocate_cc_buffers(active_scell_list.size() - 1);

View File

@@ -120,6 +120,11 @@ int rrc::ue::mac_controller::handle_crnti_ce(uint32_t temp_crnti)
   // keep DRBs disabled until RRCReconfComplete is received
   set_drb_activation(false);
 
+  // Re-activate SRBs UL (needed for ReconfComplete)
+  for (uint32_t i = rb_id_t::RB_ID_SRB1; i <= rb_id_t::RB_ID_SRB2; ++i) {
+    current_sched_ue_cfg.ue_bearers[i] = next_sched_ue_cfg.ue_bearers[i];
+  }
+
   return mac->ue_set_crnti(temp_crnti, rrc_ue->rnti, &current_sched_ue_cfg);
 }
@@ -264,6 +269,11 @@ void rrc::ue::mac_controller::handle_intraenb_ho_cmd(const asn1::rrc::rrc_conn_r
   // Freeze all DRBs. SRBs DL are needed for sending the HO Cmd
   set_drb_activation(false);
 
+  // Stop any SRB UL (including SRs)
+  for (uint32_t i = rb_id_t::RB_ID_SRB1; i <= rb_id_t::RB_ID_SRB2; ++i) {
+    next_sched_ue_cfg.ue_bearers[i].direction = sched_interface::ue_bearer_cfg_t::DL;
+  }
+
   update_mac(mac_controller::config_tx);
 }
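The scheduler side is not part of this commit; as a rough illustration of the intended effect described in the commit message, a bearer whose configured direction has no UL component should generate no UL grants and have no scheduling requests served. The sketch below uses a simplified bearer_dir enum invented for illustration, not srsenb's actual types or scheduler logic.

#include <array>

// Simplified stand-in for the bearer direction in the scheduler config (assumption).
enum class bearer_dir { IDLE, DL, UL, BOTH };

// True if the bearer is allowed to carry UL traffic, i.e. the UE may be given
// UL grants / have SRs served on its behalf.
bool ul_allowed(bearer_dir d)
{
  return d == bearer_dir::UL || d == bearer_dir::BOTH;
}

int main()
{
  // SRB1/SRB2 after handle_intraenb_ho_cmd(): DL-only, so no UL activity.
  std::array<bearer_dir, 2> srbs = {bearer_dir::DL, bearer_dir::DL};
  bool any_ul = false;
  for (bearer_dir d : srbs) {
    any_ul = any_ul || ul_allowed(d);
  }
  // Once handle_crnti_ce() restores the UL direction, any_ul becomes true again.
  return any_ul ? 1 : 0;
}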