From bb38fa71199cbd58f1cddd8bc840c88690df902c Mon Sep 17 00:00:00 2001
From: Francisco Paisana
Date: Wed, 4 Mar 2020 19:06:48 +0000
Subject: [PATCH] fixed tti resetting after tti end

---
 srsenb/hdr/stack/mac/scheduler_grid.h     | 13 +++---
 srsenb/src/stack/mac/scheduler_carrier.cc |  9 ++--
 srsenb/src/stack/mac/scheduler_grid.cc    | 55 +++++++++++++----------
 srsenb/src/stack/mac/scheduler_ue.cc      |  2 +-
 srsenb/test/mac/scheduler_test_rand.cc    |  3 ++
 5 files changed, 48 insertions(+), 34 deletions(-)

diff --git a/srsenb/hdr/stack/mac/scheduler_grid.h b/srsenb/hdr/stack/mac/scheduler_grid.h
index d335086de..2f88f4b4d 100644
--- a/srsenb/hdr/stack/mac/scheduler_grid.h
+++ b/srsenb/hdr/stack/mac/scheduler_grid.h
@@ -177,6 +177,7 @@ class sf_sched : public dl_sf_sched_itf, public ul_sf_sched_itf
 {
 public:
   struct sf_sched_result {
+    tti_params_t                    tti_params{10241};
     sched_interface::dl_sched_res_t dl_sched_result;
     sched_interface::ul_sched_res_t ul_sched_result;
     rbgmask_t                       dl_mask; ///< Accumulation of all DL RBG allocations
@@ -238,7 +239,7 @@ public:
   sf_sched();
   void init(const sched_cell_params_t& cell_params_);
   void new_tti(uint32_t tti_rx_, uint32_t start_cfi);
-  void reset();
+  void finish_tti();
 
   // DL alloc methods
   alloc_outcome_t alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx);
@@ -258,17 +259,17 @@ public:
   // dl_tti_sched itf
   alloc_outcome_t  alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) final;
-  uint32_t         get_tti_tx_dl() const final { return tti_params.tti_tx_dl; }
+  uint32_t         get_tti_tx_dl() const final { return tti_params->tti_tx_dl; }
   uint32_t         get_nof_ctrl_symbols() const final;
   const rbgmask_t& get_dl_mask() const final { return tti_alloc.get_dl_mask(); }
   // ul_tti_sched itf
   alloc_outcome_t  alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) final;
   const prbmask_t& get_ul_mask() const final { return tti_alloc.get_ul_mask(); }
-  uint32_t         get_tti_tx_ul() const final { return tti_params.tti_tx_ul; }
+  uint32_t         get_tti_tx_ul() const final { return tti_params->tti_tx_ul; }
 
   // getters
-  uint32_t            get_tti_rx() const { return tti_params.tti_rx; }
-  const tti_params_t& get_tti_params() const { return tti_params; }
+  uint32_t               get_tti_rx() const { return tti_params->tti_rx; }
+  const tti_params_t&    get_tti_params() const { return *tti_params; }
   const sf_sched_result& last_sched_result() const { return *last_sf_result; }
 
@@ -301,10 +302,10 @@ private:
   std::array<sf_sched_result, 2> sched_result_resources = {};
 
   // Next TTI state
-  tti_params_t                     tti_params{10241};
   sf_sched_result*                 current_sf_result = nullptr;
   sched_interface::dl_sched_res_t* dl_sched_result   = nullptr;
   sched_interface::ul_sched_res_t* ul_sched_result   = nullptr;
+  tti_params_t*                    tti_params        = nullptr;
 
   // Last subframe scheduler result
   sf_sched_result* last_sf_result = nullptr;
diff --git a/srsenb/src/stack/mac/scheduler_carrier.cc b/srsenb/src/stack/mac/scheduler_carrier.cc
index d5536c900..637c2825d 100644
--- a/srsenb/src/stack/mac/scheduler_carrier.cc
+++ b/srsenb/src/stack/mac/scheduler_carrier.cc
@@ -319,7 +319,7 @@ sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
   sf_sched* tti_sched = get_sf_sched(tti_rx);
 
   // if it is the first time tti is run, reset vars
-  if (tti_rx != tti_sched->get_tti_rx()) {
+  if (tti_rx != tti_sched->last_sched_result().tti_params.tti_rx) {
     uint32_t start_cfi = cc_cfg->sched_cfg->nof_ctrl_symbols;
     bool     dl_active = sf_dl_mask[tti_sched->get_tti_tx_dl() % sf_dl_mask.size()] == 0;
     tti_sched->new_tti(tti_rx, start_cfi);
@@ -350,7 +350,7 @@ sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
       alloc_ul_users(tti_sched);
     }
 
-    /* Select the winner DCI allocation combination */
+    /* Select the winner DCI allocation combination, store all the scheduling results */
     tti_sched->generate_sched_results();
 
     /* Enqueue Msg3s derived from allocated RARs */
@@ -359,10 +359,13 @@ sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
       ra_sched_ptr->sched_msg3(sf_msg3_sched, tti_sched->last_sched_result().dl_sched_result);
     }
 
-    /* clean-up blocked pids */
+    /* Reset ue harq pending ack state, clean-up blocked pids */
     for (auto& user : *ue_db) {
       user.second.finish_tti(tti_sched->get_tti_params(), enb_cc_idx);
     }
+
+    /* Reset sf_sched tti state */
+    tti_sched->finish_tti();
   }
   return tti_sched;
 }
diff --git a/srsenb/src/stack/mac/scheduler_grid.cc b/srsenb/src/stack/mac/scheduler_grid.cc
index 970842b36..453464240 100644
--- a/srsenb/src/stack/mac/scheduler_grid.cc
+++ b/srsenb/src/stack/mac/scheduler_grid.cc
@@ -414,11 +414,12 @@ bool sf_grid_t::reserve_ul_prbs(const prbmask_t& prbmask, bool strict)
 
 sf_sched::sf_sched() :
   current_sf_result(&sched_result_resources[0]),
-  dl_sched_result(&sched_result_resources[0].dl_sched_result),
-  ul_sched_result(&sched_result_resources[0].ul_sched_result),
   last_sf_result(&sched_result_resources[1]),
   log_h(srslte::logmap::get("MAC "))
 {
+  dl_sched_result = &current_sf_result->dl_sched_result;
+  ul_sched_result = &current_sf_result->ul_sched_result;
+  tti_params      = &current_sf_result->tti_params;
 }
 
 void sf_sched::init(const sched_cell_params_t& cell_params_)
@@ -426,26 +427,28 @@ void sf_sched::init(const sched_cell_params_t& cell_params_)
 {
   cc_cfg = &cell_params_;
   tti_alloc.init(*cc_cfg);
   max_msg3_prb = std::max(6u, cc_cfg->cfg.cell.nof_prb - (uint32_t)cc_cfg->cfg.nrb_pucch);
-  reset();
 }
 
 void sf_sched::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
 {
-  tti_params = tti_params_t{tti_rx_};
-  tti_alloc.new_tti(tti_params, start_cfi);
+  if (tti_params->tti_rx != tti_rx_) {
+    if (tti_params->tti_rx < 10240) {
+      log_h->warning("expected TTI for the given sf_sched does not match current_tti\n");
+    }
+    *tti_params = tti_params_t{tti_rx_};
+  }
+  tti_alloc.new_tti(*tti_params, start_cfi);
+
+  // setup first prb to be used for msg3 alloc. Account for potential PRACH alloc
+  last_msg3_prb           = cc_cfg->cfg.nrb_pucch;
+  uint32_t tti_msg3_alloc = TTI_ADD(tti_params->tti_tx_ul, MSG3_DELAY_MS);
+  if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_msg3_alloc, -1)) {
+    last_msg3_prb = std::max(last_msg3_prb, cc_cfg->cfg.prach_freq_offset + 6);
+  }
 }
 
-void sf_sched::reset()
+void sf_sched::finish_tti()
 {
-  /* Store last results */
-  std::swap(current_sf_result, last_sf_result);
-  last_sf_result->dl_mask = tti_alloc.get_dl_mask();
-  last_sf_result->ul_mask = tti_alloc.get_ul_mask();
-  dl_sched_result         = &current_sf_result->dl_sched_result;
-  ul_sched_result         = &current_sf_result->ul_sched_result;
-  *dl_sched_result        = {};
-  *ul_sched_result        = {};
-
   // reset internal state
   bc_allocs.clear();
   rar_allocs.clear();
@@ -453,12 +456,14 @@ void sf_sched::reset()
   ul_data_allocs.clear();
   tti_alloc.reset();
 
-  // setup first prb to be used for msg3 alloc
-  last_msg3_prb = cc_cfg->cfg.nrb_pucch;
-  uint32_t tti_msg3_alloc = TTI_ADD(tti_params.tti_tx_ul, MSG3_DELAY_MS);
-  if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_msg3_alloc, -1)) {
-    last_msg3_prb = std::max(last_msg3_prb, cc_cfg->cfg.prach_freq_offset + 6);
-  }
+  // set new current_sf_result
+  current_sf_result = &sched_result_resources[(last_sf_result == &sched_result_resources[0]) ? 1 : 0];
+  dl_sched_result   = &current_sf_result->dl_sched_result;
+  ul_sched_result   = &current_sf_result->ul_sched_result;
+  tti_params        = &current_sf_result->tti_params;
+  *dl_sched_result  = {};
+  *ul_sched_result  = {};
+  *tti_params       = tti_params_t{last_sf_result->tti_params.tti_rx + TTIMOD_SZ};
 }
 
 bool sf_sched::is_dl_alloc(sched_ue* user) const
@@ -685,7 +690,7 @@ bool sf_sched::alloc_phich(sched_ue* user)
   }
   uint32_t cell_index = p.second;
 
-  ul_harq_proc* h = user->get_ul_harq(tti_params.tti_rx, cell_index);
+  ul_harq_proc* h = user->get_ul_harq(tti_params->tti_tx_ul, cell_index);
 
   /* Indicate PHICH acknowledgment if needed */
   if (h->has_pending_ack()) {
@@ -969,8 +974,10 @@ void sf_sched::generate_sched_results()
 
   set_ul_sched_result(dci_result);
 
-  /* Reset all resources */
-  reset();
+  /* Store sf_sched results for this TTI */
+  last_sf_result          = current_sf_result;
+  last_sf_result->dl_mask = tti_alloc.get_dl_mask();
+  last_sf_result->ul_mask = tti_alloc.get_ul_mask();
 }
 
 uint32_t sf_sched::get_nof_ctrl_symbols() const
diff --git a/srsenb/src/stack/mac/scheduler_ue.cc b/srsenb/src/stack/mac/scheduler_ue.cc
index 5c28ad6c6..d0c33cadd 100644
--- a/srsenb/src/stack/mac/scheduler_ue.cc
+++ b/srsenb/src/stack/mac/scheduler_ue.cc
@@ -951,7 +951,7 @@ void sched_ue::finish_tti(const tti_params_t& tti_params, uint32_t enb_cc_idx)
   }
   uint32_t ue_cc_idx = p.second;
 
-  /* Clean-up all the UL Harqs with maxretx == 0 */
+  /* Reset pending ACKs and clean-up all the UL Harqs with maxretx == 0 */
   get_ul_harq(tti_params.tti_tx_ul, ue_cc_idx)->reset_pending_data();
 
   /* reset PIDs with pending data or blocked */
diff --git a/srsenb/test/mac/scheduler_test_rand.cc b/srsenb/test/mac/scheduler_test_rand.cc
index 840225add..3ec3b4906 100644
--- a/srsenb/test/mac/scheduler_test_rand.cc
+++ b/srsenb/test/mac/scheduler_test_rand.cc
@@ -188,6 +188,9 @@ void sched_tester::before_sched()
 
 int sched_tester::process_results()
 {
+  const auto* sf_sched = carrier_schedulers[CARRIER_IDX]->get_sf_sched_ptr(tti_info.tti_params.tti_rx);
+  TESTASSERT(tti_info.tti_params.tti_rx == sf_sched->last_sched_result().tti_params.tti_rx);
+
   test_pdcch_collisions();
   TESTASSERT(ue_tester->test_all(0, tti_info.dl_sched_result[CARRIER_IDX],
                                  tti_info.ul_sched_result[CARRIER_IDX]) == SRSLTE_SUCCESS);
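
For reference, a minimal stand-alone sketch of the buffering scheme this patch moves sf_sched to: two pre-allocated
result slots are used in ping-pong fashion, generate_sched_results() publishes the slot that was just filled as
last_sf_result, and finish_tti() re-points the current slot at the other buffer, clears it, and pre-sets its
tti_params to the TTI this sf_sched instance will serve next, which new_tti() later checks. The names below
(sf_result_t, sketch_sched, RING_SZ standing in for TTIMOD_SZ, and its value) are simplified assumptions for
illustration, not the srsenb API.

// Simplified sketch of the ping-pong result buffering; placeholder types only.
#include <array>
#include <cstdint>

static constexpr uint32_t RING_SZ   = 4;     // assumed stand-in for TTIMOD_SZ (ring of sf_scheds)
static constexpr uint32_t TTI_UNSET = 10241; // sentinel above the 10240 TTI wrap: "never scheduled"

struct sf_result_t {
  uint32_t tti_rx = TTI_UNSET;
  // DL/UL allocation tables, dl_mask, ul_mask ... would live here in the real scheduler
};

class sketch_sched
{
public:
  // Role of generate_sched_results(): publish the slot that was just filled.
  void publish(uint32_t tti_rx)
  {
    current_->tti_rx = tti_rx;
    last_            = current_;
  }

  // Role of finish_tti(): switch to the other slot, wipe it, and pre-set the
  // TTI this scheduler object is expected to serve on its next run.
  void finish_tti()
  {
    current_         = (last_ == &slots_[0]) ? &slots_[1] : &slots_[0];
    *current_        = {};
    current_->tti_rx = last_->tti_rx + RING_SZ;
  }

  const sf_result_t& last_result() const { return *last_; }
  uint32_t           expected_tti() const { return current_->tti_rx; }

private:
  std::array<sf_result_t, 2> slots_{};
  sf_result_t*               current_ = &slots_[0];
  sf_result_t*               last_    = &slots_[1];
};

int main()
{
  sketch_sched s;
  s.publish(100);  // results for tti_rx = 100 remain readable ...
  s.finish_tti();  // ... while the next round is primed for tti_rx = 104
  return (s.last_result().tti_rx == 100 && s.expected_tti() == 104) ? 0 : 1;
}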