diff --git a/lib/include/srslte/adt/bounded_vector.h b/lib/include/srslte/adt/bounded_vector.h
index d940347c7..f642f4429 100644
--- a/lib/include/srslte/adt/bounded_vector.h
+++ b/lib/include/srslte/adt/bounded_vector.h
@@ -107,13 +107,13 @@ public:
   }
   T&       front() { return (*this)[0]; }
   const T& front() const { return (*this)[0]; }
-  T*       data() { return &front(); }
-  const T* data() const { return &front(); }
+  T*       data() { return reinterpret_cast<T*>(buffer); }
+  const T* data() const { return reinterpret_cast<const T*>(buffer); }

   // Iterators
-  iterator       begin() { return reinterpret_cast<iterator>(buffer); }
+  iterator       begin() { return data(); }
   iterator       end() { return begin() + size_; }
-  const_iterator begin() const { return reinterpret_cast<const_iterator>(buffer); }
+  const_iterator begin() const { return data(); }
   const_iterator end() const { return begin() + size_; }

   // Capacity
diff --git a/srsenb/hdr/stack/mac/sched_grid.h b/srsenb/hdr/stack/mac/sched_grid.h
index 972256d61..79895bb86 100644
--- a/srsenb/hdr/stack/mac/sched_grid.h
+++ b/srsenb/hdr/stack/mac/sched_grid.h
@@ -19,7 +19,6 @@
 #include "srslte/adt/bounded_bitset.h"
 #include "srslte/adt/circular_array.h"
 #include "srslte/srslog/srslog.h"
-#include
 #include

 namespace srsenb {
@@ -30,7 +29,7 @@ enum class alloc_result {
   sch_collision,
   no_cch_space,
   no_sch_space,
-  rnti_inactive,
+  no_rnti_opportunity,
   invalid_grant_params,
   invalid_coderate,
   no_grant_space,
@@ -101,11 +100,6 @@ private:
 class sf_grid_t
 {
 public:
-  struct dl_ctrl_alloc_t {
-    alloc_result outcome;
-    rbg_interval rbg_range;
-  };
-
   sf_grid_t() : logger(srslog::fetch_basic_logger("MAC")) {}

   void init(const sched_cell_params_t& cell_params_);
@@ -168,8 +162,6 @@ public:
   };
   struct bc_alloc_t : public ctrl_alloc_t {
     sched_interface::dl_sched_bc_t bc_grant;
-    bc_alloc_t() = default;
-    explicit bc_alloc_t(const ctrl_alloc_t& c) : ctrl_alloc_t(c) {}
   };
   struct dl_alloc_t {
     size_t dci_idx;
@@ -194,7 +186,6 @@ public:
     uint32_t n_prb = 0;
     uint32_t mcs   = 0;
   };
-  typedef std::pair ctrl_code_t;

   // Control/Configuration Methods
   sf_sched();
@@ -206,28 +197,28 @@ public:
   alloc_result alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs);
   alloc_result alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar_grant, rbg_interval rbgs, uint32_t nof_grants);
   bool reserve_dl_rbgs(uint32_t rbg_start, uint32_t rbg_end) { return tti_alloc.reserve_dl_rbgs(rbg_start, rbg_end); }
-  const std::vector<rar_alloc_t>& get_allocated_rars() const { return rar_allocs; }

   // UL alloc methods
   alloc_result alloc_msg3(sched_ue* user, const sched_interface::dl_sched_rar_grant_t& rargrant);
   alloc_result
-  alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, bool is_msg3 = false, int msg3_mcs = -1);
-  bool reserve_ul_prbs(const prbmask_t& ulmask, bool strict)
+  alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_type, bool is_msg3 = false, int msg3_mcs = -1);
+  alloc_result reserve_ul_prbs(const prbmask_t& ulmask, bool strict)
   {
-    return tti_alloc.reserve_ul_prbs(ulmask, strict) == alloc_result::success;
+    return tti_alloc.reserve_ul_prbs(ulmask, strict);
   }
-  bool alloc_phich(sched_ue* user);
+  alloc_result alloc_phich(sched_ue* user);

   // compute DCIs and generate dl_sched_result/ul_sched_result for a given TTI
   void generate_sched_results(sched_ue_list& ue_db);

-  alloc_result     alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid);
-  tti_point        get_tti_tx_dl() const { return to_tx_dl(tti_rx); }
-  uint32_t         get_nof_ctrl_symbols() const;
-  const rbgmask_t& get_dl_mask() const { return tti_alloc.get_dl_mask(); }
-  alloc_result     alloc_ul_user(sched_ue* user, prb_interval alloc);
-  const prbmask_t& get_ul_mask() const { return tti_alloc.get_ul_mask(); }
-  tti_point        get_tti_tx_ul() const { return to_tx_ul(tti_rx); }
+  alloc_result                    alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid);
+  tti_point                       get_tti_tx_dl() const { return to_tx_dl(tti_rx); }
+  uint32_t                        get_nof_ctrl_symbols() const;
+  const rbgmask_t&                get_dl_mask() const { return tti_alloc.get_dl_mask(); }
+  alloc_result                    alloc_ul_user(sched_ue* user, prb_interval alloc);
+  const prbmask_t&                get_ul_mask() const { return tti_alloc.get_ul_mask(); }
+  tti_point                       get_tti_tx_ul() const { return to_tx_ul(tti_rx); }
+  srslte::const_span<rar_alloc_t> get_allocated_rars() const { return rar_allocs; }

   // getters
   tti_point get_tti_rx() const { return tti_rx; }
@@ -237,10 +228,6 @@ public:
   const sched_cell_params_t* get_cc_cfg() const { return cc_cfg; }

 private:
-  void set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
-                           sched_interface::dl_sched_res_t*        dl_result);
-  void set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
-                            sched_interface::dl_sched_res_t*        dl_result);
   void set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
                                 sched_interface::dl_sched_res_t*        dl_result,
                                 sched_ue_list&                          ue_list);
@@ -257,8 +244,8 @@ private:
   sf_grid_t tti_alloc;

   srslte::bounded_vector<bc_alloc_t, sched_interface::MAX_BC_LIST>    bc_allocs;
-  std::vector<rar_alloc_t>                                            rar_allocs;
-  std::vector<dl_alloc_t>                                             data_allocs;
+  srslte::bounded_vector<rar_alloc_t, sched_interface::MAX_RAR_LIST>  rar_allocs;
+  srslte::bounded_vector<dl_alloc_t, sched_interface::MAX_DATA_LIST>  data_allocs;
   srslte::bounded_vector<ul_alloc_t, sched_interface::MAX_DATA_LIST>  ul_data_allocs;

   uint32_t last_msg3_prb = 0, max_msg3_prb = 0;
diff --git a/srsenb/src/stack/mac/sched_carrier.cc b/srsenb/src/stack/mac/sched_carrier.cc
index 1306e7f71..7f43795d6 100644
--- a/srsenb/src/stack/mac/sched_carrier.cc
+++ b/srsenb/src/stack/mac/sched_carrier.cc
@@ -295,7 +295,7 @@ int ra_sched::dl_rach_info(dl_sched_rar_info_t rar_info)
 //! Schedule Msg3 grants in UL based on allocated RARs
 void ra_sched::ul_sched(sf_sched* sf_dl_sched, sf_sched* sf_msg3_sched)
 {
-  const std::vector<sf_sched::rar_alloc_t>& alloc_rars = sf_dl_sched->get_allocated_rars();
+  srslte::const_span<sf_sched::rar_alloc_t> alloc_rars = sf_dl_sched->get_allocated_rars();

   for (const auto& rar : alloc_rars) {
     for (const auto& msg3grant : rar.rar_grant.msg3_grant) {
@@ -334,7 +334,7 @@ sched::carrier_sched::carrier_sched(rrc_interface_mac* rrc_,
   sf_dl_mask.resize(1, 0);
 }

-sched::carrier_sched::~carrier_sched() {}
+sched::carrier_sched::~carrier_sched() = default;

 void sched::carrier_sched::reset()
 {
@@ -386,10 +386,9 @@ const cc_sched_result& sched::carrier_sched::generate_tti_result(tti_point tti_r

   /* Schedule PHICH */
   for (auto& ue_pair : *ue_db) {
-    if (cc_result->ul_sched_result.phich.size() >= MAX_PHICH_LIST) {
+    if (tti_sched->alloc_phich(ue_pair.second.get()) == alloc_result::no_grant_space) {
       break;
     }
-    tti_sched->alloc_phich(ue_pair.second.get());
   }

   /* Schedule DL control data */
diff --git a/srsenb/src/stack/mac/sched_grid.cc b/srsenb/src/stack/mac/sched_grid.cc
index 1e75fda7b..d4d983c60 100644
--- a/srsenb/src/stack/mac/sched_grid.cc
+++ b/srsenb/src/stack/mac/sched_grid.cc
@@ -29,7 +29,7 @@ const char* to_string(alloc_result result)
       return "No space available in PUCCH or PDCCH";
     case alloc_result::no_sch_space:
       return "Requested number of PRBs not available";
-    case alloc_result::rnti_inactive:
+    case alloc_result::no_rnti_opportunity:
       return "rnti cannot be allocated (e.g. already allocated, no data, meas gap collision, carrier inactive, etc.)";
     case alloc_result::invalid_grant_params:
       return "invalid grant arguments (e.g. invalid prb mask)";
@@ -55,8 +55,8 @@ void sf_sched_result::new_tti(tti_point tti_rx_)
 bool sf_sched_result::is_ul_alloc(uint16_t rnti) const
 {
   for (const auto& cc : enb_cc_list) {
-    for (uint32_t j = 0; j < cc.ul_sched_result.pusch.size(); ++j) {
-      if (cc.ul_sched_result.pusch[j].dci.rnti == rnti) {
+    for (const auto& pusch : cc.ul_sched_result.pusch) {
+      if (pusch.dci.rnti == rnti) {
         return true;
       }
     }
@@ -66,8 +66,8 @@ bool sf_sched_result::is_ul_alloc(uint16_t rnti) const
 bool sf_sched_result::is_dl_alloc(uint16_t rnti) const
 {
   for (const auto& cc : enb_cc_list) {
-    for (uint32_t j = 0; j < cc.dl_sched_result.data.size(); ++j) {
-      if (cc.dl_sched_result.data[j].dci.rnti == rnti) {
+    for (const auto& data : cc.dl_sched_result.data) {
+      if (data.dci.rnti == rnti) {
         return true;
       }
     }
@@ -259,7 +259,7 @@ void sf_grid_t::rem_last_alloc_dl(rbg_interval rbgs)
 alloc_result sf_grid_t::reserve_ul_prbs(prb_interval alloc, bool strict)
 {
   if (alloc.stop() > ul_mask.size()) {
-    return alloc_result::no_sch_space;
+    return alloc_result::invalid_grant_params;
   }

   prbmask_t newmask(ul_mask.size());
@@ -271,10 +271,13 @@ alloc_result sf_grid_t::reserve_ul_prbs(const prbmask_t& prbmask, bool strict)
 {
   alloc_result ret = alloc_result::success;
   if (strict and (ul_mask & prbmask).any()) {
-    fmt::memory_buffer tmp_buffer;
-    fmt::format_to(tmp_buffer, "There was a collision in the UL. Current mask={:x}, new mask={:x}", ul_mask, prbmask);
-    logger.error("%s", srslte::to_c_str(tmp_buffer));
-    ret = alloc_result::sch_collision;
+    if (logger.info.enabled()) {
+      fmt::memory_buffer tmp_buffer;
+      fmt::format_to(
+          tmp_buffer, "There was a collision in the UL. Current mask=0x{:x}, new mask=0x{:x}", ul_mask, prbmask);
+      logger.info("%s", srslte::to_c_str(tmp_buffer));
+    }
+    ret = alloc_result::sch_collision;
   }
   ul_mask |= prbmask;
   return ret;
 }
@@ -325,7 +328,7 @@ void sf_sched::init(const sched_cell_params_t& cell_params_)
 {
   cc_cfg = &cell_params_;
   tti_alloc.init(*cc_cfg);
-  max_msg3_prb = std::max(6u, cc_cfg->cfg.cell.nof_prb - tti_alloc.get_pucch_width());
+  max_msg3_prb = std::max(6U, cc_cfg->cfg.cell.nof_prb - tti_alloc.get_pucch_width());
 }

 void sf_sched::new_tti(tti_point tti_rx_, sf_sched_result* cc_results_)
@@ -361,7 +364,7 @@ bool sf_sched::is_ul_alloc(uint16_t rnti) const

 alloc_result sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx, rbg_interval rbgs)
 {
-  if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) {
+  if (bc_allocs.full()) {
     logger.warning("SCHED: Maximum number of Broadcast allocations reached");
     return alloc_result::no_grant_space;
   }
@@ -391,7 +394,7 @@ alloc_result sf_sched::alloc_sib(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t s

 alloc_result sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload, rbg_interval rbgs)
 {
-  if (bc_allocs.size() >= sched_interface::MAX_BC_LIST) {
+  if (bc_allocs.full()) {
     logger.warning("SCHED: Maximum number of Broadcast allocations reached");
     return alloc_result::no_grant_space;
   }
@@ -422,7 +425,7 @@ alloc_result sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload,
 alloc_result sf_sched::alloc_rar(uint32_t aggr_lvl, const pending_rar_t& rar, rbg_interval rbgs, uint32_t nof_grants)
 {
   static const uint32_t msg3_nof_prbs = 3;
-  if (rar_allocs.size() >= sched_interface::MAX_RAR_LIST) {
+  if (rar_allocs.full()) {
     logger.info("SCHED: Maximum number of RAR allocations per TTI reached.");
     return alloc_result::no_grant_space;
   }
@@ -474,22 +477,22 @@ bool is_periodic_cqi_expected(const sched_interface::ue_cfg_t& ue_cfg, tti_point
 alloc_result sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid)
 {
-  if (data_allocs.size() >= sched_interface::MAX_DATA_LIST) {
+  if (data_allocs.full()) {
     logger.warning("SCHED: Maximum number of DL allocations reached");
     return alloc_result::no_grant_space;
   }

   if (is_dl_alloc(user->get_rnti())) {
     logger.warning("SCHED: Attempt to assign multiple harq pids to the same user rnti=0x%x", user->get_rnti());
-    return alloc_result::rnti_inactive;
+    return alloc_result::no_rnti_opportunity;
   }

   auto* cc = user->find_ue_carrier(cc_cfg->enb_cc_idx);
   if (cc == nullptr or cc->cc_state() != cc_st::active) {
-    return alloc_result::rnti_inactive;
+    return alloc_result::no_rnti_opportunity;
   }
   if (not user->pdsch_enabled(srslte::tti_point{get_tti_rx()}, cc_cfg->enb_cc_idx)) {
-    return alloc_result::rnti_inactive;
+    return alloc_result::no_rnti_opportunity;
   }

   // Check if allocation would cause segmentation
@@ -498,7 +501,7 @@ alloc_result sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask,
     // It is newTx
     rbg_interval r = user->get_required_dl_rbgs(cc_cfg->enb_cc_idx);
     if (r.start() > user_mask.count()) {
-      logger.warning("SCHED: The number of RBGs allocated to rnti=0x%x will force segmentation", user->get_rnti());
+      logger.debug("SCHED: The number of RBGs allocated to rnti=0x%x will force segmentation", user->get_rnti());
       return alloc_result::invalid_grant_params;
     }
   }
@@ -568,14 +571,14 @@ sf_sched::alloc_ul(sched_ue* user, prb_interval alloc, ul_alloc_t::type_t alloc_
   if (is_ul_alloc(user->get_rnti())) {
     logger.warning("SCHED: Attempt to assign multiple UL grants to the same user rnti=0x%x", user->get_rnti());
-    return alloc_result::rnti_inactive;
+    return alloc_result::no_rnti_opportunity;
   }

   // Check if there is no collision with measGap
   bool needs_pdcch = alloc_type == ul_alloc_t::ADAPT_RETX or (alloc_type == ul_alloc_t::NEWTX and not is_msg3);
   if (not user->pusch_enabled(get_tti_rx(), cc_cfg->enb_cc_idx, needs_pdcch)) {
     logger.debug("SCHED: PDCCH would collide with rnti=0x%x Measurement Gap", user->get_rnti());
-    return alloc_result::rnti_inactive;
+    return alloc_result::no_rnti_opportunity;
   }

   // Allocate RBGs and DCI space
@@ -614,20 +617,20 @@ alloc_result sf_sched::alloc_ul_user(sched_ue* user, prb_interval alloc)
   return alloc_ul(user, alloc, alloc_type, h->is_msg3());
 }

-bool sf_sched::alloc_phich(sched_ue* user)
+alloc_result sf_sched::alloc_phich(sched_ue* user)
 {
   using phich_t = sched_interface::ul_sched_phich_t;
   auto* ul_sf_result = &cc_results->get_cc(cc_cfg->enb_cc_idx)->ul_sched_result;

   if (ul_sf_result->phich.full()) {
     logger.warning("SCHED: Maximum number of PHICH allocations has been reached");
-    return false;
+    return alloc_result::no_grant_space;
   }

   auto p = user->get_active_cell_index(cc_cfg->enb_cc_idx);
   if (not p.first) {
     // user does not support this carrier
-    return false;
+    return alloc_result::no_rnti_opportunity;
   }
   ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cc_cfg->enb_cc_idx);
@@ -637,29 +640,9 @@ bool sf_sched::alloc_phich(sched_ue* user)
     ul_sf_result->phich.emplace_back();
     ul_sf_result->phich.back().rnti  = user->get_rnti();
     ul_sf_result->phich.back().phich = h->pop_pending_phich() ? phich_t::ACK : phich_t::NACK;
-    return true;
-  }
-  return false;
-}
-
-void sf_sched::set_bc_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
-                                   sched_interface::dl_sched_res_t*        dl_result)
-{
-  for (const auto& bc_alloc : bc_allocs) {
-    dl_result->bc.emplace_back(bc_alloc.bc_grant);
-    dl_result->bc.back().dci.location = dci_result[bc_alloc.dci_idx]->dci_pos;
-    log_broadcast_allocation(dl_result->bc.back(), bc_alloc.rbg_range, *cc_cfg);
-  }
-}
-
-void sf_sched::set_rar_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
-                                    sched_interface::dl_sched_res_t*        dl_result)
-{
-  for (const auto& rar_alloc : rar_allocs) {
-    dl_result->rar.emplace_back(rar_alloc.rar_grant);
-    dl_result->rar.back().dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos;
-    log_rar_allocation(dl_result->rar.back(), rar_alloc.alloc_data.rbg_range);
+    return alloc_result::success;
   }
+  return alloc_result::no_rnti_opportunity;
 }

 void sf_sched::set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
                                         sched_interface::dl_sched_res_t*        dl_result,
                                         sched_ue_list&                          ue_list)
@@ -927,9 +910,17 @@ void sf_sched::generate_sched_results(sched_ue_list& ue_db)
   cc_result->dl_sched_result.cfi = tti_alloc.get_pdcch_grid().get_cfi();

   /* Generate DCI formats and fill sched_result structs */
-  set_bc_sched_result(dci_result, &cc_result->dl_sched_result);
+  for (const auto& bc_alloc : bc_allocs) {
+    cc_result->dl_sched_result.bc.emplace_back(bc_alloc.bc_grant);
+    cc_result->dl_sched_result.bc.back().dci.location = dci_result[bc_alloc.dci_idx]->dci_pos;
+    log_broadcast_allocation(cc_result->dl_sched_result.bc.back(), bc_alloc.rbg_range, *cc_cfg);
+  }

-  set_rar_sched_result(dci_result, &cc_result->dl_sched_result);
+  for (const auto& rar_alloc : rar_allocs) {
+    cc_result->dl_sched_result.rar.emplace_back(rar_alloc.rar_grant);
+    cc_result->dl_sched_result.rar.back().dci.location = dci_result[rar_alloc.alloc_data.dci_idx]->dci_pos;
+    log_rar_allocation(cc_result->dl_sched_result.rar.back(), rar_alloc.alloc_data.rbg_range);
+  }
   set_dl_data_sched_result(dci_result, &cc_result->dl_sched_result, ue_db);
diff --git a/srsenb/src/stack/mac/schedulers/sched_base.cc b/srsenb/src/stack/mac/schedulers/sched_base.cc
index 944e2fc23..2454871eb 100644
--- a/srsenb/src/stack/mac/schedulers/sched_base.cc
+++ b/srsenb/src/stack/mac/schedulers/sched_base.cc
@@ -146,7 +146,7 @@ alloc_result try_dl_newtx_alloc_greedy(sf_sched& tti_sched, sched_ue& ue, const
   // If there is no data to transmit, no need to allocate
   rbg_interval req_rbgs = ue.get_required_dl_rbgs(tti_sched.get_enb_cc_idx());
   if (req_rbgs.stop() == 0) {
-    return alloc_result::rnti_inactive;
+    return alloc_result::no_rnti_opportunity;
   }

   // Find RBG mask that accommodates pending data
@@ -245,7 +245,7 @@ alloc_result try_ul_retx_alloc(sf_sched& tti_sched, sched_ue& ue, const ul_harq_

   // Avoid measGaps accounting for PDCCH
   if (not ue.pusch_enabled(tti_sched.get_tti_rx(), tti_sched.get_enb_cc_idx(), true)) {
-    return alloc_result::rnti_inactive;
+    return alloc_result::no_rnti_opportunity;
   }
   uint32_t nof_prbs = alloc.length();
   alloc             = find_contiguous_ul_prbs(nof_prbs, tti_sched.get_ul_mask());
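
Note on the API change above: sf_sched::alloc_phich() and sf_sched::reserve_ul_prbs() now return alloc_result instead of bool, and get_allocated_rars() returns a srslte::const_span over a fixed-capacity srslte::bounded_vector. The richer return value lets a caller distinguish "the grant space is exhausted, stop iterating users" (no_grant_space) from per-user failures such as no_rnti_opportunity, which is how the new PHICH loop in carrier_sched::generate_tti_result() uses it. The standalone C++ sketch below only illustrates that caller pattern; the enum mirrors the values introduced in this patch, but try_phich_alloc(), max_phich_list and the RNTI loop are hypothetical stand-ins, not srsRAN code.

// Standalone sketch (not srsRAN code): caller pattern enabled by returning alloc_result
// instead of bool. try_phich_alloc() and max_phich_list are hypothetical stand-ins for
// sf_sched::alloc_phich() and the PHICH list capacity.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

enum class alloc_result {
  success,
  sch_collision,
  no_cch_space,
  no_sch_space,
  no_rnti_opportunity,
  invalid_grant_params,
  invalid_coderate,
  no_grant_space,
  other_cause
};

constexpr std::size_t max_phich_list = 4; // hypothetical capacity

// Pretend PHICH allocator: reports no_grant_space once the list is full and
// no_rnti_opportunity for users with nothing pending (here, even RNTIs).
alloc_result try_phich_alloc(std::vector<std::uint16_t>& phich_list, std::uint16_t rnti)
{
  if (phich_list.size() >= max_phich_list) {
    return alloc_result::no_grant_space;
  }
  if (rnti % 2 == 0) {
    return alloc_result::no_rnti_opportunity;
  }
  phich_list.push_back(rnti);
  return alloc_result::success;
}

int main()
{
  std::vector<std::uint16_t> phich_list;
  // Same shape as the new PHICH loop in carrier_sched::generate_tti_result(): the caller
  // no longer pre-checks the list size; it stops only when the allocator reports that the
  // grant space is exhausted, while per-user failures are simply skipped.
  for (std::uint16_t rnti = 70; rnti < 80; ++rnti) {
    if (try_phich_alloc(phich_list, rnti) == alloc_result::no_grant_space) {
      break;
    }
  }
  std::printf("allocated %zu PHICH entries\n", phich_list.size());
  return 0;
}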