diff --git a/srsenb/hdr/stack/mac/scheduler.h b/srsenb/hdr/stack/mac/scheduler.h
index d9fd62c98..010bf1b9b 100644
--- a/srsenb/hdr/stack/mac/scheduler.h
+++ b/srsenb/hdr/stack/mac/scheduler.h
@@ -88,16 +88,16 @@ public:
   {
   public:
     /* Virtual methods for user metric calculation */
-    virtual void set_params(const sched_params_t& sched_params_) = 0;
-    virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched, uint32_t cc_idx) = 0;
+    virtual void set_params(const sched_params_t& sched_params_) = 0;
+    virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched, uint32_t cc_idx) = 0;
   };

   class metric_ul
   {
   public:
     /* Virtual methods for user metric calculation */
-    virtual void set_params(const sched_params_t& sched_params_) = 0;
-    virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched, uint32_t cc_idx) = 0;
+    virtual void set_params(const sched_params_t& sched_params_) = 0;
+    virtual void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched, uint32_t cc_idx) = 0;
   };

   /*************************************************************
diff --git a/srsenb/hdr/stack/mac/scheduler_carrier.h b/srsenb/hdr/stack/mac/scheduler_carrier.h
index 5e283a8b7..513d50c08 100644
--- a/srsenb/hdr/stack/mac/scheduler_carrier.h
+++ b/srsenb/hdr/stack/mac/scheduler_carrier.h
@@ -33,26 +33,23 @@ class sched::carrier_sched
 {
 public:
   explicit carrier_sched(rrc_interface_mac* rrc_, std::map<uint16_t, sched_ue>* ue_db_, uint32_t enb_cc_idx_);
-  void reset();
-  void carrier_cfg(const sched_params_t& sched_params_);
-  void set_metric(sched::metric_dl* dl_metric_, sched::metric_ul* ul_metric_);
-  void set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs);
-  tti_sched_result_t* generate_tti_result(uint32_t tti_rx);
-  int dl_rach_info(dl_sched_rar_info_t rar_info);
+  void      reset();
+  void      carrier_cfg(const sched_params_t& sched_params_);
+  void      set_metric(sched::metric_dl* dl_metric_, sched::metric_ul* ul_metric_);
+  void      set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs);
+  sf_sched* generate_tti_result(uint32_t tti_rx);
+  int       dl_rach_info(dl_sched_rar_info_t rar_info);

   // getters
-  const ra_sched* get_ra_sched() const { return ra_sched_ptr.get(); }
-  const tti_sched_result_t* get_tti_sched_view(uint32_t tti_rx) const
-  {
-    return &tti_scheds[tti_rx % tti_scheds.size()];
-  }
+  const ra_sched* get_ra_sched() const { return ra_sched_ptr.get(); }
+  const sf_sched* get_sf_sched(uint32_t tti_rx) const { return &sf_scheds[tti_rx % sf_scheds.size()]; }

 private:
-  void generate_phich(tti_sched_result_t* tti_sched);
+  void generate_phich(sf_sched* tti_sched);
   //! Compute DL scheduler result for given TTI
-  void alloc_dl_users(tti_sched_result_t* tti_result);
+  void alloc_dl_users(sf_sched* tti_result);
   //! Compute UL scheduler result for given TTI
-  int alloc_ul_users(tti_sched_result_t* tti_sched);
+  int alloc_ul_users(sf_sched* tti_sched);

   // args
   const sched_params_t* sched_params = nullptr;
@@ -68,9 +65,9 @@ private:
   prbmask_t pucch_mask;

   // TTI result storage and management
-  std::array tti_scheds;
-  tti_sched_result_t* get_tti_sched(uint32_t tti_rx) { return &tti_scheds[tti_rx % tti_scheds.size()]; }
-  std::vector<uint8_t> tti_dl_mask; ///< Some TTIs may be forbidden for DL sched due to MBMS
+  std::array sf_scheds;
+  sf_sched* get_sf_sched(uint32_t tti_rx) { return &sf_scheds[tti_rx % sf_scheds.size()]; }
+  std::vector<uint8_t> sf_dl_mask; ///< Some TTIs may be forbidden for DL sched due to MBMS

   std::unique_ptr<bc_sched> bc_sched_ptr;
   std::unique_ptr<ra_sched> ra_sched_ptr;
@@ -84,7 +81,7 @@ class bc_sched
 {
 public:
   explicit bc_sched(const sched::cell_cfg_t& cfg_, rrc_interface_mac* rrc_);
-  void dl_sched(tti_sched_result_t* tti_sched);
+  void dl_sched(sf_sched* tti_sched);
   void reset();

 private:
@@ -94,9 +91,9 @@
     uint32_t n_tx = 0;
   };

-  void update_si_windows(tti_sched_result_t* tti_sched);
-  void alloc_sibs(tti_sched_result_t* tti_sched);
-  void alloc_paging(tti_sched_result_t* tti_sched);
+  void update_si_windows(sf_sched* tti_sched);
+  void alloc_sibs(sf_sched* tti_sched);
+  void alloc_paging(sf_sched* tti_sched);

   // args
   const sched::cell_cfg_t* cfg;
@@ -126,8 +123,8 @@ public:
   };

   explicit ra_sched(const sched::cell_cfg_t& cfg_, srslte::log* log_, std::map<uint16_t, sched_ue>& ue_db_);
-  void dl_sched(tti_sched_result_t* tti_sched);
-  void ul_sched(tti_sched_result_t* tti_sched);
+  void dl_sched(sf_sched* tti_sched);
+  void ul_sched(sf_sched* tti_sched);
   int  dl_rach_info(dl_sched_rar_info_t rar_info);
   void reset();
   const pending_msg3_t& find_pending_msg3(uint32_t tti) const;
diff --git a/srsenb/hdr/stack/mac/scheduler_grid.h b/srsenb/hdr/stack/mac/scheduler_grid.h
index 7d54c659a..b2ef120b8 100644
--- a/srsenb/hdr/stack/mac/scheduler_grid.h
+++ b/srsenb/hdr/stack/mac/scheduler_grid.h
@@ -104,7 +104,7 @@ private:
 };

 //! manages a full TTI grid resources, namely CCE and DL/UL RB allocations
-class tti_grid_t
+class sf_grid_t
 {
 public:
   struct dl_ctrl_alloc_t {
@@ -148,7 +148,7 @@ private:
 };

 //! generic interface used by DL scheduler algorithm
-class dl_tti_sched_t
+class dl_sf_sched_itf
 {
 public:
   virtual alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid) = 0;
@@ -159,7 +159,7 @@ public:
 };

 //! generic interface used by UL scheduler algorithm
-class ul_tti_sched_t
+class ul_sf_sched_itf
 {
 public:
   virtual alloc_outcome_t alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc) = 0;
@@ -172,7 +172,7 @@ public:
 /**
  * Converts the stored allocations' metadata to the scheduler UL/DL result
  * Handles the generation of DCI formats
  */
-class tti_sched_result_t : public dl_tti_sched_t, public ul_tti_sched_t
+class sf_sched : public dl_sf_sched_itf, public ul_sf_sched_itf
 {
 public:
   struct ctrl_alloc_t {
@@ -249,23 +249,21 @@
   const tti_params_t& get_tti_params() const { return tti_params; }

 private:
-  bool is_dl_alloc(sched_ue* user) const final;
-  bool is_ul_alloc(sched_ue* user) const final;
-  ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti);
-  alloc_outcome_t alloc_ul(sched_ue* user,
-                           ul_harq_proc::ul_alloc_t alloc,
-                           tti_sched_result_t::ul_alloc_t::type_t alloc_type,
-                           uint32_t msg3 = 0);
-  int generate_format1a(uint32_t rb_start,
-                        uint32_t l_crb,
-                        uint32_t tbs,
-                        uint32_t rv,
-                        uint16_t rnti,
-                        srslte_dci_dl_t* dci);
-  void set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
-  void set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
-  void set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
-  void set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
+  bool            is_dl_alloc(sched_ue* user) const final;
+  bool            is_ul_alloc(sched_ue* user) const final;
+  ctrl_code_t     alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti);
+  alloc_outcome_t
+  alloc_ul(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, sf_sched::ul_alloc_t::type_t alloc_type, uint32_t msg3 = 0);
+  int  generate_format1a(uint32_t rb_start,
+                         uint32_t l_crb,
+                         uint32_t tbs,
+                         uint32_t rv,
+                         uint16_t rnti,
+                         srslte_dci_dl_t* dci);
+  void set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
+  void set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
+  void set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);
+  void set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result);

   // consts
   const sched_params_t* sched_params = nullptr;
@@ -274,7 +272,7 @@ private:

   // internal state
   tti_params_t tti_params{10241};
-  tti_grid_t   tti_alloc;
+  sf_grid_t    tti_alloc;
   std::vector rar_allocs;
   std::vector bc_allocs;
   std::vector data_allocs;
diff --git a/srsenb/hdr/stack/mac/scheduler_metric.h b/srsenb/hdr/stack/mac/scheduler_metric.h
index 6a4d6fc83..5a2a8930d 100644
--- a/srsenb/hdr/stack/mac/scheduler_metric.h
+++ b/srsenb/hdr/stack/mac/scheduler_metric.h
@@ -32,30 +32,30 @@ class dl_metric_rr : public sched::metric_dl
 public:
   void set_params(const sched_params_t& sched_params_) final;
-  void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched, uint32_t cc_idx) final;
+  void sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched, uint32_t cc_idx) final;

 private:
   bool find_allocation(uint32_t nof_rbg, rbgmask_t* rbgmask);
   dl_harq_proc* allocate_user(sched_ue* user, uint32_t cc_idx);

-  srslte::log* log_h = nullptr;
-  dl_tti_sched_t* tti_alloc = nullptr;
+  srslte::log* log_h = nullptr;
+  dl_sf_sched_itf* tti_alloc = nullptr;
 };

 class ul_metric_rr : public sched::metric_ul
 {
 public:
   void set_params(const sched_params_t& sched_params_) final;
-  void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched, uint32_t cc_idx) final;
+  void sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched, uint32_t cc_idx) final;

 private:
   bool find_allocation(uint32_t L, ul_harq_proc::ul_alloc_t* alloc);
   ul_harq_proc* allocate_user_newtx_prbs(sched_ue* user, uint32_t cc_idx);
   ul_harq_proc* allocate_user_retx_prbs(sched_ue* user, uint32_t cc_idx);

-  srslte::log* log_h = nullptr;
-  ul_tti_sched_t* tti_alloc = nullptr;
-  uint32_t current_tti;
+  srslte::log* log_h = nullptr;
+  ul_sf_sched_itf* tti_alloc = nullptr;
+  uint32_t current_tti;
 };

 } // namespace srsenb
diff --git a/srsenb/src/stack/mac/scheduler.cc b/srsenb/src/stack/mac/scheduler.cc
index c533b1ab2..0516d1a3f 100644
--- a/srsenb/src/stack/mac/scheduler.cc
+++ b/srsenb/src/stack/mac/scheduler.cc
@@ -396,7 +396,7 @@ int sched::dl_sched(uint32_t tti, uint32_t cc_idx, sched_interface::dl_sched_res
   if (cc_idx < carrier_schedulers.size()) {
     // Compute scheduling Result for tti_rx
     pthread_rwlock_rdlock(&rwlock);
-    tti_sched_result_t* tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
+    sf_sched* tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
     pthread_rwlock_unlock(&rwlock);

     // copy result
@@ -418,7 +418,7 @@ int sched::ul_sched(uint32_t tti, uint32_t cc_idx, srsenb::sched_interface::ul_s

   if (cc_idx < carrier_schedulers.size()) {
     pthread_rwlock_rdlock(&rwlock);
-    tti_sched_result_t* tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
+    sf_sched* tti_sched = carrier_schedulers[cc_idx]->generate_tti_result(tti_rx);
     pthread_rwlock_unlock(&rwlock);

     // copy result
diff --git a/srsenb/src/stack/mac/scheduler_carrier.cc b/srsenb/src/stack/mac/scheduler_carrier.cc
index dd85e4983..15ea388dd 100644
--- a/srsenb/src/stack/mac/scheduler_carrier.cc
+++ b/srsenb/src/stack/mac/scheduler_carrier.cc
@@ -34,7 +34,7 @@ namespace srsenb {

 bc_sched::bc_sched(const sched::cell_cfg_t& cfg_, srsenb::rrc_interface_mac* rrc_) : cfg(&cfg_), rrc(rrc_) {}

-void bc_sched::dl_sched(tti_sched_result_t* tti_sched)
+void bc_sched::dl_sched(sf_sched* tti_sched)
 {
   current_sf_idx = tti_sched->get_sf_idx();
   current_sfn    = tti_sched->get_sfn();
@@ -52,7 +52,7 @@ void bc_sched::dl_sched(tti_sched_result_t* tti_sched)
   alloc_paging(tti_sched);
 }

-void bc_sched::update_si_windows(tti_sched_result_t* tti_sched)
+void bc_sched::update_si_windows(sf_sched* tti_sched)
 {
   uint32_t tti_tx_dl = tti_sched->get_tti_tx_dl();
@@ -90,7 +90,7 @@ void bc_sched::update_si_windows(tti_sched_result_t* tti_sched)
   }
 }

-void bc_sched::alloc_sibs(tti_sched_result_t* tti_sched)
+void bc_sched::alloc_sibs(sf_sched* tti_sched)
 {
   for (uint32_t i = 0; i < pending_sibs.size(); i++) {
     if (cfg->sibs[i].len > 0 and pending_sibs[i].is_in_window and pending_sibs[i].n_tx < 4) {
@@ -112,7 +112,7 @@ void bc_sched::alloc_sibs(tti_sched_result_t* tti_sched)
   }
 }

-void bc_sched::alloc_paging(tti_sched_result_t* tti_sched)
+void bc_sched::alloc_paging(sf_sched* tti_sched)
 {
   /* Allocate DCIs and RBGs for paging */
   if (rrc != nullptr) {
@@ -144,7 +144,7 @@ ra_sched::ra_sched(const sched::cell_cfg_t& cfg_, srslte::log* log_, std::map<uint16_t, sched_ue>& ue_db_)
-void ra_sched::dl_sched(srsenb::tti_sched_result_t* tti_sched)
+void ra_sched::dl_sched(srsenb::sf_sched* tti_sched)
 {
   tti_tx_dl      = tti_sched->get_tti_tx_dl();
   rar_aggr_level = 2;
@@ -188,10 +188,10 @@ void ra_sched::dl_sched(srsenb::tti_sched_result_t* tti_sched)
       rar_grant.nof_grants++;

       // Try to schedule DCI + RBGs for RAR Grant
-      tti_sched_result_t::rar_code_t ret = tti_sched->alloc_rar(rar_aggr_level,
-                                                                rar_grant,
-                                                                rar.prach_tti,
-                                                                7 * rar_grant.nof_grants); // fixme: check RAR size
+      sf_sched::rar_code_t ret = tti_sched->alloc_rar(rar_aggr_level,
+                                                      rar_grant,
+                                                      rar.prach_tti,
+                                                      7 * rar_grant.nof_grants); // fixme: check RAR size

       // If we can allocate, schedule Msg3 and remove from pending
       if (!ret.first) {
@@ -218,7 +218,7 @@ void ra_sched::dl_sched(srsenb::tti_sched_result_t* tti_sched)
 }

 // Schedules Msg3
-void ra_sched::ul_sched(tti_sched_result_t* tti_sched)
+void ra_sched::ul_sched(sf_sched* tti_sched)
 {
   uint32_t pending_tti = tti_sched->get_tti_tx_ul() % TTIMOD_SZ;
@@ -283,7 +283,7 @@ sched::carrier_sched::carrier_sched(rrc_interface_mac* rrc_,
   ue_db(ue_db_),
   enb_cc_idx(enb_cc_idx_)
 {
-  tti_dl_mask.resize(1, 0);
+  sf_dl_mask.resize(1, 0);
 }

 void sched::carrier_sched::reset()
@@ -319,7 +319,7 @@ void sched::carrier_sched::carrier_cfg(const sched_params_t& sched_params_)
   prach_mask.fill(cfg_->prach_freq_offset, cfg_->prach_freq_offset + 6);

   // Initiate the tti_scheduler for each TTI
-  for (tti_sched_result_t& tti_sched : tti_scheds) {
+  for (sf_sched& tti_sched : sf_scheds) {
     tti_sched.init(*sched_params, enb_cc_idx);
   }
 }
@@ -332,12 +332,12 @@ void sched::carrier_sched::set_metric(sched::metric_dl* dl_metric_, sched::metri

 void sched::carrier_sched::set_dl_tti_mask(uint8_t* tti_mask, uint32_t nof_sfs)
 {
-  tti_dl_mask.assign(tti_mask, tti_mask + nof_sfs);
+  sf_dl_mask.assign(tti_mask, tti_mask + nof_sfs);
 }

-tti_sched_result_t* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
+sf_sched* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
 {
-  tti_sched_result_t* tti_sched = get_tti_sched(tti_rx);
+  sf_sched* tti_sched = get_sf_sched(tti_rx);

   // if it is the first time tti is run, reset vars
   if (tti_rx != tti_sched->get_tti_rx()) {
@@ -351,7 +351,7 @@ tti_sched_result_t* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
     generate_phich(tti_sched);

     /* Schedule DL control data */
-    if (tti_dl_mask[tti_sched->get_tti_tx_dl() % tti_dl_mask.size()] == 0) {
+    if (sf_dl_mask[tti_sched->get_tti_tx_dl() % sf_dl_mask.size()] == 0) {
       /* Schedule Broadcast data (SIB and paging) */
       bc_sched_ptr->dl_sched(tti_sched);
@@ -383,7 +383,7 @@ tti_sched_result_t* sched::carrier_sched::generate_tti_result(uint32_t tti_rx)
   return tti_sched;
 }

-void sched::carrier_sched::generate_phich(tti_sched_result_t* tti_sched)
+void sched::carrier_sched::generate_phich(sf_sched* tti_sched)
 {
   // Allocate user PHICHs
   uint32_t nof_phich_elems = 0;
@@ -415,9 +415,9 @@ void sched::carrier_sched::generate_phich(tti_sched_result_t* tti_sched)
   tti_sched->ul_sched_result.nof_phich_elems = nof_phich_elems;
 }

-void sched::carrier_sched::alloc_dl_users(tti_sched_result_t* tti_result)
+void sched::carrier_sched::alloc_dl_users(sf_sched* tti_result)
 {
-  if (tti_dl_mask[tti_result->get_tti_tx_dl() % tti_dl_mask.size()] != 0) {
+  if (sf_dl_mask[tti_result->get_tti_tx_dl() % sf_dl_mask.size()] != 0) {
     return;
   }
@@ -437,7 +437,7 @@ void sched::carrier_sched::alloc_dl_users(tti_sched_result_t* tti_result)
   dl_metric->sched_users(*ue_db, tti_result, enb_cc_idx);
 }

-int sched::carrier_sched::alloc_ul_users(tti_sched_result_t* tti_sched)
+int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched)
 {
   uint32_t   tti_tx_ul = tti_sched->get_tti_tx_ul();
   prbmask_t& ul_mask   = tti_sched->get_ul_mask();
diff --git a/srsenb/src/stack/mac/scheduler_grid.cc b/srsenb/src/stack/mac/scheduler_grid.cc
index 05962c051..868962c01 100644
--- a/srsenb/src/stack/mac/scheduler_grid.cc
+++ b/srsenb/src/stack/mac/scheduler_grid.cc
@@ -263,7 +263,7 @@ std::string pdcch_grid_t::result_to_string(bool verbose) const
  * TTI resource Scheduling Methods
  *******************************************************/

-void tti_grid_t::init(const sched_params_t& sched_params_, uint32_t cc_idx_)
+void sf_grid_t::init(const sched_params_t& sched_params_, uint32_t cc_idx_)
 {
   sched_params = &sched_params_;
   log_h        = sched_params->log_h;
@@ -275,7 +275,7 @@ void tti_grid_t::init(const sched_params_t& sched_params_, uint32_t cc_idx_)
   pdcch_alloc.init(*sched_params);
 }

-void tti_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi)
+void sf_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi)
 {
   tti_params = &tti_params_;
@@ -289,7 +289,7 @@ void tti_grid_t::new_tti(const tti_params_t& tti_params_, uint32_t start_cfi)
 }

 //! Allocates CCEs and RBs for the given mask and allocation type (e.g. data, BC, RAR, paging)
-alloc_outcome_t tti_grid_t::alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user)
+alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user)
 {
   // Check RBG collision
   if ((dl_mask & alloc_mask).any()) {
@@ -316,7 +316,7 @@ alloc_outcome_t tti_grid_t::alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type,
 }

 //! Allocates CCEs and RBs for control allocs. It allocates RBs in a contiguous manner.
-tti_grid_t::dl_ctrl_alloc_t tti_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type)
+sf_grid_t::dl_ctrl_alloc_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type)
 {
   rbg_range_t range;
   range.rbg_start = nof_rbgs - avail_rbg;
@@ -339,7 +339,7 @@ tti_grid_t::dl_ctrl_alloc_t tti_grid_t::alloc_dl_ctrl(uint32_t aggr_lvl, alloc_t
 }

 //! Allocates CCEs and RBs for a user DL data alloc.
-alloc_outcome_t tti_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask)
+alloc_outcome_t sf_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask)
 {
   srslte_dci_format_t dci_format = user->get_dci_format();
   uint32_t nof_bits = srslte_dci_format_sizeof(&sched_params->cfg->cell, nullptr, nullptr, dci_format);
@@ -347,7 +347,7 @@ alloc_outcome_t tti_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_
   return alloc_dl(aggr_level, alloc_type_t::DL_DATA, user_mask, user);
 }

-alloc_outcome_t tti_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, bool needs_pdcch)
+alloc_outcome_t sf_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, bool needs_pdcch)
 {
   if (alloc.RB_start + alloc.L > ul_mask.size()) {
     return alloc_outcome_t::ERROR;
   }
@@ -382,7 +382,7 @@ alloc_outcome_t tti_grid_t::alloc_ul_data(sched_ue* user, ul_harq_proc::ul_alloc
  * TTI resource Scheduling Methods
  *******************************************************/

-void tti_sched_result_t::init(const sched_params_t& sched_params_, uint32_t enb_cc_idx_)
+void sf_sched::init(const sched_params_t& sched_params_, uint32_t enb_cc_idx_)
 {
   sched_params = &sched_params_;
   enb_cc_idx   = enb_cc_idx_;
@@ -390,7 +390,7 @@ void tti_sched_result_t::init(const sched_params_t& sched_params_, uint32_t enb_
   tti_alloc.init(*sched_params, 0);
 }

-void tti_sched_result_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
+void sf_sched::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
 {
   tti_params = tti_params_t{tti_rx_};
   tti_alloc.new_tti(tti_params, start_cfi);
@@ -408,7 +408,7 @@ void tti_sched_result_t::new_tti(uint32_t tti_rx_, uint32_t start_cfi)
   bzero(&ul_sched_result, sizeof(ul_sched_result));
 }

-bool tti_sched_result_t::is_dl_alloc(sched_ue* user) const
+bool sf_sched::is_dl_alloc(sched_ue* user) const
 {
   for (const auto& a : data_allocs) {
     if (a.user_ptr == user) {
@@ -418,7 +418,7 @@ bool tti_sched_result_t::is_dl_alloc(sched_ue* user) const
   return false;
 }

-bool tti_sched_result_t::is_ul_alloc(sched_ue* user) const
+bool sf_sched::is_ul_alloc(sched_ue* user) const
 {
   for (const auto& a : ul_data_allocs) {
     if (a.user_ptr == user) {
@@ -428,7 +428,7 @@ bool tti_sched_result_t::is_ul_alloc(sched_ue* user) const
   return false;
 }

-tti_sched_result_t::ctrl_code_t tti_sched_result_t::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti)
+sf_sched::ctrl_code_t sf_sched::alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti)
 {
   ctrl_alloc_t ctrl_alloc{};
@@ -441,7 +441,7 @@ tti_sched_result_t::ctrl_code_t tti_sched_result_t::alloc_dl_ctrl(uint32_t aggr_
   }

   /* Allocate space in the DL RBG and PDCCH grids */
-  tti_grid_t::dl_ctrl_alloc_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, alloc_type);
+  sf_grid_t::dl_ctrl_alloc_t ret = tti_alloc.alloc_dl_ctrl(aggr_lvl, alloc_type);
   if (not ret.outcome) {
     return {ret.outcome, ctrl_alloc};
   }
@@ -456,7 +456,7 @@ tti_sched_result_t::ctrl_code_t tti_sched_result_t::alloc_dl_ctrl(uint32_t aggr_
   return {ret.outcome, ctrl_alloc};
 }

-alloc_outcome_t tti_sched_result_t::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx)
+alloc_outcome_t sf_sched::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx, uint32_t sib_ntx)
 {
   uint32_t sib_len = sched_params->cfg->sibs[sib_idx].len;
   uint32_t rv      = sched::get_rvidx(sib_ntx);
@@ -479,7 +479,7 @@ alloc_outcome_t tti_sched_result_t::alloc_bc(uint32_t aggr_lvl, uint32_t sib_idx
   return ret.first;
 }

-alloc_outcome_t tti_sched_result_t::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload)
+alloc_outcome_t sf_sched::alloc_paging(uint32_t aggr_lvl, uint32_t paging_payload)
 {
   ctrl_code_t ret = alloc_dl_ctrl(aggr_lvl, paging_payload, SRSLTE_PRNTI);
   if (not ret.first) {
@@ -495,10 +495,10 @@ alloc_outcome_t tti_sched_result_t::alloc_paging(uint32_t aggr_lvl, uint32_t pag
   return ret.first;
 }

-tti_sched_result_t::rar_code_t tti_sched_result_t::alloc_rar(uint32_t aggr_lvl,
-                                                             const sched_interface::dl_sched_rar_t& rar_grant,
-                                                             uint32_t prach_tti,
-                                                             uint32_t buf_rar)
+sf_sched::rar_code_t sf_sched::alloc_rar(uint32_t aggr_lvl,
+                                         const sched_interface::dl_sched_rar_t& rar_grant,
+                                         uint32_t prach_tti,
+                                         uint32_t buf_rar)
 {
   // RA-RNTI = 1 + t_id + f_id
   // t_id = index of first subframe specified by PRACH (0<=t_id<10)
@@ -519,7 +519,7 @@ tti_sched_result_t::rar_code_t tti_sched_result_t::alloc_rar(uint32_t
   return {ret.first, &rar_allocs.back()};
 }

-alloc_outcome_t tti_sched_result_t::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid)
+alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid)
 {
   if (is_dl_alloc(user)) {
     log_h->warning("SCHED: Attempt to assign multiple harq pids to the same user rnti=0x%x\n", user->get_rnti());
@@ -543,10 +543,10 @@ alloc_outcome_t tti_sched_result_t::alloc_dl_user(sched_ue* user, const rbgmask_
   return alloc_outcome_t::SUCCESS;
 }

-alloc_outcome_t tti_sched_result_t::alloc_ul(sched_ue* user,
-                                             ul_harq_proc::ul_alloc_t alloc,
-                                             tti_sched_result_t::ul_alloc_t::type_t alloc_type,
-                                             uint32_t mcs)
+alloc_outcome_t sf_sched::alloc_ul(sched_ue* user,
+                                   ul_harq_proc::ul_alloc_t alloc,
+                                   sf_sched::ul_alloc_t::type_t alloc_type,
+                                   uint32_t mcs)
 {
   // Check whether user was already allocated
   if (is_ul_alloc(user)) {
@@ -572,12 +572,12 @@ alloc_outcome_t tti_sched_result_t::alloc_ul(sched_ue*
   return alloc_outcome_t::SUCCESS;
 }

-alloc_outcome_t tti_sched_result_t::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc)
+alloc_outcome_t sf_sched::alloc_ul_user(sched_ue* user, ul_harq_proc::ul_alloc_t alloc)
 {
   // check whether adaptive/non-adaptive retx/newtx
-  tti_sched_result_t::ul_alloc_t::type_t alloc_type;
-  ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), user->get_cell_index(enb_cc_idx).second);
-  bool has_retx = h->has_pending_retx();
+  sf_sched::ul_alloc_t::type_t alloc_type;
+  ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), user->get_cell_index(enb_cc_idx).second);
+  bool has_retx = h->has_pending_retx();
   if (has_retx) {
     ul_harq_proc::ul_alloc_t prev_alloc = h->get_alloc();
     if (prev_alloc.L == alloc.L and prev_alloc.RB_start == prev_alloc.L) {
@@ -592,12 +592,12 @@ alloc_outcome_t tti_sched_result_t::alloc_ul_user(sched_ue* user, ul_harq_proc::
   return alloc_ul(user, alloc, alloc_type);
 }

-alloc_outcome_t tti_sched_result_t::alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs)
+alloc_outcome_t sf_sched::alloc_ul_msg3(sched_ue* user, ul_harq_proc::ul_alloc_t alloc, uint32_t mcs)
 {
   return alloc_ul(user, alloc, ul_alloc_t::MSG3, mcs);
 }

-void tti_sched_result_t::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
+void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
 {
   for (const auto& bc_alloc : bc_allocs) {
     sched_interface::dl_sched_bc_t* bc = &dl_sched_result.bc[dl_sched_result.nof_bc_elems];
@@ -666,7 +666,7 @@ void tti_sched_result_t::set_bc_sched_result(const pdcch_grid_t::alloc_result_t&
   }
 }

-void tti_sched_result_t::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
+void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
 {
   for (const auto& rar_alloc : rar_allocs) {
     sched_interface::dl_sched_rar_t* rar = &dl_sched_result.rar[dl_sched_result.nof_rar_elems];
@@ -713,7 +713,7 @@ void tti_sched_result_t::set_rar_sched_result(const pdcch_grid_t::alloc_result_t
   }
 }

-void tti_sched_result_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
+void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
 {
   for (const auto& data_alloc : data_allocs) {
     sched_interface::dl_sched_data_t* data = &dl_sched_result.data[dl_sched_result.nof_data_elems];
@@ -772,7 +772,7 @@ void tti_sched_result_t::set_dl_data_sched_result(const pdcch_grid_t::alloc_resu
   }
 }

-void tti_sched_result_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
+void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result)
 {
   /* Set UL data DCI locs and format */
   for (const auto& ul_alloc : ul_data_allocs) {
@@ -836,7 +836,7 @@ void tti_sched_result_t::set_ul_sched_result(const pdcch_grid_t::alloc_result_t&
   }
 }

-void tti_sched_result_t::generate_dcis()
+void sf_sched::generate_dcis()
 {
   /* Pick one of the possible DCI masks */
   pdcch_grid_t::alloc_result_t dci_result;
@@ -856,17 +856,17 @@ void tti_sched_result_t::generate_dcis()
   set_ul_sched_result(dci_result);
 }

-uint32_t tti_sched_result_t::get_nof_ctrl_symbols() const
+uint32_t sf_sched::get_nof_ctrl_symbols() const
 {
   return tti_alloc.get_cfi() + ((sched_params->cfg->cell.nof_prb <= 10) ? 1 : 0);
 }

-int tti_sched_result_t::generate_format1a(uint32_t rb_start,
-                                          uint32_t l_crb,
-                                          uint32_t tbs_bytes,
-                                          uint32_t rv,
-                                          uint16_t rnti,
-                                          srslte_dci_dl_t* dci)
+int sf_sched::generate_format1a(uint32_t rb_start,
+                                uint32_t l_crb,
+                                uint32_t tbs_bytes,
+                                uint32_t rv,
+                                uint16_t rnti,
+                                srslte_dci_dl_t* dci)
 {
   /* Calculate I_tbs for this TBS */
   int tbs = tbs_bytes * 8;
diff --git a/srsenb/src/stack/mac/scheduler_metric.cc b/srsenb/src/stack/mac/scheduler_metric.cc
index e4d968a32..e6fd77203 100644
--- a/srsenb/src/stack/mac/scheduler_metric.cc
+++ b/srsenb/src/stack/mac/scheduler_metric.cc
@@ -41,7 +41,7 @@ void dl_metric_rr::set_params(const sched_params_t& sched_params_)
   log_h = sched_params_.log_h;
 }

-void dl_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_tti_sched_t* tti_sched, uint32_t enb_cc_idx)
+void dl_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, dl_sf_sched_itf* tti_sched, uint32_t enb_cc_idx)
 {
   tti_alloc = tti_sched;
@@ -160,7 +160,7 @@ void ul_metric_rr::set_params(const sched_params_t& sched_params_)
   log_h = sched_params_.log_h;
 }

-void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_tti_sched_t* tti_sched, uint32_t enb_cc_idx)
+void ul_metric_rr::sched_users(std::map<uint16_t, sched_ue>& ue_db, ul_sf_sched_itf* tti_sched, uint32_t enb_cc_idx)
 {
   tti_alloc   = tti_sched;
   current_tti = tti_alloc->get_tti_tx_ul();
diff --git a/srsenb/test/mac/scheduler_test_rand.cc b/srsenb/test/mac/scheduler_test_rand.cc
index e26ca0a94..eac76418b 100644
--- a/srsenb/test/mac/scheduler_test_rand.cc
+++ b/srsenb/test/mac/scheduler_test_rand.cc
@@ -527,7 +527,7 @@ int sched_tester::assert_no_empty_allocs()
  */
 int sched_tester::test_tti_result()
 {
-  const srsenb::tti_sched_result_t* tti_sched = carrier_schedulers[0]->get_tti_sched_view(tti_data.tti_rx);
+  const srsenb::sf_sched* tti_sched = carrier_schedulers[0]->get_sf_sched(tti_data.tti_rx);

   // Helper Function: checks if there is any collision. If not, fills the mask
   auto try_cce_fill = [&](const srslte_dci_location_t& dci_loc, const char* ch) {
@@ -588,7 +588,7 @@ int sched_tester::test_tti_result()
   }

   /* verify if sched_result "used_cce" coincide with sched "used_cce" */
-  auto* tti_alloc = carrier_schedulers[0]->get_tti_sched_view(tti_data.tti_rx);
+  auto* tti_alloc = carrier_schedulers[0]->get_sf_sched(tti_data.tti_rx);
   if (tti_data.used_cce != tti_alloc->get_pdcch_mask()) {
     std::string mask_str = tti_alloc->get_pdcch_mask().to_string();
     TESTERROR("[TESTER] The used_cce do not match: (%s!=%s)\n", mask_str.c_str(), tti_data.used_cce.to_hex().c_str());
@@ -793,7 +793,7 @@ int sched_tester::test_sibs()

 int sched_tester::test_collisions()
 {
-  const srsenb::tti_sched_result_t* tti_sched = carrier_schedulers[0]->get_tti_sched_view(tti_data.tti_rx);
+  const srsenb::sf_sched* tti_sched = carrier_schedulers[0]->get_sf_sched(tti_data.tti_rx);

   srsenb::prbmask_t ul_allocs(cfg.cell.nof_prb);
@@ -937,7 +937,7 @@ int sched_tester::test_collisions()
       rbgmask.reset(i);
     }
   }
-  if (rbgmask != carrier_schedulers[0]->get_tti_sched_view(tti_data.tti_rx)->get_dl_mask()) {
+  if (rbgmask != carrier_schedulers[0]->get_sf_sched(tti_data.tti_rx)->get_dl_mask()) {
     TESTERROR("[TESTER] The UL PRB mask and the scheduler result UL mask are not consistent\n");
   }
   return SRSLTE_SUCCESS;
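
Note for out-of-tree scheduling policies affected by this rename: the metric plugin API is unchanged apart from the new interface names (dl_sf_sched_itf/ul_sf_sched_itf instead of dl_tti_sched_t/ul_tti_sched_t, sf_sched instead of tti_sched_result_t). Below is a minimal, hypothetical sketch of a custom DL metric written against the renamed interface. Only the calls visible in this patch are assumed (metric_dl::set_params/sched_users, dl_sf_sched_itf::alloc_dl_user, alloc_outcome_t's bool conversion); pick_rbg_mask() and pick_harq_pid() are placeholder helpers invented for illustration and are only declared here, not implemented (the real round-robin policy lives in dl_metric_rr in scheduler_metric.cc).

// Sketch only, not part of this patch.
#include "srsenb/hdr/stack/mac/scheduler.h"      // sched::metric_dl, sched_params_t, sched_ue
#include "srsenb/hdr/stack/mac/scheduler_grid.h" // dl_sf_sched_itf, alloc_outcome_t, rbgmask_t
#include <map>

class dl_metric_naive : public srsenb::sched::metric_dl
{
public:
  void set_params(const srsenb::sched_params_t& sched_params_) final { sched_params = &sched_params_; }

  void sched_users(std::map<uint16_t, srsenb::sched_ue>& ue_db,
                   srsenb::dl_sf_sched_itf*              tti_sched,
                   uint32_t                              cc_idx) final
  {
    for (auto& ue_pair : ue_db) {
      srsenb::sched_ue* user = &ue_pair.second;
      // Placeholder policy decisions: which RBGs and which HARQ pid to use.
      srsenb::rbgmask_t mask = pick_rbg_mask(user, cc_idx);
      uint32_t          pid  = pick_harq_pid(user, cc_idx);
      // The subframe scheduler rejects the grant (e.g. RBG/CCE collision or
      // user already allocated in this subframe) via alloc_outcome_t.
      srsenb::alloc_outcome_t ret = tti_sched->alloc_dl_user(user, mask, pid);
      if (not ret) {
        // Grid is full for this UE; move on to the next one.
        continue;
      }
    }
  }

private:
  // Hypothetical helpers, declaration only; a real metric must implement these.
  srsenb::rbgmask_t pick_rbg_mask(srsenb::sched_ue* user, uint32_t cc_idx);
  uint32_t          pick_harq_pid(srsenb::sched_ue* user, uint32_t cc_idx);

  const srsenb::sched_params_t* sched_params = nullptr;
};

The UL side is symmetric: a sched::metric_ul implementation receives a ul_sf_sched_itf* and places grants through alloc_ul_user(user, alloc), where the ul_harq_proc::ul_alloc_t argument carries the RB_start/L pair seen elsewhere in this patch.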