diff --git a/lib/include/srsran/common/slot_point.h b/lib/include/srsran/common/slot_point.h
index 18db410cb..70477312e 100644
--- a/lib/include/srsran/common/slot_point.h
+++ b/lib/include/srsran/common/slot_point.h
@@ -31,6 +31,11 @@ class slot_point
 public:
   slot_point() : numerology_(NOF_NUMEROLOGIES), count_(0) {}
+  slot_point(uint8_t numerology, uint32_t count) : numerology_(numerology), count_(count)
+  {
+    srsran_assert(numerology < NOF_NUMEROLOGIES, "Invalid numerology idx=%d passed", (int)numerology);
+    srsran_assert(count < nof_slots_per_hf(), "Invalid slot count=%d passed", (int)count);
+  }
   slot_point(uint8_t numerology, uint16_t sfn_val, uint8_t slot) :
     numerology_(numerology), count_(slot + sfn_val * nof_slots_per_frame())
   {
@@ -47,7 +52,7 @@ public:
   uint8_t  nof_slots_per_frame() const { return nof_slots_per_subframe() * NOF_SUBFRAMES_PER_FRAME; }
   uint16_t sfn() const { return count_ / nof_slots_per_frame(); }
-  uint16_t sf_idx() const { return slot_idx() / nof_slots_per_subframe(); }
+  uint16_t subframe_idx() const { return slot_idx() / nof_slots_per_subframe(); }
   uint8_t  slot_idx() const { return count_ % nof_slots_per_frame(); }
   uint8_t  numerology_idx() const { return numerology_; }
   uint32_t to_uint() const { return count_; }
@@ -56,11 +61,7 @@ public:
   void clear() { numerology_ = NOF_NUMEROLOGIES; }

   // operators
-  bool operator==(const slot_point& other) const
-  {
-    srsran_assert(numerology_idx() == other.numerology_idx(), "Comparing slots of different numerologies");
-    return other.count_ == count_;
-  }
+  bool operator==(const slot_point& other) const { return other.count_ == count_ and other.numerology_ == numerology_; }
   bool operator!=(const slot_point& other) const { return not(*this == other); }
   bool operator<(const slot_point& other) const
   {
@@ -163,8 +164,9 @@ struct formatter {

 namespace srsenb {

-using slot_point = srsran::slot_point;
+using slot_point    = srsran::slot_point;
+using slot_interval = srsran::slot_interval;

-}
+} // namespace srsenb

 #endif // SRSRAN_SLOT_POINT_H
diff --git a/lib/test/common/tti_point_test.cc b/lib/test/common/tti_point_test.cc
index cb52feb6e..71c98ed00 100644
--- a/lib/test/common/tti_point_test.cc
+++ b/lib/test/common/tti_point_test.cc
@@ -73,7 +73,7 @@ void test_nr_slot_type()
   srsran::slot_point slot1;
   TESTASSERT(not slot1.valid());
   srsran::slot_point slot2{0, 1, 5};
-  TESTASSERT(slot2.valid() and slot2.numerology_idx() == 0 and slot2.slot_idx() == 5 and slot2.sf_idx() == 5 and
+  TESTASSERT(slot2.valid() and slot2.numerology_idx() == 0 and slot2.slot_idx() == 5 and slot2.subframe_idx() == 5 and
              slot2.sfn() == 1);
   srsran::slot_point slot3{slot2};
   TESTASSERT(slot3 == slot2);
diff --git a/srsenb/hdr/stack/mac/nr/sched_nr.h b/srsenb/hdr/stack/mac/nr/sched_nr.h
index 9e82c82eb..f186119a7 100644
--- a/srsenb/hdr/stack/mac/nr/sched_nr.h
+++ b/srsenb/hdr/stack/mac/nr/sched_nr.h
@@ -17,7 +17,7 @@
 #include "sched_nr_interface.h"
 #include "sched_nr_ue.h"
 #include "srsran/adt/pool/cached_alloc.h"
-#include "srsran/common/tti_point.h"
+#include "srsran/common/slot_point.h"
 #include
 extern "C" {
 #include "srsran/config.h"
@@ -43,13 +43,13 @@ public:
   void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) override;
   void ul_crc_info(uint16_t rnti, uint32_t cc, uint32_t pid, bool crc) override;
-  void ul_sr_info(tti_point tti_rx, uint16_t rnti) override;
+  void ul_sr_info(slot_point slot_rx, uint16_t rnti) override;

-  int  get_dl_sched(tti_point pdsch_tti, uint32_t cc, dl_sched_t& result) override;
-  int  get_ul_sched(tti_point pusch_tti, uint32_t cc, ul_sched_t& result) override;
+  int  get_dl_sched(slot_point pdsch_tti, uint32_t cc, dl_sched_t& result) override;
+  int  get_ul_sched(slot_point pusch_tti, uint32_t cc, ul_sched_t& result) override;

 private:
-  int  generate_slot_result(tti_point pdcch_tti, uint32_t cc);
+  int  generate_slot_result(slot_point pdcch_tti, uint32_t cc);
   void ue_cfg_impl(uint16_t rnti, const ue_cfg_t& cfg);

   // args
diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_cell.h b/srsenb/hdr/stack/mac/nr/sched_nr_cell.h
index fcf38564a..3efcf5649 100644
--- a/srsenb/hdr/stack/mac/nr/sched_nr_cell.h
+++ b/srsenb/hdr/stack/mac/nr/sched_nr_cell.h
@@ -24,7 +24,7 @@ using dl_sched_rar_info_t = sched_nr_interface::dl_sched_rar_info_t;

 struct pending_rar_t {
   uint16_t ra_rnti = 0;
-  tti_point prach_tti;
+  slot_point prach_slot;
   srsran::bounded_vector msg3_grant;
 };
diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_harq.h b/srsenb/hdr/stack/mac/nr/sched_nr_harq.h
index 326a50f5a..a077f849f 100644
--- a/srsenb/hdr/stack/mac/nr/sched_nr_harq.h
+++ b/srsenb/hdr/stack/mac/nr/sched_nr_harq.h
@@ -15,7 +15,7 @@
 #include "sched_nr_cfg.h"
 #include "srsenb/hdr/stack/mac/nr/harq_softbuffer.h"
-#include "srsran/common/tti_point.h"
+#include "srsran/common/slot_point.h"
 #include

 namespace srsenb {
@@ -31,23 +31,30 @@ public:
     return std::all_of(tb.begin(), tb.end(), [](const tb_t& t) { return not t.active; });
   }
   bool empty(uint32_t tb_idx) const { return not tb[tb_idx].active; }
-  bool has_pending_retx(tti_point tti_rx) const { return not empty() and not tb[0].ack_state and tti_ack <= tti_rx; }
+  bool has_pending_retx(slot_point slot_rx) const
+  {
+    return not empty() and not tb[0].ack_state and slot_ack <= slot_rx;
+  }
   uint32_t nof_retx() const { return tb[0].n_rtx; }
   uint32_t max_nof_retx() const { return max_retx; }
   uint32_t tbs() const { return tb[0].tbs; }
   uint32_t ndi() const { return tb[0].ndi; }
   uint32_t mcs() const { return tb[0].mcs; }
   const prb_grant& prbs() const { return prbs_; }
-  tti_point harq_tti_ack() const { return tti_ack; }
+  slot_point harq_slot_ack() const { return slot_ack; }

   bool ack_info(uint32_t tb_idx, bool ack);

-  void new_tti(tti_point tti_rx);
+  void new_slot(slot_point slot_rx);
   void reset();
-  bool
-  new_tx(tti_point tti_tx, tti_point tti_ack, const prb_grant& grant, uint32_t mcs, uint32_t tbs, uint32_t max_retx);
-  bool new_retx(tti_point tti_tx, tti_point tti_ack, const prb_grant& grant);
-  bool new_retx(tti_point tti_tx, tti_point tti_ack);
+  bool new_tx(slot_point slot_tx,
+              slot_point slot_ack,
+              const prb_grant& grant,
+              uint32_t mcs,
+              uint32_t tbs,
+              uint32_t max_retx);
+  bool new_retx(slot_point slot_tx, slot_point slot_ack, const prb_grant& grant);
+  bool new_retx(slot_point slot_tx, slot_point slot_ack);

   // NOTE: Has to be used before first tx is dispatched
   bool set_tbs(uint32_t tbs);
@@ -65,8 +72,8 @@ private:
   };

   uint32_t max_retx = 1;
-  tti_point tti_tx;
-  tti_point tti_ack;
+  slot_point slot_tx;
+  slot_point slot_ack;
   prb_grant prbs_;
   std::array tb;
 };
@@ -113,18 +120,18 @@ class harq_entity
 {
 public:
   explicit harq_entity(uint32_t nprb, uint32_t nof_harq_procs = SCHED_NR_MAX_HARQ);
-  void new_tti(tti_point tti_rx_);
+  void new_slot(slot_point slot_rx_);
   void dl_ack_info(uint32_t pid, uint32_t tb_idx, bool ack) { dl_harqs[pid].ack_info(tb_idx, ack); }
   void ul_crc_info(uint32_t pid, bool ack) { ul_harqs[pid].ack_info(0, ack); }

   dl_harq_proc* find_pending_dl_retx()
   {
-    return find_dl([this](const dl_harq_proc& h) { return h.has_pending_retx(tti_rx); });
+    return find_dl([this](const dl_harq_proc& h) { return h.has_pending_retx(slot_rx); });
   }
   ul_harq_proc* find_pending_ul_retx()
   {
-    return find_ul([this](const ul_harq_proc& h) { return h.has_pending_retx(tti_rx); });
+    return find_ul([this](const ul_harq_proc& h) { return h.has_pending_retx(slot_rx); });
   }
   dl_harq_proc* find_empty_dl_harq()
   {
@@ -149,7 +156,7 @@ private:
     return (it == ul_harqs.end()) ? nullptr : &(*it);
   }

-  tti_point tti_rx;
+  slot_point slot_rx;
   std::vector dl_harqs;
   std::vector ul_harqs;
 };
diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_interface.h b/srsenb/hdr/stack/mac/nr/sched_nr_interface.h
index ad3b9efcf..91b4e0d02 100644
--- a/srsenb/hdr/stack/mac/nr/sched_nr_interface.h
+++ b/srsenb/hdr/stack/mac/nr/sched_nr_interface.h
@@ -18,7 +18,7 @@
 #include "srsran/adt/optional.h"
 #include "srsran/adt/span.h"
 #include "srsran/common/phy_cfg_nr.h"
-#include "srsran/common/tti_point.h"
+#include "srsran/common/slot_point.h"
 #include "srsran/interfaces/gnb_interfaces.h"
 #include "srsran/phy/phch/dci_nr.h"

@@ -85,7 +85,7 @@ public:
     uint32_t ta_cmd;
     uint16_t temp_crnti;
     uint32_t msg3_size;
-    uint32_t prach_tti;
+    uint32_t prach_slot;
   };

   ///// Sched Result /////
@@ -96,12 +96,12 @@ public:
   virtual ~sched_nr_interface() = default;
   virtual int  cell_cfg(srsran::const_span ue_cfg) = 0;
   virtual void ue_cfg(uint16_t rnti, const ue_cfg_t& ue_cfg) = 0;
-  virtual int  get_dl_sched(tti_point tti_rx, uint32_t cc, dl_sched_t& result) = 0;
-  virtual int  get_ul_sched(tti_point tti_rx, uint32_t cc, ul_sched_t& result) = 0;
+  virtual int  get_dl_sched(slot_point slot_rx, uint32_t cc, dl_sched_t& result) = 0;
+  virtual int  get_ul_sched(slot_point slot_rx, uint32_t cc, ul_sched_t& result) = 0;
   virtual void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) = 0;
   virtual void ul_crc_info(uint16_t rnti, uint32_t cc, uint32_t pid, bool crc) = 0;
-  virtual void ul_sr_info(tti_point, uint16_t rnti) = 0;
+  virtual void ul_sr_info(slot_point, uint16_t rnti) = 0;
 };

 } // namespace srsenb
diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h b/srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h
index 0b81d0dcd..ba452a0e8 100644
--- a/srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h
+++ b/srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h
@@ -61,8 +61,8 @@ struct bwp_slot_grid {
 struct bwp_res_grid {
   bwp_res_grid(const bwp_params& bwp_cfg_);

-  bwp_slot_grid& operator[](tti_point tti) { return slots[tti.to_uint() % slots.capacity()]; };
-  const bwp_slot_grid& operator[](tti_point tti) const { return slots[tti.to_uint() % slots.capacity()]; };
+  bwp_slot_grid& operator[](slot_point tti) { return slots[tti.to_uint() % slots.capacity()]; };
+  const bwp_slot_grid& operator[](slot_point tti) const { return slots[tti.to_uint() % slots.capacity()]; };
   uint32_t id() const { return cfg->bwp_id; }
   uint32_t nof_prbs() const { return cfg->cfg.rb_width; }

@@ -81,7 +81,7 @@ class bwp_slot_allocator
 public:
   explicit bwp_slot_allocator(bwp_res_grid& bwp_grid_);

-  void new_slot(tti_point pdcch_tti_) { pdcch_tti = pdcch_tti_; }
+  void new_slot(slot_point pdcch_slot_) { pdcch_slot = pdcch_slot_; }

   alloc_result alloc_rar_and_msg3(uint32_t aggr_idx,
                                   const pending_rar_t& rar,
@@ -91,7 +91,7 @@ public:
   alloc_result alloc_pdsch(slot_ue& ue, const prb_grant& dl_grant);
   alloc_result alloc_pusch(slot_ue& ue, const rbgmask_t& dl_mask);

-  tti_point get_pdcch_tti() const { return pdcch_tti; }
+  slot_point get_pdcch_tti() const { return pdcch_slot; }
   const bwp_res_grid& res_grid() const { return bwp_grid; }

   const bwp_params& cfg;
@@ -102,7 +102,7 @@ private:
   srslog::basic_logger& logger;
   bwp_res_grid& bwp_grid;

-  tti_point pdcch_tti;
+  slot_point pdcch_slot;
 };

 } // namespace sched_nr_impl
diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_ue.h b/srsenb/hdr/stack/mac/nr/sched_nr_ue.h
index 974d69449..535a7f405 100644
--- a/srsenb/hdr/stack/mac/nr/sched_nr_ue.h
+++ b/srsenb/hdr/stack/mac/nr/sched_nr_ue.h
@@ -30,25 +30,25 @@ class slot_ue
 {
 public:
   slot_ue() = default;
-  explicit slot_ue(uint16_t rnti_, tti_point tti_rx_, uint32_t cc);
+  explicit slot_ue(uint16_t rnti_, slot_point slot_rx_, uint32_t cc);
   slot_ue(slot_ue&&) noexcept = default;
   slot_ue& operator=(slot_ue&&) noexcept = default;
   bool empty() const { return rnti == SCHED_NR_INVALID_RNTI; }
   void release() { rnti = SCHED_NR_INVALID_RNTI; }

-  uint16_t rnti = SCHED_NR_INVALID_RNTI;
-  tti_point tti_rx;
-  uint32_t cc = SCHED_NR_MAX_CARRIERS;
+  uint16_t   rnti = SCHED_NR_INVALID_RNTI;
+  slot_point slot_rx;
+  uint32_t   cc = SCHED_NR_MAX_CARRIERS;

   // UE parameters common to all sectors
   bool pending_sr;

   // UE parameters that are sector specific
   const bwp_ue_cfg* cfg = nullptr;
-  tti_point pdcch_tti;
-  tti_point pdsch_tti;
-  tti_point pusch_tti;
-  tti_point uci_tti;
+  slot_point pdcch_slot;
+  slot_point pdsch_slot;
+  slot_point pusch_slot;
+  slot_point uci_slot;
   uint32_t dl_cqi;
   uint32_t ul_cqi;
   dl_harq_proc* h_dl = nullptr;
@@ -59,8 +59,8 @@ class ue_carrier
 {
 public:
   ue_carrier(uint16_t rnti, const ue_cfg_t& cfg, const sched_cell_params& cell_params_);
-  void new_tti(tti_point pdcch_tti, const ue_cfg_t& uecfg_);
-  slot_ue try_reserve(tti_point pdcch_tti);
+  void new_slot(slot_point pdcch_slot, const ue_cfg_t& uecfg_);
+  slot_ue try_reserve(slot_point pdcch_slot);

   const uint16_t rnti;
   const uint32_t cc;
@@ -81,12 +81,12 @@ class ue
 public:
   ue(uint16_t rnti, const ue_cfg_t& cfg, const sched_params& sched_cfg_);

-  slot_ue try_reserve(tti_point pdcch_tti, uint32_t cc);
+  slot_ue try_reserve(slot_point pdcch_slot, uint32_t cc);

   void set_cfg(const ue_cfg_t& cfg);
   const ue_cfg_t& cfg() const { return ue_cfg; }

-  void ul_sr_info(tti_point tti_rx) { pending_sr = true; }
+  void ul_sr_info(slot_point slot_rx) { pending_sr = true; }

   bool has_ca() const { return ue_cfg.carriers.size() > 1; }
   uint32_t pcell_cc() const { return ue_cfg.carriers[0].cc; }
diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_worker.h b/srsenb/hdr/stack/mac/nr/sched_nr_worker.h
index a08d4df8e..631f53537 100644
--- a/srsenb/hdr/stack/mac/nr/sched_nr_worker.h
+++ b/srsenb/hdr/stack/mac/nr/sched_nr_worker.h
@@ -37,10 +37,10 @@ public:
   explicit slot_cc_worker(serv_cell_manager& sched);

-  void start(tti_point pdcch_tti, ue_map_t& ue_db_);
+  void start(slot_point pdcch_slot, ue_map_t& ue_db_);
   void run();
   void finish();
-  bool running() const { return tti_rx.is_valid(); }
+  bool running() const { return slot_rx.valid(); }

   /// Enqueue feedback directed at a given UE in a given cell
   void enqueue_cc_feedback(uint16_t rnti, feedback_callback_t fdbk);
@@ -57,7 +57,7 @@ private:
   serv_cell_manager& cell;
   srslog::basic_logger& logger;

-  tti_point tti_rx;
+  slot_point slot_rx;
   bwp_slot_allocator bwp_alloc;

   // Process of UE cell-specific feedback
@@ -76,7 +76,7 @@ class sched_worker_manager
   struct slot_worker_ctxt {
     std::mutex slot_mutex; // lock of all workers of the same slot.
std::condition_variable cvar; - tti_point tti_rx; + slot_point slot_rx; int nof_workers_waiting = 0; std::atomic worker_count{0}; // variable shared across slot_cc_workers std::vector workers; @@ -90,7 +90,7 @@ public: sched_worker_manager(sched_worker_manager&&) = delete; ~sched_worker_manager(); - void run_slot(tti_point tti_tx, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res); + void run_slot(slot_point slot_tx, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res); void enqueue_event(uint16_t rnti, srsran::move_callback ev); void enqueue_cc_feedback(uint16_t rnti, uint32_t cc, slot_cc_worker::feedback_callback_t fdbk) @@ -99,7 +99,7 @@ public: } private: - bool save_sched_result(tti_point pdcch_tti, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res); + bool save_sched_result(slot_point pdcch_slot, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res); const sched_params& cfg; ue_map_t& ue_db; @@ -124,7 +124,7 @@ private: std::mutex slot_mutex; std::condition_variable cvar; - tti_point current_tti; + slot_point current_slot; std::atomic worker_count{0}; // variable shared across slot_cc_workers std::vector > cc_worker_list; }; diff --git a/srsenb/src/stack/mac/nr/sched_nr.cc b/srsenb/src/stack/mac/nr/sched_nr.cc index 702210928..da25e2862 100644 --- a/srsenb/src/stack/mac/nr/sched_nr.cc +++ b/srsenb/src/stack/mac/nr/sched_nr.cc @@ -34,40 +34,40 @@ public: } } - dl_sched_t& add_dl_result(tti_point tti, uint32_t cc) + dl_sched_t& add_dl_result(slot_point tti, uint32_t cc) { if (not has_dl_result(tti, cc)) { - results[tti.to_uint()][cc].tti_dl = tti; - results[tti.to_uint()][cc].dl_res = {}; + results[tti.to_uint()][cc].slot_dl = tti; + results[tti.to_uint()][cc].dl_res = {}; } return results[tti.to_uint()][cc].dl_res; } - ul_sched_t& add_ul_result(tti_point tti, uint32_t cc) + ul_sched_t& add_ul_result(slot_point tti, uint32_t cc) { if (not has_ul_result(tti, cc)) { - results[tti.to_uint()][cc].tti_ul = tti; - results[tti.to_uint()][cc].ul_res = {}; + results[tti.to_uint()][cc].slot_ul = tti; + results[tti.to_uint()][cc].ul_res = {}; } return results[tti.to_uint()][cc].ul_res; } - bool has_dl_result(tti_point tti, uint32_t cc) const { return results[tti.to_uint()][cc].tti_dl == tti; } + bool has_dl_result(slot_point tti, uint32_t cc) const { return results[tti.to_uint()][cc].slot_dl == tti; } - bool has_ul_result(tti_point tti, uint32_t cc) const { return results[tti.to_uint()][cc].tti_ul == tti; } + bool has_ul_result(slot_point tti, uint32_t cc) const { return results[tti.to_uint()][cc].slot_ul == tti; } - dl_sched_t pop_dl_result(tti_point tti, uint32_t cc) + dl_sched_t pop_dl_result(slot_point tti, uint32_t cc) { if (has_dl_result(tti, cc)) { - results[tti.to_uint()][cc].tti_dl.reset(); + results[tti.to_uint()][cc].slot_dl.clear(); return results[tti.to_uint()][cc].dl_res; } return {}; } - ul_sched_t pop_ul_result(tti_point tti, uint32_t cc) + ul_sched_t pop_ul_result(slot_point tti, uint32_t cc) { if (has_ul_result(tti, cc)) { - results[tti.to_uint()][cc].tti_ul.reset(); + results[tti.to_uint()][cc].slot_ul.clear(); return results[tti.to_uint()][cc].ul_res; } return {}; @@ -75,8 +75,8 @@ public: private: struct slot_result_t { - tti_point tti_dl; - tti_point tti_ul; + slot_point slot_dl; + slot_point slot_ul; dl_sched_t dl_res; ul_sched_t ul_res; }; @@ -126,7 +126,7 @@ void sched_nr::ue_cfg_impl(uint16_t rnti, const ue_cfg_t& uecfg) } /// Generate {tti,cc} scheduling decision -int sched_nr::generate_slot_result(tti_point pdcch_tti, uint32_t cc) +int 
sched_nr::generate_slot_result(slot_point pdcch_tti, uint32_t cc) { // Copy results to intermediate buffer dl_sched_t& dl_res = pending_results->add_dl_result(pdcch_tti, cc); @@ -138,16 +138,16 @@ int sched_nr::generate_slot_result(tti_point pdcch_tti, uint32_t cc) return SRSRAN_SUCCESS; } -int sched_nr::get_dl_sched(tti_point tti_tx, uint32_t cc, dl_sched_t& result) +int sched_nr::get_dl_sched(slot_point slot_tx, uint32_t cc, dl_sched_t& result) { - if (not pending_results->has_dl_result(tti_tx, cc)) { - generate_slot_result(tti_tx, cc); + if (not pending_results->has_dl_result(slot_tx, cc)) { + generate_slot_result(slot_tx, cc); } - result = pending_results->pop_dl_result(tti_tx, cc); + result = pending_results->pop_dl_result(slot_tx, cc); return SRSRAN_SUCCESS; } -int sched_nr::get_ul_sched(tti_point pusch_tti, uint32_t cc, ul_sched_t& result) +int sched_nr::get_ul_sched(slot_point pusch_tti, uint32_t cc, ul_sched_t& result) { if (not pending_results->has_ul_result(pusch_tti, cc)) { // sched result hasn't been generated @@ -169,9 +169,9 @@ void sched_nr::ul_crc_info(uint16_t rnti, uint32_t cc, uint32_t pid, bool crc) sched_workers->enqueue_cc_feedback(rnti, cc, [pid, crc](ue_carrier& ue_cc) { ue_cc.harq_ent.ul_crc_info(pid, crc); }); } -void sched_nr::ul_sr_info(tti_point tti_rx, uint16_t rnti) +void sched_nr::ul_sr_info(slot_point slot_rx, uint16_t rnti) { - sched_workers->enqueue_event(rnti, [this, rnti, tti_rx]() { ue_db[rnti]->ul_sr_info(tti_rx); }); + sched_workers->enqueue_event(rnti, [this, rnti, slot_rx]() { ue_db[rnti]->ul_sr_info(slot_rx); }); } #define VERIFY_INPUT(cond, msg, ...) \ diff --git a/srsenb/src/stack/mac/nr/sched_nr_cell.cc b/srsenb/src/stack/mac/nr/sched_nr_cell.cc index 8c5e1a26e..9c30de42c 100644 --- a/srsenb/src/stack/mac/nr/sched_nr_cell.cc +++ b/srsenb/src/stack/mac/nr/sched_nr_cell.cc @@ -55,9 +55,9 @@ alloc_result ra_sched::allocate_pending_rar(bwp_slot_allocator& slot_grid, void ra_sched::run_slot(bwp_slot_allocator& slot_grid, slot_ue_map_t& slot_ues) { static const uint32_t PRACH_RAR_OFFSET = 3; - tti_point pdcch_tti = slot_grid.get_pdcch_tti(); - tti_point msg3_tti = pdcch_tti + bwp_cfg->pusch_ra_list[0].msg3_delay; - if (not slot_grid.res_grid()[msg3_tti].is_ul) { + slot_point pdcch_slot = slot_grid.get_pdcch_tti(); + slot_point msg3_slot = pdcch_slot + bwp_cfg->pusch_ra_list[0].msg3_delay; + if (not slot_grid.res_grid()[msg3_slot].is_ul) { return; } @@ -67,16 +67,16 @@ void ra_sched::run_slot(bwp_slot_allocator& slot_grid, slot_ue_map_t& slot_ues) // In case of RAR outside RAR window: // - if window has passed, discard RAR // - if window hasn't started, stop loop, as RARs are ordered by TTI - tti_interval rar_window{rar.prach_tti + PRACH_RAR_OFFSET, - rar.prach_tti + PRACH_RAR_OFFSET + bwp_cfg->cfg.rar_window_size}; - if (not rar_window.contains(pdcch_tti)) { - if (pdcch_tti >= rar_window.stop()) { + slot_interval rar_window{rar.prach_slot + PRACH_RAR_OFFSET, + rar.prach_slot + PRACH_RAR_OFFSET + bwp_cfg->cfg.rar_window_size}; + if (not rar_window.contains(pdcch_slot)) { + if (pdcch_slot >= rar_window.stop()) { fmt::memory_buffer str_buffer; fmt::format_to(str_buffer, "SCHED: Could not transmit RAR within the window (RA={}, Window={}, RAR={}", - rar.prach_tti, + rar.prach_slot, rar_window, - pdcch_tti); + pdcch_slot); srsran::console("%s\n", srsran::to_c_str(str_buffer)); logger.warning("%s", srsran::to_c_str(str_buffer)); it = pending_rars.erase(it); @@ -116,7 +116,7 @@ void ra_sched::run_slot(bwp_slot_allocator& slot_grid, slot_ue_map_t& slot_ues) 
int ra_sched::dl_rach_info(const dl_sched_rar_info_t& rar_info) { logger.info("SCHED: New PRACH tti=%d, preamble=%d, temp_crnti=0x%x, ta_cmd=%d, msg3_size=%d", - rar_info.prach_tti, + rar_info.prach_slot, rar_info.preamble_idx, rar_info.temp_crnti, rar_info.ta_cmd, @@ -125,11 +125,11 @@ int ra_sched::dl_rach_info(const dl_sched_rar_info_t& rar_info) // RA-RNTI = 1 + t_id + f_id // t_id = index of first subframe specified by PRACH (0<=t_id<10) // f_id = index of the PRACH within subframe, in ascending order of freq domain (0<=f_id<6) (for FDD, f_id=0) - uint16_t ra_rnti = 1 + (uint16_t)(rar_info.prach_tti % 10u); + uint16_t ra_rnti = 1 + (uint16_t)(rar_info.prach_slot % 10u); // find pending rar with same RA-RNTI for (pending_rar_t& r : pending_rars) { - if (r.prach_tti.to_uint() == rar_info.prach_tti and ra_rnti == r.ra_rnti) { + if (r.prach_slot.to_uint() == rar_info.prach_slot and ra_rnti == r.ra_rnti) { if (r.msg3_grant.size() >= sched_interface::MAX_RAR_LIST) { logger.warning("PRACH ignored, as the the maximum number of RAR grants per tti has been reached"); return SRSRAN_ERROR; @@ -141,8 +141,8 @@ int ra_sched::dl_rach_info(const dl_sched_rar_info_t& rar_info) // create new RAR pending_rar_t p; - p.ra_rnti = ra_rnti; - p.prach_tti = tti_point{rar_info.prach_tti}; + p.ra_rnti = ra_rnti; + p.prach_slot = slot_point{0, rar_info.prach_slot}; p.msg3_grant.push_back(rar_info); pending_rars.push_back(p); diff --git a/srsenb/src/stack/mac/nr/sched_nr_harq.cc b/srsenb/src/stack/mac/nr/sched_nr_harq.cc index 928523347..513b5da31 100644 --- a/srsenb/src/stack/mac/nr/sched_nr_harq.cc +++ b/srsenb/src/stack/mac/nr/sched_nr_harq.cc @@ -27,9 +27,9 @@ bool harq_proc::ack_info(uint32_t tb_idx, bool ack) return true; } -void harq_proc::new_tti(tti_point tti_rx) +void harq_proc::new_slot(slot_point slot_rx) { - if (has_pending_retx(tti_rx) and nof_retx() + 1 >= max_nof_retx()) { + if (has_pending_retx(slot_rx) and nof_retx() + 1 >= max_nof_retx()) { tb[0].active = false; } } @@ -43,8 +43,8 @@ void harq_proc::reset() tb[0].tbs = std::numeric_limits::max(); } -bool harq_proc::new_tx(tti_point tti_tx_, - tti_point tti_ack_, +bool harq_proc::new_tx(slot_point slot_tx_, + slot_point slot_ack_, const prb_grant& grant, uint32_t mcs, uint32_t tbs, @@ -55,8 +55,8 @@ bool harq_proc::new_tx(tti_point tti_tx_, } reset(); max_retx = max_retx_; - tti_tx = tti_tx_; - tti_ack = tti_ack_; + slot_tx = slot_tx_; + slot_ack = slot_ack_; prbs_ = grant; tb[0].ndi = !tb[0].ndi; tb[0].mcs = mcs; @@ -74,27 +74,27 @@ bool harq_proc::set_tbs(uint32_t tbs) return true; } -bool harq_proc::new_retx(tti_point tti_tx_, tti_point tti_ack_, const prb_grant& grant) +bool harq_proc::new_retx(slot_point slot_tx_, slot_point slot_ack_, const prb_grant& grant) { if (grant.is_alloc_type0() != prbs_.is_alloc_type0() or (grant.is_alloc_type0() and grant.rbgs().count() != prbs_.rbgs().count()) or (grant.is_alloc_type1() and grant.prbs().length() == prbs_.prbs().length())) { return false; } - if (new_retx(tti_tx_, tti_ack_)) { + if (new_retx(slot_tx_, slot_ack_)) { prbs_ = grant; return true; } return false; } -bool harq_proc::new_retx(tti_point tti_tx_, tti_point tti_ack_) +bool harq_proc::new_retx(slot_point slot_tx_, slot_point slot_ack_) { if (empty()) { return false; } - tti_tx = tti_tx_; - tti_ack = tti_ack_; + slot_tx = slot_tx_; + slot_ack = slot_ack_; tb[0].ack_state = false; tb[0].n_rtx++; return true; @@ -111,14 +111,14 @@ harq_entity::harq_entity(uint32_t nprb, uint32_t nof_harq_procs) } } -void harq_entity::new_tti(tti_point tti_rx_) 
+void harq_entity::new_slot(slot_point slot_rx_) { - tti_rx = tti_rx_; + slot_rx = slot_rx_; for (harq_proc& dl_h : dl_harqs) { - dl_h.new_tti(tti_rx); + dl_h.new_slot(slot_rx); } for (harq_proc& ul_h : ul_harqs) { - ul_h.new_tti(tti_rx); + ul_h.new_slot(slot_rx); } } diff --git a/srsenb/src/stack/mac/nr/sched_nr_helpers.cc b/srsenb/src/stack/mac/nr/sched_nr_helpers.cc index 8e54fdfb4..5f33527be 100644 --- a/srsenb/src/stack/mac/nr/sched_nr_helpers.cc +++ b/srsenb/src/stack/mac/nr/sched_nr_helpers.cc @@ -67,9 +67,9 @@ void fill_dl_dci_ue_fields(const slot_ue& ue, fill_dci_common(ue, bwp_cfg, dci); if (dci.ctx.format == srsran_dci_format_nr_1_0) { - dci.harq_feedback = ue.cfg->phy().harq_ack.dl_data_to_ul_ack[ue.pdsch_tti.sf_idx()] - 1; + dci.harq_feedback = ue.cfg->phy().harq_ack.dl_data_to_ul_ack[ue.pdsch_slot.slot_idx()] - 1; } else { - dci.harq_feedback = ue.pdsch_tti.sf_idx(); + dci.harq_feedback = ue.pdsch_slot.slot_idx(); } } diff --git a/srsenb/src/stack/mac/nr/sched_nr_rb_grid.cc b/srsenb/src/stack/mac/nr/sched_nr_rb_grid.cc index cdcf4f630..e10cd61f0 100644 --- a/srsenb/src/stack/mac/nr/sched_nr_rb_grid.cc +++ b/srsenb/src/stack/mac/nr/sched_nr_rb_grid.cc @@ -72,9 +72,9 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint32_t { static const uint32_t msg3_nof_prbs = 3, m = 0; - bwp_slot_grid& bwp_pdcch_slot = bwp_grid[pdcch_tti]; - tti_point msg3_tti = pdcch_tti + cfg.pusch_ra_list[m].msg3_delay; - bwp_slot_grid& bwp_msg3_slot = bwp_grid[msg3_tti]; + bwp_slot_grid& bwp_pdcch_slot = bwp_grid[pdcch_slot]; + slot_point msg3_slot = pdcch_slot + cfg.pusch_ra_list[m].msg3_delay; + bwp_slot_grid& bwp_msg3_slot = bwp_grid[msg3_slot]; alloc_result ret = verify_pusch_space(bwp_msg3_slot, nullptr); if (ret != alloc_result::success) { return ret; @@ -129,11 +129,11 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint32_t const int mcs = 0, max_harq_msg3_retx = 4; int dai = 0; srsran_slot_cfg_t slot_cfg; - slot_cfg.idx = msg3_tti.sf_idx(); + slot_cfg.idx = msg3_slot.slot_idx(); for (const auto& grant : rar.msg3_grant) { slot_ue& ue = ues[grant.temp_crnti]; bool success = ue.h_ul->new_tx( - msg3_tti, msg3_tti, prb_interval{last_msg3, last_msg3 + msg3_nof_prbs}, mcs, 100, max_harq_msg3_retx); + msg3_slot, msg3_slot, prb_interval{last_msg3, last_msg3 + msg3_nof_prbs}, mcs, 100, max_harq_msg3_retx); srsran_assert(success, "Failed to allocate Msg3"); last_msg3 += msg3_nof_prbs; srsran_dci_ul_nr_t msg3_dci; // Create dummy Msg3 DCI @@ -168,9 +168,9 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr logger.warning("SCHED: Trying to allocate PDSCH for rnti=0x%x with no available HARQs", ue.rnti); return alloc_result::no_rnti_opportunity; } - bwp_slot_grid& bwp_pdcch_slot = bwp_grid[ue.pdcch_tti]; - bwp_slot_grid& bwp_pdsch_slot = bwp_grid[ue.pdsch_tti]; - bwp_slot_grid& bwp_uci_slot = bwp_grid[ue.uci_tti]; + bwp_slot_grid& bwp_pdcch_slot = bwp_grid[ue.pdcch_slot]; + bwp_slot_grid& bwp_pdsch_slot = bwp_grid[ue.pdsch_slot]; + bwp_slot_grid& bwp_uci_slot = bwp_grid[ue.uci_slot]; if (not bwp_pdsch_slot.is_dl) { logger.warning("SCHED: Trying to allocate PDSCH in TDD non-DL slot index=%d", bwp_pdsch_slot.slot_idx); return alloc_result::no_sch_space; @@ -200,10 +200,10 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr srsran_assert(ue.cfg->ue_cfg()->fixed_dl_mcs >= 0, "Dynamic MCS not yet supported"); int mcs = ue.cfg->ue_cfg()->fixed_dl_mcs; int tbs = 100; - bool ret = ue.h_dl->new_tx(ue.pdsch_tti, ue.uci_tti, dl_grant, mcs, 
tbs, 4); + bool ret = ue.h_dl->new_tx(ue.pdsch_slot, ue.uci_slot, dl_grant, mcs, tbs, 4); srsran_assert(ret, "Failed to allocate DL HARQ"); } else { - bool ret = ue.h_dl->new_retx(ue.pdsch_tti, ue.uci_tti, dl_grant); + bool ret = ue.h_dl->new_retx(ue.pdsch_slot, ue.uci_slot, dl_grant); srsran_assert(ret, "Failed to allocate DL HARQ retx"); } @@ -230,7 +230,7 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr bwp_pdsch_slot.pdschs.emplace_back(); pdsch_t& pdsch = bwp_pdsch_slot.pdschs.back(); srsran_slot_cfg_t slot_cfg; - slot_cfg.idx = ue.pdsch_tti.sf_idx(); + slot_cfg.idx = ue.pdsch_slot.slot_idx(); bool ret = ue.cfg->phy().get_pdsch_cfg(slot_cfg, pdcch.dci, pdsch.sch); srsran_assert(ret, "Error converting DCI to grant"); pdsch.sch.grant.tb[0].softbuffer.tx = ue.h_dl->get_softbuffer().get(); @@ -245,8 +245,8 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const rbg_bitmap& ul_mask) { - auto& bwp_pdcch_slot = bwp_grid[ue.pdcch_tti]; - auto& bwp_pusch_slot = bwp_grid[ue.pusch_tti]; + auto& bwp_pdcch_slot = bwp_grid[ue.pdcch_slot]; + auto& bwp_pusch_slot = bwp_grid[ue.pusch_slot]; alloc_result ret = verify_pusch_space(bwp_pusch_slot, &bwp_pdcch_slot); if (ret != alloc_result::success) { return ret; @@ -272,10 +272,10 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const rbg_bitmap& ul_m srsran_assert(ue.cfg->ue_cfg()->fixed_ul_mcs >= 0, "Dynamic MCS not yet supported"); int mcs = ue.cfg->ue_cfg()->fixed_ul_mcs; int tbs = 100; - bool success = ue.h_ul->new_tx(ue.pusch_tti, ue.pusch_tti, ul_mask, mcs, tbs, ue.cfg->ue_cfg()->maxharq_tx); + bool success = ue.h_ul->new_tx(ue.pusch_slot, ue.pusch_slot, ul_mask, mcs, tbs, ue.cfg->ue_cfg()->maxharq_tx); srsran_assert(success, "Failed to allocate UL HARQ"); } else { - srsran_assert(ue.h_ul->new_retx(ue.pusch_tti, ue.pusch_tti, ul_mask), "Failed to allocate UL HARQ retx"); + srsran_assert(ue.h_ul->new_retx(ue.pusch_slot, ue.pusch_slot, ul_mask), "Failed to allocate UL HARQ retx"); } // Allocation Successful @@ -288,7 +288,7 @@ alloc_result bwp_slot_allocator::alloc_pusch(slot_ue& ue, const rbg_bitmap& ul_m bwp_pusch_slot.puschs.emplace_back(); pusch_t& pusch = bwp_pusch_slot.puschs.back(); srsran_slot_cfg_t slot_cfg; - slot_cfg.idx = ue.pusch_tti.sf_idx(); + slot_cfg.idx = ue.pusch_slot.to_uint(); bool success = ue.cfg->phy().get_pusch_cfg(slot_cfg, pdcch.dci, pusch.sch); srsran_assert(success, "Error converting DCI to PUSCH grant"); pusch.sch.grant.tb[0].softbuffer.rx = ue.h_ul->get_softbuffer().get(); diff --git a/srsenb/src/stack/mac/nr/sched_nr_ue.cc b/srsenb/src/stack/mac/nr/sched_nr_ue.cc index 45785eb94..a2d5a5279 100644 --- a/srsenb/src/stack/mac/nr/sched_nr_ue.cc +++ b/srsenb/src/stack/mac/nr/sched_nr_ue.cc @@ -16,7 +16,7 @@ namespace srsenb { namespace sched_nr_impl { -slot_ue::slot_ue(uint16_t rnti_, tti_point tti_rx_, uint32_t cc_) : rnti(rnti_), tti_rx(tti_rx_), cc(cc_) {} +slot_ue::slot_ue(uint16_t rnti_, slot_point slot_rx_, uint32_t cc_) : rnti(rnti_), slot_rx(slot_rx_), cc(cc_) {} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -28,41 +28,42 @@ ue_carrier::ue_carrier(uint16_t rnti_, const ue_cfg_t& uecfg_, const sched_cell_ harq_ent(cell_params_.nof_prb()) {} -void ue_carrier::new_tti(tti_point pdcch_tti, const ue_cfg_t& uecfg_) +void ue_carrier::new_slot(slot_point pdcch_slot, const ue_cfg_t& uecfg_) { if (bwp_cfg.ue_cfg() 
!= &uecfg_) { bwp_cfg = bwp_ue_cfg(rnti, cell_params.bwps[0], uecfg_); } - harq_ent.new_tti(pdcch_tti - TX_ENB_DELAY); + harq_ent.new_slot(pdcch_slot - TX_ENB_DELAY); } -slot_ue ue_carrier::try_reserve(tti_point pdcch_tti) +slot_ue ue_carrier::try_reserve(slot_point pdcch_slot) { - tti_point tti_rx = pdcch_tti - TX_ENB_DELAY; + slot_point slot_rx = pdcch_slot - TX_ENB_DELAY; // copy cc-specific parameters and find available HARQs - slot_ue sfu(rnti, tti_rx, cc); + slot_ue sfu(rnti, slot_rx, cc); sfu.cfg = &bwp_cfg; - sfu.pdcch_tti = pdcch_tti; + sfu.pdcch_slot = pdcch_slot; const uint32_t k0 = 0; - sfu.pdsch_tti = sfu.pdcch_tti + k0; + sfu.pdsch_slot = sfu.pdcch_slot + k0; uint32_t k1 = - sfu.cfg->phy().harq_ack.dl_data_to_ul_ack[sfu.pdsch_tti.sf_idx() % sfu.cfg->phy().harq_ack.nof_dl_data_to_ul_ack]; - sfu.uci_tti = sfu.pdsch_tti + k1; - uint32_t k2 = bwp_cfg.active_bwp().pusch_ra_list[0].K; - sfu.pusch_tti = sfu.pdcch_tti + k2; - sfu.dl_cqi = dl_cqi; - sfu.ul_cqi = ul_cqi; + sfu.cfg->phy() + .harq_ack.dl_data_to_ul_ack[sfu.pdsch_slot.slot_idx() % sfu.cfg->phy().harq_ack.nof_dl_data_to_ul_ack]; + sfu.uci_slot = sfu.pdsch_slot + k1; + uint32_t k2 = bwp_cfg.active_bwp().pusch_ra_list[0].K; + sfu.pusch_slot = sfu.pdcch_slot + k2; + sfu.dl_cqi = dl_cqi; + sfu.ul_cqi = ul_cqi; const srsran_tdd_config_nr_t& tdd_cfg = cell_params.cell_cfg.tdd; - if (srsran_tdd_nr_is_dl(&tdd_cfg, 0, sfu.pdsch_tti.sf_idx())) { + if (srsran_tdd_nr_is_dl(&tdd_cfg, 0, sfu.pdsch_slot.slot_idx())) { // If DL enabled sfu.h_dl = harq_ent.find_pending_dl_retx(); if (sfu.h_dl == nullptr) { sfu.h_dl = harq_ent.find_empty_dl_harq(); } } - if (srsran_tdd_nr_is_ul(&tdd_cfg, 0, sfu.pusch_tti.sf_idx())) { + if (srsran_tdd_nr_is_ul(&tdd_cfg, 0, sfu.pusch_slot.slot_idx())) { // If UL enabled sfu.h_ul = harq_ent.find_pending_ul_retx(); if (sfu.h_ul == nullptr) { @@ -95,12 +96,12 @@ void ue::set_cfg(const ue_cfg_t& cfg) ue_cfg = cfg; } -slot_ue ue::try_reserve(tti_point pdcch_tti, uint32_t cc) +slot_ue ue::try_reserve(slot_point pdcch_slot, uint32_t cc) { if (carriers[cc] == nullptr) { return slot_ue(); } - slot_ue sfu = carriers[cc]->try_reserve(pdcch_tti); + slot_ue sfu = carriers[cc]->try_reserve(pdcch_slot); if (sfu.empty()) { return slot_ue(); } diff --git a/srsenb/src/stack/mac/nr/sched_nr_worker.cc b/srsenb/src/stack/mac/nr/sched_nr_worker.cc index 98bc3bb50..86d64235b 100644 --- a/srsenb/src/stack/mac/nr/sched_nr_worker.cc +++ b/srsenb/src/stack/mac/nr/sched_nr_worker.cc @@ -46,10 +46,10 @@ void slot_cc_worker::run_feedback(ue_map_t& ue_db) } /// Called at the beginning of TTI in a locked context, to reserve available UE resources -void slot_cc_worker::start(tti_point pdcch_tti, ue_map_t& ue_db) +void slot_cc_worker::start(slot_point pdcch_slot, ue_map_t& ue_db) { srsran_assert(not running(), "scheduler worker::start() called for active worker"); - tti_rx = pdcch_tti - TX_ENB_DELAY; + slot_rx = pdcch_slot - TX_ENB_DELAY; // Run pending cell feedback run_feedback(ue_db); @@ -62,15 +62,15 @@ void slot_cc_worker::start(tti_point pdcch_tti, ue_map_t& ue_db) continue; } - u.carriers[cfg.cc]->new_tti(pdcch_tti, u.cfg()); + u.carriers[cfg.cc]->new_slot(pdcch_slot, u.cfg()); - slot_ues.insert(rnti, u.try_reserve(pdcch_tti, cfg.cc)); + slot_ues.insert(rnti, u.try_reserve(pdcch_slot, cfg.cc)); if (slot_ues[rnti].empty()) { // Failed to generate slot UE because UE has no conditions for DL/UL tx slot_ues.erase(rnti); continue; } - // UE acquired successfully for scheduling in this {tti, cc} + // UE acquired successfully for scheduling 
in this {slot, cc} } } @@ -78,7 +78,7 @@ void slot_cc_worker::run() { srsran_assert(running(), "scheduler worker::run() called for non-active worker"); - bwp_alloc.new_slot(tti_rx + TX_ENB_DELAY); + bwp_alloc.new_slot(slot_rx + TX_ENB_DELAY); // Allocate pending RARs cell.bwps[0].ra.run_slot(bwp_alloc, slot_ues); @@ -92,7 +92,7 @@ void slot_cc_worker::run() // releases UE resources slot_ues.clear(); - tti_rx = {}; + slot_rx = {}; } void slot_cc_worker::finish() @@ -132,7 +132,7 @@ void slot_cc_worker::alloc_ul_ues() void slot_cc_worker::log_result() const { - const bwp_slot_grid& bwp_slot = cell.bwps[0].grid[tti_rx + TX_ENB_DELAY]; + const bwp_slot_grid& bwp_slot = cell.bwps[0].grid[slot_rx + TX_ENB_DELAY]; for (const pdcch_dl_t& pdcch : bwp_slot.dl_pdcchs) { fmt::memory_buffer fmtbuf; if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_c) { @@ -146,8 +146,8 @@ void slot_cc_worker::log_result() const ue.h_dl->nof_retx(), srsran_dci_format_nr_string(pdcch.dci.ctx.format), pdcch.dci.dai, - ue.pdsch_tti, - ue.uci_tti); + ue.pdsch_slot, + ue.uci_slot); } else if (pdcch.dci.ctx.rnti_type == srsran_rnti_type_ra) { fmt::format_to(fmtbuf, "SCHED: DL RAR, cc={}", cell.cfg.cc); } else { @@ -168,7 +168,7 @@ void slot_cc_worker::log_result() const pdcch.dci.pid, ue.h_dl->nof_retx(), srsran_dci_format_nr_string(pdcch.dci.ctx.format), - ue.pusch_tti); + ue.pusch_slot); } else { fmt::format_to(fmtbuf, "SCHED: unknown rnti format"); } @@ -198,18 +198,18 @@ void sched_worker_manager::enqueue_event(uint16_t rnti, srsran::move_callback waiting_cvars; { std::unique_lock lock(slot_mutex); - while (current_tti.is_valid() and current_tti != tti_tx) { + while (current_slot.valid() and current_slot != slot_tx) { // Wait for previous slot to finish cc_worker_list[cc]->waiting = true; cc_worker_list[cc]->cvar.wait(lock); cc_worker_list[cc]->waiting = false; } - if (not current_tti.is_valid()) { + if (not current_slot.valid()) { /* First Worker to start slot */ // process non-cc specific feedback if pending (e.g. SRs, buffer updates, UE config) for UEs with CA @@ -226,7 +226,7 @@ void sched_worker_manager::run_slot(tti_point tti_tx, uint32_t cc, dl_sched_t& d } // mark the start of slot. awake remaining workers if locking on the mutex - current_tti = tti_tx; + current_slot = slot_tx; worker_count.store(static_cast(cc_worker_list.size()), std::memory_order_relaxed); for (auto& w : cc_worker_list) { if (w->waiting) { @@ -251,20 +251,20 @@ void sched_worker_manager::run_slot(tti_point tti_tx, uint32_t cc, dl_sched_t& d } // process pending feedback and pre-cache UE state for slot decision - cc_worker_list[cc]->worker.start(tti_tx, ue_db); + cc_worker_list[cc]->worker.start(slot_tx, ue_db); - // Get {tti, cc} scheduling decision + // Get {slot, cc} scheduling decision cc_worker_list[cc]->worker.run(); // decrement the number of active workers int rem_workers = worker_count.fetch_sub(1, std::memory_order_release) - 1; - srsran_assert(rem_workers >= 0, "invalid number of calls to run_tti(tti, cc)"); + srsran_assert(rem_workers >= 0, "invalid number of calls to run_slot(slot, cc)"); if (rem_workers == 0) { /* Last Worker to finish slot */ // Signal the release of slot if it is the last worker that finished its own generation std::unique_lock lock(slot_mutex); - current_tti = {}; + current_slot = {}; // All the workers of the same slot have finished. 
Synchronize scheduling decisions with UEs state for (auto& c : cc_worker_list) { @@ -282,13 +282,13 @@ void sched_worker_manager::run_slot(tti_point tti_tx, uint32_t cc, dl_sched_t& d } // Copy results to intermediate buffer - save_sched_result(tti_tx, cc, dl_res, ul_res); + save_sched_result(slot_tx, cc, dl_res, ul_res); } -bool sched_worker_manager::save_sched_result(tti_point pdcch_tti, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res) +bool sched_worker_manager::save_sched_result(slot_point pdcch_slot, uint32_t cc, dl_sched_t& dl_res, ul_sched_t& ul_res) { // NOTE: Unlocked region - auto& bwp_slot = cells[cc]->bwps[0].grid[pdcch_tti]; + auto& bwp_slot = cells[cc]->bwps[0].grid[pdcch_slot]; dl_res.pdcch_dl = bwp_slot.dl_pdcchs; dl_res.pdcch_ul = bwp_slot.ul_pdcchs; @@ -309,7 +309,7 @@ bool sched_worker_manager::save_sched_result(tti_point pdcch_tti, uint32_t cc, d if (phy_cfg != nullptr) { srsran_slot_cfg_t slot_cfg{}; - slot_cfg.idx = pdcch_tti.sf_idx(); + slot_cfg.idx = pdcch_slot.slot_idx(); srsran_uci_cfg_nr_t uci_cfg = {}; srsran_assert(phy_cfg->get_uci_cfg(slot_cfg, ack, uci_cfg), "Error getting UCI CFG"); diff --git a/srsenb/test/mac/nr/sched_nr_sim_ue.cc b/srsenb/test/mac/nr/sched_nr_sim_ue.cc index 6c04b4378..67ab553b6 100644 --- a/srsenb/test/mac/nr/sched_nr_sim_ue.cc +++ b/srsenb/test/mac/nr/sched_nr_sim_ue.cc @@ -18,14 +18,14 @@ namespace srsenb { sched_nr_ue_sim::sched_nr_ue_sim(uint16_t rnti_, const sched_nr_interface::ue_cfg_t& ue_cfg_, - tti_point prach_tti_rx, + slot_point prach_slot_rx, uint32_t preamble_idx) : logger(srslog::fetch_basic_logger("MAC")) { - ctxt.rnti = rnti_; - ctxt.prach_tti_rx = prach_tti_rx; - ctxt.preamble_idx = preamble_idx; - ctxt.ue_cfg = ue_cfg_; + ctxt.rnti = rnti_; + ctxt.prach_slot_rx = prach_slot_rx; + ctxt.preamble_idx = preamble_idx; + ctxt.ue_cfg = ue_cfg_; ctxt.cc_list.resize(ue_cfg_.carriers.size()); for (auto& cc : ctxt.cc_list) { @@ -45,16 +45,16 @@ int sched_nr_ue_sim::update(const sched_nr_cc_output_res_t& cc_out) if (data.dci.ctx.rnti != ctxt.rnti) { continue; } - tti_point pdcch_tti = cc_out.tti; - uint32_t k1 = ctxt.ue_cfg.phy_cfg.harq_ack - .dl_data_to_ul_ack[pdcch_tti.sf_idx() % ctxt.ue_cfg.phy_cfg.harq_ack.nof_dl_data_to_ul_ack]; - tti_point uci_tti = pdcch_tti + k1; + slot_point pdcch_slot = cc_out.slot; + uint32_t k1 = ctxt.ue_cfg.phy_cfg.harq_ack + .dl_data_to_ul_ack[pdcch_slot.slot_idx() % ctxt.ue_cfg.phy_cfg.harq_ack.nof_dl_data_to_ul_ack]; + slot_point uci_slot = pdcch_slot + k1; - ctxt.cc_list[cc_out.cc].pending_acks[uci_tti.to_uint()]++; + ctxt.cc_list[cc_out.cc].pending_acks[uci_slot.to_uint()]++; } // clear up old slots - ctxt.cc_list[cc_out.cc].pending_acks[(cc_out.tti - 1).to_uint()] = 0; + ctxt.cc_list[cc_out.cc].pending_acks[(cc_out.slot - 1).to_uint()] = 0; return SRSRAN_SUCCESS; } @@ -70,21 +70,21 @@ void sched_nr_ue_sim::update_dl_harqs(const sched_nr_cc_output_res_t& cc_out) auto& h = ctxt.cc_list[cc].dl_harqs[data.dci.pid]; if (h.nof_txs == 0 or h.ndi != data.dci.ndi) { // It is newtx - h.nof_retxs = 0; - h.ndi = data.dci.ndi; - h.first_tti_tx = cc_out.tti; - h.dci_loc = data.dci.ctx.location; - h.tbs = 100; // TODO + h.nof_retxs = 0; + h.ndi = data.dci.ndi; + h.first_slot_tx = cc_out.slot; + h.dci_loc = data.dci.ctx.location; + h.tbs = 100; // TODO } else { // it is retx h.nof_retxs++; } - h.active = true; - h.last_tti_tx = cc_out.tti; - h.last_tti_ack = - h.last_tti_tx + + h.active = true; + h.last_slot_tx = cc_out.slot; + h.last_slot_ack = + h.last_slot_tx + ctxt.ue_cfg.phy_cfg.harq_ack - 
.dl_data_to_ul_ack[h.last_tti_tx.sf_idx() % ctxt.ue_cfg.phy_cfg.harq_ack.nof_dl_data_to_ul_ack]; + .dl_data_to_ul_ack[h.last_slot_tx.slot_idx() % ctxt.ue_cfg.phy_cfg.harq_ack.nof_dl_data_to_ul_ack]; h.nof_txs++; } } @@ -117,26 +117,26 @@ int sched_nr_sim_base::add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_ TESTASSERT(ue_db.count(rnti) == 0); sched_ptr->ue_cfg(rnti, ue_cfg_); - ue_db.insert(std::make_pair(rnti, sched_nr_ue_sim(rnti, ue_cfg_, current_tti_tx, preamble_idx))); + ue_db.insert(std::make_pair(rnti, sched_nr_ue_sim(rnti, ue_cfg_, current_slot_tx, preamble_idx))); return SRSRAN_SUCCESS; } -void sched_nr_sim_base::new_slot(srsran::tti_point tti_tx) +void sched_nr_sim_base::new_slot(slot_point slot_tx) { std::unique_lock lock(mutex); while (cc_finished > 0) { cvar.wait(lock); } - logger.set_context(tti_tx.to_uint()); - mac_logger.set_context(tti_tx.to_uint()); - logger.info("---------------- TTI=%d ---------------", tti_tx.to_uint()); - current_tti_tx = tti_tx; - cc_finished = cell_params.size(); + logger.set_context(slot_tx.to_uint()); + mac_logger.set_context(slot_tx.to_uint()); + logger.info("---------------- TTI=%d ---------------", slot_tx.to_uint()); + current_slot_tx = slot_tx; + cc_finished = cell_params.size(); for (auto& ue : ue_db) { - ue_nr_tti_events events; - set_default_tti_events(ue.second.get_ctxt(), events); - set_external_tti_events(ue.second.get_ctxt(), events); - apply_tti_events(ue.second.get_ctxt(), events); + ue_nr_slot_events events; + set_default_slot_events(ue.second.get_ctxt(), events); + set_external_slot_events(ue.second.get_ctxt(), events); + apply_slot_events(ue.second.get_ctxt(), events); } } @@ -157,11 +157,11 @@ void sched_nr_sim_base::update(sched_nr_cc_output_res_t& cc_out) } } -int sched_nr_sim_base::set_default_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_nr_tti_events& pending_events) +int sched_nr_sim_base::set_default_slot_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_nr_slot_events& pending_events) { pending_events.cc_list.clear(); pending_events.cc_list.resize(cell_params.size()); - pending_events.tti_rx = current_tti_tx; + pending_events.slot_rx = current_slot_tx; for (uint32_t enb_cc_idx = 0; enb_cc_idx < pending_events.cc_list.size(); ++enb_cc_idx) { auto& cc_feedback = pending_events.cc_list[enb_cc_idx]; @@ -172,13 +172,13 @@ int sched_nr_sim_base::set_default_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, u auto& ul_h = ue_ctxt.cc_list[enb_cc_idx].ul_harqs[pid]; // Set default DL ACK - if (dl_h.active and (dl_h.last_tti_ack) == current_tti_tx) { - cc_feedback.dl_acks.push_back(ue_nr_tti_events::ack_t{pid, true}); + if (dl_h.active and (dl_h.last_slot_ack) == current_slot_tx) { + cc_feedback.dl_acks.push_back(ue_nr_slot_events::ack_t{pid, true}); } // Set default UL ACK - if (ul_h.active and (ul_h.last_tti_tx + 8) == current_tti_tx) { - cc_feedback.ul_acks.emplace_back(ue_nr_tti_events::ack_t{pid, true}); + if (ul_h.active and (ul_h.last_slot_tx + 8) == current_slot_tx) { + cc_feedback.ul_acks.emplace_back(ue_nr_slot_events::ack_t{pid, true}); } // TODO: other CSI @@ -188,7 +188,7 @@ int sched_nr_sim_base::set_default_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, u return SRSRAN_SUCCESS; } -int sched_nr_sim_base::apply_tti_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_nr_tti_events& events) +int sched_nr_sim_base::apply_slot_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_nr_slot_events& events) { for (uint32_t enb_cc_idx = 0; enb_cc_idx < events.cc_list.size(); ++enb_cc_idx) { const auto& cc_feedback = events.cc_list[enb_cc_idx]; @@ 
-201,7 +201,7 @@ int sched_nr_sim_base::apply_tti_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_nr_t if (ack.ack) { logger.info( - "DL ACK rnti=0x%x tti_dl_tx=%u cc=%d pid=%d", ue_ctxt.rnti, h.last_tti_tx.to_uint(), enb_cc_idx, ack.pid); + "DL ACK rnti=0x%x slot_dl_tx=%u cc=%d pid=%d", ue_ctxt.rnti, h.last_slot_tx.to_uint(), enb_cc_idx, ack.pid); } // update scheduler @@ -218,11 +218,11 @@ int sched_nr_sim_base::apply_tti_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_nr_t if (ack.ack) { logger.info( - "UL ACK rnti=0x%x, tti_ul_tx=%u, cc=%d pid=%d", ue_ctxt.rnti, h.last_tti_tx.to_uint(), enb_cc_idx, h.pid); + "UL ACK rnti=0x%x, slot_ul_tx=%u, cc=%d pid=%d", ue_ctxt.rnti, h.last_slot_tx.to_uint(), enb_cc_idx, h.pid); } // // update scheduler - // if (sched_ptr->ul_crc_info(events.tti_rx.to_uint(), ue_ctxt.rnti, enb_cc_idx, cc_feedback.ul_ack) < 0) { + // if (sched_ptr->ul_crc_info(events.slot_rx.to_uint(), ue_ctxt.rnti, enb_cc_idx, cc_feedback.ul_ack) < 0) { // logger.error("The ACKed UL Harq pid=%d does not exist.", cc_feedback.ul_pid); // error_counter++; // } diff --git a/srsenb/test/mac/nr/sched_nr_sim_ue.h b/srsenb/test/mac/nr/sched_nr_sim_ue.h index 6c78a9188..4bb54c437 100644 --- a/srsenb/test/mac/nr/sched_nr_sim_ue.h +++ b/srsenb/test/mac/nr/sched_nr_sim_ue.h @@ -31,10 +31,10 @@ struct ue_nr_harq_ctxt_t { uint32_t riv = 0; srsran_dci_location_t dci_loc = {}; uint32_t tbs = 0; - tti_point last_tti_tx, first_tti_tx, last_tti_ack; + slot_point last_slot_tx, first_slot_tx, last_slot_ack; }; struct sched_nr_cc_output_res_t { - tti_point tti; + slot_point slot; uint32_t cc; const sched_nr_interface::dl_sched_t* dl_cc_result; const sched_nr_interface::ul_sched_t* ul_cc_result; @@ -46,7 +46,7 @@ struct ue_nr_cc_ctxt_t { srsran::circular_array pending_acks; }; -struct ue_nr_tti_events { +struct ue_nr_slot_events { struct ack_t { uint32_t pid; bool ack; @@ -56,14 +56,14 @@ struct ue_nr_tti_events { srsran::bounded_vector dl_acks; srsran::bounded_vector ul_acks; }; - srsran::tti_point tti_rx; + slot_point slot_rx; std::vector cc_list; }; struct sim_nr_ue_ctxt_t { uint16_t rnti; uint32_t preamble_idx; - srsran::tti_point prach_tti_rx; + slot_point prach_slot_rx; sched_nr_interface::ue_cfg_t ue_cfg; std::vector cc_list; @@ -83,7 +83,7 @@ class sched_nr_ue_sim public: sched_nr_ue_sim(uint16_t rnti_, const sched_nr_interface::ue_cfg_t& ue_cfg_, - tti_point prach_tti_rx, + slot_point prach_slot_rx, uint32_t preamble_idx); int update(const sched_nr_cc_output_res_t& cc_out); @@ -108,7 +108,7 @@ public: int add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_t& ue_cfg_, uint32_t preamble_idx); - void new_slot(srsran::tti_point tti_tx); + void new_slot(slot_point slot_tx); void update(sched_nr_cc_output_res_t& cc_out); sched_nr_ue_sim& at(uint16_t rnti) { return ue_db.at(rnti); } @@ -131,10 +131,10 @@ public: } sched_nr* get_sched() { return sched_ptr.get(); } srsran::const_span get_cell_params() { return cell_params; } - tti_point get_tti_rx() const + slot_point get_slot_rx() const { std::lock_guard lock(mutex); - return current_tti_tx; + return current_slot_tx; } sim_nr_enb_ctxt_t get_enb_ctxt() const; @@ -143,11 +143,11 @@ public: std::map::iterator end() { return ue_db.end(); } // configurable by simulator concrete implementation - virtual void set_external_tti_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_nr_tti_events& pending_events) {} + virtual void set_external_slot_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_nr_slot_events& pending_events) {} private: - int set_default_tti_events(const 
sim_nr_ue_ctxt_t& ue_ctxt, ue_nr_tti_events& pending_events); - int apply_tti_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_nr_tti_events& events); + int set_default_slot_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_nr_slot_events& pending_events); + int apply_slot_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_nr_slot_events& events); std::string test_name; srslog::basic_logger& logger; @@ -155,8 +155,8 @@ private: std::unique_ptr sched_ptr; std::vector cell_params; - srsran::tti_point current_tti_tx; - int cc_finished = 0; + slot_point current_slot_tx; + int cc_finished = 0; std::map ue_db; diff --git a/srsenb/test/mac/nr/sched_nr_test.cc b/srsenb/test/mac/nr/sched_nr_test.cc index 2704ad637..885044b1b 100644 --- a/srsenb/test/mac/nr/sched_nr_test.cc +++ b/srsenb/test/mac/nr/sched_nr_test.cc @@ -38,22 +38,22 @@ struct task_job_manager { explicit task_job_manager(int max_concurrent_slots = 4) : slot_counter(max_concurrent_slots) {} - void start_slot(tti_point tti, int nof_sectors) + void start_slot(slot_point slot, int nof_sectors) { std::unique_lock lock(mutex); - auto& sl = slot_counter[tti.to_uint() % slot_counter.size()]; + auto& sl = slot_counter[slot.to_uint() % slot_counter.size()]; while (sl.count > 0) { sl.cvar.wait(lock); } sl.count = nof_sectors; } - void finish_cc(tti_point tti, const dl_sched_t& dl_res, const sched_nr_interface::ul_sched_t& ul_res) + void finish_cc(slot_point slot, const dl_sched_t& dl_res, const sched_nr_interface::ul_sched_t& ul_res) { std::unique_lock lock(mutex); TESTASSERT(dl_res.pdcch_dl.size() <= 1); res_count++; pdsch_count += dl_res.pdcch_dl.size(); - auto& sl = slot_counter[tti.to_uint() % slot_counter.size()]; + auto& sl = slot_counter[slot.to_uint() % slot_counter.size()]; if (--sl.count == 0) { sl.cvar.notify_one(); } @@ -91,23 +91,23 @@ void sched_nr_cfg_serialized_test() sched_tester.add_user(0x46, uecfg, 0); std::vector count_per_cc(nof_sectors, 0); - for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) { - tti_point tti_rx(nof_ttis % 10240); - tti_point tti_tx = tti_rx + TX_ENB_DELAY; - tasks.start_slot(tti_rx, nof_sectors); - sched_tester.new_slot(tti_tx); + for (uint32_t nof_slots = 0; nof_slots < max_nof_ttis; ++nof_slots) { + slot_point slot_rx(0, nof_slots % 10240); + slot_point slot_tx = slot_rx + TX_ENB_DELAY; + tasks.start_slot(slot_rx, nof_sectors); + sched_tester.new_slot(slot_tx); for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) { sched_nr_interface::dl_sched_t dl_res; sched_nr_interface::ul_sched_t ul_res; auto tp1 = std::chrono::steady_clock::now(); - TESTASSERT(sched_tester.get_sched()->get_dl_sched(tti_tx, cc, dl_res) == SRSRAN_SUCCESS); - TESTASSERT(sched_tester.get_sched()->get_ul_sched(tti_tx, cc, ul_res) == SRSRAN_SUCCESS); + TESTASSERT(sched_tester.get_sched()->get_dl_sched(slot_tx, cc, dl_res) == SRSRAN_SUCCESS); + TESTASSERT(sched_tester.get_sched()->get_ul_sched(slot_tx, cc, ul_res) == SRSRAN_SUCCESS); auto tp2 = std::chrono::steady_clock::now(); count_per_cc[cc] += std::chrono::duration_cast(tp2 - tp1).count(); - sched_nr_cc_output_res_t out{tti_tx, cc, &dl_res, &ul_res}; + sched_nr_cc_output_res_t out{slot_tx, cc, &dl_res, &ul_res}; sched_tester.update(out); - tasks.finish_cc(tti_rx, dl_res, ul_res); - TESTASSERT(not srsran_tdd_nr_is_dl(&cells_cfg[cc].tdd, 0, (tti_tx).sf_idx()) or dl_res.pdcch_dl.size() == 1); + tasks.finish_cc(slot_rx, dl_res, ul_res); + TESTASSERT(not srsran_tdd_nr_is_dl(&cells_cfg[cc].tdd, 0, (slot_tx).slot_idx()) or dl_res.pdcch_dl.size() == 1); } } @@ -139,24 +139,24 @@ void 
sched_nr_cfg_parallel_cc_test() sched_tester.add_user(0x46, uecfg, 0); std::array, SRSRAN_MAX_CARRIERS> nano_count{}; - for (uint32_t nof_ttis = 0; nof_ttis < max_nof_ttis; ++nof_ttis) { - tti_point tti_rx(nof_ttis % 10240); - tti_point tti_tx = tti_rx + TX_ENB_DELAY; - tasks.start_slot(tti_tx, nof_sectors); - sched_tester.new_slot(tti_tx); + for (uint32_t nof_slots = 0; nof_slots < max_nof_ttis; ++nof_slots) { + slot_point slot_rx(0, nof_slots % 10240); + slot_point slot_tx = slot_rx + TX_ENB_DELAY; + tasks.start_slot(slot_tx, nof_sectors); + sched_tester.new_slot(slot_tx); for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) { - srsran::get_background_workers().push_task([cc, tti_tx, &tasks, &sched_tester, &nano_count]() { + srsran::get_background_workers().push_task([cc, slot_tx, &tasks, &sched_tester, &nano_count]() { sched_nr_interface::dl_sched_t dl_res; sched_nr_interface::ul_sched_t ul_res; auto tp1 = std::chrono::steady_clock::now(); - TESTASSERT(sched_tester.get_sched()->get_dl_sched(tti_tx, cc, dl_res) == SRSRAN_SUCCESS); - TESTASSERT(sched_tester.get_sched()->get_ul_sched(tti_tx, cc, ul_res) == SRSRAN_SUCCESS); + TESTASSERT(sched_tester.get_sched()->get_dl_sched(slot_tx, cc, dl_res) == SRSRAN_SUCCESS); + TESTASSERT(sched_tester.get_sched()->get_ul_sched(slot_tx, cc, ul_res) == SRSRAN_SUCCESS); auto tp2 = std::chrono::steady_clock::now(); nano_count[cc].fetch_add(std::chrono::duration_cast(tp2 - tp1).count(), std::memory_order_relaxed); - sched_nr_cc_output_res_t out{tti_tx, cc, &dl_res, &ul_res}; + sched_nr_cc_output_res_t out{slot_tx, cc, &dl_res, &ul_res}; sched_tester.update(out); - tasks.finish_cc(tti_tx, dl_res, ul_res); + tasks.finish_cc(slot_tx, dl_res, ul_res); }); } } diff --git a/srsenb/test/mac/nr/sched_nr_ue_ded_test_suite.cc b/srsenb/test/mac/nr/sched_nr_ue_ded_test_suite.cc index 13eba2fb7..d5f115a56 100644 --- a/srsenb/test/mac/nr/sched_nr_ue_ded_test_suite.cc +++ b/srsenb/test/mac/nr/sched_nr_ue_ded_test_suite.cc @@ -20,9 +20,9 @@ using namespace srsenb::sched_nr_impl; void test_dl_sched_result(const sim_nr_enb_ctxt_t& enb_ctxt, const sched_nr_cc_output_res_t& cc_out) { - tti_point pdcch_tti = cc_out.tti; - const pdcch_dl_list_t& pdcchs = cc_out.dl_cc_result->pdcch_dl; - const pdsch_list_t& pdschs = cc_out.dl_cc_result->pdsch; + slot_point pdcch_slot = cc_out.slot; + const pdcch_dl_list_t& pdcchs = cc_out.dl_cc_result->pdcch_dl; + const pdsch_list_t& pdschs = cc_out.dl_cc_result->pdsch; // Iterate over UE PDCCH allocations for (const pdcch_dl_t& pdcch : pdcchs) { @@ -31,7 +31,7 @@ void test_dl_sched_result(const sim_nr_enb_ctxt_t& enb_ctxt, const sched_nr_cc_o } const sim_nr_ue_ctxt_t& ue = *enb_ctxt.ue_db.at(pdcch.dci.ctx.rnti); uint32_t k1 = ue.ue_cfg.phy_cfg.harq_ack - .dl_data_to_ul_ack[pdcch_tti.sf_idx() % ue.ue_cfg.phy_cfg.harq_ack.nof_dl_data_to_ul_ack]; + .dl_data_to_ul_ack[pdcch_slot.slot_idx() % ue.ue_cfg.phy_cfg.harq_ack.nof_dl_data_to_ul_ack]; // CHECK: Carrier activation TESTASSERT(ue.ue_cfg.carriers[cc_out.cc].active); @@ -46,9 +46,9 @@ void test_dl_sched_result(const sim_nr_enb_ctxt_t& enb_ctxt, const sched_nr_cc_o if (pdcch.dci.ctx.format == srsran_dci_format_nr_1_0) { TESTASSERT(pdcch.dci.harq_feedback == k1 - 1); } else { - TESTASSERT(pdcch.dci.harq_feedback == pdcch_tti.sf_idx()); + TESTASSERT(pdcch.dci.harq_feedback == pdcch_slot.slot_idx()); } - TESTASSERT(ue.cc_list[cc_out.cc].pending_acks[(pdcch_tti + k1).to_uint()] % 4 == pdcch.dci.dai); + TESTASSERT(ue.cc_list[cc_out.cc].pending_acks[(pdcch_slot + k1).to_uint()] % 4 == pdcch.dci.dai); } 
   for (const pdsch_t& pdsch : pdschs) {
diff --git a/test/phy/dummy_gnb_stack.h b/test/phy/dummy_gnb_stack.h
index 9ddbaee58..7ed5d96cf 100644
--- a/test/phy/dummy_gnb_stack.h
+++ b/test/phy/dummy_gnb_stack.h
@@ -28,6 +28,8 @@
 class gnb_dummy_stack : public srsenb::stack_interface_phy_nr
 {
+  const static uint32_t NUMEROLOGY_IDX = 0;
+
 public:
   struct prach_metrics_t {
     uint32_t count;
@@ -72,7 +74,7 @@ private:
   bool valid = false;

   srsenb::sched_nr sched;
-  srsran::tti_point pdsch_tti, pusch_tti;
+  srsran::slot_point pdsch_slot, pusch_slot;
   srslog::basic_logger& sched_logger;

   std::mutex metrics_mutex;
@@ -406,14 +408,14 @@ public:
   {
     logger.set_context(slot_cfg.idx);
     sched_logger.set_context(slot_cfg.idx);
-    if (not pdsch_tti.is_valid()) {
-      pdsch_tti = srsran::tti_point{slot_cfg.idx};
+    if (not pdsch_slot.valid()) {
+      pdsch_slot = srsran::slot_point{NUMEROLOGY_IDX, slot_cfg.idx};
     } else {
-      pdsch_tti++;
+      pdsch_slot++;
     }

     if (not use_dummy_sched) {
-      int ret = sched.get_dl_sched(pdsch_tti, 0, dl_sched);
+      int ret = sched.get_dl_sched(pdsch_slot, 0, dl_sched);

       for (pdsch_t& pdsch : dl_sched.pdsch) {
         // Set TBS
@@ -452,14 +454,14 @@
   {
     logger.set_context(slot_cfg.idx);
     sched_logger.set_context(slot_cfg.idx);
-    if (not pusch_tti.is_valid()) {
-      pusch_tti = srsran::tti_point{slot_cfg.idx};
+    if (not pusch_slot.valid()) {
+      pusch_slot = srsran::slot_point{NUMEROLOGY_IDX, slot_cfg.idx};
     } else {
-      pusch_tti++;
+      pusch_slot++;
     }

     if (not use_dummy_sched) {
-      int ret = sched.get_ul_sched(pusch_tti, 0, ul_sched);
+      int ret = sched.get_ul_sched(pusch_slot, 0, ul_sched);

       for (pusch_t& pusch : ul_sched.pusch) {
         pusch.data[0] = rx_harq_proc[pusch.pid].get_tb(pusch.sch.grant.tb[0].tbs).data();
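
Editor's note (illustrative, not part of the patch): the slot_point type that replaces tti_point above carries the numerology alongside the slot counter, so a default-constructed value is invalid until assigned, and sf_idx() becomes subframe_idx(). A minimal usage sketch, assuming numerology 0 (15 kHz SCS, one slot per subframe, ten slots per frame) and only the constructors and accessors shown in lib/include/srsran/common/slot_point.h; the function name is hypothetical:

#include "srsran/common/slot_point.h"

void slot_point_usage_sketch()
{
  srsran::slot_point none;                           // default-constructed -> none.valid() == false
  srsran::slot_point slot{0, /*sfn=*/1, /*slot=*/5}; // same values as the unit test above
  // For numerology 0: slot.sfn() == 1, slot.slot_idx() == 5,
  // slot.subframe_idx() == 5 and slot.to_uint() == 15.

  srsran::slot_point same{0, /*count=*/15}; // new (numerology, count) constructor, same slot as above
  srsran::slot_point tx = slot + 4;         // slot arithmetic, as used for slot_tx = slot_rx + TX_ENB_DELAY
  (void)none;
  (void)same;
  (void)tx;
}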
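
A second sketch, mirroring how the dummy gNB stack above now drives the scheduler with slot_point: the PHY slot index anchors the counter on the first call and the counter is simply incremented on every following slot. The free function and its parameter list are hypothetical; sched_nr, sched_nr_interface::dl_sched_t and srsran_slot_cfg_t are the types used in the diff:

void run_dl_slot_sketch(srsenb::sched_nr&                      sched,
                        const srsran_slot_cfg_t&               slot_cfg,
                        srsran::slot_point&                    pdsch_slot,
                        srsenb::sched_nr_interface::dl_sched_t& dl_sched)
{
  if (not pdsch_slot.valid()) {
    pdsch_slot = srsran::slot_point{0, slot_cfg.idx}; // first call: anchor to the PHY slot index (numerology 0)
  } else {
    pdsch_slot++; // later calls: advance one slot per call, wrapping automatically
  }
  sched.get_dl_sched(pdsch_slot, /*cc=*/0, dl_sched); // request the DL decision for this {slot, cc}
}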