diff --git a/lib/include/srslte/adt/interval.h b/lib/include/srslte/adt/interval.h
index 2cb4d392d..20c9cfaa1 100644
--- a/lib/include/srslte/adt/interval.h
+++ b/lib/include/srslte/adt/interval.h
@@ -14,6 +14,7 @@
 #define SRSLTE_INTERVAL_H
 
 #include "adt_utils.h"
+#include "srslte/srslog/bundled/fmt/format.h"
 #include
 #include
 #include
@@ -76,11 +77,7 @@ public:
   bool contains(T point) const { return start_ <= point and point < stop_; }
 
-  std::string to_string() const
-  {
-    std::string s = "[" + std::to_string(start_) + "," + std::to_string(stop_) + ")";
-    return s;
-  }
+  std::string to_string() const { return fmt::format("[{},{})", start_, stop_); }
 
 private:
   T start_;
diff --git a/lib/include/srslte/common/tti_point.h b/lib/include/srslte/common/tti_point.h
index f1675a4c3..59fa3df29 100644
--- a/lib/include/srslte/common/tti_point.h
+++ b/lib/include/srslte/common/tti_point.h
@@ -113,8 +113,20 @@ using tti_interval = srslte::interval<tti_point>;
 
 } // namespace srslte
 
+template <>
+struct fmt::formatter<srslte::tti_point> : public fmt::formatter<uint32_t> {
+  // parse is inherited from formatter.
+  template <typename FormatContext>
+  auto format(srslte::tti_point tti, FormatContext& ctx) -> decltype(format_to(ctx.out(), ""))
+  {
+    return format_to(ctx.out(), "{}", tti.to_uint());
+  }
+};
+
 namespace srsenb {
 
+using tti_point = srslte::tti_point;
+
 inline srslte::tti_point to_tx_dl(srslte::tti_point t)
 {
   return t + TX_ENB_DELAY;
diff --git a/srsenb/hdr/stack/enb_stack_lte.h b/srsenb/hdr/stack/enb_stack_lte.h
index 2d41a356b..719b835b7 100644
--- a/srsenb/hdr/stack/enb_stack_lte.h
+++ b/srsenb/hdr/stack/enb_stack_lte.h
@@ -68,9 +68,9 @@ public:
   {
     return mac.cqi_info(tti, rnti, cc_idx, cqi_value);
   }
-  int snr_info(uint32_t tti, uint16_t rnti, uint32_t cc_idx, float snr_db) final
+  int snr_info(uint32_t tti_rx, uint16_t rnti, uint32_t cc_idx, float snr_db) final
   {
-    return mac.snr_info(tti, rnti, cc_idx, snr_db);
+    return mac.snr_info(tti_rx, rnti, cc_idx, snr_db);
   }
   int ta_info(uint32_t tti, uint16_t rnti, float ta_us) override { return mac.ta_info(tti, rnti, ta_us); }
   int ack_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack) final
diff --git a/srsenb/hdr/stack/mac/sched_carrier.h b/srsenb/hdr/stack/mac/sched_carrier.h
index 928e6416c..8f36872d7 100644
--- a/srsenb/hdr/stack/mac/sched_carrier.h
+++ b/srsenb/hdr/stack/mac/sched_carrier.h
@@ -38,7 +38,7 @@ public:
   // getters
   const ra_sched* get_ra_sched() const { return ra_sched_ptr.get(); }
   //! Get a subframe result for a given tti
-  const sf_sched_result* get_sf_result(uint32_t tti_rx) const;
+  const sf_sched_result* get_sf_result(tti_point tti_rx) const;
 
 private:
   //! Compute DL scheduler result for given TTI
@@ -78,9 +78,9 @@ public:
 private:
   struct sched_sib_t {
-    bool     is_in_window = false;
-    uint32_t window_start = 0;
-    uint32_t n_tx         = 0;
+    bool      is_in_window = false;
+    tti_point window_start{};
+    uint32_t  n_tx = 0;
   };
 
   void update_si_windows(sf_sched* tti_sched);
@@ -94,8 +94,8 @@ private:
   std::array pending_sibs;
 
   // TTI specific
-  uint32_t current_tti   = 0;
-  uint32_t bc_aggr_level = 2;
+  tti_point current_tti{};
+  uint32_t  bc_aggr_level = 2;
 };
 
 //! RAR/Msg3 scheduler
diff --git a/srsenb/hdr/stack/mac/sched_common.h b/srsenb/hdr/stack/mac/sched_common.h
index 5098a674a..82e2537e2 100644
--- a/srsenb/hdr/stack/mac/sched_common.h
+++ b/srsenb/hdr/stack/mac/sched_common.h
@@ -35,17 +35,6 @@ struct sched_dci_cce_t {
   uint32_t nof_loc[4]; ///< Number of possible CCE locations for each aggregation level index
 };
 
-//! Params relative to a single TTI
-struct tti_params_t {
-  uint32_t tti_rx;
-  uint32_t tti_tx_dl;
-  uint32_t tti_tx_ul;
-  uint32_t sf_idx_tx_dl;
-  uint32_t sfn_tx_dl;
-  explicit tti_params_t(uint32_t tti_rx_);
-  uint32_t tti_rx_ack_dl() const { return tti_tx_ul; }
-};
-
 //! structs to bundle together all the sched arguments, and share them with all the sched sub-components
 class sched_cell_params_t
 {
diff --git a/srsenb/hdr/stack/mac/sched_grid.h b/srsenb/hdr/stack/mac/sched_grid.h
index c5b9a34dc..a627778fd 100644
--- a/srsenb/hdr/stack/mac/sched_grid.h
+++ b/srsenb/hdr/stack/mac/sched_grid.h
@@ -38,14 +38,14 @@ struct alloc_outcome_t {
 
 //! Result of a Subframe sched computation
 struct cc_sched_result {
-  tti_params_t                    tti_params{10241};
+  tti_point                       tti_rx;
   sched_interface::dl_sched_res_t dl_sched_result = {};
   sched_interface::ul_sched_res_t ul_sched_result = {};
   rbgmask_t                       dl_mask         = {}; ///< Accumulation of all DL RBG allocations
   prbmask_t                       ul_mask         = {}; ///< Accumulation of all UL PRB allocations
   pdcch_mask_t                    pdcch_mask      = {}; ///< Accumulation of all CCE allocations
 
-  bool is_generated(srslte::tti_point tti_rx) const { return srslte::tti_point{tti_params.tti_rx} == tti_rx; }
+  bool is_generated(tti_point tti_rx_) const { return tti_rx == tti_rx_; }
 };
 
 struct sf_sched_result {
@@ -91,7 +91,7 @@ public:
   using alloc_result_t = std::vector;
 
   void init(const sched_cell_params_t& cell_params_);
-  void new_tti(const tti_params_t& tti_params_);
+  void new_tti(tti_point tti_rx_);
   bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr);
   bool set_cfi(uint32_t cfi);
@@ -134,14 +134,14 @@ private:
                             int                   node_idx,
                             const alloc_record_t& dci_record,
                             const sched_dci_cce_t& dci_locs,
-                            uint32_t              tti_tx_dl);
+                            tti_point             tti_tx_dl);
 
   // consts
   const sched_cell_params_t* cc_cfg = nullptr;
   srslte::log_ref            log_h;
   // tti vars
-  const tti_params_t* tti_params = nullptr;
+  tti_point    tti_rx;
   uint32_t     current_cfix = 0;
   std::vector  alloc_trees;     ///< List of PDCCH alloc trees, where index is the cfi index
   std::vector  dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far
@@ -157,7 +157,7 @@ public:
   };
 
   void init(const sched_cell_params_t& cell_params_);
-  void new_tti(const tti_params_t& tti_params_);
+  void new_tti(tti_point tti_rx);
   dl_ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type);
   alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask);
   bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg);
@@ -180,12 +180,11 @@ private:
   uint32_t nof_rbgs = 0;
   uint32_t si_n_rbg = 0, rar_n_rbg = 0;
 
-  // tti const
-  const tti_params_t* tti_params = nullptr;
   // derived
   pdcch_grid_t pdcch_alloc = {};
 
   // internal state
+  tti_point tti_rx;
   uint32_t  avail_rbg = 0;
   rbgmask_t dl_mask   = {};
   prbmask_t ul_mask   = {};
@@ -241,8 +240,8 @@ public:
     uint32_t mcs = 0;
   };
   struct pending_rar_t {
-    uint16_t ra_rnti   = 0;
-    uint32_t prach_tti = 0;
+    uint16_t  ra_rnti = 0;
+    tti_point prach_tti{};
     uint32_t nof_grants = 0;
     sched_interface::dl_sched_rar_info_t msg3_grant[sched_interface::MAX_RAR_LIST] = {};
   };
@@ -270,19 +269,18 @@ public:
   void generate_sched_results(sched_ue_list& ue_db);
 
   alloc_outcome_t alloc_dl_user(sched_ue* user, const rbgmask_t& user_mask, uint32_t pid);
-  uint32_t        get_tti_tx_dl() const { return tti_params.tti_tx_dl; }
+  tti_point       get_tti_tx_dl() const { return to_tx_dl(tti_rx); }
   uint32_t        get_nof_ctrl_symbols() const;
   const rbgmask_t& get_dl_mask() const { return tti_alloc.get_dl_mask(); }
   alloc_outcome_t alloc_ul_user(sched_ue* user,
prb_interval alloc); const prbmask_t& get_ul_mask() const { return tti_alloc.get_ul_mask(); } - uint32_t get_tti_tx_ul() const { return tti_params.tti_tx_ul; } + tti_point get_tti_tx_ul() const { return to_tx_ul(tti_rx); } // getters - uint32_t get_tti_rx() const { return tti_params.tti_rx; } - const tti_params_t& get_tti_params() const { return tti_params; } - bool is_dl_alloc(uint16_t rnti) const; - bool is_ul_alloc(uint16_t rnti) const; - uint32_t get_enb_cc_idx() const { return cc_cfg->enb_cc_idx; } + tti_point get_tti_rx() const { return tti_rx; } + bool is_dl_alloc(uint16_t rnti) const; + bool is_ul_alloc(uint16_t rnti) const; + uint32_t get_enb_cc_idx() const { return cc_cfg->enb_cc_idx; } private: ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti); @@ -311,7 +309,7 @@ private: uint32_t last_msg3_prb = 0, max_msg3_prb = 0; // Next TTI state - tti_params_t tti_params{10241}; + tti_point tti_rx; }; } // namespace srsenb diff --git a/srsenb/hdr/stack/mac/sched_harq.h b/srsenb/hdr/stack/mac/sched_harq.h index ce9782118..6a4337320 100644 --- a/srsenb/hdr/stack/mac/sched_harq.h +++ b/srsenb/hdr/stack/mac/sched_harq.h @@ -62,17 +62,17 @@ class dl_harq_proc : public harq_proc { public: dl_harq_proc(); - void new_tx(const rbgmask_t& new_mask, - uint32_t tb_idx, - uint32_t tti, - int mcs, - int tbs, - uint32_t n_cce_, - uint32_t max_retx); - void new_retx(const rbgmask_t& new_mask, uint32_t tb_idx, uint32_t tti_, int* mcs, int* tbs, uint32_t n_cce_); - int set_ack(uint32_t tb_idx, bool ack); + void new_tx(const rbgmask_t& new_mask, + uint32_t tb_idx, + tti_point tti_tx_dl, + int mcs, + int tbs, + uint32_t n_cce_, + uint32_t max_retx); + void new_retx(const rbgmask_t& new_mask, uint32_t tb_idx, tti_point tti_tx_dl, int* mcs, int* tbs, uint32_t n_cce_); + int set_ack(uint32_t tb_idx, bool ack); rbgmask_t get_rbgmask() const; - bool has_pending_retx(uint32_t tb_idx, uint32_t tti) const; + bool has_pending_retx(uint32_t tb_idx, tti_point tti_tx_dl) const; int get_tbs(uint32_t tb_idx) const; uint32_t get_n_cce() const; void reset_pending_data(); @@ -123,13 +123,13 @@ public: * @param tti_tx_dl assumed to always be equal or ahead in time in comparison to current harqs * @return pointer to found dl_harq */ - dl_harq_proc* get_pending_dl_harq(uint32_t tti_tx_dl); + dl_harq_proc* get_pending_dl_harq(tti_point tti_tx_dl); /** * Get empty DL Harq * @param tti_tx_dl only used in case of sync dl sched * @return pointer to found dl_harq */ - dl_harq_proc* get_empty_dl_harq(uint32_t tti_tx_dl); + dl_harq_proc* get_empty_dl_harq(tti_point tti_tx_dl); /** * Set ACK state for DL Harq Proc @@ -138,10 +138,10 @@ public: * @param ack true for ACK and false for NACK * @return pair with pid and size of TB of the DL harq that was ACKed */ - std::pair set_ack_info(uint32_t tti_rx, uint32_t tb_idx, bool ack); + std::pair set_ack_info(tti_point tti_rx, uint32_t tb_idx, bool ack); //! 
Get UL Harq for a given tti_tx_ul - ul_harq_proc* get_ul_harq(uint32_t tti_tx_ul); + ul_harq_proc* get_ul_harq(tti_point tti_tx_ul); /** * Set ACK state for UL Harq Proc @@ -152,7 +152,7 @@ public: void reset_pending_data(srslte::tti_point tti_rx); private: - dl_harq_proc* get_oldest_dl_harq(uint32_t tti_tx_dl); + dl_harq_proc* get_oldest_dl_harq(tti_point tti_tx_dl); srslte::log_ref log_h; diff --git a/srsenb/hdr/stack/mac/sched_ue.h b/srsenb/hdr/stack/mac/sched_ue.h index 6b4aeac8a..fed1f6b93 100644 --- a/srsenb/hdr/stack/mac/sched_ue.h +++ b/srsenb/hdr/stack/mac/sched_ue.h @@ -48,21 +48,21 @@ struct cc_sched_ue { uint32_t get_required_prb_ul(uint32_t req_bytes); const sched_cell_params_t* get_cell_cfg() const { return cell_params; } uint32_t get_ue_cc_idx() const { return ue_cc_idx; } - void set_dl_cqi(uint32_t tti_tx_dl, uint32_t dl_cqi); + void set_dl_cqi(tti_point tti_rx, uint32_t dl_cqi); int cqi_to_tbs(uint32_t nof_prb, uint32_t nof_re, bool is_ul, uint32_t* mcs); cc_st cc_state() const { return cc_state_; } harq_entity harq_ent; - uint32_t dl_ri = 0; - uint32_t dl_ri_tti = 0; - uint32_t dl_pmi = 0; - uint32_t dl_pmi_tti = 0; - uint32_t dl_cqi = 1; - uint32_t dl_cqi_tti = 0; - uint32_t ul_cqi = 1; - uint32_t ul_cqi_tti = 0; - bool dl_cqi_rx = false; + uint32_t dl_ri = 0; + tti_point dl_ri_tti_rx{}; + uint32_t dl_pmi = 0; + tti_point dl_pmi_tti_rx{}; + uint32_t dl_cqi = 1; + tti_point dl_cqi_tti_rx{0}; + uint32_t ul_cqi = 1; + tti_point ul_cqi_tti_rx{}; + bool dl_cqi_rx = false; uint32_t max_mcs_dl = 28, max_mcs_ul = 28; uint32_t max_aggr_level = 3; @@ -92,21 +92,25 @@ const char* to_string(sched_interface::ue_bearer_cfg_t::direction_t dir); */ class sched_ue { + using ue_cfg_t = sched_interface::ue_cfg_t; + using bearer_cfg_t = sched_interface::ue_bearer_cfg_t; + public: sched_ue(); void reset(); void init(uint16_t rnti, const std::vector& cell_list_params_); - void new_tti(srslte::tti_point new_tti); + void new_subframe(tti_point tti_rx, uint32_t enb_cc_idx); /************************************************************* * * FAPI-like Interface * ************************************************************/ - void phy_config_enabled(uint32_t tti, bool enabled); - void set_cfg(const sched_interface::ue_cfg_t& cfg); - void set_bearer_cfg(uint32_t lc_id, srsenb::sched_interface::ue_bearer_cfg_t* cfg); + void phy_config_enabled(tti_point tti_rx, bool enabled); + void set_cfg(const ue_cfg_t& cfg); + + void set_bearer_cfg(uint32_t lc_id, const bearer_cfg_t& cfg); void rem_bearer(uint32_t lc_id); void dl_buffer_state(uint8_t lc_id, uint32_t tx_queue, uint32_t retx_queue); @@ -114,12 +118,12 @@ public: void ul_phr(int phr); void mac_buffer_state(uint32_t ce_code, uint32_t nof_cmds); - void set_ul_cqi(uint32_t tti, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code); - void set_dl_ri(uint32_t tti, uint32_t enb_cc_idx, uint32_t ri); - void set_dl_pmi(uint32_t tti, uint32_t enb_cc_idx, uint32_t ri); - void set_dl_cqi(uint32_t tti, uint32_t enb_cc_idx, uint32_t cqi); - int set_ack_info(uint32_t tti, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack); - void set_ul_crc(srslte::tti_point tti_rx, uint32_t enb_cc_idx, bool crc_res); + void set_ul_cqi(tti_point tti_rx, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code); + void set_dl_ri(tti_point tti_rx, uint32_t enb_cc_idx, uint32_t ri); + void set_dl_pmi(tti_point tti_rx, uint32_t enb_cc_idx, uint32_t ri); + void set_dl_cqi(tti_point tti_rx, uint32_t enb_cc_idx, uint32_t cqi); + int set_ack_info(tti_point tti_rx, uint32_t enb_cc_idx, 
uint32_t tb_idx, bool ack); + void set_ul_crc(tti_point tti_rx, uint32_t enb_cc_idx, bool crc_res); /******************************************************* * Custom functions @@ -128,12 +132,12 @@ public: void tpc_inc(); void tpc_dec(); - const dl_harq_proc& get_dl_harq(uint32_t idx, uint32_t cc_idx) const; - uint16_t get_rnti() const { return rnti; } - std::pair get_active_cell_index(uint32_t enb_cc_idx) const; - const sched_interface::ue_cfg_t& get_ue_cfg() const { return cfg; } - uint32_t get_aggr_level(uint32_t ue_cc_idx, uint32_t nof_bits); - void ul_buffer_add(uint8_t lcid, uint32_t bytes); + const dl_harq_proc& get_dl_harq(uint32_t idx, uint32_t cc_idx) const; + uint16_t get_rnti() const { return rnti; } + std::pair get_active_cell_index(uint32_t enb_cc_idx) const; + const ue_cfg_t& get_ue_cfg() const { return cfg; } + uint32_t get_aggr_level(uint32_t ue_cc_idx, uint32_t nof_bits); + void ul_buffer_add(uint8_t lcid, uint32_t bytes); /******************************************************* * Functions used by scheduler metric objects @@ -146,21 +150,21 @@ public: uint32_t get_pending_dl_rlc_data() const; uint32_t get_expected_dl_bitrate(uint32_t ue_cc_idx) const; - uint32_t get_pending_ul_data_total(uint32_t tti, int this_ue_cc_idx); - uint32_t get_pending_ul_new_data(uint32_t tti, int this_ue_cc_idx); + uint32_t get_pending_ul_data_total(tti_point tti_tx_ul, int this_ue_cc_idx); + uint32_t get_pending_ul_new_data(tti_point tti_tx_ul, int this_ue_cc_idx); uint32_t get_pending_ul_old_data(); uint32_t get_pending_ul_old_data(uint32_t cc_idx); uint32_t get_expected_ul_bitrate(uint32_t ue_cc_idx) const; - dl_harq_proc* get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t cc_idx); - dl_harq_proc* get_empty_dl_harq(uint32_t tti_tx_dl, uint32_t cc_idx); - ul_harq_proc* get_ul_harq(uint32_t tti, uint32_t ue_cc_idx); + dl_harq_proc* get_pending_dl_harq(tti_point tti_tx_dl, uint32_t cc_idx); + dl_harq_proc* get_empty_dl_harq(tti_point tti_tx_dl, uint32_t cc_idx); + ul_harq_proc* get_ul_harq(tti_point tti_tx_ul, uint32_t ue_cc_idx); /******************************************************* * Functions used by the scheduler carrier object *******************************************************/ - void finish_tti(const tti_params_t& tti_params, uint32_t enb_cc_idx); + void finish_tti(tti_point tti_rx, uint32_t enb_cc_idx); /******************************************************* * Functions used by the scheduler object @@ -171,12 +175,12 @@ public: int generate_dl_dci_format(uint32_t pid, sched_interface::dl_sched_data_t* data, - uint32_t tti, + tti_point tti_tx_dl, uint32_t ue_cc_idx, uint32_t cfi, const rbgmask_t& user_mask); int generate_format0(sched_interface::ul_sched_data_t* data, - uint32_t tti, + tti_point tti_tx_ul, uint32_t cc_idx, prb_interval alloc, bool needs_pdcch, @@ -192,12 +196,11 @@ public: std::bitset scell_activation_mask() const; int enb_to_ue_cc_idx(uint32_t enb_cc_idx) const; - bool needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_send = false); uint32_t get_max_retx(); - bool pucch_sr_collision(uint32_t tti, uint32_t n_cce); - bool pdsch_enabled(srslte::tti_point tti_rx, uint32_t enb_cc_idx) const; - bool pusch_enabled(srslte::tti_point tti_rx, uint32_t enb_cc_idx, bool needs_pdcch) const; + bool pucch_sr_collision(tti_point tti_tx_dl, uint32_t n_cce); + bool pdsch_enabled(tti_point tti_rx, uint32_t enb_cc_idx) const; + bool pusch_enabled(tti_point tti_rx, uint32_t enb_cc_idx, bool needs_pdcch) const; private: void check_ue_cfg_correctness() const; @@ -206,40 +209,40 @@ 
private: std::pair allocate_new_dl_mac_pdu(sched_interface::dl_sched_data_t* data, dl_harq_proc* h, const rbgmask_t& user_mask, - uint32_t tti_tx_dl, + tti_point tti_tx_dl, uint32_t ue_cc_idx, uint32_t cfi, uint32_t tb); std::pair compute_mcs_and_tbs(uint32_t ue_cc_idx, - uint32_t tti_tx_dl, + tti_point tti_tx_dl, uint32_t nof_alloc_prbs, uint32_t cfi, const srslte_dci_dl_t& dci); - bool needs_cqi_unlocked(uint32_t tti, uint32_t cc_idx, bool will_send = false); + bool needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_send = false); int generate_format1(uint32_t pid, sched_interface::dl_sched_data_t* data, - uint32_t tti, - uint32_t cc_idx, + tti_point tti_tx_dl, + uint32_t ue_cc_idx, uint32_t cfi, const rbgmask_t& user_mask); int generate_format2a(uint32_t pid, sched_interface::dl_sched_data_t* data, - uint32_t tti, + tti_point tti_tx_dl, uint32_t cc_idx, uint32_t cfi, const rbgmask_t& user_mask); int generate_format2(uint32_t pid, sched_interface::dl_sched_data_t* data, - uint32_t tti, + tti_point tti_tx_dl, uint32_t cc_idx, uint32_t cfi, const rbgmask_t& user_mask); /* Args */ - sched_interface::ue_cfg_t cfg = {}; + ue_cfg_t cfg = {}; srslte_cell_t cell = {}; srslte::log_ref log_h; const std::vector* cell_params_list = nullptr; @@ -260,8 +263,9 @@ private: bool phy_config_dedicated_enabled = false; - srslte::tti_point current_tti; + tti_point current_tti; std::vector carriers; ///< map of UE CellIndex to carrier configuration + std::vector enb_ue_cc_idx_map; }; using sched_ue_list = std::map; diff --git a/srsenb/src/stack/mac/mac.cc b/srsenb/src/stack/mac/mac.cc index 302bb189f..8b797d6c6 100644 --- a/srsenb/src/stack/mac/mac.cc +++ b/srsenb/src/stack/mac/mac.cc @@ -284,16 +284,16 @@ void mac::get_metrics(std::vector& metrics) * *******************************************************/ -int mac::ack_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack) +int mac::ack_info(uint32_t tti_rx, uint16_t rnti, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack) { - log_h->step(tti); + log_h->step(tti_rx); srslte::rwlock_read_guard lock(rwlock); if (not check_ue_exists(rnti)) { return SRSLTE_ERROR; } - uint32_t nof_bytes = scheduler.dl_ack_info(tti, rnti, enb_cc_idx, tb_idx, ack); + uint32_t nof_bytes = scheduler.dl_ack_info(tti_rx, rnti, enb_cc_idx, tb_idx, ack); ue_db[rnti]->metrics_tx(ack, nof_bytes); if (ack) { @@ -382,9 +382,9 @@ int mac::cqi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t cqi return SRSLTE_SUCCESS; } -int mac::snr_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, float snr) +int mac::snr_info(uint32_t tti_rx, uint16_t rnti, uint32_t enb_cc_idx, float snr) { - log_h->step(tti); + log_h->step(tti_rx); srslte::rwlock_read_guard lock(rwlock); if (not check_ue_exists(rnti)) { @@ -392,7 +392,7 @@ int mac::snr_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, float snr) } uint32_t cqi = srslte_cqi_from_snr(snr); - return scheduler.ul_cqi_info(tti, rnti, enb_cc_idx, cqi, 0); + return scheduler.ul_cqi_info(tti_rx, rnti, enb_cc_idx, cqi, 0); } int mac::ta_info(uint32_t tti, uint16_t rnti, float ta_us) diff --git a/srsenb/src/stack/mac/sched.cc b/srsenb/src/stack/mac/sched.cc index 3a331808c..eccc5cef3 100644 --- a/srsenb/src/stack/mac/sched.cc +++ b/srsenb/src/stack/mac/sched.cc @@ -231,12 +231,12 @@ void sched::phy_config_enabled(uint16_t rnti, bool enabled) { // TODO: Check if correct use of last_tti ue_db_access( - rnti, [this, enabled](sched_ue& ue) { ue.phy_config_enabled(last_tti.to_uint(), enabled); }, __PRETTY_FUNCTION__); + 
rnti, [this, enabled](sched_ue& ue) { ue.phy_config_enabled(last_tti, enabled); }, __PRETTY_FUNCTION__); } int sched::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg_) { - return ue_db_access(rnti, [lc_id, cfg_](sched_ue& ue) { ue.set_bearer_cfg(lc_id, cfg_); }); + return ue_db_access(rnti, [lc_id, cfg_](sched_ue& ue) { ue.set_bearer_cfg(lc_id, *cfg_); }); } int sched::bearer_ue_rem(uint16_t rnti, uint32_t lc_id) @@ -258,7 +258,7 @@ uint32_t sched::get_ul_buffer(uint16_t rnti) uint32_t ret = SRSLTE_ERROR; ue_db_access( rnti, - [this, &ret](sched_ue& ue) { ret = ue.get_pending_ul_new_data(last_tti.to_uint(), -1); }, + [this, &ret](sched_ue& ue) { ret = ue.get_pending_ul_new_data(to_tx_ul(last_tti), -1); }, __PRETTY_FUNCTION__); return ret; } @@ -273,33 +273,38 @@ int sched::dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code, uint32_t nof_cmd return ue_db_access(rnti, [ce_code, nof_cmds](sched_ue& ue) { ue.mac_buffer_state(ce_code, nof_cmds); }); } -int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack) +int sched::dl_ack_info(uint32_t tti_rx, uint16_t rnti, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack) { int ret = -1; ue_db_access( - rnti, [&](sched_ue& ue) { ret = ue.set_ack_info(tti, enb_cc_idx, tb_idx, ack); }, __PRETTY_FUNCTION__); + rnti, + [&](sched_ue& ue) { ret = ue.set_ack_info(tti_point{tti_rx}, enb_cc_idx, tb_idx, ack); }, + __PRETTY_FUNCTION__); return ret; } int sched::ul_crc_info(uint32_t tti_rx, uint16_t rnti, uint32_t enb_cc_idx, bool crc) { - return ue_db_access( - rnti, [tti_rx, enb_cc_idx, crc](sched_ue& ue) { ue.set_ul_crc(srslte::tti_point{tti_rx}, enb_cc_idx, crc); }); + return ue_db_access(rnti, + [tti_rx, enb_cc_idx, crc](sched_ue& ue) { ue.set_ul_crc(tti_point{tti_rx}, enb_cc_idx, crc); }); } int sched::dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t ri_value) { - return ue_db_access(rnti, [tti, enb_cc_idx, ri_value](sched_ue& ue) { ue.set_dl_ri(tti, enb_cc_idx, ri_value); }); + return ue_db_access( + rnti, [tti, enb_cc_idx, ri_value](sched_ue& ue) { ue.set_dl_ri(tti_point{tti}, enb_cc_idx, ri_value); }); } int sched::dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t pmi_value) { - return ue_db_access(rnti, [tti, enb_cc_idx, pmi_value](sched_ue& ue) { ue.set_dl_pmi(tti, enb_cc_idx, pmi_value); }); + return ue_db_access( + rnti, [tti, enb_cc_idx, pmi_value](sched_ue& ue) { ue.set_dl_pmi(tti_point{tti}, enb_cc_idx, pmi_value); }); } int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t cqi_value) { - return ue_db_access(rnti, [tti, enb_cc_idx, cqi_value](sched_ue& ue) { ue.set_dl_cqi(tti, enb_cc_idx, cqi_value); }); + return ue_db_access( + rnti, [tti, enb_cc_idx, cqi_value](sched_ue& ue) { ue.set_dl_cqi(tti_point{tti}, enb_cc_idx, cqi_value); }); } int sched::dl_rach_info(uint32_t enb_cc_idx, dl_sched_rar_info_t rar_info) @@ -308,9 +313,9 @@ int sched::dl_rach_info(uint32_t enb_cc_idx, dl_sched_rar_info_t rar_info) return carrier_schedulers[enb_cc_idx]->dl_rach_info(rar_info); } -int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code) +int sched::ul_cqi_info(uint32_t tti_rx, uint16_t rnti, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code) { - return ue_db_access(rnti, [&](sched_ue& ue) { ue.set_ul_cqi(tti, enb_cc_idx, cqi, ul_ch_code); }); + return ue_db_access(rnti, [&](sched_ue& ue) { ue.set_ul_cqi(tti_point{tti_rx}, enb_cc_idx, cqi, ul_ch_code); }); } int 
sched::ul_bsr(uint16_t rnti, uint32_t lcg_id, uint32_t bsr) @@ -444,11 +449,6 @@ void sched::new_tti(tti_point tti_rx) // Generate sched results for all CCs, if not yet generated for (size_t cc_idx = 0; cc_idx < carrier_schedulers.size(); ++cc_idx) { if (not is_generated(tti_rx, cc_idx)) { - // Setup tti-specific vars of the UE - for (auto& user : ue_db) { - user.second.new_tti(tti_rx); - } - // Generate carrier scheduling result carrier_schedulers[cc_idx]->generate_tti_result(tti_rx); } diff --git a/srsenb/src/stack/mac/sched_carrier.cc b/srsenb/src/stack/mac/sched_carrier.cc index 674a7e639..783fe9895 100644 --- a/srsenb/src/stack/mac/sched_carrier.cc +++ b/srsenb/src/stack/mac/sched_carrier.cc @@ -44,9 +44,9 @@ void bc_sched::dl_sched(sf_sched* tti_sched) void bc_sched::update_si_windows(sf_sched* tti_sched) { - uint32_t tti_tx_dl = tti_sched->get_tti_tx_dl(); - uint32_t current_sf_idx = tti_sched->get_tti_params().sf_idx_tx_dl; - uint32_t current_sfn = tti_sched->get_tti_params().sfn_tx_dl; + tti_point tti_tx_dl = tti_sched->get_tti_tx_dl(); + uint32_t current_sf_idx = tti_sched->get_tti_tx_dl().sf_idx(); + uint32_t current_sfn = tti_sched->get_tti_tx_dl().sfn(); for (uint32_t i = 0; i < pending_sibs.size(); ++i) { // There is SIB data @@ -68,7 +68,7 @@ void bc_sched::update_si_windows(sf_sched* tti_sched) } } else { if (i > 0) { - if (srslte_tti_interval(tti_tx_dl, pending_sibs[i].window_start) > cc_cfg->cfg.si_window_ms) { + if (pending_sibs[i].window_start + cc_cfg->cfg.si_window_ms < tti_tx_dl) { // the si window has passed pending_sibs[i] = {}; } @@ -84,8 +84,8 @@ void bc_sched::update_si_windows(sf_sched* tti_sched) void bc_sched::alloc_sibs(sf_sched* tti_sched) { - uint32_t current_sf_idx = tti_sched->get_tti_params().sf_idx_tx_dl; - uint32_t current_sfn = tti_sched->get_tti_params().sfn_tx_dl; + uint32_t current_sf_idx = tti_sched->get_tti_tx_dl().sf_idx(); + uint32_t current_sfn = tti_sched->get_tti_tx_dl().sfn(); for (uint32_t i = 0; i < pending_sibs.size(); i++) { if (cc_cfg->cfg.sibs[i].len > 0 and pending_sibs[i].is_in_window and pending_sibs[i].n_tx < 4) { @@ -112,7 +112,7 @@ void bc_sched::alloc_paging(sf_sched* tti_sched) /* Allocate DCIs and RBGs for paging */ if (rrc != nullptr) { uint32_t paging_payload = 0; - if (rrc->is_paging_opportunity(current_tti, &paging_payload) and paging_payload > 0) { + if (rrc->is_paging_opportunity(current_tti.to_uint(), &paging_payload) and paging_payload > 0) { tti_sched->alloc_paging(bc_aggr_level, paging_payload); } } @@ -138,25 +138,24 @@ ra_sched::ra_sched(const sched_cell_params_t& cfg_, std::map // discard it. void ra_sched::dl_sched(sf_sched* tti_sched) { - uint32_t tti_tx_dl = tti_sched->get_tti_tx_dl(); - rar_aggr_level = 2; + tti_point tti_tx_dl = tti_sched->get_tti_tx_dl(); + rar_aggr_level = 2; while (not pending_rars.empty()) { - sf_sched::pending_rar_t& rar = pending_rars.front(); - uint32_t prach_tti = rar.prach_tti; + sf_sched::pending_rar_t& rar = pending_rars.front(); // Discard all RARs out of the window. 
The first one inside the window is scheduled, if we can't we exit - if (not sched_utils::is_in_tti_interval( - tti_tx_dl, prach_tti + PRACH_RAR_OFFSET, prach_tti + PRACH_RAR_OFFSET + cc_cfg->cfg.prach_rar_window)) { - if (tti_tx_dl >= prach_tti + PRACH_RAR_OFFSET + cc_cfg->cfg.prach_rar_window) { + srslte::tti_interval rar_window{rar.prach_tti + PRACH_RAR_OFFSET, + rar.prach_tti + PRACH_RAR_OFFSET + cc_cfg->cfg.prach_rar_window}; + if (not rar_window.contains(tti_tx_dl)) { + if (tti_tx_dl >= rar_window.stop()) { char error_msg[128]; int len = snprintf(error_msg, sizeof(error_msg), - "SCHED: Could not transmit RAR within the window (RA=%d, Window=[%d..%d], RAR=%d)\n", - prach_tti, - prach_tti + PRACH_RAR_OFFSET, - prach_tti + PRACH_RAR_OFFSET + cc_cfg->cfg.prach_rar_window, - tti_tx_dl); + "SCHED: Could not transmit RAR within the window (RA=%d, Window=%s, RAR=%d)\n", + rar.prach_tti.to_uint(), + rar_window.to_string().c_str(), + tti_tx_dl.to_uint()); error_msg[len] = '\0'; srslte::console("%s", error_msg); log_h->error("%s", error_msg); @@ -206,7 +205,7 @@ int ra_sched::dl_rach_info(dl_sched_rar_info_t rar_info) // find pending rar with same RA-RNTI for (sf_sched::pending_rar_t& r : pending_rars) { - if (r.prach_tti == rar_info.prach_tti and ra_rnti == r.ra_rnti) { + if (r.prach_tti.to_uint() == rar_info.prach_tti and ra_rnti == r.ra_rnti) { if (r.nof_grants >= sched_interface::MAX_RAR_LIST) { log_h->warning("PRACH ignored, as the the maximum number of RAR grants per tti has been reached\n"); return SRSLTE_ERROR; @@ -220,7 +219,7 @@ int ra_sched::dl_rach_info(dl_sched_rar_info_t rar_info) // create new RAR sf_sched::pending_rar_t p; p.ra_rnti = ra_rnti; - p.prach_tti = rar_info.prach_tti; + p.prach_tti = tti_point{rar_info.prach_tti}; p.nof_grants = 1; p.msg3_grant[0] = rar_info; pending_rars.push_back(p); @@ -240,9 +239,11 @@ void ra_sched::ul_sched(sf_sched* sf_dl_sched, sf_sched* sf_msg3_sched) uint16_t crnti = msg3grant.data.temp_crnti; auto user_it = ue_db->find(crnti); if (user_it != ue_db->end() and sf_msg3_sched->alloc_msg3(&user_it->second, msg3grant)) { - log_h->debug("SCHED: Queueing Msg3 for rnti=0x%x at tti=%d\n", crnti, sf_msg3_sched->get_tti_tx_ul()); + log_h->debug("SCHED: Queueing Msg3 for rnti=0x%x at tti=%d\n", crnti, sf_msg3_sched->get_tti_tx_ul().to_uint()); } else { - log_h->error("SCHED: Failed to allocate Msg3 for rnti=0x%x at tti=%d\n", crnti, sf_msg3_sched->get_tti_tx_ul()); + log_h->error("SCHED: Failed to allocate Msg3 for rnti=0x%x at tti=%d\n", + crnti, + sf_msg3_sched->get_tti_tx_ul().to_uint()); } } } @@ -313,7 +314,12 @@ const cc_sched_result& sched::carrier_sched::generate_tti_result(tti_point tti_r sf_sched_result* sf_result = prev_sched_results->get_sf(tti_rx); cc_sched_result* cc_result = sf_result->new_cc(enb_cc_idx); - bool dl_active = sf_dl_mask[tti_sched->get_tti_tx_dl() % sf_dl_mask.size()] == 0; + bool dl_active = sf_dl_mask[tti_sched->get_tti_tx_dl().to_uint() % sf_dl_mask.size()] == 0; + + /* Refresh UE internal buffers and subframe vars */ + for (auto& user : *ue_db) { + user.second.new_subframe(tti_rx, enb_cc_idx); + } /* Schedule PHICH */ for (auto& ue_pair : *ue_db) { @@ -350,7 +356,7 @@ const cc_sched_result& sched::carrier_sched::generate_tti_result(tti_point tti_r /* Reset ue harq pending ack state, clean-up blocked pids */ for (auto& user : *ue_db) { - user.second.finish_tti(cc_result->tti_params, enb_cc_idx); + user.second.finish_tti(tti_rx, enb_cc_idx); } log_dl_cc_results(log_h, enb_cc_idx, cc_result->dl_sched_result); @@ -360,14 
+366,14 @@ const cc_sched_result& sched::carrier_sched::generate_tti_result(tti_point tti_r void sched::carrier_sched::alloc_dl_users(sf_sched* tti_result) { - if (sf_dl_mask[tti_result->get_tti_tx_dl() % sf_dl_mask.size()] != 0) { + if (sf_dl_mask[tti_result->get_tti_tx_dl().to_uint() % sf_dl_mask.size()] != 0) { return; } // NOTE: In case of 6 PRBs, do not transmit if there is going to be a PRACH in the UL to avoid collisions if (cc_cfg->nof_prb() == 6) { - uint32_t tti_rx_ack = tti_result->get_tti_params().tti_rx_ack_dl(); - if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_rx_ack, -1)) { + tti_point tti_rx_ack = to_tx_dl_ack(tti_result->get_tti_rx()); + if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_rx_ack.to_uint(), -1)) { tti_result->reserve_dl_rbgs(0, cc_cfg->nof_rbgs); } } @@ -387,11 +393,11 @@ int sched::carrier_sched::alloc_ul_users(sf_sched* tti_sched) sf_sched* sched::carrier_sched::get_sf_sched(tti_point tti_rx) { sf_sched* ret = &sf_scheds[tti_rx.to_uint() % sf_scheds.size()]; - if (ret->get_tti_rx() != tti_rx.to_uint()) { - sf_sched_result* sf_res = prev_sched_results->get_sf(srslte::tti_point{tti_rx}); + if (ret->get_tti_rx() != tti_rx) { + sf_sched_result* sf_res = prev_sched_results->get_sf(tti_rx); if (sf_res == nullptr) { // Reset if tti_rx has not been yet set in the sched results - sf_res = prev_sched_results->new_tti(srslte::tti_point{tti_rx}); + sf_res = prev_sched_results->new_tti(tti_rx); } // start new TTI for the given CC. ret->new_tti(tti_rx, sf_res); @@ -399,9 +405,9 @@ sf_sched* sched::carrier_sched::get_sf_sched(tti_point tti_rx) return ret; } -const sf_sched_result* sched::carrier_sched::get_sf_result(uint32_t tti_rx) const +const sf_sched_result* sched::carrier_sched::get_sf_result(tti_point tti_rx) const { - return prev_sched_results->get_sf(srslte::tti_point{tti_rx}); + return prev_sched_results->get_sf(tti_rx); } int sched::carrier_sched::dl_rach_info(dl_sched_rar_info_t rar_info) diff --git a/srsenb/src/stack/mac/sched_grid.cc b/srsenb/src/stack/mac/sched_grid.cc index 2338d1f90..14cd9fa38 100644 --- a/srsenb/src/stack/mac/sched_grid.cc +++ b/srsenb/src/stack/mac/sched_grid.cc @@ -41,14 +41,6 @@ const char* alloc_outcome_t::to_string() const return "unknown error"; } -tti_params_t::tti_params_t(uint32_t tti_rx_) : - tti_rx(tti_rx_), - sf_idx_tx_dl(TTI_ADD(tti_rx, FDD_HARQ_DELAY_UL_MS) % 10), - tti_tx_dl(TTI_ADD(tti_rx, FDD_HARQ_DELAY_UL_MS)), - tti_tx_ul(TTI_ADD(tti_rx, (FDD_HARQ_DELAY_UL_MS + FDD_HARQ_DELAY_DL_MS))), - sfn_tx_dl(TTI_ADD(tti_rx, FDD_HARQ_DELAY_UL_MS) / 10) -{} - cc_sched_result* sf_sched_result::new_cc(uint32_t enb_cc_idx) { if (enb_cc_idx >= enb_cc_list.size()) { @@ -135,9 +127,9 @@ void pdcch_grid_t::init(const sched_cell_params_t& cell_params_) } } -void pdcch_grid_t::new_tti(const tti_params_t& tti_params_) +void pdcch_grid_t::new_tti(tti_point tti_rx_) { - tti_params = &tti_params_; + tti_rx = tti_rx_; // Reset back all CFIs for (auto& t : alloc_trees) { @@ -155,11 +147,11 @@ const sched_dci_cce_t* pdcch_grid_t::get_cce_loc_table(alloc_type_t alloc_type, case alloc_type_t::DL_PCCH: return &cc_cfg->common_locations[cfix]; case alloc_type_t::DL_RAR: - return &cc_cfg->rar_locations[cfix][tti_params->sf_idx_tx_dl]; + return &cc_cfg->rar_locations[cfix][to_tx_dl(tti_rx).sf_idx()]; case alloc_type_t::DL_DATA: - return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, tti_params->sf_idx_tx_dl); + return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx()); case 
alloc_type_t::UL_DATA: - return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, tti_params->sf_idx_tx_dl); + return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx()); default: break; } @@ -204,10 +196,10 @@ bool pdcch_grid_t::alloc_dci_record(const alloc_record_t& record, uint32_t cfix) if (tree.prev_end > 0) { for (size_t j = tree.prev_start; j < tree.prev_end; ++j) { - ret |= add_tree_node_leaves(tree, (int)j, record, *dci_locs, tti_params->tti_tx_dl); + ret |= add_tree_node_leaves(tree, (int)j, record, *dci_locs, to_tx_dl(tti_rx)); } } else { - ret = add_tree_node_leaves(tree, -1, record, *dci_locs, tti_params->tti_tx_dl); + ret = add_tree_node_leaves(tree, -1, record, *dci_locs, to_tx_dl(tti_rx)); } if (ret) { @@ -223,7 +215,7 @@ bool pdcch_grid_t::add_tree_node_leaves(alloc_tree_t& tree, int parent_node_idx, const alloc_record_t& dci_record, const sched_dci_cce_t& dci_locs, - uint32_t tti_tx_dl) + tti_point tti_tx_dl) { bool ret = false; @@ -397,16 +389,16 @@ void sf_grid_t::init(const sched_cell_params_t& cell_params_) pdcch_alloc.init(*cc_cfg); } -void sf_grid_t::new_tti(const tti_params_t& tti_params_) +void sf_grid_t::new_tti(tti_point tti_rx_) { - tti_params = &tti_params_; + tti_rx = tti_rx_; dl_mask.reset(); ul_mask.reset(); avail_rbg = nof_rbgs; // internal state - pdcch_alloc.new_tti(*tti_params); + pdcch_alloc.new_tti(tti_rx); } //! Allocates CCEs and RBs for the given mask and allocation type (e.g. data, BC, RAR, paging) @@ -581,26 +573,27 @@ void sf_sched::new_tti(tti_point tti_rx_, sf_sched_result* cc_results_) data_allocs.clear(); ul_data_allocs.clear(); - tti_params = tti_params_t{tti_rx_.to_uint()}; - tti_alloc.new_tti(tti_params); + tti_rx = tti_rx_; + tti_alloc.new_tti(tti_rx_); cc_results = cc_results_; // Reserve PRBs for PUCCH reserve_ul_prbs(pucch_mask, true); // Reserve PRBs for PRACH - if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_params.tti_tx_ul, -1)) { + if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, to_tx_ul(tti_rx).to_uint(), -1)) { prbmask_t prach_mask{cc_cfg->nof_prb()}; prach_mask.fill(cc_cfg->cfg.prach_freq_offset, cc_cfg->cfg.prach_freq_offset + 6); reserve_ul_prbs(prach_mask, cc_cfg->nof_prb() != 6); - log_h->debug( - "SCHED: Allocated PRACH RBs for tti_tx_ul=%d. Mask: 0x%s\n", tti_params.tti_tx_ul, prach_mask.to_hex().c_str()); + log_h->debug("SCHED: Allocated PRACH RBs for tti_tx_ul=%d. Mask: 0x%s\n", + to_tx_ul(tti_rx).to_uint(), + prach_mask.to_hex().c_str()); } // setup first prb to be used for msg3 alloc. 
Account for potential PRACH alloc - last_msg3_prb = cc_cfg->cfg.nrb_pucch; - uint32_t tti_msg3_alloc = TTI_ADD(tti_params.tti_tx_ul, MSG3_DELAY_MS); - if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_msg3_alloc, -1)) { + last_msg3_prb = cc_cfg->cfg.nrb_pucch; + tti_point tti_msg3_alloc = to_tx_ul(tti_rx) + MSG3_DELAY_MS; + if (srslte_prach_tti_opportunity_config_fdd(cc_cfg->cfg.prach_config, tti_msg3_alloc.to_uint(), -1)) { last_msg3_prb = std::max(last_msg3_prb, cc_cfg->cfg.prach_freq_offset + 6); } } @@ -744,11 +737,11 @@ std::pair sf_sched::alloc_rar(uint32_t aggr_lvl, cons return ret; } -bool is_periodic_cqi_expected(const sched_interface::ue_cfg_t& ue_cfg, uint32_t tti_tx_ul) +bool is_periodic_cqi_expected(const sched_interface::ue_cfg_t& ue_cfg, tti_point tti_tx_ul) { for (const sched_interface::ue_cfg_t::cc_cfg_t& cc : ue_cfg.supported_cc_list) { if (cc.dl_cfg.cqi_report.periodic_configured) { - if (srslte_cqi_periodic_send(&cc.dl_cfg.cqi_report, tti_tx_ul, SRSLTE_FDD)) { + if (srslte_cqi_periodic_send(&cc.dl_cfg.cqi_report, tti_tx_ul.to_uint(), SRSLTE_FDD)) { return true; } } @@ -880,7 +873,7 @@ bool sf_sched::alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_s } uint32_t cell_index = p.second; - ul_harq_proc* h = user->get_ul_harq(tti_params.tti_tx_ul, cell_index); + ul_harq_proc* h = user->get_ul_harq(get_tti_tx_ul(), cell_index); /* Indicate PHICH acknowledgment if needed */ if (h->has_pending_phich()) { @@ -1085,7 +1078,7 @@ uci_pusch_t is_uci_included(const sf_sched* sf_sched, // Check if CQI is pending for this CC const srslte_cqi_report_cfg_t& cqi_report = ue_cfg.supported_cc_list[ueccidx].dl_cfg.cqi_report; - if (srslte_cqi_periodic_send(&cqi_report, sf_sched->get_tti_tx_ul(), SRSLTE_FDD)) { + if (srslte_cqi_periodic_send(&cqi_report, sf_sched->get_tti_tx_ul().to_uint(), SRSLTE_FDD)) { if (uci_alloc == UCI_PUSCH_ACK) { uci_alloc = UCI_PUSCH_ACK_CQI; } else { @@ -1271,9 +1264,9 @@ void sf_sched::generate_sched_results(sched_ue_list& ue_db) set_ul_sched_result(dci_result, &cc_result->ul_sched_result, ue_db); /* Store remaining sf_sched results for this TTI */ - cc_result->dl_mask = tti_alloc.get_dl_mask(); - cc_result->ul_mask = tti_alloc.get_ul_mask(); - cc_result->tti_params = tti_params; + cc_result->dl_mask = tti_alloc.get_dl_mask(); + cc_result->ul_mask = tti_alloc.get_ul_mask(); + cc_result->tti_rx = get_tti_rx(); } uint32_t sf_sched::get_nof_ctrl_symbols() const diff --git a/srsenb/src/stack/mac/sched_harq.cc b/srsenb/src/stack/mac/sched_harq.cc index 714d2f986..0c4e6e710 100644 --- a/srsenb/src/stack/mac/sched_harq.cc +++ b/srsenb/src/stack/mac/sched_harq.cc @@ -163,7 +163,7 @@ dl_harq_proc::dl_harq_proc() : harq_proc() void dl_harq_proc::new_tx(const rbgmask_t& new_mask, uint32_t tb_idx, - uint32_t tti, + tti_point tti_tx_dl, int mcs, int tbs, uint32_t n_cce_, @@ -171,19 +171,19 @@ void dl_harq_proc::new_tx(const rbgmask_t& new_mask, { n_cce = n_cce_; rbgmask = new_mask; - new_tx_common(tb_idx, tti_point{tti}, mcs, tbs, max_retx_); + new_tx_common(tb_idx, tti_tx_dl, mcs, tbs, max_retx_); } void dl_harq_proc::new_retx(const rbgmask_t& new_mask, uint32_t tb_idx, - uint32_t tti_, + tti_point tti_tx_dl, int* mcs, int* tbs, uint32_t n_cce_) { n_cce = n_cce_; rbgmask = new_mask; - new_retx_common(tb_idx, tti_point{tti_}, mcs, tbs); + new_retx_common(tb_idx, tti_tx_dl, mcs, tbs); } int dl_harq_proc::set_ack(uint32_t tb_idx, bool ack) @@ -201,9 +201,9 @@ rbgmask_t dl_harq_proc::get_rbgmask() const return rbgmask; } -bool 
dl_harq_proc::has_pending_retx(uint32_t tb_idx, uint32_t tti_tx_dl) const +bool dl_harq_proc::has_pending_retx(uint32_t tb_idx, tti_point tti_tx_dl) const { - return (tti_point{tti_tx_dl} >= to_tx_dl_ack(tti)) and has_pending_retx_common(tb_idx); + return (tti_tx_dl >= to_tx_dl_ack(tti)) and has_pending_retx_common(tb_idx); } int dl_harq_proc::get_tbs(uint32_t tb_idx) const @@ -314,10 +314,10 @@ void harq_entity::reset() } } -dl_harq_proc* harq_entity::get_empty_dl_harq(uint32_t tti_tx_dl) +dl_harq_proc* harq_entity::get_empty_dl_harq(tti_point tti_tx_dl) { if (not is_async) { - dl_harq_proc* h = &dl_harqs[tti_tx_dl % nof_dl_harqs()]; + dl_harq_proc* h = &dl_harqs[tti_tx_dl.to_uint() % nof_dl_harqs()]; return h->is_empty() ? h : nullptr; } @@ -325,19 +325,19 @@ dl_harq_proc* harq_entity::get_empty_dl_harq(uint32_t tti_tx_dl) return it != dl_harqs.end() ? &(*it) : nullptr; } -dl_harq_proc* harq_entity::get_pending_dl_harq(uint32_t tti_tx_dl) +dl_harq_proc* harq_entity::get_pending_dl_harq(tti_point tti_tx_dl) { if (not is_async) { - dl_harq_proc* h = &dl_harqs[tti_tx_dl % nof_dl_harqs()]; + dl_harq_proc* h = &dl_harqs[tti_tx_dl.to_uint() % nof_dl_harqs()]; return (h->has_pending_retx(0, tti_tx_dl) or h->has_pending_retx(1, tti_tx_dl)) ? h : nullptr; } return get_oldest_dl_harq(tti_tx_dl); } -std::pair harq_entity::set_ack_info(uint32_t tti_rx, uint32_t tb_idx, bool ack) +std::pair harq_entity::set_ack_info(tti_point tti_rx, uint32_t tb_idx, bool ack) { for (auto& h : dl_harqs) { - if (h.get_tti() + FDD_HARQ_DELAY_DL_MS == tti_point{tti_rx}) { + if (h.get_tti() + FDD_HARQ_DELAY_DL_MS == tti_rx) { if (h.set_ack(tb_idx, ack) == SRSLTE_SUCCESS) { return {h.get_id(), h.get_tbs(tb_idx)}; } @@ -347,25 +347,24 @@ std::pair harq_entity::set_ack_info(uint32_t tti_rx, uint32_t tb_ return {dl_harqs.size(), -1}; } -ul_harq_proc* harq_entity::get_ul_harq(uint32_t tti_tx_ul) +ul_harq_proc* harq_entity::get_ul_harq(tti_point tti_tx_ul) { - return &ul_harqs[tti_tx_ul % ul_harqs.size()]; + return &ul_harqs[tti_tx_ul.to_uint() % ul_harqs.size()]; } -int harq_entity::set_ul_crc(srslte::tti_point tti_rx, uint32_t tb_idx, bool ack_) +int harq_entity::set_ul_crc(tti_point tti_rx, uint32_t tb_idx, bool ack_) { - ul_harq_proc* h = get_ul_harq(tti_rx.to_uint()); + ul_harq_proc* h = get_ul_harq(tti_rx); uint32_t pid = h->get_id(); return h->set_ack(tb_idx, ack_) ? 
pid : -1; } -void harq_entity::reset_pending_data(srslte::tti_point tti_rx) +void harq_entity::reset_pending_data(tti_point tti_rx) { tti_point tti_tx_ul = to_tx_ul(tti_rx); - tti_point tti_tx_dl = to_tx_dl(tti_rx); // Reset ACK state of UL Harq - get_ul_harq(tti_tx_ul.to_uint())->reset_pending_data(); + get_ul_harq(tti_tx_ul)->reset_pending_data(); // Reset any DL harq which has 0 retxs for (auto& h : dl_harqs) { @@ -378,14 +377,13 @@ void harq_entity::reset_pending_data(srslte::tti_point tti_rx) * @param tti_tx_dl assumed to always be equal or ahead in time in comparison to current harqs * @return pointer to found dl_harq */ -dl_harq_proc* harq_entity::get_oldest_dl_harq(uint32_t tti_tx_dl) +dl_harq_proc* harq_entity::get_oldest_dl_harq(tti_point tti_tx_dl) { - tti_point t_tx_dl{tti_tx_dl}; - int oldest_idx = -1; - uint32_t oldest_tti = 0; + int oldest_idx = -1; + uint32_t oldest_tti = 0; for (const dl_harq_proc& h : dl_harqs) { if (h.has_pending_retx(0, tti_tx_dl) or h.has_pending_retx(1, tti_tx_dl)) { - uint32_t x = t_tx_dl - h.get_tti(); + uint32_t x = tti_tx_dl - h.get_tti(); if (x > oldest_tti) { oldest_idx = h.get_id(); oldest_tti = x; diff --git a/srsenb/src/stack/mac/sched_ue.cc b/srsenb/src/stack/mac/sched_ue.cc index f7717cbd7..3b0fc1b73 100644 --- a/srsenb/src/stack/mac/sched_ue.cc +++ b/srsenb/src/stack/mac/sched_ue.cc @@ -47,16 +47,6 @@ uint32_t get_tbs_bytes(uint32_t mcs, uint32_t nof_alloc_prb, bool use_tbs_index_ return (uint32_t)tbs / 8U; } -//! TS 36.321 sec 7.1.2 - MAC PDU subheader is 2 bytes if L<=128 and 3 otherwise -uint32_t get_mac_subheader_size(uint32_t sdu_bytes) -{ - return sdu_bytes == 0 ? 0 : (sdu_bytes > 128 ? 3 : 2); -} -uint32_t get_mac_sdu_and_subheader_size(uint32_t sdu_bytes) -{ - return sdu_bytes + get_mac_subheader_size(sdu_bytes); -} - /** * Count number of PRBs present in a DL RBG mask * @param bitmask DL RBG mask @@ -145,7 +135,7 @@ std::tuple false_position_method(int x1, int x2, YType y * *******************************************************/ -sched_ue::sched_ue() : log_h(srslte::logmap::get("MAC ")) +sched_ue::sched_ue() : log_h(srslte::logmap::get("MAC")) { reset(); } @@ -157,7 +147,7 @@ void sched_ue::init(uint16_t rnti_, const std::vector& cell Info("SCHED: Added user rnti=0x%x\n", rnti); } -void sched_ue::set_cfg(const sched_interface::ue_cfg_t& cfg_) +void sched_ue::set_cfg(const ue_cfg_t& cfg_) { // for the first configured cc, set it as primary cc if (cfg.supported_cc_list.empty()) { @@ -187,8 +177,11 @@ void sched_ue::set_cfg(const sched_interface::ue_cfg_t& cfg_) } // in case carriers have been added or modified bool scell_activation_state_changed = false; + enb_ue_cc_idx_map.clear(); + enb_ue_cc_idx_map.resize(cell_params_list->size(), -1); for (uint32_t ue_idx = 0; ue_idx < cfg.supported_cc_list.size(); ++ue_idx) { - auto& cc_cfg = cfg.supported_cc_list[ue_idx]; + auto& cc_cfg = cfg.supported_cc_list[ue_idx]; + enb_ue_cc_idx_map[cc_cfg.enb_cc_idx] = ue_idx; if (ue_idx >= prev_supported_cc_list.size()) { // New carrier needs to be added @@ -230,11 +223,12 @@ void sched_ue::reset() } } -void sched_ue::new_tti(srslte::tti_point new_tti) +void sched_ue::new_subframe(tti_point tti_rx, uint32_t enb_cc_idx) { - current_tti = new_tti; - - lch_handler.new_tti(); + if (current_tti != tti_rx) { + current_tti = tti_rx; + lch_handler.new_tti(); + } } /// sanity check the UE CC configuration @@ -272,10 +266,10 @@ void sched_ue::check_ue_cfg_correctness() const * *******************************************************/ -void 
sched_ue::set_bearer_cfg(uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg_) +void sched_ue::set_bearer_cfg(uint32_t lc_id, const bearer_cfg_t& cfg_) { - cfg.ue_bearers[lc_id] = *cfg_; - lch_handler.config_lcid(lc_id, *cfg_); + cfg.ue_bearers[lc_id] = cfg_; + lch_handler.config_lcid(lc_id, cfg_); } void sched_ue::rem_bearer(uint32_t lc_id) @@ -284,10 +278,10 @@ void sched_ue::rem_bearer(uint32_t lc_id) lch_handler.config_lcid(lc_id, sched_interface::ue_bearer_cfg_t{}); } -void sched_ue::phy_config_enabled(uint32_t tti, bool enabled) +void sched_ue::phy_config_enabled(tti_point tti_rx, bool enabled) { for (cc_sched_ue& c : carriers) { - c.dl_cqi_tti = tti; + c.dl_cqi_tti_rx = tti_rx; } phy_config_dedicated_enabled = enabled; } @@ -335,12 +329,12 @@ void sched_ue::unset_sr() sr = false; } -bool sched_ue::pucch_sr_collision(uint32_t tti, uint32_t n_cce) +bool sched_ue::pucch_sr_collision(tti_point tti_tx_dl, uint32_t n_cce) { if (!phy_config_dedicated_enabled) { return false; } - if (cfg.pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&cfg.pucch_cfg, tti)) { + if (cfg.pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&cfg.pucch_cfg, tti_tx_dl.to_uint())) { return (n_cce + cfg.pucch_cfg.N_pucch_1) == cfg.pucch_cfg.n_pucch_sr; } return false; @@ -383,7 +377,7 @@ bool sched_ue::pdsch_enabled(srslte::tti_point tti_rx, uint32_t enb_cc_idx) cons return true; } -bool sched_ue::pusch_enabled(srslte::tti_point tti_rx, uint32_t enb_cc_idx, bool needs_pdcch) const +bool sched_ue::pusch_enabled(tti_point tti_rx, uint32_t enb_cc_idx, bool needs_pdcch) const { if (carriers[0].get_cell_cfg()->enb_cc_idx != enb_cc_idx) { return true; @@ -407,7 +401,7 @@ bool sched_ue::pusch_enabled(srslte::tti_point tti_rx, uint32_t enb_cc_idx, bool return true; } -int sched_ue::set_ack_info(uint32_t tti_rx, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack) +int sched_ue::set_ack_info(tti_point tti_rx, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack) { int tbs_acked = -1; cc_sched_ue* c = find_ue_carrier(enb_cc_idx); @@ -415,9 +409,10 @@ int sched_ue::set_ack_info(uint32_t tti_rx, uint32_t enb_cc_idx, uint32_t tb_idx std::pair p2 = c->harq_ent.set_ack_info(tti_rx, tb_idx, ack); tbs_acked = p2.second; if (tbs_acked > 0) { - Debug("SCHED: Set DL ACK=%d for rnti=0x%x, pid=%d, tb=%d, tti=%d\n", ack, rnti, p2.first, tb_idx, tti_rx); + Debug( + "SCHED: Set DL ACK=%d for rnti=0x%x, pid=%d, tb=%d, tti=%d\n", ack, rnti, p2.first, tb_idx, tti_rx.to_uint()); } else { - Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti_rx); + Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti_rx.to_uint()); } } else { log_h->warning("Received DL ACK for invalid cell index %d\n", enb_cc_idx); @@ -425,7 +420,7 @@ int sched_ue::set_ack_info(uint32_t tti_rx, uint32_t enb_cc_idx, uint32_t tb_idx return tbs_acked; } -void sched_ue::set_ul_crc(srslte::tti_point tti_rx, uint32_t enb_cc_idx, bool crc_res) +void sched_ue::set_ul_crc(tti_point tti_rx, uint32_t enb_cc_idx, bool crc_res) { cc_sched_ue* c = find_ue_carrier(enb_cc_idx); if (c != nullptr and c->cc_state() != cc_st::idle) { @@ -438,44 +433,44 @@ void sched_ue::set_ul_crc(srslte::tti_point tti_rx, uint32_t enb_cc_idx, bool cr } } -void sched_ue::set_dl_ri(uint32_t tti, uint32_t enb_cc_idx, uint32_t ri) +void sched_ue::set_dl_ri(tti_point tti_rx, uint32_t enb_cc_idx, uint32_t ri) { cc_sched_ue* c = find_ue_carrier(enb_cc_idx); if (c != nullptr and c->cc_state() != cc_st::idle) { - c->dl_ri = ri; - c->dl_ri_tti = tti; + c->dl_ri = ri; + c->dl_ri_tti_rx = tti_rx; } else { 
log_h->warning("Received DL RI for invalid cell index %d\n", enb_cc_idx); } } -void sched_ue::set_dl_pmi(uint32_t tti, uint32_t enb_cc_idx, uint32_t pmi) +void sched_ue::set_dl_pmi(tti_point tti_rx, uint32_t enb_cc_idx, uint32_t pmi) { cc_sched_ue* c = find_ue_carrier(enb_cc_idx); if (c != nullptr and c->cc_state() != cc_st::idle) { - c->dl_pmi = pmi; - c->dl_pmi_tti = tti; + c->dl_pmi = pmi; + c->dl_pmi_tti_rx = tti_rx; } else { log_h->warning("Received DL PMI for invalid cell index %d\n", enb_cc_idx); } } -void sched_ue::set_dl_cqi(uint32_t tti, uint32_t enb_cc_idx, uint32_t cqi) +void sched_ue::set_dl_cqi(tti_point tti_rx, uint32_t enb_cc_idx, uint32_t cqi) { cc_sched_ue* c = find_ue_carrier(enb_cc_idx); if (c != nullptr and c->cc_state() != cc_st::idle) { - c->set_dl_cqi(tti, cqi); + c->set_dl_cqi(tti_rx, cqi); } else { log_h->warning("Received DL CQI for invalid enb cell index %d\n", enb_cc_idx); } } -void sched_ue::set_ul_cqi(uint32_t tti, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code) +void sched_ue::set_ul_cqi(tti_point tti_rx, uint32_t enb_cc_idx, uint32_t cqi, uint32_t ul_ch_code) { cc_sched_ue* c = find_ue_carrier(enb_cc_idx); if (c != nullptr and c->cc_state() != cc_st::idle) { - c->ul_cqi = cqi; - c->ul_cqi_tti = tti; + c->ul_cqi = cqi; + c->ul_cqi_tti_rx = tti_rx; } else { log_h->warning("Received SNR info for invalid cell index %d\n", enb_cc_idx); } @@ -513,7 +508,7 @@ void sched_ue::tpc_dec() std::pair sched_ue::allocate_new_dl_mac_pdu(sched::dl_sched_data_t* data, dl_harq_proc* h, const rbgmask_t& user_mask, - uint32_t tti_tx_dl, + tti_point tti_tx_dl, uint32_t ue_cc_idx, uint32_t cfi, uint32_t tb) @@ -541,7 +536,7 @@ std::pair sched_ue::allocate_new_dl_mac_pdu(sched::dl_sched_data_t* da int sched_ue::generate_dl_dci_format(uint32_t pid, sched_interface::dl_sched_data_t* data, - uint32_t tti_tx_dl, + tti_point tti_tx_dl, uint32_t ue_cc_idx, uint32_t cfi, const rbgmask_t& user_mask) @@ -569,7 +564,7 @@ int sched_ue::generate_dl_dci_format(uint32_t pid, // > return 0 if allocation is invalid int sched_ue::generate_format1(uint32_t pid, sched_interface::dl_sched_data_t* data, - uint32_t tti_tx_dl, + tti_point tti_tx_dl, uint32_t ue_cc_idx, uint32_t cfi, const rbgmask_t& user_mask) @@ -636,7 +631,7 @@ int sched_ue::generate_format1(uint32_t pid, * @return pair with MCS and TBS (in bytes) */ std::pair sched_ue::compute_mcs_and_tbs(uint32_t ue_cc_idx, - uint32_t tti_tx_dl, + tti_point tti_tx_dl, uint32_t nof_alloc_prbs, uint32_t cfi, const srslte_dci_dl_t& dci) @@ -648,7 +643,7 @@ std::pair sched_ue::compute_mcs_and_tbs(uint32_t ue_cc_i srslte_pdsch_grant_t grant = {}; srslte_dl_sf_cfg_t dl_sf = {}; dl_sf.cfi = cfi; - dl_sf.tti = tti_tx_dl; + dl_sf.tti = tti_tx_dl.to_uint(); srslte_ra_dl_grant_to_grant_prb_allocation(&dci, &grant, carriers[ue_cc_idx].get_cell_cfg()->nof_prb()); uint32_t nof_re = srslte_ra_dl_grant_nof_re(&carriers[ue_cc_idx].get_cell_cfg()->cfg.cell, &dl_sf, &grant); @@ -677,7 +672,7 @@ std::pair sched_ue::compute_mcs_and_tbs(uint32_t ue_cc_i // Generates a Format2a dci int sched_ue::generate_format2a(uint32_t pid, sched_interface::dl_sched_data_t* data, - uint32_t tti_tx_dl, + tti_point tti_tx_dl, uint32_t ue_cc_idx, uint32_t cfi, const rbgmask_t& user_mask) @@ -757,13 +752,13 @@ int sched_ue::generate_format2a(uint32_t pid, // Generates a Format2 dci int sched_ue::generate_format2(uint32_t pid, sched_interface::dl_sched_data_t* data, - uint32_t tti, + tti_point tti_tx_dl, uint32_t cc_idx, uint32_t cfi, const rbgmask_t& user_mask) { /* Call Format 2a 
(common) */
-  int ret = generate_format2a(pid, data, tti, cc_idx, cfi, user_mask);
+  int ret = generate_format2a(pid, data, tti_tx_dl, cc_idx, cfi, user_mask);
 
   /* Compute precoding information */
   data->dci.format = SRSLTE_DCI_FORMAT2;
@@ -777,7 +772,7 @@ int sched_ue::generate_format2(uint32_t pid,
 }
 
 int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
-                               uint32_t                          tti,
+                               tti_point                         tti_tx_ul,
                                uint32_t                          ue_cc_idx,
                                prb_interval                      alloc,
                                bool                              needs_pdcch,
@@ -785,10 +780,10 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
                                int                               explicit_mcs,
                                uci_pusch_t                       uci_type)
 {
-  ul_harq_proc*    h   = get_ul_harq(tti, ue_cc_idx);
+  ul_harq_proc*    h   = get_ul_harq(tti_tx_ul, ue_cc_idx);
   srslte_dci_ul_t* dci = &data->dci;
 
-  bool cqi_request = needs_cqi_unlocked(tti, true);
+  bool cqi_request = needs_cqi(tti_tx_ul.to_uint(), true);
 
   // Set DCI position
   data->needs_pdcch = needs_pdcch;
@@ -808,7 +803,7 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
       tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, false, true), alloc.length()) / 8;
     } else {
       // dynamic mcs
-      uint32_t req_bytes = get_pending_ul_new_data(tti, ue_cc_idx);
+      uint32_t req_bytes = get_pending_ul_new_data(tti_tx_ul, ue_cc_idx);
       uint32_t N_srs     = 0;
       uint32_t nof_symb  = 2 * (SRSLTE_CP_NSYMB(cell.cp) - 1) - N_srs;
       uint32_t nof_re    = nof_symb * alloc.length() * SRSLTE_NRE;
@@ -835,14 +830,14 @@ int sched_ue::generate_format0(sched_interface::ul_sched_data_t* data,
        // NOTE: if (nof_re < nof_uci_re) we should set TBS=0
      }
     }
-    h->new_tx(tti_point{tti}, mcs, tbs, alloc, nof_retx);
+    h->new_tx(tti_tx_ul, mcs, tbs, alloc, nof_retx);
 
     // Un-trigger the SR if data is allocated
     if (tbs > 0) {
       unset_sr();
     }
   } else {
     // retx
-    h->new_retx(tti_point{tti}, &mcs, nullptr, alloc);
+    h->new_retx(tti_tx_ul, &mcs, nullptr, alloc);
     tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(mcs, false, true), alloc.length()) / 8;
   }
@@ -901,26 +896,20 @@ uint32_t sched_ue::get_max_retx()
   return cfg.maxharq_tx;
 }
 
-bool sched_ue::needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_be_sent)
-{
-  return needs_cqi_unlocked(tti, cc_idx, will_be_sent);
-}
-
-// Private lock-free implemenentation
-bool sched_ue::needs_cqi_unlocked(uint32_t tti, uint32_t cc_idx, bool will_be_sent)
+bool sched_ue::needs_cqi(uint32_t tti, uint32_t cc_idx, bool will_send)
 {
   bool ret = false;
   if (phy_config_dedicated_enabled && cfg.supported_cc_list[0].aperiodic_cqi_period &&
       lch_handler.has_pending_dl_txs()) {
-    uint32_t interval = srslte_tti_interval(tti, carriers[cc_idx].dl_cqi_tti);
+    uint32_t interval = srslte_tti_interval(tti, carriers[cc_idx].dl_cqi_tti_rx.to_uint());
     bool     needscqi = interval >= cfg.supported_cc_list[0].aperiodic_cqi_period;
     if (needscqi) {
       uint32_t interval_sent = srslte_tti_interval(tti, cqi_request_tti);
       if (interval_sent >= 16) {
-        if (will_be_sent) {
+        if (will_send) {
          cqi_request_tti = tti;
         }
-        Debug("SCHED: Needs_cqi, last_sent=%d, will_be_sent=%d\n", cqi_request_tti, will_be_sent);
+        Debug("SCHED: Needs_cqi, last_sent=%d, will_be_sent=%d\n", cqi_request_tti, will_send);
         ret = true;
       }
     }
   }
@@ -1084,7 +1073,7 @@ uint32_t sched_ue::get_pending_ul_old_data()
   return pending_ul_data;
 }
 
-uint32_t sched_ue::get_pending_ul_data_total(uint32_t tti, int this_ue_cc_idx)
+uint32_t sched_ue::get_pending_ul_data_total(tti_point tti_tx_ul, int this_ue_cc_idx)
 {
   static constexpr uint32_t lbsr_size = 4, sbsr_size = 2;
 
@@ -1119,7 +1108,7 @@ uint32_t sched_ue::get_pending_ul_data_total(uint32_t tti, int this_ue_cc_idx)
       }
     }
     for (uint32_t cc_idx = 0; cc_idx < carriers.size(); ++cc_idx) {
-      if (needs_cqi_unlocked(tti, cc_idx)) {
+      if (needs_cqi(tti_tx_ul.to_uint(), cc_idx)) {
         return 128;
       }
     }
@@ -1128,9 +1117,9 @@ uint32_t sched_ue::get_pending_ul_data_total(uint32_t tti, int this_ue_cc_idx)
   return pending_data;
 }
 
-uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti, int this_ue_cc_idx)
+uint32_t sched_ue::get_pending_ul_new_data(tti_point tti_tx_ul, int this_ue_cc_idx)
 {
-  uint32_t pending_data = get_pending_ul_data_total(tti, this_ue_cc_idx);
+  uint32_t pending_data = get_pending_ul_data_total(tti_tx_ul, this_ue_cc_idx);
 
   // Subtract all the UL data already allocated in the UL harqs
   uint32_t pending_ul_data = get_pending_ul_old_data();
@@ -1156,7 +1145,7 @@ bool sched_ue::is_sr_triggered()
 }
 
 /* Gets HARQ process with oldest pending retx */
-dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t ue_cc_idx)
+dl_harq_proc* sched_ue::get_pending_dl_harq(tti_point tti_tx_dl, uint32_t ue_cc_idx)
 {
   if (ue_cc_idx < carriers.size() and carriers[ue_cc_idx].cc_state() == cc_st::active) {
     return carriers[ue_cc_idx].harq_ent.get_pending_dl_harq(tti_tx_dl);
@@ -1164,7 +1153,7 @@ dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti_tx_dl, uint32_t ue_cc_i
   return nullptr;
 }
 
-dl_harq_proc* sched_ue::get_empty_dl_harq(uint32_t tti_tx_dl, uint32_t ue_cc_idx)
+dl_harq_proc* sched_ue::get_empty_dl_harq(tti_point tti_tx_dl, uint32_t ue_cc_idx)
 {
   if (ue_cc_idx < carriers.size() and carriers[ue_cc_idx].cc_state() == cc_st::active) {
     return carriers[ue_cc_idx].harq_ent.get_empty_dl_harq(tti_tx_dl);
@@ -1172,7 +1161,7 @@ dl_harq_proc* sched_ue::get_empty_dl_harq(uint32_t tti_tx_dl, uint32_t ue_cc_idx
   return nullptr;
 }
 
-ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti_tx_ul, uint32_t ue_cc_idx)
+ul_harq_proc* sched_ue::get_ul_harq(tti_point tti_tx_ul, uint32_t ue_cc_idx)
 {
   if (ue_cc_idx < carriers.size() and carriers[ue_cc_idx].cc_state() == cc_st::active) {
     return carriers[ue_cc_idx].harq_ent.get_ul_harq(tti_tx_ul);
@@ -1203,13 +1192,13 @@ uint32_t sched_ue::get_aggr_level(uint32_t ue_cc_idx, uint32_t nof_bits)
   return carriers[ue_cc_idx].get_aggr_level(nof_bits);
 }
 
-void sched_ue::finish_tti(const tti_params_t& tti_params, uint32_t enb_cc_idx)
+void sched_ue::finish_tti(tti_point tti_rx, uint32_t enb_cc_idx)
 {
   cc_sched_ue* c = find_ue_carrier(enb_cc_idx);
 
   if (c != nullptr) {
     // Check that scell state needs to change
-    c->finish_tti(current_tti);
+    c->finish_tti(tti_rx);
   }
 }
 
@@ -1271,10 +1260,7 @@ std::bitset sched_ue::scell_activation_mask() const
 
 int sched_ue::enb_to_ue_cc_idx(uint32_t enb_cc_idx) const
 {
-  auto it = std::find_if(carriers.begin(), carriers.end(), [enb_cc_idx](const cc_sched_ue& c) {
-    return c.get_cell_cfg()->enb_cc_idx == enb_cc_idx;
-  });
-  return it != carriers.end() ? std::distance(carriers.begin(), it) : -1;
+  return enb_ue_cc_idx_map[enb_cc_idx];
 }
 
 int cc_sched_ue::cqi_to_tbs(uint32_t nof_prb, uint32_t nof_re, bool is_ul, uint32_t* mcs)
@@ -1364,14 +1350,14 @@ cc_sched_ue::cc_sched_ue(const sched_interface::ue_cfg_t& cfg_,
 
 void cc_sched_ue::reset()
 {
-  dl_ri      = 0;
-  dl_ri_tti  = 0;
-  dl_pmi     = 0;
-  dl_pmi_tti = 0;
-  dl_cqi     = 1;
-  dl_cqi_tti = 0;
-  ul_cqi     = 1;
-  ul_cqi_tti = 0;
+  dl_ri         = 0;
+  dl_ri_tti_rx  = tti_point{};
+  dl_pmi        = 0;
+  dl_pmi_tti_rx = tti_point{};
+  dl_cqi        = 1;
+  dl_cqi_tti_rx = tti_point{};
+  ul_cqi        = 1;
+  ul_cqi_tti_rx = tti_point{};
   harq_ent.reset();
 }
@@ -1418,7 +1404,7 @@ void cc_sched_ue::set_cfg(const sched_interface::ue_cfg_t& cfg_)
   }
 }
 
-void cc_sched_ue::finish_tti(srslte::tti_point tti_rx)
+void cc_sched_ue::finish_tti(tti_point tti_rx)
 {
   last_tti = tti_point{tti_rx};
 
@@ -1553,11 +1539,11 @@ uint32_t cc_sched_ue::get_required_prb_ul(uint32_t req_bytes)
   return req_prbs;
 }
 
-void cc_sched_ue::set_dl_cqi(uint32_t tti_tx_dl, uint32_t dl_cqi_)
+void cc_sched_ue::set_dl_cqi(tti_point tti_rx, uint32_t dl_cqi_)
 {
-  dl_cqi     = dl_cqi_;
-  dl_cqi_tti = tti_tx_dl;
-  dl_cqi_rx  = dl_cqi_rx or dl_cqi > 0;
+  dl_cqi        = dl_cqi_;
+  dl_cqi_tti_rx = tti_rx;
+  dl_cqi_rx     = dl_cqi_rx or dl_cqi > 0;
   if (ue_cc_idx > 0 and cc_state_ == cc_st::activating and dl_cqi_rx) {
     // Wait for SCell to receive a positive CQI before activating it
     cc_state_ = cc_st::active;
diff --git a/srsenb/src/stack/mac/schedulers/sched_time_rr.cc b/srsenb/src/stack/mac/schedulers/sched_time_rr.cc
index 13bcd1083..baa7a5de7 100644
--- a/srsenb/src/stack/mac/schedulers/sched_time_rr.cc
+++ b/srsenb/src/stack/mac/schedulers/sched_time_rr.cc
@@ -31,7 +31,7 @@ void sched_time_rr::sched_dl_users(std::map& ue_db, sf_sched
   }
 
   // give priority in a time-domain RR basis.
-  uint32_t priority_idx = tti_sched->get_tti_tx_dl() % (uint32_t)ue_db.size();
+  uint32_t priority_idx = tti_sched->get_tti_tx_dl().to_uint() % (uint32_t)ue_db.size();
   sched_dl_retxs(ue_db, tti_sched, priority_idx);
   sched_dl_newtxs(ue_db, tti_sched, priority_idx);
 }
@@ -94,7 +94,7 @@ void sched_time_rr::sched_ul_users(std::map& ue_db, sf_sched
     return;
   }
   // give priority in a time-domain RR basis.
-  uint32_t priority_idx = tti_sched->get_tti_tx_ul() % (uint32_t)ue_db.size();
+  uint32_t priority_idx = tti_sched->get_tti_tx_ul().to_uint() % (uint32_t)ue_db.size();
   sched_ul_retxs(ue_db, tti_sched, priority_idx);
   sched_ul_newtxs(ue_db, tti_sched, priority_idx);
 }
diff --git a/srsenb/test/mac/sched_grid_test.cc b/srsenb/test/mac/sched_grid_test.cc
index 196439e5c..5b83c5b1d 100644
--- a/srsenb/test/mac/sched_grid_test.cc
+++ b/srsenb/test/mac/sched_grid_test.cc
@@ -56,21 +56,21 @@ int test_pdcch_one_ue()
 
   uint32_t tti_counter = 0;
   for (; tti_counter < nof_ttis; ++tti_counter) {
-    tti_params_t tti_params{(start_tti + tti_counter).to_uint()};
-    pdcch.new_tti(tti_params);
+    tti_point tti_rx = start_tti + tti_counter;
+    pdcch.new_tti(tti_rx);
     TESTASSERT(pdcch.nof_cces() == cell_params[ENB_CC_IDX].nof_cce_table[0]);
     TESTASSERT(pdcch.get_cfi() == 1); // Start at CFI=1
 
     // Set DL CQI - it should affect aggregation level
     uint32_t dl_cqi = std::uniform_int_distribution{1, 25}(srsenb::get_rand_gen());
-    sched_ue.set_dl_cqi(tti_params.tti_tx_dl, ENB_CC_IDX, dl_cqi);
+    sched_ue.set_dl_cqi(to_tx_dl(tti_rx), ENB_CC_IDX, dl_cqi);
     uint32_t aggr_idx = get_aggr_level(sched_ue, PCell_IDX, cell_params);
     uint32_t max_nof_cce_locs =
-        sched_ue.get_locations(ENB_CC_IDX, pdcch_grid_t::MAX_CFI, tti_params.sf_idx_tx_dl)->nof_loc[aggr_idx];
+        sched_ue.get_locations(ENB_CC_IDX, pdcch_grid_t::MAX_CFI, to_tx_dl(tti_rx).sf_idx())->nof_loc[aggr_idx];
 
     // allocate DL user
-    uint32_t                 prev_cfi = pdcch.get_cfi();
-    srsenb::sched_dci_cce_t* dci_cce  = sched_ue.get_locations(ENB_CC_IDX, prev_cfi, tti_params.sf_idx_tx_dl);
+    uint32_t                 prev_cfi = pdcch.get_cfi();
+    srsenb::sched_dci_cce_t* dci_cce  = sched_ue.get_locations(ENB_CC_IDX, prev_cfi, to_tx_dl(tti_rx).sf_idx());
     uint32_t prev_nof_cce_locs = dci_cce->nof_loc[aggr_idx];
 
     TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, aggr_idx, &sched_ue));
@@ -83,7 +83,7 @@ int test_pdcch_one_ue()
       TESTASSERT(pdcch.get_cfi() == prev_cfi);
     }
 
-    dci_cce = sched_ue.get_locations(ENB_CC_IDX, pdcch.get_cfi(), tti_params.sf_idx_tx_dl);
+    dci_cce = sched_ue.get_locations(ENB_CC_IDX, pdcch.get_cfi(), to_tx_dl(tti_rx).sf_idx());
     uint32_t        nof_dci_locs = dci_cce->nof_loc[aggr_idx];
     const uint32_t* dci_locs     = dci_cce->cce_start[aggr_idx];
 
@@ -115,7 +115,7 @@ int test_pdcch_one_ue()
       TESTASSERT(pdcch.get_cfi() == prev_cfi);
     }
 
-    dci_cce = sched_ue.get_locations(ENB_CC_IDX, pdcch.get_cfi(), tti_params.sf_idx_tx_dl);
+    dci_cce = sched_ue.get_locations(ENB_CC_IDX, pdcch.get_cfi(), to_tx_dl(tti_rx).sf_idx());
     nof_dci_locs = dci_cce->nof_loc[aggr_idx];
     dci_locs     = dci_cce->cce_start[aggr_idx];
 
diff --git a/srsenb/test/mac/sched_test_common.cc b/srsenb/test/mac/sched_test_common.cc
index 530330b2d..d9d747c01 100644
--- a/srsenb/test/mac/sched_test_common.cc
+++ b/srsenb/test/mac/sched_test_common.cc
@@ -238,11 +238,11 @@ int ue_ctxt_test::schedule_acks(cc_result result)
   return SRSLTE_SUCCESS;
 }
 
-void user_state_sched_tester::new_tti(sched* sched_ptr, uint32_t tti_rx)
+void user_state_sched_tester::new_tti(sched* sched_ptr, tti_point tti_rx)
 {
   tic++;
   for (auto& u : users) {
-    u.second.new_tti(sched_ptr, srslte::tti_point{tti_rx});
+    u.second.new_tti(sched_ptr, tti_rx);
   }
 }
 
@@ -315,7 +315,7 @@ int user_state_sched_tester::test_all(const sf_output_res_t& sf_out)
  * Sim Stats Storage
  **********************/
 
-void sched_result_stats::process_results(const tti_params_t& tti_params,
+void sched_result_stats::process_results(tti_point tti_rx,
                                          const std::vector& dl_result,
                                          const std::vector& ul_result)
 {
@@ -377,7 +377,7 @@ int common_sched_tester::add_user(uint16_t rnti, const ue_ctxt_test_cfg& ue_cfg_
   //  "New user added in a non-PRACH TTI\n");
 
   dl_sched_rar_info_t rar_info = {};
-  rar_info.prach_tti           = tti_info.tti_params.tti_rx;
+  rar_info.prach_tti           = tti_rx.to_uint();
   rar_info.temp_crnti          = rnti;
   rar_info.msg3_size           = 7;
   rar_info.preamble_idx        = tti_info.nof_prachs++;
@@ -407,34 +407,30 @@ void common_sched_tester::rem_user(uint16_t rnti)
 
 void common_sched_tester::new_test_tti()
 {
-  if (not tic.is_valid()) {
-    tic = srslte::tti_point{sim_args0.start_tti};
+  if (not tti_rx.is_valid()) {
+    tti_rx = srslte::tti_point{sim_args0.start_tti};
   } else {
-    tic++;
+    tti_rx++;
   }
-  tti_info.tti_params = tti_params_t{tic.to_uint()};
   tti_info.nof_prachs = 0;
   tti_info.dl_sched_result.clear();
   tti_info.ul_sched_result.clear();
   tti_info.dl_sched_result.resize(sched_cell_params.size());
   tti_info.ul_sched_result.resize(sched_cell_params.size());
-  tester_log->step(tti_info.tti_params.tti_rx);
+  tester_log->step(tti_rx.to_uint());
 }
 
 int common_sched_tester::process_results()
 {
   // Perform common eNB result tests
-  sf_output_res_t sf_out{sched_cell_params,
-                         srslte::tti_point{tti_info.tti_params.tti_rx},
-                         tti_info.ul_sched_result,
-                         tti_info.dl_sched_result};
+  sf_output_res_t sf_out{sched_cell_params, tti_rx, tti_info.ul_sched_result, tti_info.dl_sched_result};
   TESTASSERT(test_all_common(sf_out) == SRSLTE_SUCCESS);
   TESTASSERT(ue_tester->test_all(sf_out) == SRSLTE_SUCCESS);
 
-  sched_stats->process_results(tti_info.tti_params, tti_info.dl_sched_result, tti_info.ul_sched_result);
+  sched_stats->process_results(tti_rx, tti_info.dl_sched_result, tti_info.ul_sched_result);
 
   return SRSLTE_SUCCESS;
 }
@@ -470,7 +466,7 @@ int common_sched_tester::process_tti_events(const tti_ev& tti_ev)
     if (user != nullptr) {
       const auto& ue_sim_ctxt = user->ue_ctxt->get_ctxt();
       if (not ue_sim_ctxt.msg4_tti_rx.is_valid() and ue_sim_ctxt.msg3_tti_rx.is_valid() and
-          to_tx_ul(ue_sim_ctxt.msg3_tti_rx) <= tic) {
+          to_tx_ul(ue_sim_ctxt.msg3_tti_rx) <= tti_rx) {
         // Msg3 has been received but Msg4 has not been yet transmitted
         // Setup default UE config
         reconf_user(user->rnti, generate_setup_ue_cfg(sim_args0.default_ue_sim_cfg.ue_cfg));
@@ -516,7 +512,7 @@ int common_sched_tester::process_tti_events(const tti_ev& tti_ev)
 
       if (ue_ev.buffer_ev->sr_data > 0 and user->drb_cfg_flag) {
         uint32_t tot_ul_data =
-            ue_db[ue_ev.rnti].get_pending_ul_new_data(tti_info.tti_params.tti_tx_ul, -1) + ue_ev.buffer_ev->sr_data;
+            ue_db[ue_ev.rnti].get_pending_ul_new_data(to_tx_ul(tti_rx), -1) + ue_ev.buffer_ev->sr_data;
         uint32_t lcg = 1;
         ul_bsr(ue_ev.rnti, lcg, tot_ul_data);
       }
@@ -528,20 +524,20 @@ int common_sched_tester::process_tti_events(const tti_ev& tti_ev)
 int common_sched_tester::run_tti(const tti_ev& tti_events)
 {
   new_test_tti();
-  tester_log->info("---- tti=%u | nof_ues=%zd ----\n", tic.to_uint(), ue_db.size());
+  tester_log->info("---- tti=%u | nof_ues=%zd ----\n", tti_rx.to_uint(), ue_db.size());
 
-  ue_tester->new_tti(this, tti_info.tti_params.tti_rx);
+  ue_tester->new_tti(this, tti_rx);
   process_tti_events(tti_events);
   before_sched();
 
   // Call scheduler for all carriers
   tti_info.dl_sched_result.resize(sched_cell_params.size());
   for (uint32_t i = 0; i < sched_cell_params.size(); ++i) {
-    dl_sched(tti_info.tti_params.tti_tx_dl, i, tti_info.dl_sched_result[i]);
+    dl_sched(to_tx_dl(tti_rx).to_uint(), i, tti_info.dl_sched_result[i]);
   }
   tti_info.ul_sched_result.resize(sched_cell_params.size());
   for (uint32_t i = 0; i < sched_cell_params.size(); ++i) {
-    ul_sched(tti_info.tti_params.tti_tx_ul, i, tti_info.ul_sched_result[i]);
+    ul_sched(to_tx_ul(tti_rx).to_uint(), i, tti_info.ul_sched_result[i]);
   }
 
   process_results();
diff --git a/srsenb/test/mac/sched_test_common.h b/srsenb/test/mac/sched_test_common.h
index 1cd75d58b..b1a4fdc89 100644
--- a/srsenb/test/mac/sched_test_common.h
+++ b/srsenb/test/mac/sched_test_common.h
@@ -94,7 +94,7 @@ public:
     cell_params(cell_params_)
   {}
 
-  void new_tti(sched* sched_ptr, uint32_t tti_rx);
+  void new_tti(sched* sched_ptr, tti_point tti_rx);
   bool user_exists(uint16_t rnti) const { return users.find(rnti) != users.end(); }
   const ue_ctxt_test* get_user_ctxt(uint16_t rnti) const
   {
@@ -128,7 +128,7 @@ public:
    cell_params(std::move(cell_params_))
   {}
 
-  void process_results(const tti_params_t& tti_params,
+  void process_results(tti_point tti_rx,
                        const std::vector& dl_result,
                        const std::vector& ul_result);
 
@@ -151,7 +151,6 @@ class common_sched_tester : public sched
 {
 public:
   struct tti_info_t {
-    tti_params_t tti_params{10241};
     uint32_t nof_prachs = 0;
     std::vector dl_sched_result;
     std::vector ul_sched_result;
@@ -176,9 +175,9 @@ public:
   srslte::log* tester_log = nullptr;
 
   // tti specific params
-  tti_info_t        tti_info;
-  srslte::tti_point tic;
-  uint32_t          tti_count = 0;
+  tti_info_t tti_info;
+  tti_point  tti_rx;
+  uint32_t   tti_count = 0;
 
   // testers
   std::unique_ptr ue_tester;
diff --git a/srsenb/test/mac/sched_test_rand.cc b/srsenb/test/mac/sched_test_rand.cc
index c088a21fc..2a3ea6575 100644
--- a/srsenb/test/mac/sched_test_rand.cc
+++ b/srsenb/test/mac/sched_test_rand.cc
@@ -160,17 +160,16 @@ void sched_tester::before_sched()
     uint16_t            rnti = it.first;
     srsenb::sched_ue*   user = &it.second;
     tester_user_results d;
-    srsenb::ul_harq_proc* hul = user->get_ul_harq(tti_info.tti_params.tti_tx_ul, CARRIER_IDX);
+    srsenb::ul_harq_proc* hul = user->get_ul_harq(srsenb::to_tx_ul(tti_rx), CARRIER_IDX);
     d.ul_pending_data = get_ul_buffer(rnti);
     // user->get_pending_ul_new_data(tti_info.tti_params.tti_tx_ul) or hul->has_pending_retx(); //
     // get_ul_buffer(rnti);
     d.dl_pending_data = get_dl_buffer(rnti);
     d.has_ul_retx     = hul->has_pending_retx();
     d.has_ul_tx       = d.has_ul_retx or d.ul_pending_data > 0;
-    srsenb::dl_harq_proc* hdl = user->get_pending_dl_harq(tti_info.tti_params.tti_tx_dl, CARRIER_IDX);
-    d.has_dl_tx =
-        (hdl != nullptr) or
-        (it.second.get_empty_dl_harq(tti_info.tti_params.tti_tx_dl, CARRIER_IDX) != nullptr and d.dl_pending_data > 0);
+    srsenb::dl_harq_proc* hdl = user->get_pending_dl_harq(srsenb::to_tx_dl(tti_rx), CARRIER_IDX);
+    d.has_dl_tx = (hdl != nullptr) or (it.second.get_empty_dl_harq(srsenb::to_tx_dl(tti_rx), CARRIER_IDX) != nullptr and
+                                       d.dl_pending_data > 0);
     d.has_ul_newtx = not d.has_ul_retx and d.ul_pending_data > 0;
     tti_data.ue_data.insert(std::make_pair(rnti, d));
     tti_data.total_ues.dl_pending_data += d.dl_pending_data;
@@ -184,7 +183,7 @@ void sched_tester::before_sched()
       tti_data.ue_data[rnti].dl_harqs[i] = h;
     }
     // NOTE: ACK might have just cleared the harq for tti_info.tti_params.tti_tx_ul
-    tti_data.ue_data[rnti].ul_harq = *user->get_ul_harq(tti_info.tti_params.tti_tx_ul, CARRIER_IDX);
+    tti_data.ue_data[rnti].ul_harq = *user->get_ul_harq(srsenb::to_tx_ul(tti_rx), CARRIER_IDX);
   }
 
   // TODO: Check whether pending pending_rar.rar_tti correspond to a prach_tti
@@ -192,11 +191,9 @@
 
 int sched_tester::process_results()
 {
-  const srsenb::cc_sched_result* cc_result =
-      sched_results.get_cc(srslte::tti_point{tti_info.tti_params.tti_rx}, CARRIER_IDX);
-  srsenb::sf_output_res_t sf_out{
-      sched_cell_params, tti_point{tti_info.tti_params.tti_rx}, tti_info.ul_sched_result, tti_info.dl_sched_result};
-  TESTASSERT(tti_info.tti_params.tti_rx == cc_result->tti_params.tti_rx);
+  const srsenb::cc_sched_result* cc_result = sched_results.get_cc(tti_rx, CARRIER_IDX);
+  srsenb::sf_output_res_t sf_out{sched_cell_params, tti_rx, tti_info.ul_sched_result, tti_info.dl_sched_result};
+  TESTASSERT(tti_rx == cc_result->tti_rx);
 
   // Common tests
   TESTASSERT(test_pdcch_collisions(sf_out, CARRIER_IDX, &cc_result->pdcch_mask) == SRSLTE_SUCCESS);
@@ -248,10 +245,10 @@ int sched_tester::test_harqs()
     uint32_t                    h_id = data.dci.pid;
     uint16_t                    rnti = data.dci.rnti;
     const srsenb::dl_harq_proc& h    = ue_db[rnti].get_dl_harq(h_id, CARRIER_IDX);
-    CONDERROR(h.get_tti() != tti_point{tti_info.tti_params.tti_tx_dl},
+    CONDERROR(h.get_tti() != srsenb::to_tx_dl(tti_rx),
              "The scheduled DL harq pid=%d does not a valid tti=%u\n",
              h_id,
-              tti_info.tti_params.tti_tx_dl);
+              srsenb::to_tx_dl(tti_rx).to_uint());
     CONDERROR(h.get_n_cce() != data.dci.location.ncce, "Harq DCI location does not match with result\n");
   }
 
@@ -259,7 +256,7 @@ int sched_tester::test_harqs()
   for (uint32_t i = 0; i < tti_info.ul_sched_result[CARRIER_IDX].nof_phich_elems; ++i) {
     const auto& phich = tti_info.ul_sched_result[CARRIER_IDX].phich[i];
     const auto& hprev = tti_data.ue_data[phich.rnti].ul_harq;
-    const auto* h     = ue_db[phich.rnti].get_ul_harq(tti_info.tti_params.tti_tx_ul, CARRIER_IDX);
+    const auto* h     = ue_db[phich.rnti].get_ul_harq(srsenb::to_tx_ul(tti_rx), CARRIER_IDX);
     CONDERROR(not hprev.has_pending_phich(), "Alloc PHICH did not have any pending ack\n");
     bool maxretx_flag = hprev.nof_retx(0) + 1 >= hprev.max_nof_retx();
     if (phich.phich == sched_interface::ul_sched_phich_t::ACK) {