diff --git a/srsenb/hdr/stack/mac/nr/sched_nr.h b/srsenb/hdr/stack/mac/nr/sched_nr.h index c34e0605f..b1cf3502e 100644 --- a/srsenb/hdr/stack/mac/nr/sched_nr.h +++ b/srsenb/hdr/stack/mac/nr/sched_nr.h @@ -38,7 +38,7 @@ public: void new_tti(tti_point tti_rx) override; int generate_sched_result(tti_point tti_rx, uint32_t cc, sched_nr_res_t& result); - void dl_ack_info(tti_point tti_rx, uint16_t rnti, uint32_t cc, uint32_t tb_idx, bool ack) override; + void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) override; void ul_sr_info(tti_point tti_rx, uint16_t rnti) override; private: @@ -50,7 +50,7 @@ private: using sched_worker_manager = sched_nr_impl::sched_worker_manager; sched_worker_manager sched_workers; - std::array, SCHED_NR_MAX_CARRIERS> sched_results; + std::array, SCHED_NR_NOF_SUBFRAMES> sched_results; using ue_map_t = sched_nr_impl::ue_map_t; std::mutex ue_db_mutex; diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_common.h b/srsenb/hdr/stack/mac/nr/sched_nr_common.h index f120fa802..996003cc2 100644 --- a/srsenb/hdr/stack/mac/nr/sched_nr_common.h +++ b/srsenb/hdr/stack/mac/nr/sched_nr_common.h @@ -13,12 +13,21 @@ #ifndef SRSRAN_SCHED_NR_COMMON_H #define SRSRAN_SCHED_NR_COMMON_H -#include "srsran/adt/circular_map.h" +#include "sched_nr_interface.h" +#include "srsran/adt/bounded_bitset.h" namespace srsenb { const static size_t SCHED_NR_MAX_USERS = 4; const static size_t SCHED_NR_NOF_SUBFRAMES = 10; +const static size_t SCHED_NR_MAX_NOF_RBGS = 25; +const static size_t SCHED_NR_NOF_HARQS = 16; + +namespace sched_nr_impl { + +using rbgmask_t = srsran::bounded_bitset; + +} // namespace sched_nr_impl } // namespace srsenb diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_harq.h b/srsenb/hdr/stack/mac/nr/sched_nr_harq.h index 6b6fe2ed2..e090e109a 100644 --- a/srsenb/hdr/stack/mac/nr/sched_nr_harq.h +++ b/srsenb/hdr/stack/mac/nr/sched_nr_harq.h @@ -13,24 +13,29 @@ #ifndef SRSRAN_SCHED_NR_HARQ_H #define SRSRAN_SCHED_NR_HARQ_H +#include 
"sched_nr_common.h" #include "srsran/common/tti_point.h" #include namespace srsenb { namespace sched_nr_impl { -template -class harq +class harq_proc { public: - harq() = default; + explicit harq_proc(uint32_t id_, uint32_t max_nof_tb_ = 1) : pid(id_), max_nof_tb(max_nof_tb_) {} - bool empty() const - { - return std::all_of(tb.begin(), tb.end(), [](const tb_t t) { return not t.active; }); - } + bool empty() const { return not tb[0].active and not tb[1].active; } bool empty(uint32_t tb_idx) const { return tb[tb_idx].active; } + void ack_info(uint32_t tb_idx, bool ack) { tb[tb_idx].ack_state = ack; } + + bool has_pending_retx(tti_point tti_rx) const { return not empty() and tti_tx + ack_delay <= tti_rx; } + + bool new_tx(tti_point tti_tx, const rbgmask_t& rbgmask, uint32_t mcs, uint32_t ack_delay); + + const uint32_t pid; + private: struct tb_t { bool active = false; @@ -40,19 +45,58 @@ private: uint32_t mcs = 0; }; - uint32_t id; - tti_point tti_tx; - std::array tb; + const uint32_t max_nof_tb; + + tti_point tti_tx; + uint32_t ack_delay = 0; + rbgmask_t rbgmask; + std::array tb; }; class harq_entity { public: - void dl_ack_info(tti_point tti_rx, uint32_t tb_idx, bool ack) {} + harq_entity(); + void new_tti(tti_point tti_rx_); + + void dl_ack_info(uint32_t pid, uint32_t tb_idx, bool ack) { dl_harqs[pid].ack_info(tb_idx, ack); } + + harq_proc& get_dl_harq(uint32_t pid) { return dl_harqs[pid]; } + + harq_proc* find_pending_dl_retx() + { + return find_dl([this](const harq_proc& h) { return h.has_pending_retx(tti_rx); }); + } + harq_proc* find_pending_ul_retx() + { + return find_ul([this](const harq_proc& h) { return h.has_pending_retx(tti_rx); }); + } + harq_proc* find_empty_dl_harq() + { + return find_dl([](const harq_proc& h) { return h.empty(); }); + } + harq_proc* find_empty_ul_harq() + { + return find_ul([](const harq_proc& h) { return h.empty(); }); + } private: - std::array, 16> dl_harqs; - std::array, 16> ul_harqs; + template + harq_proc* find_dl(Predicate p) + 
{ + auto it = std::find_if(dl_harqs.begin(), dl_harqs.end(), p); + return (it == dl_harqs.end()) ? nullptr : &(*it); + } + template + harq_proc* find_ul(Predicate p) + { + auto it = std::find_if(ul_harqs.begin(), ul_harqs.end(), p); + return (it == ul_harqs.end()) ? nullptr : &(*it); + } + + tti_point tti_rx; + std::vector dl_harqs; + std::vector ul_harqs; }; } // namespace sched_nr_impl diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_interface.h b/srsenb/hdr/stack/mac/nr/sched_nr_interface.h index fb1fe93fc..e38d4e8ad 100644 --- a/srsenb/hdr/stack/mac/nr/sched_nr_interface.h +++ b/srsenb/hdr/stack/mac/nr/sched_nr_interface.h @@ -18,10 +18,16 @@ namespace srsenb { -const static size_t SCHED_NR_MAX_CARRIERS = 4; -const static uint16_t SCHED_NR_INVALID_RNTI = 0; +const static size_t SCHED_NR_MAX_CARRIERS = 4; +const static uint16_t SCHED_NR_INVALID_RNTI = 0; +const static size_t SCHED_NR_MAX_PDSCH_DATA = 16; +const static size_t SCHED_NR_MAX_PUSCH_DATA = 16; +const static size_t SCHED_NR_MAX_TB = 2; -struct sched_nr_cell_cfg {}; +struct sched_nr_cell_cfg { + uint32_t nof_prb; + uint32_t nof_rbg; +}; struct sched_nr_cfg { uint32_t nof_concurrent_subframes = 1; @@ -37,9 +43,21 @@ struct sched_nr_ue_cfg { srsran::bounded_vector carriers; }; +struct sched_nr_data_t { + srsran::bounded_vector tbs; +}; + +struct sched_nr_dl_res_t { + srsran::bounded_vector data; +}; + +struct sched_nr_ul_res_t { + srsran::bounded_vector pusch; +}; + struct sched_nr_res_t { - struct dl_result {}; - struct ul_result {}; + sched_nr_dl_res_t dl_res; + sched_nr_ul_res_t ul_res; }; class sched_nr_interface @@ -49,8 +67,8 @@ public: virtual void ue_cfg(uint16_t rnti, const sched_nr_ue_cfg& ue_cfg) = 0; virtual void new_tti(tti_point tti_rx) = 0; - virtual void dl_ack_info(tti_point tti_rx, uint16_t rnti, uint32_t cc, uint32_t tb_idx, bool ack) = 0; - virtual void ul_sr_info(tti_point, uint16_t rnti) = 0; + virtual void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) 
= 0; + virtual void ul_sr_info(tti_point, uint16_t rnti) = 0; }; } // namespace srsenb diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h b/srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h new file mode 100644 index 000000000..38ae6a366 --- /dev/null +++ b/srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h @@ -0,0 +1,49 @@ +/** + * + * \section COPYRIGHT + * + * Copyright 2013-2021 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution. + * + */ + +#ifndef SRSRAN_SCHED_NR_RB_GRID_H +#define SRSRAN_SCHED_NR_RB_GRID_H + +#include "sched_nr_interface.h" +#include "sched_nr_ue.h" + +namespace srsenb { +namespace sched_nr_impl { + +class slot_grid +{ +public: + explicit slot_grid(uint32_t cc, const sched_nr_cfg& cfg_); + void new_tti(tti_point tti_rx_, sched_nr_res_t& sched_res_); + bool alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask); + bool alloc_pusch(slot_ue& ue, const rbgmask_t& dl_mask); + + void generate_dcis(); + + tti_point tti_tx_dl() const { return tti_rx + TX_ENB_DELAY; } + tti_point tti_tx_ul() const { return tti_tx_dl() + K2; } + +private: + static const size_t K0 = 0, K1 = 4, K2 = 4; + const uint32_t cc; + const sched_nr_cfg& cfg; + + tti_point tti_rx; + rbgmask_t pdsch_mask; + rbgmask_t pusch_mask; + sched_nr_res_t* sched_res = nullptr; +}; + +} // namespace sched_nr_impl +} // namespace srsenb + +#endif // SRSRAN_SCHED_NR_RB_GRID_H diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_ue.h b/srsenb/hdr/stack/mac/nr/sched_nr_ue.h index 5eccca607..16688f11c 100644 --- a/srsenb/hdr/stack/mac/nr/sched_nr_ue.h +++ b/srsenb/hdr/stack/mac/nr/sched_nr_ue.h @@ -16,6 +16,7 @@ #include "sched_nr_common.h" #include "sched_nr_harq.h" #include "sched_nr_interface.h" +#include "srsran/adt/circular_map.h" #include "srsran/adt/move_callback.h" #include "srsran/adt/pool/cached_alloc.h" @@ -25,51 +26,61 @@ namespace sched_nr_impl { class ue_carrier; 
-class bwp_ue +class slot_ue { public: - bwp_ue() = default; - explicit bwp_ue(ue_carrier& carrier_, tti_point tti_rx_); - ~bwp_ue(); - bwp_ue(bwp_ue&& other) noexcept : carrier(other.carrier) { other.carrier = nullptr; } - bwp_ue& operator=(bwp_ue&& other) noexcept - { - carrier = other.carrier; - other.carrier = nullptr; - return *this; - } - bool empty() const { return carrier == nullptr; } + slot_ue() = default; + explicit slot_ue(bool& busy_signal, tti_point tti_rx_, uint32_t cc); + ~slot_ue(); + slot_ue(slot_ue&&) noexcept = default; + slot_ue& operator=(slot_ue&&) noexcept = default; + bool empty() const { return busy_signal == nullptr; } + void release(); tti_point tti_rx; uint32_t cc = SCHED_NR_MAX_CARRIERS; + // UE parameters common to all sectors const sched_nr_ue_cfg* cfg = nullptr; bool pending_sr; + // UE parameters that are sector specific + uint32_t dl_cqi; + uint32_t ul_cqi; + harq_proc* h_dl = nullptr; + harq_proc* h_ul = nullptr; + private: - ue_carrier* carrier = nullptr; + struct noop { + void operator()(bool* ptr) {} + }; + std::unique_ptr busy_signal; }; class ue_carrier { public: ue_carrier(uint16_t rnti, uint32_t cc, const sched_nr_ue_cfg& cfg); - bwp_ue try_reserve(tti_point tti_rx); - void push_feedback(srsran::move_callback callback); + slot_ue try_reserve(tti_point tti_rx, const sched_nr_ue_cfg& cfg); + void push_feedback(srsran::move_callback callback); + void set_cfg(const sched_nr_ue_cfg& uecfg); const uint16_t rnti; const uint32_t cc; + // Channel state + uint32_t dl_cqi = 1; + uint32_t ul_cqi = 0; + harq_entity harq_ent; private: - friend class bwp_ue; - void release() { busy = false; } + const sched_nr_ue_cfg* cfg = nullptr; - const sched_nr_ue_cfg* cfg; + bool busy{false}; + tti_point last_tti_rx; srsran::deque > pending_feedback; - bool busy{false}; }; class ue @@ -77,7 +88,7 @@ class ue public: ue(uint16_t rnti, const sched_nr_ue_cfg& cfg); - bwp_ue try_reserve(tti_point tti_rx, uint32_t cc); + slot_ue try_reserve(tti_point 
tti_rx, uint32_t cc); void set_cfg(const sched_nr_ue_cfg& cfg); diff --git a/srsenb/hdr/stack/mac/nr/sched_nr_worker.h b/srsenb/hdr/stack/mac/nr/sched_nr_worker.h index 67a31878f..af1c79734 100644 --- a/srsenb/hdr/stack/mac/nr/sched_nr_worker.h +++ b/srsenb/hdr/stack/mac/nr/sched_nr_worker.h @@ -14,6 +14,7 @@ #define SRSRAN_SCHED_NR_WORKER_H #include "sched_nr_common.h" +#include "sched_nr_rb_grid.h" #include "sched_nr_ue.h" #include "srsran/adt/circular_array.h" #include "srsran/adt/optional.h" @@ -26,23 +27,27 @@ namespace srsenb { namespace sched_nr_impl { -class bwp_worker +class carrier_slot_worker { public: - explicit bwp_worker(uint32_t cc_, ue_map_t& ue_db_) : cc(cc_), ue_db(ue_db_) {} + explicit carrier_slot_worker(uint32_t cc_, const sched_nr_cfg& cfg_) : cc(cc_), cfg(cfg_), res_grid(cc, cfg) {} - void start(tti_point tti_rx_); + void start(tti_point tti_rx_, sched_nr_res_t& bwp_result, ue_map_t& ue_db_); void run(); void end_tti(); bool running() const { return tti_rx.is_valid(); } private: - ue_map_t& ue_db; + void alloc_dl_ues(); + void alloc_ul_ues(); + + const uint32_t cc; + const sched_nr_cfg& cfg; tti_point tti_rx; - uint32_t cc; + slot_grid res_grid; - srsran::circular_array bwp_ues; + srsran::static_circular_map slot_ues; }; class sched_worker_manager @@ -55,22 +60,23 @@ public: void reserve_workers(tti_point tti_rx, srsran::span sf_result_); void start_tti(tti_point tti_rx); - bool run_tti(tti_point tti_rx, uint32_t cc); + bool run_tti(tti_point tti_rx, uint32_t cc, sched_nr_res_t& result); void end_tti(tti_point tti_rx); private: const sched_nr_cfg& cfg; + ue_map_t& ue_db; - struct sf_worker_ctxt { - sem_t sf_sem; - tti_point tti_rx; - srsran::span sf_result; - int worker_count = 0; - std::vector workers; + struct slot_worker_ctxt { + sem_t sf_sem; + tti_point tti_rx; + srsran::span sf_result; + int worker_count = 0; + std::vector workers; }; - std::vector > sf_ctxts; + std::vector > slot_ctxts; - sf_worker_ctxt& get_sf(tti_point tti_rx); + 
slot_worker_ctxt& get_sf(tti_point tti_rx); }; } // namespace sched_nr_impl diff --git a/srsenb/src/stack/mac/nr/CMakeLists.txt b/srsenb/src/stack/mac/nr/CMakeLists.txt index ee33f96bc..480fb0d24 100644 --- a/srsenb/src/stack/mac/nr/CMakeLists.txt +++ b/srsenb/src/stack/mac/nr/CMakeLists.txt @@ -6,6 +6,6 @@ # the distribution. # -set(SOURCES mac_nr.cc sched_nr.cc sched_nr_ue.cc sched_nr_worker.cc) +set(SOURCES mac_nr.cc sched_nr.cc sched_nr_ue.cc sched_nr_worker.cc sched_nr_rb_grid.cc sched_nr_harq.cc) add_library(srsgnb_mac STATIC ${SOURCES}) diff --git a/srsenb/src/stack/mac/nr/sched_nr.cc b/srsenb/src/stack/mac/nr/sched_nr.cc index 83423f734..8d0797318 100644 --- a/srsenb/src/stack/mac/nr/sched_nr.cc +++ b/srsenb/src/stack/mac/nr/sched_nr.cc @@ -15,7 +15,6 @@ namespace srsenb { -using sched_nr_impl::bwp_worker; using sched_nr_impl::sched_worker_manager; using sched_nr_impl::ue; using sched_nr_impl::ue_carrier; @@ -100,7 +99,7 @@ void sched_nr::ue_cfg_impl(uint16_t rnti, const sched_nr_ue_cfg& uecfg) void sched_nr::new_tti(tti_point tti_rx) { - // Lock subframe workers to provided tti_rx + // Lock slot workers for provided tti_rx sched_workers.reserve_workers(tti_rx, sched_results[tti_rx.sf_idx()]); { @@ -113,33 +112,25 @@ void sched_nr::new_tti(tti_point tti_rx) } } +/// Generate {tti,cc} scheduling decision int sched_nr::generate_sched_result(tti_point tti_rx, uint32_t cc, sched_nr_res_t& result) -{ - // Generate {tti,cc} scheduling decision - run_tti(tti_rx, cc); - - // copy scheduling decision result - result = sched_results[tti_rx.sf_idx()][cc]; - - return SRSRAN_SUCCESS; -} - -void sched_nr::run_tti(tti_point tti_rx, uint32_t cc) { // unlocked, parallel region - bool all_workers_finished = sched_workers.run_tti(tti_rx, cc); + bool all_workers_finished = sched_workers.run_tti(tti_rx, cc, result); if (all_workers_finished) { // once all workers of the same subframe finished, synchronize sched outcome with ue_db std::lock_guard lock(ue_db_mutex); 
sched_workers.end_tti(tti_rx); } + + return SRSRAN_SUCCESS; } -void sched_nr::dl_ack_info(tti_point tti_rx, uint16_t rnti, uint32_t cc, uint32_t tb_idx, bool ack) +void sched_nr::dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) { pending_events->push_cc_feedback( - rnti, cc, [tti_rx, tb_idx, ack](ue_carrier& ue_cc) { ue_cc.harq_ent.dl_ack_info(tti_rx, tb_idx, ack); }); + rnti, cc, [pid, tb_idx, ack](ue_carrier& ue_cc) { ue_cc.harq_ent.dl_ack_info(pid, tb_idx, ack); }); } void sched_nr::ul_sr_info(tti_point tti_rx, uint16_t rnti) diff --git a/srsenb/src/stack/mac/nr/sched_nr_harq.cc b/srsenb/src/stack/mac/nr/sched_nr_harq.cc new file mode 100644 index 000000000..9c9c0c627 --- /dev/null +++ b/srsenb/src/stack/mac/nr/sched_nr_harq.cc @@ -0,0 +1,46 @@ +/** + * + * \section COPYRIGHT + * + * Copyright 2013-2021 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution.
+ * + */ + +#include "srsenb/hdr/stack/mac/nr/sched_nr_harq.h" + +namespace srsenb { +namespace sched_nr_impl { + +bool harq_proc::new_tx(tti_point tti_tx_, const rbgmask_t& rbgmask_, uint32_t mcs, uint32_t ack_delay_) +{ + if (not empty()) { + return false; + } + tti_tx = tti_tx_; + ack_delay = ack_delay_; + rbgmask = rbgmask_; + tb[0].mcs = mcs; tb[0].active = true; + return true; +} + +harq_entity::harq_entity() +{ + dl_harqs.reserve(SCHED_NR_NOF_HARQS); + ul_harqs.reserve(SCHED_NR_NOF_HARQS); + for (uint32_t pid = 0; pid < SCHED_NR_NOF_HARQS; ++pid) { + dl_harqs.emplace_back(pid); + ul_harqs.emplace_back(pid); + } +} + +void harq_entity::new_tti(tti_point tti_rx_) +{ + tti_rx = tti_rx_; +} + +} // namespace sched_nr_impl +} // namespace srsenb \ No newline at end of file diff --git a/srsenb/src/stack/mac/nr/sched_nr_rb_grid.cc b/srsenb/src/stack/mac/nr/sched_nr_rb_grid.cc new file mode 100644 index 000000000..c27ff43a7 --- /dev/null +++ b/srsenb/src/stack/mac/nr/sched_nr_rb_grid.cc @@ -0,0 +1,80 @@ +/** + * + * \section COPYRIGHT + * + * Copyright 2013-2021 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution.
+ * + */ + +#include "srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h" + +namespace srsenb { +namespace sched_nr_impl { + +slot_grid::slot_grid(uint32_t cc_, const sched_nr_cfg& cfg_) : cc(cc_), cfg(cfg_) {} + +void slot_grid::new_tti(tti_point tti_rx_, sched_nr_res_t& sched_res_) +{ + tti_rx = tti_rx_; + sched_res = &sched_res_; + + pdsch_mask.reset(); + pusch_mask.reset(); + *sched_res = {}; +} + +bool slot_grid::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask) +{ + const uint32_t tbs = 100, mcs = 20; + if (ue.h_dl == nullptr) { + return false; + } + if ((pdsch_mask & dl_mask).any()) { + return false; + } + if (sched_res->dl_res.data.full()) { + return false; + } + if (not ue.h_dl->new_tx(tti_tx_dl(), dl_mask, mcs, K1)) { + return false; + } + + pdsch_mask |= dl_mask; + sched_res->dl_res.data.emplace_back(); + sched_nr_data_t& data = sched_res->dl_res.data.back(); + data.tbs.resize(1); + data.tbs[0] = tbs; + + return true; +} + +bool slot_grid::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask) +{ + const uint32_t tbs = 100, mcs = 20; + if ((pusch_mask & ul_mask).any()) { + return false; + } + if (sched_res->ul_res.pusch.full()) { + return false; + } + if (not ue.h_ul->new_tx(tti_tx_ul(), ul_mask, mcs, 0)) { + return false; + } + + pusch_mask |= ul_mask; + sched_res->ul_res.pusch.emplace_back(); + sched_nr_data_t& data = sched_res->ul_res.pusch.back(); + data.tbs.resize(1); + data.tbs[0] = tbs; + + return true; +} + +void slot_grid::generate_dcis() {} + +} // namespace sched_nr_impl +} // namespace srsenb \ No newline at end of file diff --git a/srsenb/src/stack/mac/nr/sched_nr_ue.cc b/srsenb/src/stack/mac/nr/sched_nr_ue.cc index c772978e6..9d3d0dbef 100644 --- a/srsenb/src/stack/mac/nr/sched_nr_ue.cc +++ b/srsenb/src/stack/mac/nr/sched_nr_ue.cc @@ -15,36 +15,86 @@ namespace srsenb { namespace sched_nr_impl { -bwp_ue::bwp_ue(ue_carrier& carrier_, tti_point tti_rx_) : carrier(&carrier_), tti_rx(tti_rx_), cc(carrier_.cc) {} +slot_ue::slot_ue(bool& busy_signal_, 
tti_point tti_rx_, uint32_t cc_) : + busy_signal(&busy_signal_), tti_rx(tti_rx_), cc(cc_) +{} -bwp_ue::~bwp_ue() +slot_ue::~slot_ue() { - if (carrier != nullptr) { - carrier->release(); + release(); +} + +void slot_ue::release() +{ + if (busy_signal != nullptr) { + *busy_signal = false; busy_signal = nullptr; + } } -ue_carrier::ue_carrier(uint16_t rnti_, uint32_t cc_, const sched_nr_ue_cfg& cfg_) : rnti(rnti_), cc(cc_), cfg(&cfg_) {} +/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +ue_carrier::ue_carrier(uint16_t rnti_, uint32_t cc_, const sched_nr_ue_cfg& uecfg_) : rnti(rnti_), cc(cc_), cfg(&uecfg_) +{} + +void ue_carrier::set_cfg(const sched_nr_ue_cfg& uecfg) +{ + cfg = &uecfg; +} void ue_carrier::push_feedback(srsran::move_callback callback) { pending_feedback.push_back(std::move(callback)); } -bwp_ue ue_carrier::try_reserve(tti_point tti_rx) +slot_ue ue_carrier::try_reserve(tti_point tti_rx, const sched_nr_ue_cfg& uecfg_) { - if (busy) { - return bwp_ue(); + slot_ue sfu = (busy) ? slot_ue() : slot_ue(busy, tti_rx, cc); + if (sfu.empty()) { + return sfu; } - // successfully acquired + // successfully acquired.
Process any CC-specific pending feedback busy = true; + if (cfg != &uecfg_) { + set_cfg(uecfg_); + } while (not pending_feedback.empty()) { pending_feedback.front()(*this); pending_feedback.pop_front(); } - return bwp_ue(*this, tti_rx); + if (not last_tti_rx.is_valid()) { + last_tti_rx = tti_rx; + harq_ent.new_tti(tti_rx); + } else { + while (last_tti_rx++ < tti_rx) { + harq_ent.new_tti(tti_rx); + } + } + + // set UE parameters common to all carriers + sfu.cfg = &uecfg_; + + // copy cc-specific parameters and find available HARQs + sfu.dl_cqi = dl_cqi; + sfu.ul_cqi = ul_cqi; + sfu.h_dl = harq_ent.find_pending_dl_retx(); + if (sfu.h_dl == nullptr) { + sfu.h_dl = harq_ent.find_empty_dl_harq(); + } + sfu.h_ul = harq_ent.find_pending_ul_retx(); + if (sfu.h_ul == nullptr) { + sfu.h_ul = harq_ent.find_empty_ul_harq(); + } + + if (sfu.h_dl == nullptr and sfu.h_ul == nullptr) { + // there needs to be at least one available HARQ for newtx/retx + sfu.release(); + return sfu; + } + return sfu; } +/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + ue::ue(uint16_t rnti, const sched_nr_ue_cfg& cfg) { for (uint32_t cc = 0; cc < cfg.carriers.size(); ++cc) { @@ -60,18 +110,17 @@ void ue::set_cfg(const sched_nr_ue_cfg& cfg) ue_cfgs[current_idx] = cfg; } -bwp_ue ue::try_reserve(tti_point tti_rx, uint32_t cc) +slot_ue ue::try_reserve(tti_point tti_rx, uint32_t cc) { if (carriers[cc] == nullptr) { - return bwp_ue(); + return slot_ue(); } - bwp_ue sfu = carriers[cc]->try_reserve(tti_rx); + slot_ue sfu = carriers[cc]->try_reserve(tti_rx, ue_cfgs[current_idx]); if (sfu.empty()) { - return bwp_ue(); + return slot_ue(); } // set UE-common parameters sfu.pending_sr = pending_sr; - sfu.cfg = &ue_cfgs[current_idx]; return sfu; } diff --git a/srsenb/src/stack/mac/nr/sched_nr_worker.cc b/srsenb/src/stack/mac/nr/sched_nr_worker.cc index 1f673a086..be64998ac 100644 --- a/srsenb/src/stack/mac/nr/sched_nr_worker.cc +++ 
b/srsenb/src/stack/mac/nr/sched_nr_worker.cc @@ -16,7 +16,7 @@ namespace srsenb { namespace sched_nr_impl { /// Called at the beginning of TTI in a locked context, to reserve available UE resources -void bwp_worker::start(tti_point tti_rx_) +void carrier_slot_worker::start(tti_point tti_rx_, sched_nr_res_t& bwp_result_, ue_map_t& ue_db) { srsran_assert(not running(), "scheduler worker::start() called for active worker"); // Try reserve UE cells for this worker @@ -24,63 +24,101 @@ void bwp_worker::start(tti_point tti_rx_) uint16_t rnti = ue_pair.first; ue& u = *ue_pair.second; - bwp_ue sfu0 = u.try_reserve(tti_rx, cc); - if (sfu0.empty()) { + slot_ues.insert(rnti, u.try_reserve(tti_rx, cc)); + if (slot_ues[rnti].empty()) { // Failed to synchronize because UE is being used by another worker + slot_ues.erase(rnti); continue; } - // Synchronization of UE for this {tti, cc} was successful - bwp_ues[rnti] = std::move(sfu0); + // UE acquired successfully for scheduling in this {tti, cc} } + res_grid.new_tti(tti_rx_, bwp_result_); tti_rx = tti_rx_; } -void bwp_worker::run() +void carrier_slot_worker::run() { srsran_assert(running(), "scheduler worker::run() called for non-active worker"); + + // Prioritize PDCCH scheduling for DL and UL data in a RoundRobin fashion + if ((tti_rx.to_uint() & 0x1u) == 0) { + alloc_dl_ues(); + alloc_ul_ues(); + } else { + alloc_ul_ues(); + alloc_dl_ues(); + } + + // Select the winner PDCCH allocation combination, store all the scheduling results + res_grid.generate_dcis(); } -void bwp_worker::end_tti() +void carrier_slot_worker::end_tti() { srsran_assert(running(), "scheduler worker::end() called for non-active worker"); // releases UE resources - for (bwp_ue& u : bwp_ues) { - if (not u.empty()) { - u = {}; - } - } + slot_ues.clear(); tti_rx = {}; } +void carrier_slot_worker::alloc_dl_ues() +{ + if (slot_ues.empty()) { + return; + } + slot_ue& ue = slot_ues.begin()->second; + if (ue.h_dl == nullptr) { + return; + } + + rbgmask_t 
dlmask(cfg.cells[cc].nof_rbg); + dlmask.fill(0, dlmask.size(), true); + res_grid.alloc_pdsch(ue, dlmask); +} +void carrier_slot_worker::alloc_ul_ues() +{ + if (slot_ues.empty()) { + return; + } + slot_ue& ue = slot_ues.begin()->second; + if (ue.h_ul == nullptr) { + return; + } + + rbgmask_t ulmask(cfg.cells[cc].nof_rbg); + ulmask.fill(0, ulmask.size(), true); + res_grid.alloc_pusch(ue, ulmask); +} + /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_nr_cfg& cfg_) : cfg(cfg_) +sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_nr_cfg& cfg_) : cfg(cfg_), ue_db(ue_db_) { // Note: For now, we only allow parallelism at the sector level - sf_ctxts.resize(cfg.nof_concurrent_subframes); + slot_ctxts.resize(cfg.nof_concurrent_subframes); for (size_t i = 0; i < cfg.nof_concurrent_subframes; ++i) { - sf_ctxts[i].reset(new sf_worker_ctxt()); - sem_init(&sf_ctxts[i]->sf_sem, 0, 1); - sf_ctxts[i]->workers.reserve(cfg.cells.size()); + slot_ctxts[i].reset(new slot_worker_ctxt()); + sem_init(&slot_ctxts[i]->sf_sem, 0, 1); + slot_ctxts[i]->workers.reserve(cfg.cells.size()); for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) { - sf_ctxts[i]->workers.emplace_back(cc, ue_db_); + slot_ctxts[i]->workers.emplace_back(cc, cfg); } } } sched_worker_manager::~sched_worker_manager() { - for (uint32_t sf = 0; sf < sf_ctxts.size(); ++sf) { - sem_destroy(&sf_ctxts[sf]->sf_sem); + for (uint32_t sf = 0; sf < slot_ctxts.size(); ++sf) { + sem_destroy(&slot_ctxts[sf]->sf_sem); } } -sched_worker_manager::sf_worker_ctxt& sched_worker_manager::get_sf(tti_point tti_rx) +sched_worker_manager::slot_worker_ctxt& sched_worker_manager::get_sf(tti_point tti_rx) { - return *sf_ctxts[tti_rx.to_uint() % sf_ctxts.size()]; + return *slot_ctxts[tti_rx.to_uint() % slot_ctxts.size()]; } void sched_worker_manager::reserve_workers(tti_point tti_rx_, 
srsran::span sf_result_) @@ -91,7 +129,7 @@ void sched_worker_manager::reserve_workers(tti_point tti_rx_, srsran::span(sf_worker_ctxt.workers.size()); } void sched_worker_manager::start_tti(tti_point tti_rx_) @@ -100,11 +138,11 @@ void sched_worker_manager::start_tti(tti_point tti_rx_) srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments"); for (uint32_t cc = 0; cc < sf_worker_ctxt.workers.size(); ++cc) { - sf_worker_ctxt.workers[cc].start(sf_worker_ctxt.tti_rx); + sf_worker_ctxt.workers[cc].start(sf_worker_ctxt.tti_rx, sf_worker_ctxt.sf_result[cc], ue_db); } } -bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc) +bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc, sched_nr_res_t& result) { auto& sf_worker_ctxt = get_sf(tti_rx_); srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments"); @@ -116,6 +154,9 @@ bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc) // Get {tti, cc} scheduling decision sf_worker_ctxt.workers[cc].run(); + // copy sched result + result = sf_worker_ctxt.sf_result[cc]; + // decrement the number of active workers --sf_worker_ctxt.worker_count; srsran_assert(sf_worker_ctxt.worker_count >= 0, "invalid number of calls to run_tti(tti, cc)"); @@ -132,6 +173,8 @@ void sched_worker_manager::end_tti(tti_point tti_rx_) for (auto& worker : sf_worker_ctxt.workers) { worker.end_tti(); } + + sf_worker_ctxt.sf_result = {}; sem_post(&sf_worker_ctxt.sf_sem); } diff --git a/srsenb/test/mac/nr/sched_nr_test.cc b/srsenb/test/mac/nr/sched_nr_test.cc index 338e514b6..3a2d3da13 100644 --- a/srsenb/test/mac/nr/sched_nr_test.cc +++ b/srsenb/test/mac/nr/sched_nr_test.cc @@ -112,7 +112,7 @@ void sched_nr_cfg_parallel_sf_test() int main() { - srsran::get_background_workers().set_nof_workers(4); + srsran::get_background_workers().set_nof_workers(8); srsenb::sched_nr_cfg_serialized_test(); srsenb::sched_nr_cfg_parallel_cc_test();