sched,nr: implementation of slot grid class

Francisco 2021-06-22 13:00:48 +01:00 committed by Francisco Paisana
parent 4fa27f3841
commit 407da794e1
14 changed files with 462 additions and 116 deletions

File: sched_nr.h

@@ -38,7 +38,7 @@ public:
   void new_tti(tti_point tti_rx) override;
   int  generate_sched_result(tti_point tti_rx, uint32_t cc, sched_nr_res_t& result);
-  void dl_ack_info(tti_point tti_rx, uint16_t rnti, uint32_t cc, uint32_t tb_idx, bool ack) override;
+  void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) override;
   void ul_sr_info(tti_point tti_rx, uint16_t rnti) override;

 private:
@@ -50,7 +50,7 @@ private:
   using sched_worker_manager = sched_nr_impl::sched_worker_manager;
   sched_worker_manager sched_workers;

-  std::array<std::array<sched_nr_res_t, SCHED_NR_MAX_CARRIERS>, SCHED_NR_MAX_CARRIERS> sched_results;
+  std::array<std::array<sched_nr_res_t, SCHED_NR_MAX_CARRIERS>, SCHED_NR_NOF_SUBFRAMES> sched_results;

   using ue_map_t = sched_nr_impl::ue_map_t;
   std::mutex ue_db_mutex;

File: sched_nr_common.h

@@ -13,12 +13,21 @@
 #ifndef SRSRAN_SCHED_NR_COMMON_H
 #define SRSRAN_SCHED_NR_COMMON_H

-#include "srsran/adt/circular_map.h"
+#include "sched_nr_interface.h"
+#include "srsran/adt/bounded_bitset.h"

 namespace srsenb {

 const static size_t SCHED_NR_MAX_USERS     = 4;
 const static size_t SCHED_NR_NOF_SUBFRAMES = 10;
+const static size_t SCHED_NR_MAX_NOF_RBGS  = 25;
+const static size_t SCHED_NR_NOF_HARQS     = 16;
+
+namespace sched_nr_impl {
+
+using rbgmask_t = srsran::bounded_bitset<SCHED_NR_MAX_NOF_RBGS, true>;
+
+} // namespace sched_nr_impl

 } // namespace srsenb
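
For reference, the new rbgmask_t alias models RBG occupancy as a fixed-capacity bitset over at most SCHED_NR_MAX_NOF_RBGS resource block groups. The overlap-test-then-merge pattern that the slot grid later builds on can be sketched self-contained as follows; std::bitset stands in for srsran::bounded_bitset, and all names are illustrative, not part of the commit:

#include <bitset>
#include <cstddef>
#include <iostream>
#include <string>

constexpr std::size_t MAX_NOF_RBGS = 25; // mirrors SCHED_NR_MAX_NOF_RBGS

// Stand-in for sched_nr_impl::rbgmask_t (srsran::bounded_bitset).
using rbgmask_sketch_t = std::bitset<MAX_NOF_RBGS>;

// Grants the requested RBGs only if none of them is already taken.
bool try_alloc(rbgmask_sketch_t& taken, const rbgmask_sketch_t& req)
{
  if ((taken & req).any()) {
    return false; // at least one requested RBG collides
  }
  taken |= req; // commit the allocation
  return true;
}

int main()
{
  rbgmask_sketch_t taken;
  rbgmask_sketch_t first(std::string(20, '0') + "11111"); // RBGs 0-4
  rbgmask_sketch_t second(std::string(22, '0') + "111");  // RBGs 0-2, overlaps
  std::cout << try_alloc(taken, first) << '\n';  // 1: granted
  std::cout << try_alloc(taken, second) << '\n'; // 0: rejected
}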

File: sched_nr_harq.h

@@ -13,24 +13,29 @@
 #ifndef SRSRAN_SCHED_NR_HARQ_H
 #define SRSRAN_SCHED_NR_HARQ_H

+#include "sched_nr_common.h"
 #include "srsran/common/tti_point.h"
 #include <array>

 namespace srsenb {
 namespace sched_nr_impl {

-template <size_t NofTbs>
-class harq
+class harq_proc
 {
 public:
-  harq() = default;
-  bool empty() const
-  {
-    return std::all_of(tb.begin(), tb.end(), [](const tb_t t) { return not t.active; });
-  }
+  explicit harq_proc(uint32_t id_, uint32_t max_nof_tb_ = 1) : pid(id_), max_nof_tb(max_nof_tb_) {}
+
+  bool empty() const { return not tb[0].active and not tb[1].active; }
   bool empty(uint32_t tb_idx) const { return tb[tb_idx].active; }
+  void ack_info(uint32_t tb_idx, bool ack) { tb[tb_idx].ack_state = ack; }
+  bool has_pending_retx(tti_point tti_rx) const { return not empty() and tti_tx + ack_delay <= tti_rx; }
+  bool new_tx(tti_point tti_tx, const rbgmask_t& rbgmask, uint32_t mcs, uint32_t ack_delay);
+
+  const uint32_t pid;

 private:
   struct tb_t {
     bool active = false;
@@ -40,19 +45,58 @@ private:
     uint32_t mcs = 0;
   };

-  uint32_t  id;
-  tti_point tti_tx;
-  std::array<tb_t, NofTbs> tb;
+  const uint32_t max_nof_tb;
+
+  tti_point tti_tx;
+  uint32_t  ack_delay = 0;
+  rbgmask_t rbgmask;
+  std::array<tb_t, SCHED_NR_MAX_TB> tb;
 };

 class harq_entity
 {
 public:
-  void dl_ack_info(tti_point tti_rx, uint32_t tb_idx, bool ack) {}
+  harq_entity();
+  void new_tti(tti_point tti_rx_);
+
+  void dl_ack_info(uint32_t pid, uint32_t tb_idx, bool ack) { dl_harqs[pid].ack_info(tb_idx, ack); }
+
+  harq_proc& get_dl_harq(uint32_t pid) { return dl_harqs[pid]; }
+
+  harq_proc* find_pending_dl_retx()
+  {
+    return find_dl([this](const harq_proc& h) { return h.has_pending_retx(tti_rx); });
+  }
+  harq_proc* find_pending_ul_retx()
+  {
+    return find_ul([this](const harq_proc& h) { return h.has_pending_retx(tti_rx); });
+  }
+  harq_proc* find_empty_dl_harq()
+  {
+    return find_dl([](const harq_proc& h) { return h.empty(); });
+  }
+  harq_proc* find_empty_ul_harq()
+  {
+    return find_ul([](const harq_proc& h) { return h.empty(); });
+  }

 private:
-  std::array<harq<1>, 16> dl_harqs;
-  std::array<harq<1>, 16> ul_harqs;
+  template <typename Predicate>
+  harq_proc* find_dl(Predicate p)
+  {
+    auto it = std::find_if(dl_harqs.begin(), dl_harqs.end(), p);
+    return (it == dl_harqs.end()) ? nullptr : &(*it);
+  }
+  template <typename Predicate>
+  harq_proc* find_ul(Predicate p)
+  {
+    auto it = std::find_if(ul_harqs.begin(), ul_harqs.end(), p);
+    return (it == ul_harqs.end()) ? nullptr : &(*it);
+  }
+
+  tti_point tti_rx;
+  std::vector<harq_proc> dl_harqs;
+  std::vector<harq_proc> ul_harqs;
 };

 } // namespace sched_nr_impl
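
The harq_entity lookup helpers above all reduce to a std::find_if over a vector of HARQ processes that yields a pointer, or nullptr when nothing matches. A self-contained sketch of that pattern, with a simplified stand-in for harq_proc (illustrative names, not the real types):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Simplified stand-in for harq_proc, for illustration only.
struct mini_harq {
  uint32_t pid;
  bool     active = false;
  bool     empty() const { return not active; }
};

// Same shape as harq_entity::find_dl: first process matching the predicate, or nullptr.
template <typename Predicate>
mini_harq* find_harq(std::vector<mini_harq>& harqs, Predicate p)
{
  auto it = std::find_if(harqs.begin(), harqs.end(), p);
  return (it == harqs.end()) ? nullptr : &(*it);
}

int main()
{
  std::vector<mini_harq> dl_harqs;
  for (uint32_t pid = 0; pid < 16; ++pid) { // 16 mirrors SCHED_NR_NOF_HARQS
    dl_harqs.push_back({pid});
  }
  dl_harqs[0].active = true; // pid 0 carries an ongoing transmission

  mini_harq* h = find_harq(dl_harqs, [](const mini_harq& hq) { return hq.empty(); });
  if (h != nullptr) {
    std::cout << "first empty HARQ pid: " << h->pid << '\n'; // prints 1
  }
}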

File: sched_nr_interface.h

@@ -18,10 +18,16 @@
 namespace srsenb {

 const static size_t   SCHED_NR_MAX_CARRIERS   = 4;
 const static uint16_t SCHED_NR_INVALID_RNTI   = 0;
+const static size_t   SCHED_NR_MAX_PDSCH_DATA = 16;
+const static size_t   SCHED_NR_MAX_PUSCH_DATA = 16;
+const static size_t   SCHED_NR_MAX_TB         = 2;

-struct sched_nr_cell_cfg {};
+struct sched_nr_cell_cfg {
+  uint32_t nof_prb;
+  uint32_t nof_rbg;
+};

 struct sched_nr_cfg {
   uint32_t nof_concurrent_subframes = 1;
@@ -37,9 +43,21 @@ struct sched_nr_ue_cfg {
   srsran::bounded_vector<sched_nr_ue_cc_cfg, SCHED_NR_MAX_CARRIERS> carriers;
 };

+struct sched_nr_data_t {
+  srsran::bounded_vector<uint32_t, SCHED_NR_MAX_TB> tbs;
+};
+
+struct sched_nr_dl_res_t {
+  srsran::bounded_vector<sched_nr_data_t, SCHED_NR_MAX_PDSCH_DATA> data;
+};
+
+struct sched_nr_ul_res_t {
+  srsran::bounded_vector<sched_nr_data_t, SCHED_NR_MAX_PUSCH_DATA> pusch;
+};
+
 struct sched_nr_res_t {
-  struct dl_result {};
-  struct ul_result {};
+  sched_nr_dl_res_t dl_res;
+  sched_nr_ul_res_t ul_res;
 };

 class sched_nr_interface
@@ -49,8 +67,8 @@ public:
   virtual void ue_cfg(uint16_t rnti, const sched_nr_ue_cfg& ue_cfg) = 0;
   virtual void new_tti(tti_point tti_rx) = 0;
-  virtual void dl_ack_info(tti_point tti_rx, uint16_t rnti, uint32_t cc, uint32_t tb_idx, bool ack) = 0;
+  virtual void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) = 0;
   virtual void ul_sr_info(tti_point, uint16_t rnti) = 0;
 };

 } // namespace srsenb

File: sched_nr_rb_grid.h (new)

@@ -0,0 +1,49 @@
+/**
+ *
+ * \section COPYRIGHT
+ *
+ * Copyright 2013-2021 Software Radio Systems Limited
+ *
+ * By using this file, you agree to the terms and conditions set
+ * forth in the LICENSE file which can be found at the top level of
+ * the distribution.
+ *
+ */
+
+#ifndef SRSRAN_SCHED_NR_RB_GRID_H
+#define SRSRAN_SCHED_NR_RB_GRID_H
+
+#include "sched_nr_interface.h"
+#include "sched_nr_ue.h"
+
+namespace srsenb {
+namespace sched_nr_impl {
+
+class slot_grid
+{
+public:
+  explicit slot_grid(uint32_t cc, const sched_nr_cfg& cfg_);
+
+  void new_tti(tti_point tti_rx_, sched_nr_res_t& sched_res_);
+  bool alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask);
+  bool alloc_pusch(slot_ue& ue, const rbgmask_t& dl_mask);
+  void generate_dcis();
+
+  tti_point tti_tx_dl() const { return tti_rx + TX_ENB_DELAY; }
+  tti_point tti_tx_ul() const { return tti_tx_dl() + K2; }
+
+private:
+  static const size_t K0 = 0, K1 = 4, K2 = 4;
+  const uint32_t      cc;
+  const sched_nr_cfg& cfg;
+
+  tti_point       tti_rx;
+  rbgmask_t       pdsch_mask;
+  rbgmask_t       pusch_mask;
+  sched_nr_res_t* sched_res = nullptr;
+};
+
+} // namespace sched_nr_impl
+} // namespace srsenb
+
+#endif // SRSRAN_SCHED_NR_RB_GRID_H
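
slot_grid derives all of its timing from the received TTI: the DL transmission goes out TX_ENB_DELAY slots after tti_rx, and the matching PUSCH a further K2 slots later. A minimal sketch of that arithmetic with bare counters instead of tti_point; the TX_ENB_DELAY value of 4 is an assumption here for illustration, the real constant is defined elsewhere in srsRAN:

#include <cstdint>
#include <iostream>

// Assumed value for illustration; the real TX_ENB_DELAY lives in srsRAN's common headers.
constexpr uint32_t TX_ENB_DELAY = 4;
constexpr uint32_t K2           = 4; // matches the constant in slot_grid

// Mirrors slot_grid::tti_tx_dl()/tti_tx_ul(), with a bare counter instead of tti_point.
uint32_t tti_tx_dl(uint32_t tti_rx) { return tti_rx + TX_ENB_DELAY; }
uint32_t tti_tx_ul(uint32_t tti_rx) { return tti_tx_dl(tti_rx) + K2; }

int main()
{
  uint32_t tti_rx = 100;
  std::cout << "DL tx at " << tti_tx_dl(tti_rx) << '\n'; // 104
  std::cout << "UL tx at " << tti_tx_ul(tti_rx) << '\n'; // 108
}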

File: sched_nr_ue.h

@@ -16,6 +16,7 @@
 #include "sched_nr_common.h"
 #include "sched_nr_harq.h"
 #include "sched_nr_interface.h"
+#include "srsran/adt/circular_map.h"
 #include "srsran/adt/move_callback.h"
 #include "srsran/adt/pool/cached_alloc.h"
@@ -25,51 +26,61 @@ namespace sched_nr_impl {

 class ue_carrier;

-class bwp_ue
+class slot_ue
 {
 public:
-  bwp_ue() = default;
-  explicit bwp_ue(ue_carrier& carrier_, tti_point tti_rx_);
-  ~bwp_ue();
-  bwp_ue(bwp_ue&& other) noexcept : carrier(other.carrier) { other.carrier = nullptr; }
-  bwp_ue& operator=(bwp_ue&& other) noexcept
-  {
-    carrier       = other.carrier;
-    other.carrier = nullptr;
-    return *this;
-  }
-  bool empty() const { return carrier == nullptr; }
+  slot_ue() = default;
+  explicit slot_ue(bool& busy_signal, tti_point tti_rx_, uint32_t cc);
+  ~slot_ue();
+  slot_ue(slot_ue&&) noexcept = default;
+  slot_ue& operator=(slot_ue&&) noexcept = default;
+  bool empty() const { return busy_signal == nullptr; }
+  void release();

   tti_point tti_rx;
   uint32_t  cc = SCHED_NR_MAX_CARRIERS;
+  // UE parameters common to all sectors
   const sched_nr_ue_cfg* cfg = nullptr;
   bool pending_sr;
+  // UE parameters that are sector specific
+  uint32_t   dl_cqi;
+  uint32_t   ul_cqi;
+  harq_proc* h_dl = nullptr;
+  harq_proc* h_ul = nullptr;

 private:
-  ue_carrier* carrier = nullptr;
+  struct noop {
+    void operator()(bool* ptr) {}
+  };
+  std::unique_ptr<bool, noop> busy_signal;
 };

 class ue_carrier
 {
 public:
   ue_carrier(uint16_t rnti, uint32_t cc, const sched_nr_ue_cfg& cfg);
-  bwp_ue try_reserve(tti_point tti_rx);
+  slot_ue try_reserve(tti_point tti_rx, const sched_nr_ue_cfg& cfg);
   void push_feedback(srsran::move_callback<void(ue_carrier&)> callback);
+  void set_cfg(const sched_nr_ue_cfg& uecfg);

   const uint16_t rnti;
   const uint32_t cc;

+  // Channel state
+  uint32_t dl_cqi = 1;
+  uint32_t ul_cqi = 0;
+
   harq_entity harq_ent;

 private:
-  friend class bwp_ue;
-  void release() { busy = false; }
-
-  const sched_nr_ue_cfg* cfg;
+  const sched_nr_ue_cfg* cfg = nullptr;
+  bool      busy{false};
+  tti_point last_tti_rx;

   srsran::deque<srsran::move_callback<void(ue_carrier&)> > pending_feedback;
-  bool busy{false};
 };

 class ue
@@ -77,7 +88,7 @@ class ue
 public:
   ue(uint16_t rnti, const sched_nr_ue_cfg& cfg);

-  bwp_ue try_reserve(tti_point tti_rx, uint32_t cc);
+  slot_ue try_reserve(tti_point tti_rx, uint32_t cc);

   void set_cfg(const sched_nr_ue_cfg& cfg);
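
slot_ue replaces the raw ue_carrier back-pointer with a pointer to the carrier's busy flag, held in a unique_ptr with a no-op deleter. That trick buys default move semantics (the moved-from handle becomes empty) without owning the flag, so exactly one live handle clears it on destruction. A self-contained sketch of the idiom, with illustrative names:

#include <iostream>
#include <memory>

// Moveable, non-owning handle that marks a resource busy for its lifetime.
class busy_handle
{
public:
  busy_handle() = default;
  explicit busy_handle(bool& busy_signal) : busy(&busy_signal) { *busy = true; }
  ~busy_handle() { release(); }
  busy_handle(busy_handle&&) noexcept = default;
  busy_handle& operator=(busy_handle&&) noexcept = default;

  bool empty() const { return busy == nullptr; }
  void release()
  {
    if (busy != nullptr) {
      *busy = false;
    }
  }

private:
  struct noop {
    void operator()(bool*) {} // never delete: the flag is owned elsewhere
  };
  // unique_ptr gives us "null out on move" for free.
  std::unique_ptr<bool, noop> busy;
};

int main()
{
  bool busy = false;
  {
    busy_handle h(busy);
    std::cout << busy << '\n'; // 1: reserved
    busy_handle h2 = std::move(h);
    std::cout << h.empty() << '\n'; // 1: moved-from handle is empty
  } // h2 destroyed here
  std::cout << busy << '\n'; // 0: released exactly once
}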

File: sched_nr_worker.h

@@ -14,6 +14,7 @@
 #define SRSRAN_SCHED_NR_WORKER_H

 #include "sched_nr_common.h"
+#include "sched_nr_rb_grid.h"
 #include "sched_nr_ue.h"
 #include "srsran/adt/circular_array.h"
 #include "srsran/adt/optional.h"
@@ -26,23 +27,27 @@
 namespace srsenb {
 namespace sched_nr_impl {

-class bwp_worker
+class carrier_slot_worker
 {
 public:
-  explicit bwp_worker(uint32_t cc_, ue_map_t& ue_db_) : cc(cc_), ue_db(ue_db_) {}
+  explicit carrier_slot_worker(uint32_t cc_, const sched_nr_cfg& cfg_) : cc(cc_), cfg(cfg_), res_grid(cc, cfg) {}

-  void start(tti_point tti_rx_);
+  void start(tti_point tti_rx_, sched_nr_res_t& bwp_result, ue_map_t& ue_db_);
   void run();
   void end_tti();
   bool running() const { return tti_rx.is_valid(); }

 private:
-  ue_map_t& ue_db;
+  void alloc_dl_ues();
+  void alloc_ul_ues();
+
+  const uint32_t      cc;
+  const sched_nr_cfg& cfg;

   tti_point tti_rx;
-  uint32_t  cc;
-  srsran::circular_array<bwp_ue, SCHED_NR_MAX_USERS> bwp_ues;
+  slot_grid res_grid;
+  srsran::static_circular_map<uint16_t, slot_ue, SCHED_NR_MAX_USERS> slot_ues;
 };

 class sched_worker_manager
@@ -55,22 +60,23 @@ public:
   void reserve_workers(tti_point tti_rx, srsran::span<sched_nr_res_t> sf_result_);
   void start_tti(tti_point tti_rx);
-  bool run_tti(tti_point tti_rx, uint32_t cc);
+  bool run_tti(tti_point tti_rx, uint32_t cc, sched_nr_res_t& result);
   void end_tti(tti_point tti_rx);

 private:
   const sched_nr_cfg& cfg;
+  ue_map_t&           ue_db;

-  struct sf_worker_ctxt {
+  struct slot_worker_ctxt {
     sem_t     sf_sem;
     tti_point tti_rx;
     srsran::span<sched_nr_res_t> sf_result;
     int worker_count = 0;
-    std::vector<bwp_worker> workers;
+    std::vector<carrier_slot_worker> workers;
   };
-  std::vector<std::unique_ptr<sf_worker_ctxt> > sf_ctxts;
+  std::vector<std::unique_ptr<slot_worker_ctxt> > slot_ctxts;

-  sf_worker_ctxt& get_sf(tti_point tti_rx);
+  slot_worker_ctxt& get_sf(tti_point tti_rx);
 };

 } // namespace sched_nr_impl

File: CMakeLists.txt

@@ -6,6 +6,6 @@
 # the distribution.
 #

-set(SOURCES mac_nr.cc sched_nr.cc sched_nr_ue.cc sched_nr_worker.cc)
+set(SOURCES mac_nr.cc sched_nr.cc sched_nr_ue.cc sched_nr_worker.cc sched_nr_rb_grid.cc sched_nr_harq.cc)

 add_library(srsgnb_mac STATIC ${SOURCES})

File: sched_nr.cc

@@ -15,7 +15,6 @@
 namespace srsenb {

-using sched_nr_impl::bwp_worker;
 using sched_nr_impl::sched_worker_manager;
 using sched_nr_impl::ue;
 using sched_nr_impl::ue_carrier;
@@ -100,7 +99,7 @@ void sched_nr::ue_cfg_impl(uint16_t rnti, const sched_nr_ue_cfg& uecfg)
 void sched_nr::new_tti(tti_point tti_rx)
 {
-  // Lock subframe workers to provided tti_rx
+  // Lock slot workers for provided tti_rx
   sched_workers.reserve_workers(tti_rx, sched_results[tti_rx.sf_idx()]);

   {
@@ -113,33 +112,25 @@ void sched_nr::new_tti(tti_point tti_rx)
   }
 }

+/// Generate {tti,cc} scheduling decision
 int sched_nr::generate_sched_result(tti_point tti_rx, uint32_t cc, sched_nr_res_t& result)
-{
-  // Generate {tti,cc} scheduling decision
-  run_tti(tti_rx, cc);
-
-  // copy scheduling decision result
-  result = sched_results[tti_rx.sf_idx()][cc];
-
-  return SRSRAN_SUCCESS;
-}
-
-void sched_nr::run_tti(tti_point tti_rx, uint32_t cc)
 {
   // unlocked, parallel region
-  bool all_workers_finished = sched_workers.run_tti(tti_rx, cc);
+  bool all_workers_finished = sched_workers.run_tti(tti_rx, cc, result);

   if (all_workers_finished) {
     // once all workers of the same subframe finished, synchronize sched outcome with ue_db
     std::lock_guard<std::mutex> lock(ue_db_mutex);
     sched_workers.end_tti(tti_rx);
   }
+
+  return SRSRAN_SUCCESS;
 }

-void sched_nr::dl_ack_info(tti_point tti_rx, uint16_t rnti, uint32_t cc, uint32_t tb_idx, bool ack)
+void sched_nr::dl_ack_info(uint16_t rnti, uint32_t pid, uint32_t cc, uint32_t tb_idx, bool ack)
 {
   pending_events->push_cc_feedback(
-      rnti, cc, [tti_rx, tb_idx, ack](ue_carrier& ue_cc) { ue_cc.harq_ent.dl_ack_info(tti_rx, tb_idx, ack); });
+      rnti, cc, [pid, tb_idx, ack](ue_carrier& ue_cc) { ue_cc.harq_ent.dl_ack_info(pid, tb_idx, ack); });
 }

 void sched_nr::ul_sr_info(tti_point tti_rx, uint16_t rnti)
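
Note how dl_ack_info no longer needs a tti_rx argument: the HARQ pid identifies the process, and the event is captured in a callback that is only applied once a worker has exclusively reserved the carrier. A self-contained sketch of that deferred-feedback pattern, using std::function in place of srsran::move_callback and simplified, illustrative types:

#include <cstdint>
#include <deque>
#include <functional>
#include <iostream>

// Simplified stand-in for ue_carrier (illustrative, not the real class):
// feedback is queued by the stack thread and only applied once a worker
// has exclusively reserved the carrier.
struct mini_carrier {
  uint32_t last_acked_pid = 0;
  std::deque<std::function<void(mini_carrier&)>> pending_feedback;

  // stands in for harq_entity::dl_ack_info(pid, tb_idx, ack)
  void dl_ack_info(uint32_t pid, uint32_t, bool) { last_acked_pid = pid; }

  void push_feedback(std::function<void(mini_carrier&)> cb) { pending_feedback.push_back(std::move(cb)); }

  void process_feedback() // runs inside try_reserve() in the commit
  {
    while (not pending_feedback.empty()) {
      pending_feedback.front()(*this);
      pending_feedback.pop_front();
    }
  }
};

int main()
{
  mini_carrier cc0;
  uint32_t pid = 3, tb_idx = 0;
  bool     ack = true;
  // Mirrors sched_nr::dl_ack_info(): capture the event now, apply it later.
  cc0.push_feedback([pid, tb_idx, ack](mini_carrier& c) { c.dl_ack_info(pid, tb_idx, ack); });

  cc0.process_feedback();
  std::cout << "last ACKed HARQ pid: " << cc0.last_acked_pid << '\n'; // 3
}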

File: sched_nr_harq.cc (new)

@@ -0,0 +1,46 @@
+/**
+ *
+ * \section COPYRIGHT
+ *
+ * Copyright 2013-2021 Software Radio Systems Limited
+ *
+ * By using this file, you agree to the terms and conditions set
+ * forth in the LICENSE file which can be found at the top level of
+ * the distribution.
+ *
+ */
+
+#include "srsenb/hdr/stack/mac/nr/sched_nr_harq.h"
+
+namespace srsenb {
+namespace sched_nr_impl {
+
+bool harq_proc::new_tx(tti_point tti_tx_, const rbgmask_t& rbgmask_, uint32_t mcs, uint32_t ack_delay_)
+{
+  if (not empty()) {
+    return false;
+  }
+  tti_tx    = tti_tx_;
+  ack_delay = ack_delay_;
+  rbgmask   = rbgmask_;
+  tb[0].mcs = mcs;
+  return true;
+}
+
+harq_entity::harq_entity()
+{
+  dl_harqs.reserve(SCHED_NR_NOF_HARQS);
+  ul_harqs.reserve(SCHED_NR_NOF_HARQS);
+  for (uint32_t pid = 0; pid < SCHED_NR_NOF_HARQS; ++pid) {
+    dl_harqs.emplace_back(pid);
+    ul_harqs.emplace_back(pid);
+  }
+}
+
+void harq_entity::new_tti(tti_point tti_rx_)
+{
+  tti_rx = tti_rx_;
+}
+
+} // namespace sched_nr_impl
+} // namespace srsenb

File: sched_nr_rb_grid.cc (new)

@@ -0,0 +1,80 @@
+/**
+ *
+ * \section COPYRIGHT
+ *
+ * Copyright 2013-2021 Software Radio Systems Limited
+ *
+ * By using this file, you agree to the terms and conditions set
+ * forth in the LICENSE file which can be found at the top level of
+ * the distribution.
+ *
+ */
+
+#include "srsenb/hdr/stack/mac/nr/sched_nr_rb_grid.h"
+
+namespace srsenb {
+namespace sched_nr_impl {
+
+slot_grid::slot_grid(uint32_t cc_, const sched_nr_cfg& cfg_) : cc(cc_), cfg(cfg_) {}
+
+void slot_grid::new_tti(tti_point tti_rx_, sched_nr_res_t& sched_res_)
+{
+  tti_rx    = tti_rx_;
+  sched_res = &sched_res_;
+
+  pdsch_mask.reset();
+  pusch_mask.reset();
+  *sched_res = {};
+}
+
+bool slot_grid::alloc_pdsch(slot_ue& ue, const rbgmask_t& dl_mask)
+{
+  const uint32_t tbs = 100, mcs = 20;
+  if (ue.h_dl == nullptr) {
+    return false;
+  }
+  if ((pdsch_mask & dl_mask).any()) {
+    return false;
+  }
+  if (sched_res->dl_res.data.full()) {
+    return false;
+  }
+  if (not ue.h_dl->new_tx(tti_tx_dl(), dl_mask, mcs, K1)) {
+    return false;
+  }
+
+  pdsch_mask |= dl_mask;
+  sched_res->dl_res.data.emplace_back();
+  sched_nr_data_t& data = sched_res->dl_res.data.back();
+  data.tbs.resize(1);
+  data.tbs[0] = tbs;
+  return true;
+}
+
+bool slot_grid::alloc_pusch(slot_ue& ue, const rbgmask_t& ul_mask)
+{
+  const uint32_t tbs = 100, mcs = 20;
+  if ((pusch_mask & ul_mask).any()) {
+    return false;
+  }
+  if (sched_res->ul_res.pusch.full()) {
+    return false;
+  }
+  if (not ue.h_ul->new_tx(tti_tx_ul(), ul_mask, mcs, 0)) {
+    return false;
+  }
+
+  pusch_mask |= ul_mask;
+  sched_res->ul_res.pusch.emplace_back();
+  sched_nr_data_t& data = sched_res->ul_res.pusch.back();
+  data.tbs.resize(1);
+  data.tbs[0] = tbs;
+  return true;
+}
+
+void slot_grid::generate_dcis() {}
+
+} // namespace sched_nr_impl
+} // namespace srsenb
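
One slot_grid pass therefore amounts to: new_tti() clears the masks and the result, each alloc_* call checks every failure condition (missing HARQ, RBG collision, full result list) before mutating any state, and generate_dcis() finalizes. Committing only after all guards pass is what keeps the mask and the result list consistent when a grant is rejected. A compressed, self-contained sketch of that guard-then-commit flow, with standard-library stand-ins rather than the real classes:

#include <bitset>
#include <cstdint>
#include <iostream>
#include <vector>

// Minimal stand-ins, for illustration only.
using mask_t = std::bitset<25>;

struct slot_result {
  std::vector<uint32_t> dl_tbs; // one TB size per granted PDSCH
};

struct mini_grid {
  mask_t       pdsch_mask;
  slot_result* res = nullptr;

  void new_tti(slot_result& r)
  {
    res  = &r;
    *res = {};
    pdsch_mask.reset();
  }

  // Same guard order as slot_grid::alloc_pdsch: test every failure
  // condition first, mutate state only when the grant is certain.
  bool alloc_pdsch(bool have_harq, const mask_t& dl_mask)
  {
    const uint32_t tbs = 100; // placeholder value, as in the commit
    if (not have_harq) {
      return false; // no HARQ process available
    }
    if ((pdsch_mask & dl_mask).any()) {
      return false; // RBG collision with an earlier grant
    }
    pdsch_mask |= dl_mask;
    res->dl_tbs.push_back(tbs);
    return true;
  }
};

int main()
{
  slot_result r;
  mini_grid   grid;
  grid.new_tti(r);
  std::cout << grid.alloc_pdsch(true, mask_t(0x1F)) << '\n'; // 1: granted
  std::cout << grid.alloc_pdsch(true, mask_t(0x07)) << '\n'; // 0: overlap
  std::cout << r.dl_tbs.size() << '\n';                      // 1
}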

File: sched_nr_ue.cc

@@ -15,36 +15,86 @@
 namespace srsenb {
 namespace sched_nr_impl {

-bwp_ue::bwp_ue(ue_carrier& carrier_, tti_point tti_rx_) : carrier(&carrier_), tti_rx(tti_rx_), cc(carrier_.cc) {}
+slot_ue::slot_ue(bool& busy_signal_, tti_point tti_rx_, uint32_t cc_) :
+  busy_signal(&busy_signal_), tti_rx(tti_rx_), cc(cc_)
+{}

-bwp_ue::~bwp_ue()
+slot_ue::~slot_ue()
 {
-  if (carrier != nullptr) {
-    carrier->release();
-  }
+  release();
+}
+
+void slot_ue::release()
+{
+  if (busy_signal != nullptr) {
+    *busy_signal = false;
+  }
 }

-ue_carrier::ue_carrier(uint16_t rnti_, uint32_t cc_, const sched_nr_ue_cfg& cfg_) : rnti(rnti_), cc(cc_), cfg(&cfg_) {}
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ue_carrier::ue_carrier(uint16_t rnti_, uint32_t cc_, const sched_nr_ue_cfg& uecfg_) : rnti(rnti_), cc(cc_), cfg(&uecfg_)
+{}
+
+void ue_carrier::set_cfg(const sched_nr_ue_cfg& uecfg)
+{
+  cfg = &uecfg;
+}

 void ue_carrier::push_feedback(srsran::move_callback<void(ue_carrier&)> callback)
 {
   pending_feedback.push_back(std::move(callback));
 }

-bwp_ue ue_carrier::try_reserve(tti_point tti_rx)
+slot_ue ue_carrier::try_reserve(tti_point tti_rx, const sched_nr_ue_cfg& uecfg_)
 {
-  if (busy) {
-    return bwp_ue();
+  slot_ue sfu = (busy) ? slot_ue() : slot_ue(busy, tti_rx, cc);
+  if (sfu.empty()) {
+    return sfu;
   }
-  // successfully acquired
+  // successfully acquired. Process any CC-specific pending feedback
   busy = true;
+  if (cfg != &uecfg_) {
+    set_cfg(uecfg_);
+  }
   while (not pending_feedback.empty()) {
     pending_feedback.front()(*this);
     pending_feedback.pop_front();
   }
-  return bwp_ue(*this, tti_rx);
+  if (not last_tti_rx.is_valid()) {
+    last_tti_rx = tti_rx;
+    harq_ent.new_tti(tti_rx);
+  } else {
+    while (last_tti_rx++ < tti_rx) {
+      harq_ent.new_tti(tti_rx);
+    }
+  }
+
+  // set UE parameters common to all carriers
+  sfu.cfg = &uecfg_;
+
+  // copy cc-specific parameters and find available HARQs
+  sfu.dl_cqi = dl_cqi;
+  sfu.ul_cqi = ul_cqi;
+  sfu.h_dl   = harq_ent.find_pending_dl_retx();
+  if (sfu.h_dl == nullptr) {
+    sfu.h_dl = harq_ent.find_empty_dl_harq();
+  }
+  sfu.h_ul = harq_ent.find_pending_ul_retx();
+  if (sfu.h_ul == nullptr) {
+    sfu.h_ul = harq_ent.find_empty_ul_harq();
+  }
+  if (sfu.h_dl == nullptr and sfu.h_ul == nullptr) {
+    // there needs to be at least one available HARQ for newtx/retx
+    sfu.release();
+    return sfu;
+  }
+  return sfu;
 }

+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
 ue::ue(uint16_t rnti, const sched_nr_ue_cfg& cfg)
 {
   for (uint32_t cc = 0; cc < cfg.carriers.size(); ++cc) {
@@ -60,18 +110,17 @@ void ue::set_cfg(const sched_nr_ue_cfg& cfg)
   ue_cfgs[current_idx] = cfg;
 }

-bwp_ue ue::try_reserve(tti_point tti_rx, uint32_t cc)
+slot_ue ue::try_reserve(tti_point tti_rx, uint32_t cc)
 {
   if (carriers[cc] == nullptr) {
-    return bwp_ue();
+    return slot_ue();
   }
-  bwp_ue sfu = carriers[cc]->try_reserve(tti_rx);
+  slot_ue sfu = carriers[cc]->try_reserve(tti_rx, ue_cfgs[current_idx]);
   if (sfu.empty()) {
-    return bwp_ue();
+    return slot_ue();
   }
   // set UE-common parameters
   sfu.pending_sr = pending_sr;
-  sfu.cfg        = &ue_cfgs[current_idx];
   return sfu;
 }

File: sched_nr_worker.cc

@@ -16,7 +16,7 @@ namespace srsenb {
 namespace sched_nr_impl {

 /// Called at the beginning of TTI in a locked context, to reserve available UE resources
-void bwp_worker::start(tti_point tti_rx_)
+void carrier_slot_worker::start(tti_point tti_rx_, sched_nr_res_t& bwp_result_, ue_map_t& ue_db)
 {
   srsran_assert(not running(), "scheduler worker::start() called for active worker");
   // Try reserve UE cells for this worker
@@ -24,63 +24,101 @@ void bwp_worker::start(tti_point tti_rx_)
     uint16_t rnti = ue_pair.first;
     ue&      u    = *ue_pair.second;
-    bwp_ue sfu0 = u.try_reserve(tti_rx, cc);
-    if (sfu0.empty()) {
+    slot_ues.insert(rnti, u.try_reserve(tti_rx, cc));
+    if (slot_ues[rnti].empty()) {
       // Failed to synchronize because UE is being used by another worker
+      slot_ues.erase(rnti);
       continue;
     }
-    // Synchronization of UE for this {tti, cc} was successful
-    bwp_ues[rnti] = std::move(sfu0);
+    // UE acquired successfully for scheduling in this {tti, cc}
   }

+  res_grid.new_tti(tti_rx_, bwp_result_);
   tti_rx = tti_rx_;
 }

-void bwp_worker::run()
+void carrier_slot_worker::run()
 {
   srsran_assert(running(), "scheduler worker::run() called for non-active worker");
+
+  // Prioritize PDCCH scheduling for DL and UL data in a RoundRobin fashion
+  if ((tti_rx.to_uint() & 0x1u) == 0) {
+    alloc_dl_ues();
+    alloc_ul_ues();
+  } else {
+    alloc_ul_ues();
+    alloc_dl_ues();
+  }
+
+  // Select the winner PDCCH allocation combination, store all the scheduling results
+  res_grid.generate_dcis();
 }

-void bwp_worker::end_tti()
+void carrier_slot_worker::end_tti()
 {
   srsran_assert(running(), "scheduler worker::end() called for non-active worker");

   // releases UE resources
-  for (bwp_ue& u : bwp_ues) {
-    if (not u.empty()) {
-      u = {};
-    }
-  }
+  slot_ues.clear();

   tti_rx = {};
 }

+void carrier_slot_worker::alloc_dl_ues()
+{
+  if (slot_ues.empty()) {
+    return;
+  }
+  slot_ue& ue = slot_ues.begin()->second;
+  if (ue.h_dl == nullptr) {
+    return;
+  }
+
+  rbgmask_t dlmask(cfg.cells[cc].nof_rbg);
+  dlmask.fill(0, dlmask.size(), true);
+  res_grid.alloc_pdsch(ue, dlmask);
+}
+
+void carrier_slot_worker::alloc_ul_ues()
+{
+  if (slot_ues.empty()) {
+    return;
+  }
+  slot_ue& ue = slot_ues.begin()->second;
+  if (ue.h_ul == nullptr) {
+    return;
+  }
+
+  rbgmask_t ulmask(cfg.cells[cc].nof_rbg);
+  ulmask.fill(0, ulmask.size(), true);
+  res_grid.alloc_pusch(ue, ulmask);
+}
+
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

-sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_nr_cfg& cfg_) : cfg(cfg_)
+sched_worker_manager::sched_worker_manager(ue_map_t& ue_db_, const sched_nr_cfg& cfg_) : cfg(cfg_), ue_db(ue_db_)
 {
   // Note: For now, we only allow parallelism at the sector level
-  sf_ctxts.resize(cfg.nof_concurrent_subframes);
+  slot_ctxts.resize(cfg.nof_concurrent_subframes);
   for (size_t i = 0; i < cfg.nof_concurrent_subframes; ++i) {
-    sf_ctxts[i].reset(new sf_worker_ctxt());
-    sem_init(&sf_ctxts[i]->sf_sem, 0, 1);
-    sf_ctxts[i]->workers.reserve(cfg.cells.size());
+    slot_ctxts[i].reset(new slot_worker_ctxt());
+    sem_init(&slot_ctxts[i]->sf_sem, 0, 1);
+    slot_ctxts[i]->workers.reserve(cfg.cells.size());
     for (uint32_t cc = 0; cc < cfg.cells.size(); ++cc) {
-      sf_ctxts[i]->workers.emplace_back(cc, ue_db_);
+      slot_ctxts[i]->workers.emplace_back(cc, cfg);
     }
   }
 }

 sched_worker_manager::~sched_worker_manager()
 {
-  for (uint32_t sf = 0; sf < sf_ctxts.size(); ++sf) {
-    sem_destroy(&sf_ctxts[sf]->sf_sem);
+  for (uint32_t sf = 0; sf < slot_ctxts.size(); ++sf) {
+    sem_destroy(&slot_ctxts[sf]->sf_sem);
   }
 }

-sched_worker_manager::sf_worker_ctxt& sched_worker_manager::get_sf(tti_point tti_rx)
+sched_worker_manager::slot_worker_ctxt& sched_worker_manager::get_sf(tti_point tti_rx)
 {
-  return *sf_ctxts[tti_rx.to_uint() % sf_ctxts.size()];
+  return *slot_ctxts[tti_rx.to_uint() % slot_ctxts.size()];
 }

 void sched_worker_manager::reserve_workers(tti_point tti_rx_, srsran::span<sched_nr_res_t> sf_result_)
@@ -91,7 +129,7 @@ void sched_worker_manager::reserve_workers(tti_point tti_rx_, srsran::span<sched
   sf_worker_ctxt.sf_result = sf_result_;
   sf_worker_ctxt.tti_rx    = tti_rx_;
-  sf_worker_ctxt.worker_count = sf_worker_ctxt.workers.size();
+  sf_worker_ctxt.worker_count = static_cast<int>(sf_worker_ctxt.workers.size());
 }

 void sched_worker_manager::start_tti(tti_point tti_rx_)
@@ -100,11 +138,11 @@ void sched_worker_manager::start_tti(tti_point tti_rx_)
   srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");

   for (uint32_t cc = 0; cc < sf_worker_ctxt.workers.size(); ++cc) {
-    sf_worker_ctxt.workers[cc].start(sf_worker_ctxt.tti_rx);
+    sf_worker_ctxt.workers[cc].start(sf_worker_ctxt.tti_rx, sf_worker_ctxt.sf_result[cc], ue_db);
   }
 }

-bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc)
+bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc, sched_nr_res_t& result)
 {
   auto& sf_worker_ctxt = get_sf(tti_rx_);
   srsran_assert(sf_worker_ctxt.tti_rx == tti_rx_, "invalid run_tti(tti, cc) arguments");
@@ -116,6 +154,9 @@ bool sched_worker_manager::run_tti(tti_point tti_rx_, uint32_t cc)
   // Get {tti, cc} scheduling decision
   sf_worker_ctxt.workers[cc].run();

+  // copy sched result
+  result = sf_worker_ctxt.sf_result[cc];
+
   // decrement the number of active workers
   --sf_worker_ctxt.worker_count;
   srsran_assert(sf_worker_ctxt.worker_count >= 0, "invalid number of calls to run_tti(tti, cc)");
@@ -132,6 +173,8 @@ void sched_worker_manager::end_tti(tti_point tti_rx_)
   for (auto& worker : sf_worker_ctxt.workers) {
     worker.end_tti();
   }
+  sf_worker_ctxt.sf_result = {};
+
   sem_post(&sf_worker_ctxt.sf_sem);
 }
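
The manager gates reuse of each slot context with a semaphore and detects completion by counting down worker_count under run_tti; the last carrier to finish is the one that triggers end_tti. A self-contained sketch of that countdown, using std::mutex where the commit relies on a POSIX semaphore plus external locking (illustrative names):

#include <iostream>
#include <mutex>

// Tracks how many per-carrier workers still have to run in this slot.
// Mirrors slot_worker_ctxt::worker_count, with a mutex standing in for
// the scheduler's synchronization.
class slot_countdown
{
public:
  void start(int nof_workers)
  {
    std::lock_guard<std::mutex> lock(m);
    worker_count = nof_workers;
  }

  // Returns true for exactly one caller: the worker that finished last.
  bool finish_one()
  {
    std::lock_guard<std::mutex> lock(m);
    --worker_count;
    return worker_count == 0;
  }

private:
  std::mutex m;
  int        worker_count = 0;
};

int main()
{
  slot_countdown ctxt;
  ctxt.start(4); // e.g. four carriers scheduled concurrently
  for (int cc = 0; cc < 4; ++cc) {
    if (ctxt.finish_one()) {
      // in the scheduler this is where end_tti() runs and the semaphore is posted
      std::cout << "all workers finished at cc=" << cc << '\n';
    }
  }
}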

File: sched_nr_test.cc

@@ -112,7 +112,7 @@ void sched_nr_cfg_parallel_sf_test()
 int main()
 {
-  srsran::get_background_workers().set_nof_workers(4);
+  srsran::get_background_workers().set_nof_workers(8);

   srsenb::sched_nr_cfg_serialized_test();
   srsenb::sched_nr_cfg_parallel_cc_test();