nr,sched,test: update sched nr test to better reflect MAC-PHY FAPI

Francisco authored 2021-10-12 20:54:25 +01:00, committed by Francisco Paisana
parent cc0255e4f1
commit ea324e8cbe
9 changed files with 378 additions and 174 deletions

View File

@@ -282,10 +282,13 @@ public:
bool push_blocking(const T& t) { return push_(t, true); }
srsran::error_type<T> push_blocking(T&& t) { return push_(std::move(t), true); }
bool try_pop(T& obj) { return pop_(obj, false); }
T pop_blocking()
T pop_blocking(bool* success = nullptr)
{
T obj{};
pop_(obj, true);
T obj{};
bool ret = pop_(obj, true);
if (success != nullptr) {
*success = ret;
}
return obj;
}
bool pop_wait_until(T& obj, const std::chrono::system_clock::time_point& until) { return pop_(obj, true, &until); }
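The new success flag lets a blocking consumer tell a real element apart from the default-constructed value returned once the queue is stopped. A minimal consumer-loop sketch, assuming a srsran::dyn_blocking_queue<int> named queue and a hypothetical handle() function, with the relevant srsran ADT header included:
bool success = false;
while (true) {
  int item = queue.pop_blocking(&success);
  if (not success) {
    break; // queue.stop() was called; 'item' is only a default-constructed value
  }
  handle(item); // hypothetical per-item handler
}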
@@ -590,12 +593,6 @@ public:
base_t(push_callback, pop_callback, size)
{}
void set_size(size_t size) { base_t::circ_buffer.set_size(size); }
template <typename F>
bool apply_first(const F& func)
{
return base_t::apply_first(func);
}
};
} // namespace srsran

View File

@@ -144,6 +144,40 @@ private:
bool running = false;
};
/// Class used to create a single worker thread with an input task queue that is read by a single consumer
class task_worker : public thread
{
using task_t = srsran::move_callback<void(), default_move_callback_buffer_size, true>;
public:
task_worker(std::string thread_name_,
uint32_t queue_size,
bool start_deferred = false,
int32_t prio_ = -1,
uint32_t mask_ = 255);
task_worker(const task_worker&) = delete;
task_worker(task_worker&&) = delete;
task_worker& operator=(const task_worker&) = delete;
task_worker& operator=(task_worker&&) = delete;
~task_worker();
void stop();
void start(int32_t prio_ = -1, uint32_t mask_ = 255);
void push_task(task_t&& task);
uint32_t nof_pending_tasks() const;
private:
void run_thread() override;
// args
int32_t prio = -1;
uint32_t mask = 255;
srslog::basic_logger& logger;
srsran::dyn_blocking_queue<task_t> pending_tasks;
};
srsran::task_thread_pool& get_background_workers();
} // namespace srsran
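A minimal usage sketch of the new task_worker, based only on the interface above; the thread name, queue size, and task body are illustrative, and the relevant srsran headers are assumed to be included:
// The worker starts immediately because start_deferred defaults to false.
srsran::task_worker worker("my_worker", /* queue_size */ 16);

// Tasks are queued and executed in FIFO order on the worker thread.
worker.push_task([]() { printf("running on the worker thread\n"); });

// stop() stops the queue and joins the thread; the destructor also calls it,
// so the explicit call is optional.
worker.stop();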

View File

@@ -392,6 +392,75 @@ void task_thread_pool::worker_t::run_thread()
running = false;
}
task_worker::task_worker(std::string thread_name_,
uint32_t queue_size,
bool start_deferred,
int32_t prio_,
uint32_t mask_) :
thread(std::move(thread_name_)),
prio(prio_),
mask(mask_),
pending_tasks(queue_size),
logger(srslog::fetch_basic_logger("POOL"))
{
if (not start_deferred) {
start(prio_, mask_);
}
}
task_worker::~task_worker()
{
stop();
}
void task_worker::stop()
{
if (not pending_tasks.is_stopped()) {
pending_tasks.stop();
wait_thread_finish();
}
}
void task_worker::start(int32_t prio_, uint32_t mask_)
{
prio = prio_;
mask = mask_;
if (mask == 255) {
thread::start(prio);
} else {
thread::start_cpu_mask(prio, mask);
}
}
void task_worker::push_task(task_t&& task)
{
auto ret = pending_tasks.try_push(std::move(task));
if (ret.is_error()) {
logger.error("Cannot push any more tasks into the worker queue. Maximum size is %u",
uint32_t(pending_tasks.max_size()));
return;
}
}
uint32_t task_worker::nof_pending_tasks() const
{
return pending_tasks.size();
}
void task_worker::run_thread()
{
while (true) {
bool success;
task_t task = pending_tasks.pop_blocking(&success);
if (not success) {
break;
}
task();
}
logger.info("Task worker %s finished.", thread::get_name().c_str());
}
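Note that push_task() drops the task and logs an error when the bounded queue is full, so producers that cannot afford to lose work may want to check the backlog first. A hedged producer-side sketch, where max_backlog is an illustrative threshold:
if (worker.nof_pending_tasks() < max_backlog) {
  worker.push_task(std::move(task));
} else {
  task(); // run inline as back-pressure instead of letting push_task() drop it
}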
// Global thread pool for long, low-priority tasks
task_thread_pool& get_background_workers()
{

View File

@@ -25,7 +25,7 @@ int harq_proc::ack_info(uint32_t tb_idx, bool ack)
if (ack) {
tb[tb_idx].active = false;
}
return tb[tb_idx].tbs;
return ack ? tb[tb_idx].tbs : 0;
}
void harq_proc::new_slot(slot_point slot_rx)
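With this change ack_info() reports the transport block size only for positive acknowledgements, so a NACK no longer inflates byte counters and the TB stays active for retransmission. A hedged caller sketch, where harq and ue_metrics are illustrative names:
int acked_bytes = harq.ack_info(/* tb_idx */ 0, is_ack);
ue_metrics.dl_acked_bytes += acked_bytes; // adds tb.tbs on ACK, 0 on NACK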

View File

@@ -37,7 +37,7 @@ sched_nr_ue_sim::sched_nr_ue_sim(uint16_t rnti_,
}
}
int sched_nr_ue_sim::update(const sched_nr_cc_output_res_t& cc_out)
int sched_nr_ue_sim::update(const sched_nr_cc_result_view& cc_out)
{
update_dl_harqs(cc_out);
@@ -60,7 +60,7 @@ int sched_nr_ue_sim::update(const sched_nr_cc_output_res_t& cc_out)
return SRSRAN_SUCCESS;
}
void sched_nr_ue_sim::update_dl_harqs(const sched_nr_cc_output_res_t& cc_out)
void sched_nr_ue_sim::update_dl_harqs(const sched_nr_cc_result_view& cc_out)
{
uint32_t cc = cc_out.cc;
for (uint32_t i = 0; i < cc_out.dl_cc_result->dl_sched.pdcch_dl.size(); ++i) {
@@ -90,6 +90,8 @@ void sched_nr_ue_sim::update_dl_harqs(const sched_nr_cc_output_res_t& cc_out)
}
}
namespace detail {
sched_nr_sim_base::sched_nr_sim_base(const sched_nr_interface::sched_args_t& sched_args,
const std::vector<sched_nr_interface::cell_cfg_t>& cell_cfg_list,
std::string test_name_) :
@@ -105,6 +107,8 @@ sched_nr_sim_base::sched_nr_sim_base(const sched_nr_interface::sched_args_t&
}
sched_ptr->config(sched_args, cell_cfg_list); // call parent cfg
cc_results.resize(cell_params.size());
TESTASSERT(cell_params.size() > 0);
}
@@ -118,9 +122,11 @@ int sched_nr_sim_base::add_user(uint16_t rnti,
slot_point tti_rx,
uint32_t preamble_idx)
{
sched_ptr->ue_cfg(rnti, ue_cfg_);
std::lock_guard<std::mutex> lock(mutex);
TESTASSERT(ue_db.count(rnti) == 0);
sched_ptr->ue_cfg(rnti, ue_cfg_);
ue_db.insert(std::make_pair(rnti, sched_nr_ue_sim(rnti, ue_cfg_, current_slot_tx, preamble_idx)));
sched_nr_interface::rar_info_t rach_info{};
@@ -133,17 +139,18 @@ int sched_nr_sim_base::add_user(uint16_t rnti,
return SRSRAN_SUCCESS;
}
void sched_nr_sim_base::new_slot(slot_point slot_tx)
void sched_nr_sim_base::new_slot_(slot_point slot_tx)
{
std::unique_lock<std::mutex> lock(mutex);
while (cc_finished > 0) {
cvar.wait(lock);
}
logger.set_context(slot_tx.to_uint());
mac_logger.set_context(slot_tx.to_uint());
// Clear previous slot results
for (uint32_t cc = 0; cc < cc_results.size(); ++cc) {
cc_results[cc] = {};
}
logger.info("---------------- TTI=%d ---------------", slot_tx.to_uint());
current_slot_tx = slot_tx;
cc_finished = cell_params.size();
// Process pending feedback
for (auto& ue : ue_db) {
ue_nr_slot_events events;
set_default_slot_events(ue.second.get_ctxt(), events);
@@ -152,27 +159,42 @@ void sched_nr_sim_base::new_slot(slot_point slot_tx)
}
}
void sched_nr_sim_base::update(sched_nr_cc_output_res_t& cc_out)
void sched_nr_sim_base::generate_cc_result_(uint32_t cc)
{
std::unique_lock<std::mutex> lock(mutex);
// Run scheduler
auto tp1 = std::chrono::steady_clock::now();
sched_ptr->run_slot(current_slot_tx, cc, cc_results[cc].dl_res);
sched_ptr->get_ul_sched(current_slot_tx, cc, cc_results[cc].ul_res);
auto tp2 = std::chrono::steady_clock::now();
cc_results[cc].sched_latency_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(tp2 - tp1);
}
sim_nr_enb_ctxt_t ctxt;
ctxt = get_enb_ctxt();
void sched_nr_sim_base::process_results()
{
sched_nr_cc_result_view cc_out;
cc_out.slot = current_slot_tx;
for (uint32_t cc = 0; cc < cell_params.size(); ++cc) {
cc_out.cc = cc;
cc_out.dl_cc_result = &cc_results[cc].dl_res;
cc_out.ul_cc_result = &cc_results[cc].ul_res;
// Run common tests
test_dl_pdcch_consistency(cc_out.dl_cc_result->dl_sched.pdcch_dl);
test_pdsch_consistency(cc_out.dl_cc_result->dl_sched.pdsch);
test_ssb_scheduled_grant(cc_out.slot, ctxt.cell_params[cc_out.cc].cfg, cc_out.dl_cc_result->dl_sched.ssb);
// Run common tests
test_dl_pdcch_consistency(cc_out.dl_cc_result->dl_sched.pdcch_dl);
test_pdsch_consistency(cc_out.dl_cc_result->dl_sched.pdsch);
test_ssb_scheduled_grant(cc_out.slot, cell_params[cc_out.cc].cfg, cc_out.dl_cc_result->dl_sched.ssb);
// Run UE-dedicated tests
test_dl_sched_result(ctxt, cc_out);
// Run UE-dedicated tests
sim_nr_enb_ctxt_t ctxt;
ctxt = get_enb_ctxt();
test_dl_sched_result(ctxt, cc_out);
for (auto& u : ue_db) {
u.second.update(cc_out);
}
// Derived class-defined tests
process_cc_result(cc_results[cc]);
if (--cc_finished <= 0) {
cvar.notify_one();
// Update UE state
for (auto& u : ue_db) {
u.second.update(cc_out);
}
}
}
@@ -263,4 +285,76 @@ sim_nr_enb_ctxt_t sched_nr_sim_base::get_enb_ctxt() const
return ctxt;
}
} // namespace detail
void sched_nr_sim::new_slot(slot_point slot_tx)
{
current_slot_tx = slot_tx;
nof_cc_remaining = cell_params.size();
this->new_slot_(slot_tx);
}
void sched_nr_sim::generate_cc_result(uint32_t cc)
{
// Run scheduler
this->generate_cc_result_(cc);
if (--nof_cc_remaining > 0) {
// there are still missing CC results
return;
}
// Run tests and update UE state
this->process_results();
}
void sched_nr_sim_parallel::new_slot(slot_point slot_tx)
{
// Block concurrent or out-of-order calls to the scheduler
{
std::unique_lock<std::mutex> lock(mutex);
while (nof_cc_remaining > 0 or (current_slot_tx.valid() and current_slot_tx + 1 != slot_tx)) {
cvar.wait(lock);
}
current_slot_tx = slot_tx;
nof_cc_remaining = cell_params.size();
}
// Run common new_slot updates
this->new_slot_(slot_tx);
}
void sched_nr_sim_parallel::generate_cc_result(uint32_t cc)
{
// Run scheduler
this->generate_cc_result_(cc);
{
std::unique_lock<std::mutex> lock(mutex);
if (--nof_cc_remaining > 0) {
// there are still missing CC results
return;
}
// Run tests and update UE state
this->process_results();
}
// Notify waiting workers
cvar.notify_one();
}
sched_nr_sim_parallel::~sched_nr_sim_parallel()
{
stop();
}
void sched_nr_sim_parallel::stop()
{
std::unique_lock<std::mutex> lock(mutex);
while (nof_cc_remaining > 0) {
cvar.wait(lock);
}
}
} // namespace srsenb
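After this refactor a test drives the simulator with one new_slot() call per slot followed by one generate_cc_result() call per carrier; the consistency tests and UE-state updates in process_results() run automatically once the last carrier of the slot has been generated. A sketch of the single-threaded flow, mirroring the serialized test below (max_nof_slots and nof_carriers are illustrative):
for (uint32_t count = 0; count < max_nof_slots; ++count) {
  slot_point slot_rx(0, count % 10240);
  slot_point slot_tx = slot_rx + TX_ENB_DELAY;
  tester.new_slot(slot_tx);           // clear per-slot results, apply pending UE feedback
  for (uint32_t cc = 0; cc < nof_carriers; ++cc) {
    tester.generate_cc_result(cc);    // run the scheduler for this CC; the last CC triggers process_results()
  }
}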

View File

@@ -33,7 +33,7 @@ struct ue_nr_harq_ctxt_t {
uint32_t tbs = 0;
slot_point last_slot_tx, first_slot_tx, last_slot_ack;
};
struct sched_nr_cc_output_res_t {
struct sched_nr_cc_result_view {
slot_point slot;
uint32_t cc;
const sched_nr_interface::dl_sched_res_t* dl_cc_result;
@@ -86,21 +86,32 @@ public:
slot_point prach_slot_rx,
uint32_t preamble_idx);
int update(const sched_nr_cc_output_res_t& cc_out);
int update(const sched_nr_cc_result_view& cc_out);
const sim_nr_ue_ctxt_t& get_ctxt() const { return ctxt; }
sim_nr_ue_ctxt_t& get_ctxt() { return ctxt; }
private:
void update_dl_harqs(const sched_nr_cc_output_res_t& sf_out);
void update_dl_harqs(const sched_nr_cc_result_view& sf_out);
srslog::basic_logger& logger;
sim_nr_ue_ctxt_t ctxt;
};
namespace detail {
/// Implementation of features common to sched_nr_sim_parallel and sched_nr_sim
class sched_nr_sim_base
{
public:
struct cc_result_t {
slot_point slot_tx;
uint32_t cc;
sched_nr_interface::dl_sched_res_t dl_res;
sched_nr_interface::ul_sched_t ul_res;
std::chrono::nanoseconds sched_latency_ns;
};
sched_nr_sim_base(const sched_nr_interface::sched_args_t& sched_args,
const std::vector<sched_nr_interface::cell_cfg_t>& cell_params_,
std::string test_name);
@@ -108,8 +119,77 @@ public:
int add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_t& ue_cfg_, slot_point tti_rx, uint32_t preamble_idx);
srsran::const_span<sched_nr_impl::cell_params_t> get_cell_params() { return cell_params; }
// configurable by simulator concrete implementation
virtual void set_external_slot_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_nr_slot_events& pending_events) {}
// configurable by simulator concrete implementation
virtual void process_cc_result(const cc_result_t& cc_out) {}
protected:
void new_slot_(slot_point slot_tx);
void generate_cc_result_(uint32_t cc);
sim_nr_enb_ctxt_t get_enb_ctxt() const;
int set_default_slot_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_nr_slot_events& pending_events);
int apply_slot_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_nr_slot_events& events);
/// Runs general tests to verify result consistency, and updates UE state
void process_results();
std::string test_name;
srslog::basic_logger& logger;
srslog::basic_logger& mac_logger;
std::unique_ptr<sched_nr> sched_ptr;
std::vector<sched_nr_impl::cell_params_t> cell_params;
uint32_t nof_cc_remaining = 0;
slot_point current_slot_tx;
std::vector<cc_result_t> cc_results;
std::map<uint16_t, sched_nr_ue_sim> ue_db;
};
} // namespace detail
class sched_nr_sim_parallel : public detail::sched_nr_sim_base
{
using base_t = detail::sched_nr_sim_base;
public:
using base_t::base_t;
~sched_nr_sim_parallel();
void stop();
int add_user(uint16_t rnti, const sched_nr_interface::ue_cfg_t& ue_cfg_, slot_point tti_rx, uint32_t preamble_idx)
{
std::lock_guard<std::mutex> lock(mutex);
return base_t::add_user(rnti, ue_cfg_, tti_rx, preamble_idx);
}
slot_point get_slot_tx() const
{
std::lock_guard<std::mutex> lock(mutex);
return current_slot_tx;
}
void new_slot(slot_point slot_tx);
void update(sched_nr_cc_output_res_t& cc_out);
void generate_cc_result(uint32_t cc);
private:
mutable std::mutex mutex;
std::condition_variable cvar;
};
class sched_nr_sim : public detail::sched_nr_sim_base
{
using base_t = detail::sched_nr_sim_base;
public:
using sched_nr_sim_base::sched_nr_sim_base;
void new_slot(slot_point slot_tx);
void generate_cc_result(uint32_t cc);
sched_nr_ue_sim& at(uint16_t rnti) { return ue_db.at(rnti); }
const sched_nr_ue_sim& at(uint16_t rnti) const { return ue_db.at(rnti); }
@@ -123,45 +203,18 @@ public:
auto it = ue_db.find(rnti);
return it != ue_db.end() ? &it->second : nullptr;
}
bool user_exists(uint16_t rnti) const { return ue_db.count(rnti) > 0; }
const sched_nr_interface::ue_cfg_t* get_user_cfg(uint16_t rnti) const
{
const sched_nr_ue_sim* ret = find_rnti(rnti);
return ret == nullptr ? nullptr : &ret->get_ctxt().ue_cfg;
}
sched_nr* get_sched() { return sched_ptr.get(); }
srsran::const_span<sched_nr_impl::cell_params_t> get_cell_params() { return cell_params; }
slot_point get_slot_rx() const
{
std::lock_guard<std::mutex> lock(mutex);
return current_slot_tx;
}
sim_nr_enb_ctxt_t get_enb_ctxt() const;
bool user_exists(uint16_t rnti) const { return ue_db.count(rnti) > 0; }
sched_nr* get_sched() { return sched_ptr.get(); }
std::map<uint16_t, sched_nr_ue_sim>::iterator begin() { return ue_db.begin(); }
std::map<uint16_t, sched_nr_ue_sim>::iterator end() { return ue_db.end(); }
// configurable by simulator concrete implementation
virtual void set_external_slot_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_nr_slot_events& pending_events) {}
private:
int set_default_slot_events(const sim_nr_ue_ctxt_t& ue_ctxt, ue_nr_slot_events& pending_events);
int apply_slot_events(sim_nr_ue_ctxt_t& ue_ctxt, const ue_nr_slot_events& events);
std::string test_name;
srslog::basic_logger& logger;
srslog::basic_logger& mac_logger;
std::unique_ptr<sched_nr> sched_ptr;
std::vector<sched_nr_impl::cell_params_t> cell_params;
slot_point current_slot_tx;
int cc_finished = 0;
std::map<uint16_t, sched_nr_ue_sim> ue_db;
mutable std::mutex mutex;
std::condition_variable cvar;
slot_point get_slot_tx() const { return current_slot_tx; }
};
} // namespace srsenb
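Concrete testers derive from one of the two classes above and hook the virtual extension points set_external_slot_events() and process_cc_result(). A minimal sketch of a derived parallel tester that only accumulates scheduling latency (class and member names are illustrative):
class my_latency_tester : public sched_nr_sim_parallel
{
public:
  using sched_nr_sim_parallel::sched_nr_sim_parallel;

  // Invoked for every per-CC result produced by generate_cc_result().
  void process_cc_result(const cc_result_t& res) override
  {
    tot_latency_ns += res.sched_latency_ns.count();
    ++nof_results;
  }

  uint64_t tot_latency_ns = 0;
  uint32_t nof_results    = 0;
};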

View File

@@ -24,155 +24,114 @@ using dl_sched_t = sched_nr_interface::dl_sched_t;
static const srsran::phy_cfg_nr_t default_phy_cfg =
srsran::phy_cfg_nr_default_t{srsran::phy_cfg_nr_default_t::reference_cfg_t{}};
struct task_job_manager {
std::mutex mutex;
int res_count = 0;
int pdsch_count = 0;
srslog::basic_logger& test_logger = srslog::fetch_basic_logger("TEST");
struct slot_guard {
int count = 0;
std::condition_variable cvar;
};
srsran::bounded_vector<slot_guard, 10> slot_counter{};
class sched_nr_tester : public sched_nr_sim_parallel
{
public:
using sched_nr_sim_parallel::sched_nr_sim_parallel;
explicit task_job_manager(int max_concurrent_slots = 4) : slot_counter(max_concurrent_slots) {}
void process_cc_result(const cc_result_t& cc_result) override
{
tot_latency_sched_ns += cc_result.sched_latency_ns.count();
result_count++;
pdsch_count += cc_result.dl_res.dl_sched.pdcch_dl.size();
void start_slot(slot_point slot, int nof_sectors)
{
std::unique_lock<std::mutex> lock(mutex);
auto& sl = slot_counter[slot.to_uint() % slot_counter.size()];
while (sl.count > 0) {
sl.cvar.wait(lock);
}
sl.count = nof_sectors;
}
void finish_cc(slot_point slot,
const sched_nr_interface::dl_sched_res_t& dl_res,
const sched_nr_interface::ul_sched_t& ul_res)
{
std::unique_lock<std::mutex> lock(mutex);
TESTASSERT(dl_res.dl_sched.pdcch_dl.size() <= 1);
res_count++;
pdsch_count += dl_res.dl_sched.pdcch_dl.size();
auto& sl = slot_counter[slot.to_uint() % slot_counter.size()];
if (--sl.count == 0) {
sl.cvar.notify_one();
}
}
void wait_task_finish()
{
std::unique_lock<std::mutex> lock(mutex);
for (auto& sl : slot_counter) {
while (sl.count > 0) {
sl.cvar.wait(lock);
}
sl.count = 1;
TESTASSERT(cc_result.dl_res.dl_sched.pdcch_dl.size() <= 1);
if (srsran_duplex_nr_is_dl(&cell_params[cc_result.cc].cfg.duplex, 0, current_slot_tx.slot_idx())) {
TESTASSERT(cc_result.dl_res.dl_sched.pdcch_dl.size() == 1 or not cc_result.dl_res.dl_sched.ssb.empty());
}
}
void print_results() const
{
test_logger.info("TESTER: %f PDSCH/{slot,cc} were allocated", pdsch_count / (double)res_count);
test_logger.info("TESTER: %f PDSCH/{slot,cc} were allocated", pdsch_count / (double)result_count);
srslog::flush();
}
srslog::basic_logger& test_logger = srslog::fetch_basic_logger("TEST");
uint64_t tot_latency_sched_ns = 0;
uint32_t result_count = 0;
uint32_t pdsch_count = 0;
};
void sched_nr_cfg_serialized_test()
{
uint32_t max_nof_ttis = 1000, nof_sectors = 4;
task_job_manager tasks;
uint32_t max_nof_ttis = 1000, nof_sectors = 4;
uint16_t rnti = 0x4601;
sched_nr_interface::sched_args_t cfg;
cfg.auto_refill_buffer = true;
std::vector<sched_nr_interface::cell_cfg_t> cells_cfg = get_default_cells_cfg(nof_sectors);
sched_nr_sim_base sched_tester(cfg, cells_cfg, "Serialized Test");
sched_nr_tester tester(cfg, cells_cfg, "Serialized Test");
sched_nr_interface::ue_cfg_t uecfg = get_default_ue_cfg(nof_sectors);
sched_tester.add_user(0x46, uecfg, slot_point{0, 0}, 0);
tester.add_user(rnti, uecfg, slot_point{0, 0}, 0);
std::vector<long> count_per_cc(nof_sectors, 0);
for (uint32_t nof_slots = 0; nof_slots < max_nof_ttis; ++nof_slots) {
slot_point slot_rx(0, nof_slots % 10240);
slot_point slot_tx = slot_rx + TX_ENB_DELAY;
tasks.start_slot(slot_rx, nof_sectors);
sched_tester.new_slot(slot_tx);
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
sched_nr_interface::dl_sched_res_t dl_res;
sched_nr_interface::ul_sched_t ul_res;
auto tp1 = std::chrono::steady_clock::now();
TESTASSERT(sched_tester.get_sched()->run_slot(slot_tx, cc, dl_res) == SRSRAN_SUCCESS);
TESTASSERT(sched_tester.get_sched()->get_ul_sched(slot_tx, cc, ul_res) == SRSRAN_SUCCESS);
auto tp2 = std::chrono::steady_clock::now();
count_per_cc[cc] += std::chrono::duration_cast<std::chrono::nanoseconds>(tp2 - tp1).count();
sched_nr_cc_output_res_t out{slot_tx, cc, &dl_res, &ul_res};
sched_tester.update(out);
tasks.finish_cc(slot_rx, dl_res, ul_res);
TESTASSERT(not srsran_duplex_nr_is_dl(&cells_cfg[cc].duplex, 0, (slot_tx).slot_idx()) or
(dl_res.dl_sched.pdcch_dl.size() == 1 or not dl_res.dl_sched.ssb.empty()));
tester.new_slot(slot_tx);
for (uint32_t cc = 0; cc != cells_cfg.size(); ++cc) {
tester.generate_cc_result(cc);
}
}
tasks.print_results();
tester.stop();
tester.print_results();
// TESTASSERT(tasks.pdsch_count == (int)(max_nof_ttis * nof_sectors * 0.6));
double final_avg_usec = 0;
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
final_avg_usec += count_per_cc[cc];
}
final_avg_usec = final_avg_usec / 1000.0 / max_nof_ttis;
double final_avg_usec = tester.tot_latency_sched_ns;
final_avg_usec = final_avg_usec / 1000.0 / max_nof_ttis;
printf("Total time taken per slot: %f usec\n", final_avg_usec);
}
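Note that the serialized figure aggregates all carriers into a single per-slot number, whereas the parallel test below also divides by nof_sectors: with, for example, tot_latency_sched_ns = 4.0e8 ns over 1000 slots and 4 sectors (illustrative values), the serialized test prints 400 usec per slot while the parallel test prints 100 usec per slot per carrier.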
void sched_nr_cfg_parallel_cc_test()
{
uint32_t nof_sectors = 4;
uint32_t max_nof_ttis = 1000;
task_job_manager tasks;
uint32_t nof_sectors = 4;
uint32_t max_nof_ttis = 1000;
uint16_t rnti = 0x4601;
// Initiate CC Workers
std::vector<std::unique_ptr<srsran::task_worker> > cc_workers;
cc_workers.reserve(nof_sectors - 1);
for (uint32_t i = 0; i < nof_sectors - 1; ++i) {
fmt::memory_buffer fmtbuf;
fmt::format_to(fmtbuf, "worker{}", i);
cc_workers.emplace_back(new srsran::task_worker{to_string(fmtbuf), 10});
}
sched_nr_interface::sched_args_t cfg;
cfg.auto_refill_buffer = true;
std::vector<sched_nr_interface::cell_cfg_t> cells_cfg = get_default_cells_cfg(nof_sectors);
sched_nr_sim_base sched_tester(cfg, cells_cfg, "Parallel CC Test");
sched_nr_tester tester(cfg, cells_cfg, "Parallel CC Test");
sched_nr_interface::ue_cfg_t uecfg = get_default_ue_cfg(cells_cfg.size());
sched_tester.add_user(0x46, uecfg, slot_point{0, 0}, 0);
tester.add_user(rnti, uecfg, slot_point{0, 0}, 0);
std::array<std::atomic<long>, SRSRAN_MAX_CARRIERS> nano_count{};
for (uint32_t nof_slots = 0; nof_slots < max_nof_ttis; ++nof_slots) {
slot_point slot_rx(0, nof_slots % 10240);
slot_point slot_tx = slot_rx + TX_ENB_DELAY;
tasks.start_slot(slot_tx, nof_sectors);
sched_tester.new_slot(slot_tx);
for (uint32_t cc = 0; cc < cells_cfg.size(); ++cc) {
srsran::get_background_workers().push_task([cc, slot_tx, &tasks, &sched_tester, &nano_count]() {
sched_nr_interface::dl_sched_res_t dl_res;
sched_nr_interface::ul_sched_t ul_res;
auto tp1 = std::chrono::steady_clock::now();
TESTASSERT(sched_tester.get_sched()->run_slot(slot_tx, cc, dl_res) == SRSRAN_SUCCESS);
TESTASSERT(sched_tester.get_sched()->get_ul_sched(slot_tx, cc, ul_res) == SRSRAN_SUCCESS);
auto tp2 = std::chrono::steady_clock::now();
nano_count[cc].fetch_add(std::chrono::duration_cast<std::chrono::nanoseconds>(tp2 - tp1).count(),
std::memory_order_relaxed);
sched_nr_cc_output_res_t out{slot_tx, cc, &dl_res, &ul_res};
sched_tester.update(out);
tasks.finish_cc(slot_tx, dl_res, ul_res);
});
tester.new_slot(slot_tx);
// Run the scheduler in parallel for CCs 0..N-2 and in the same thread for the last CC
for (uint32_t cc = 0; cc != cells_cfg.size() - 1; ++cc) {
cc_workers[cc]->push_task([cc, &tester]() { tester.generate_cc_result(cc); });
}
tester.generate_cc_result(cells_cfg.size() - 1);
}
tasks.wait_task_finish();
// Wait for all jobs to finish
tester.stop();
tester.print_results();
tasks.print_results();
// TESTASSERT(tasks.pdsch_count == (int)(max_nof_ttis * nof_sectors * 0.6));
double final_avg_usec = 0;
for (uint32_t i = 0; i < nof_sectors; ++i) {
final_avg_usec += nano_count[i];
}
final_avg_usec = final_avg_usec / 1000.0 / max_nof_ttis / nof_sectors;
double final_avg_usec = tester.tot_latency_sched_ns;
final_avg_usec = final_avg_usec / 1000.0 / max_nof_ttis / nof_sectors;
printf("Total time taken per slot [usec]: %f\n", final_avg_usec);
}
@@ -181,17 +140,15 @@ void sched_nr_cfg_parallel_cc_test()
int main()
{
auto& test_logger = srslog::fetch_basic_logger("TEST");
test_logger.set_level(srslog::basic_levels::info);
auto& mac_logger = srslog::fetch_basic_logger("MAC");
mac_logger.set_level(srslog::basic_levels::info);
test_logger.set_level(srslog::basic_levels::error);
auto& mac_nr_logger = srslog::fetch_basic_logger("MAC-NR");
mac_nr_logger.set_level(srslog::basic_levels::error);
auto& pool_logger = srslog::fetch_basic_logger("POOL");
pool_logger.set_level(srslog::basic_levels::info);
pool_logger.set_level(srslog::basic_levels::debug);
// Start the log backend.
srslog::init();
srsran::get_background_workers().set_nof_workers(6);
srsenb::sched_nr_cfg_serialized_test();
srsenb::sched_nr_cfg_parallel_cc_test();
}

View File

@@ -18,7 +18,7 @@ namespace srsenb {
using namespace srsenb::sched_nr_impl;
void test_dl_sched_result(const sim_nr_enb_ctxt_t& enb_ctxt, const sched_nr_cc_output_res_t& cc_out)
void test_dl_sched_result(const sim_nr_enb_ctxt_t& enb_ctxt, const sched_nr_cc_result_view& cc_out)
{
slot_point pdcch_slot = cc_out.slot;
const pdcch_dl_list_t& pdcchs = cc_out.dl_cc_result->dl_sched.pdcch_dl;

View File

@@ -17,7 +17,7 @@
namespace srsenb {
void test_dl_sched_result(const sim_nr_enb_ctxt_t& enb_ctxt, const sched_nr_cc_output_res_t& cc_out);
void test_dl_sched_result(const sim_nr_enb_ctxt_t& enb_ctxt, const sched_nr_cc_result_view& cc_out);
}