moved pdcch sched to separate file

Francisco 2021-02-15 13:54:10 +00:00 committed by Francisco Paisana
parent c1a1c92e1b
commit 6b3cf6c597
7 changed files with 413 additions and 370 deletions

View File

@@ -91,13 +91,16 @@ struct rbg_interval : public srslte::interval<uint32_t> {
static rbg_interval rbgmask_to_rbgs(const rbgmask_t& mask);
};
//! Struct to express a {min,...,max} range of PRBs
/// Struct to express a {min,...,max} range of PRBs
struct prb_interval : public srslte::interval<uint32_t> {
using interval::interval;
static prb_interval rbgs_to_prbs(const rbg_interval& rbgs, uint32_t cell_nof_prb);
static prb_interval riv_to_prbs(uint32_t riv, uint32_t nof_prbs, int nof_vrbs = -1);
};
/// Type of Allocation stored in PDSCH/PUSCH
enum class alloc_type_t { DL_BC, DL_PCCH, DL_RAR, DL_DATA, UL_DATA };
} // namespace srsenb
#endif // SRSLTE_SCHED_COMMON_H

View File

@@ -14,6 +14,7 @@
#define SRSLTE_SCHED_GRID_H
#include "lib/include/srslte/interfaces/sched_interface.h"
#include "sched_phy_ch/pdcch_sched.h"
#include "sched_ue.h"
#include "srslte/adt/bounded_bitset.h"
#include "srslte/common/log.h"
@@ -23,10 +24,7 @@
namespace srsenb {
//! Type of Allocation
enum class alloc_type_t { DL_BC, DL_PCCH, DL_RAR, DL_DATA, UL_DATA };
//! Result of alloc attempt
/// Error code of alloc attempt
struct alloc_outcome_t {
enum result_enum {
SUCCESS,
@@ -90,79 +88,7 @@ private:
std::array<sf_sched_result, TTIMOD_SZ> results;
};
//! Class responsible for managing a PDCCH CCE grid, namely cce allocs, and avoid collisions.
class pdcch_grid_t
{
public:
const static uint32_t MAX_CFI = 3;
struct alloc_t {
uint16_t rnti = 0;
srslte_dci_location_t dci_pos = {0, 0};
pdcch_mask_t current_mask; ///< this PDCCH alloc mask
pdcch_mask_t total_mask; ///< Accumulation of all PDCCH masks for the current solution (tree route)
};
using alloc_result_t = std::vector<const alloc_t*>;
pdcch_grid_t() : logger(srslog::fetch_basic_logger("MAC")) {}
void init(const sched_cell_params_t& cell_params_);
void new_tti(tti_point tti_rx_);
bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr);
bool set_cfi(uint32_t cfi);
// getters
uint32_t get_cfi() const { return current_cfix + 1; }
void get_allocs(alloc_result_t* vec = nullptr, pdcch_mask_t* tot_mask = nullptr, size_t idx = 0) const;
uint32_t nof_cces() const { return cc_cfg->nof_cce_table[current_cfix]; }
size_t nof_allocs() const { return dci_record_list.size(); }
size_t nof_alloc_combinations() const { return get_alloc_tree().nof_leaves(); }
std::string result_to_string(bool verbose = false) const;
private:
struct alloc_tree_t {
struct node_t {
int parent_idx;
alloc_t node;
node_t(int i, const alloc_t& a) : parent_idx(i), node(a) {}
};
// state
size_t nof_cces;
std::vector<node_t> dci_alloc_tree;
size_t prev_start = 0, prev_end = 0;
explicit alloc_tree_t(size_t nof_cces_) : nof_cces(nof_cces_) {}
size_t nof_leaves() const { return prev_end - prev_start; }
void reset();
};
struct alloc_record_t {
sched_ue* user;
uint32_t aggr_idx;
alloc_type_t alloc_type;
};
const alloc_tree_t& get_alloc_tree() const { return alloc_trees[current_cfix]; }
const sched_dci_cce_t* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const;
// PDCCH allocation algorithm
bool alloc_dci_record(const alloc_record_t& record, uint32_t cfix);
static bool add_tree_node_leaves(alloc_tree_t& tree,
int node_idx,
const alloc_record_t& dci_record,
const sched_dci_cce_t& dci_locs,
tti_point tti_tx_dl);
// consts
const sched_cell_params_t* cc_cfg = nullptr;
srslog::basic_logger& logger;
// tti vars
tti_point tti_rx;
uint32_t current_cfix = 0;
std::vector<alloc_tree_t> alloc_trees; ///< List of PDCCH alloc trees, where index is the cfi index
std::vector<alloc_record_t> dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far
};
//! manages a subframe grid resources, namely CCE and DL/UL RB allocations
/// Manages a subframe grid's resources, namely CCE and DL/UL RB allocations
class sf_grid_t
{
public:
@@ -183,10 +109,10 @@ public:
bool find_ul_alloc(uint32_t L, prb_interval* alloc) const;
// getters
const rbgmask_t& get_dl_mask() const { return dl_mask; }
const prbmask_t& get_ul_mask() const { return ul_mask; }
uint32_t get_cfi() const { return pdcch_alloc.get_cfi(); }
const pdcch_grid_t& get_pdcch_grid() const { return pdcch_alloc; }
const rbgmask_t& get_dl_mask() const { return dl_mask; }
const prbmask_t& get_ul_mask() const { return ul_mask; }
uint32_t get_cfi() const { return pdcch_alloc.get_cfi(); }
const pdcch_sched& get_pdcch_grid() const { return pdcch_alloc; }
private:
alloc_outcome_t alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user = nullptr);
@@ -198,7 +124,7 @@ private:
uint32_t si_n_rbg = 0, rar_n_rbg = 0;
// derived
pdcch_grid_t pdcch_alloc = {};
pdcch_sched pdcch_alloc = {};
// internal state
tti_point tti_rx;
@@ -302,14 +228,14 @@ public:
private:
ctrl_code_t alloc_dl_ctrl(uint32_t aggr_lvl, uint32_t tbs_bytes, uint16_t rnti);
int generate_format1a(prb_interval prb_range, uint32_t tbs, uint32_t rv, uint16_t rnti, srslte_dci_dl_t* dci);
void set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result, sched_interface::dl_sched_res_t* dl_result);
void set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result, sched_interface::dl_sched_res_t* dl_result);
void set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result,
sched_ue_list& ue_list);
void set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result,
sched_interface::ul_sched_res_t* ul_result,
sched_ue_list& ue_list);
void set_bc_sched_result(const pdcch_sched::alloc_result_t& dci_result, sched_interface::dl_sched_res_t* dl_result);
void set_rar_sched_result(const pdcch_sched::alloc_result_t& dci_result, sched_interface::dl_sched_res_t* dl_result);
void set_dl_data_sched_result(const pdcch_sched::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result,
sched_ue_list& ue_list);
void set_ul_sched_result(const pdcch_sched::alloc_result_t& dci_result,
sched_interface::ul_sched_res_t* ul_result,
sched_ue_list& ue_list);
// consts
const sched_cell_params_t* cc_cfg = nullptr;

View File

@@ -0,0 +1,96 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "../sched_common.h"
#ifndef SRSLTE_PDCCH_SCHED_H
#define SRSLTE_PDCCH_SCHED_H
namespace srsenb {
class sched_ue;
/// Class responsible for managing a PDCCH CCE grid, namely CCE allocations, and for avoiding collisions.
class pdcch_sched
{
public:
const static uint32_t MAX_CFI = 3;
struct alloc_t {
uint16_t rnti = 0;
srslte_dci_location_t dci_pos = {0, 0};
pdcch_mask_t current_mask; ///< this PDCCH alloc mask
pdcch_mask_t total_mask; ///< Accumulation of all PDCCH masks for the current solution (tree route)
};
using alloc_result_t = std::vector<const alloc_t*>;
pdcch_sched() : logger(srslog::fetch_basic_logger("MAC")) {}
void init(const sched_cell_params_t& cell_params_);
void new_tti(tti_point tti_rx_);
bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr);
bool set_cfi(uint32_t cfi);
// getters
uint32_t get_cfi() const { return current_cfix + 1; }
void get_allocs(alloc_result_t* vec = nullptr, pdcch_mask_t* tot_mask = nullptr, size_t idx = 0) const;
uint32_t nof_cces() const { return cc_cfg->nof_cce_table[current_cfix]; }
size_t nof_allocs() const { return dci_record_list.size(); }
size_t nof_alloc_combinations() const { return get_alloc_tree().nof_leaves(); }
std::string result_to_string(bool verbose = false) const;
private:
struct alloc_tree_t {
struct node_t {
int parent_idx;
alloc_t node;
node_t(int i, const alloc_t& a) : parent_idx(i), node(a) {}
};
// state
size_t nof_cces;
std::vector<node_t> dci_alloc_tree;
size_t prev_start = 0, prev_end = 0;
explicit alloc_tree_t(size_t nof_cces_) : nof_cces(nof_cces_) {}
size_t nof_leaves() const { return prev_end - prev_start; }
void reset();
};
struct alloc_record_t {
sched_ue* user;
uint32_t aggr_idx;
alloc_type_t alloc_type;
};
const alloc_tree_t& get_alloc_tree() const { return alloc_trees[current_cfix]; }
const sched_dci_cce_t* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const;
// PDCCH allocation algorithm
bool alloc_dci_record(const alloc_record_t& record, uint32_t cfix);
static bool add_tree_node_leaves(alloc_tree_t& tree,
int node_idx,
const alloc_record_t& dci_record,
const sched_dci_cce_t& dci_locs,
tti_point tti_tx_dl);
// consts
const sched_cell_params_t* cc_cfg = nullptr;
srslog::basic_logger& logger;
// tti vars
tti_point tti_rx;
uint32_t current_cfix = 0;
std::vector<alloc_tree_t> alloc_trees; ///< List of PDCCH alloc trees, where index is the cfi index
std::vector<alloc_record_t> dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far
};
} // namespace srsenb
#endif // SRSLTE_PDCCH_SCHED_H
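A minimal usage sketch of the relocated class, assuming the surrounding scheduler supplies a configured sched_cell_params_t, the current TTI and a UE context; the driver function below (pdcch_usage_sketch) is hypothetical and not part of this commit:

#include "srsenb/hdr/stack/mac/sched_phy_ch/pdcch_sched.h"

// Hypothetical driver; cell_params, tti_rx and ue are assumed to be
// provided by the surrounding scheduler code.
void pdcch_usage_sketch(const srsenb::sched_cell_params_t& cell_params,
                        srsenb::tti_point                  tti_rx,
                        srsenb::sched_ue&                  ue)
{
  using namespace srsenb;

  pdcch_sched pdcch;
  pdcch.init(cell_params); // one-time setup per carrier
  pdcch.new_tti(tti_rx);   // resets the per-CFI allocation trees

  // Place a RAR DCI and a DL data DCI; alloc_dci() raises the CFI
  // internally when the current CCE space cannot fit a new allocation.
  if (pdcch.alloc_dci(alloc_type_t::DL_RAR, 2) and
      pdcch.alloc_dci(alloc_type_t::DL_DATA, 1, &ue)) {
    pdcch_sched::alloc_result_t dci_result;
    pdcch_mask_t                total_mask;
    pdcch.get_allocs(&dci_result, &total_mask, 0); // pick combination 0
  }
}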

View File

@@ -9,7 +9,7 @@
add_subdirectory(schedulers)
set(SOURCES mac.cc ue.cc sched.cc sched_carrier.cc sched_grid.cc sched_ue_ctrl/sched_harq.cc sched_ue.cc
sched_ue_ctrl/sched_lch.cc sched_ue_ctrl/sched_ue_cell.cc sched_helpers.cc)
sched_ue_ctrl/sched_lch.cc sched_ue_ctrl/sched_ue_cell.cc sched_phy_ch/pdcch_sched.cc sched_helpers.cc)
add_library(srsenb_mac STATIC ${SOURCES} $<TARGET_OBJECTS:mac_schedulers>)
if(ENABLE_5GNR)

View File

@@ -117,268 +117,6 @@ cc_sched_result* sched_result_list::get_cc(srslte::tti_point tti_rx, uint32_t en
/*******************************************************
* PDCCH Allocation Methods
*******************************************************/
void pdcch_grid_t::alloc_tree_t::reset()
{
prev_start = 0;
prev_end = 0;
dci_alloc_tree.clear();
}
void pdcch_grid_t::init(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
// init alloc trees
alloc_trees.reserve(cc_cfg->sched_cfg->max_nof_ctrl_symbols);
for (uint32_t i = 0; i < cc_cfg->sched_cfg->max_nof_ctrl_symbols; ++i) {
alloc_trees.emplace_back(cc_cfg->nof_cce_table[i]);
}
}
void pdcch_grid_t::new_tti(tti_point tti_rx_)
{
tti_rx = tti_rx_;
// Reset back all CFIs
for (auto& t : alloc_trees) {
t.reset();
}
dci_record_list.clear();
current_cfix = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1;
}
const sched_dci_cce_t* pdcch_grid_t::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const
{
switch (alloc_type) {
case alloc_type_t::DL_BC:
return &cc_cfg->common_locations[cfix];
case alloc_type_t::DL_PCCH:
return &cc_cfg->common_locations[cfix];
case alloc_type_t::DL_RAR:
return &cc_cfg->rar_locations[cfix][to_tx_dl(tti_rx).sf_idx()];
case alloc_type_t::DL_DATA:
return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx());
case alloc_type_t::UL_DATA:
return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx());
default:
break;
}
return nullptr;
}
bool pdcch_grid_t::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user)
{
// TODO: Make the alloc tree update lazy
alloc_record_t record{.user = user, .aggr_idx = aggr_idx, .alloc_type = alloc_type};
// Try to allocate user in PDCCH for given CFI. If it fails, increment CFI.
uint32_t first_cfi = get_cfi();
bool success;
do {
success = alloc_dci_record(record, get_cfi() - 1);
} while (not success and get_cfi() < cc_cfg->sched_cfg->max_nof_ctrl_symbols and set_cfi(get_cfi() + 1));
if (not success) {
// DCI allocation failed. go back to original CFI
if (get_cfi() != first_cfi and not set_cfi(first_cfi)) {
logger.error("SCHED: Failed to return back to original PDCCH state");
}
return false;
}
// DCI record allocation successful
dci_record_list.push_back(record);
return true;
}
bool pdcch_grid_t::alloc_dci_record(const alloc_record_t& record, uint32_t cfix)
{
bool ret = false;
auto& tree = alloc_trees[cfix];
// Get DCI Location Table
const sched_dci_cce_t* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix);
if (dci_locs == nullptr or dci_locs->nof_loc[record.aggr_idx] == 0) {
return ret;
}
if (tree.prev_end > 0) {
for (size_t j = tree.prev_start; j < tree.prev_end; ++j) {
ret |= add_tree_node_leaves(tree, (int)j, record, *dci_locs, to_tx_dl(tti_rx));
}
} else {
ret = add_tree_node_leaves(tree, -1, record, *dci_locs, to_tx_dl(tti_rx));
}
if (ret) {
tree.prev_start = tree.prev_end;
tree.prev_end = tree.dci_alloc_tree.size();
}
return ret;
}
//! Algorithm to compute a valid PDCCH allocation
bool pdcch_grid_t::add_tree_node_leaves(alloc_tree_t& tree,
int parent_node_idx,
const alloc_record_t& dci_record,
const sched_dci_cce_t& dci_locs,
tti_point tti_tx_dl)
{
bool ret = false;
alloc_t alloc;
alloc.rnti = (dci_record.user != nullptr) ? dci_record.user->get_rnti() : (uint16_t)0u;
alloc.dci_pos.L = dci_record.aggr_idx;
// get cumulative pdcch mask
pdcch_mask_t cum_mask;
if (parent_node_idx >= 0) {
cum_mask = tree.dci_alloc_tree[parent_node_idx].node.total_mask;
} else {
cum_mask.resize(tree.nof_cces);
}
uint32_t nof_locs = dci_locs.nof_loc[dci_record.aggr_idx];
for (uint32_t i = 0; i < nof_locs; ++i) {
uint32_t startpos = dci_locs.cce_start[dci_record.aggr_idx][i];
if (dci_record.alloc_type == alloc_type_t::DL_DATA and dci_record.user->pucch_sr_collision(tti_tx_dl, startpos)) {
// will cause a collision in the PUCCH
continue;
}
pdcch_mask_t alloc_mask(tree.nof_cces);
alloc_mask.fill(startpos, startpos + (1u << dci_record.aggr_idx));
if ((cum_mask & alloc_mask).any()) {
// there is collision. Try another mask
continue;
}
// Allocation successful
alloc.current_mask = alloc_mask;
alloc.total_mask = cum_mask | alloc_mask;
alloc.dci_pos.ncce = startpos;
// Prune if repetition
uint32_t j = tree.prev_end;
for (; j < tree.dci_alloc_tree.size(); ++j) {
if (tree.dci_alloc_tree[j].node.total_mask == alloc.total_mask) {
break;
}
}
if (j < tree.dci_alloc_tree.size()) {
continue;
}
// Register allocation
tree.dci_alloc_tree.emplace_back(parent_node_idx, alloc);
ret = true;
}
return ret;
}
bool pdcch_grid_t::set_cfi(uint32_t cfi)
{
if (cfi < cc_cfg->sched_cfg->min_nof_ctrl_symbols or cfi > cc_cfg->sched_cfg->max_nof_ctrl_symbols) {
logger.error("Invalid CFI value. Defaulting to current CFI.");
return false;
}
uint32_t new_cfix = cfi - 1;
if (new_cfix == current_cfix) {
return true;
}
// setup new PDCCH alloc tree
auto& new_tree = alloc_trees[new_cfix];
new_tree.reset();
if (not dci_record_list.empty()) {
// there are already PDCCH allocations
// Rebuild Allocation Tree
bool ret = true;
for (const auto& old_record : dci_record_list) {
ret &= alloc_dci_record(old_record, new_cfix);
}
if (not ret) {
// Fail to rebuild allocation tree. Go back to previous CFI
return false;
}
}
current_cfix = new_cfix;
// TODO: The estimation of the number of required prbs in metric depends on CFI. Analyse the consequences
return true;
}
void pdcch_grid_t::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
{
auto& tree = alloc_trees[current_cfix];
// if alloc tree is empty
if (tree.prev_start == tree.prev_end) {
if (vec != nullptr) {
vec->clear();
}
if (tot_mask != nullptr) {
tot_mask->resize(nof_cces());
tot_mask->reset();
}
return;
}
// set vector of allocations
if (vec != nullptr) {
vec->clear();
size_t i = tree.prev_start + idx;
while (tree.dci_alloc_tree[i].parent_idx >= 0) {
vec->push_back(&tree.dci_alloc_tree[i].node);
i = (size_t)tree.dci_alloc_tree[i].parent_idx;
}
vec->push_back(&tree.dci_alloc_tree[i].node);
std::reverse(vec->begin(), vec->end());
}
// set final cce mask
if (tot_mask != nullptr) {
*tot_mask = tree.dci_alloc_tree[tree.prev_start + idx].node.total_mask;
}
}
std::string pdcch_grid_t::result_to_string(bool verbose) const
{
auto& tree = alloc_trees[current_cfix];
std::stringstream ss;
ss << "cfi=" << get_cfi() << ", mask_size=" << nof_cces() << ", " << tree.prev_end - tree.prev_start
<< " DCI allocation combinations:\n";
// get all the possible combinations of DCI allocations
uint32_t count = 0;
for (size_t i = tree.prev_start; i < tree.prev_end; ++i) {
alloc_result_t vec;
pdcch_mask_t tot_mask;
get_allocs(&vec, &tot_mask, i - tree.prev_start);
ss << " combination " << count << ": mask=0x" << tot_mask.to_hex().c_str();
if (verbose) {
ss << ", DCI allocs:\n";
for (const auto& dci_alloc : vec) {
char hex[5];
sprintf(hex, "%x", dci_alloc->rnti);
ss << " > rnti=0x" << hex << ": " << dci_alloc->current_mask.to_hex().c_str() << " / "
<< dci_alloc->total_mask.to_hex().c_str() << "\n";
}
} else {
ss << "\n";
}
count++;
}
return ss.str();
}
/*******************************************************
* TTI resource Scheduling Methods
*******************************************************/
@@ -920,8 +658,8 @@ bool sf_sched::alloc_phich(sched_ue* user, sched_interface::ul_sched_res_t* ul_s
return false;
}
void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result)
void sf_sched::set_bc_sched_result(const pdcch_sched::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result)
{
for (const auto& bc_alloc : bc_allocs) {
sched_interface::dl_sched_bc_t* bc = &dl_result->bc[dl_result->nof_bc_elems];
@@ -987,8 +725,8 @@ void sf_sched::set_bc_sched_result(const pdcch_grid_t::alloc_result_t& dci_resul
}
}
void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result)
void sf_sched::set_rar_sched_result(const pdcch_sched::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result)
{
for (const auto& rar_alloc : rar_allocs) {
sched_interface::dl_sched_rar_t* rar = &dl_result->rar[dl_result->nof_rar_elems];
@@ -1031,9 +769,9 @@ void sf_sched::set_rar_sched_result(const pdcch_grid_t::alloc_result_t& dci_resu
}
}
void sf_sched::set_dl_data_sched_result(const pdcch_grid_t::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result,
sched_ue_list& ue_list)
void sf_sched::set_dl_data_sched_result(const pdcch_sched::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result,
sched_ue_list& ue_list)
{
for (const auto& data_alloc : data_allocs) {
sched_interface::dl_sched_data_t* data = &dl_result->data[dl_result->nof_data_elems];
@@ -1169,9 +907,9 @@ uci_pusch_t is_uci_included(const sf_sched* sf_sched,
}
}
void sf_sched::set_ul_sched_result(const pdcch_grid_t::alloc_result_t& dci_result,
sched_interface::ul_sched_res_t* ul_result,
sched_ue_list& ue_list)
void sf_sched::set_ul_sched_result(const pdcch_sched::alloc_result_t& dci_result,
sched_interface::ul_sched_res_t* ul_result,
sched_ue_list& ue_list)
{
/* Set UL data DCI locs and format */
for (const auto& ul_alloc : ul_data_allocs) {
@@ -1274,7 +1012,7 @@ void sf_sched::generate_sched_results(sched_ue_list& ue_db)
}
/* Pick one of the possible DCI masks */
pdcch_grid_t::alloc_result_t dci_result;
pdcch_sched::alloc_result_t dci_result;
// tti_alloc.get_pdcch_grid().result_to_string();
tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &cc_result->pdcch_mask);

View File

@@ -0,0 +1,280 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsenb/hdr/stack/mac/sched_phy_ch/pdcch_sched.h"
#include "srsenb/hdr/stack/mac/sched_grid.h"
namespace srsenb {
void pdcch_sched::alloc_tree_t::reset()
{
prev_start = 0;
prev_end = 0;
dci_alloc_tree.clear();
}
void pdcch_sched::init(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
// init alloc trees
alloc_trees.reserve(cc_cfg->sched_cfg->max_nof_ctrl_symbols);
for (uint32_t i = 0; i < cc_cfg->sched_cfg->max_nof_ctrl_symbols; ++i) {
alloc_trees.emplace_back(cc_cfg->nof_cce_table[i]);
}
}
void pdcch_sched::new_tti(tti_point tti_rx_)
{
tti_rx = tti_rx_;
// Reset back all CFIs
for (auto& t : alloc_trees) {
t.reset();
}
dci_record_list.clear();
current_cfix = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1;
}
const sched_dci_cce_t* pdcch_sched::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const
{
switch (alloc_type) {
case alloc_type_t::DL_BC:
return &cc_cfg->common_locations[cfix];
case alloc_type_t::DL_PCCH:
return &cc_cfg->common_locations[cfix];
case alloc_type_t::DL_RAR:
return &cc_cfg->rar_locations[cfix][to_tx_dl(tti_rx).sf_idx()];
case alloc_type_t::DL_DATA:
return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx());
case alloc_type_t::UL_DATA:
return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx());
default:
break;
}
return nullptr;
}
bool pdcch_sched::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user)
{
// TODO: Make the alloc tree update lazy
alloc_record_t record{.user = user, .aggr_idx = aggr_idx, .alloc_type = alloc_type};
// Try to allocate user in PDCCH for given CFI. If it fails, increment CFI.
uint32_t first_cfi = get_cfi();
bool success;
do {
success = alloc_dci_record(record, get_cfi() - 1);
} while (not success and get_cfi() < cc_cfg->sched_cfg->max_nof_ctrl_symbols and set_cfi(get_cfi() + 1));
if (not success) {
// DCI allocation failed. Go back to the original CFI
if (get_cfi() != first_cfi and not set_cfi(first_cfi)) {
logger.error("SCHED: Failed to return back to original PDCCH state");
}
return false;
}
// DCI record allocation successful
dci_record_list.push_back(record);
return true;
}
bool pdcch_sched::alloc_dci_record(const alloc_record_t& record, uint32_t cfix)
{
bool ret = false;
auto& tree = alloc_trees[cfix];
// Get DCI Location Table
const sched_dci_cce_t* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix);
if (dci_locs == nullptr or dci_locs->nof_loc[record.aggr_idx] == 0) {
return ret;
}
if (tree.prev_end > 0) {
for (size_t j = tree.prev_start; j < tree.prev_end; ++j) {
ret |= add_tree_node_leaves(tree, (int)j, record, *dci_locs, to_tx_dl(tti_rx));
}
} else {
ret = add_tree_node_leaves(tree, -1, record, *dci_locs, to_tx_dl(tti_rx));
}
if (ret) {
tree.prev_start = tree.prev_end;
tree.prev_end = tree.dci_alloc_tree.size();
}
return ret;
}
//! Algorithm to compute a valid PDCCH allocation
bool pdcch_sched::add_tree_node_leaves(alloc_tree_t& tree,
int parent_node_idx,
const alloc_record_t& dci_record,
const sched_dci_cce_t& dci_locs,
tti_point tti_tx_dl)
{
bool ret = false;
alloc_t alloc;
alloc.rnti = (dci_record.user != nullptr) ? dci_record.user->get_rnti() : (uint16_t)0u;
alloc.dci_pos.L = dci_record.aggr_idx;
// get cumulative pdcch mask
pdcch_mask_t cum_mask;
if (parent_node_idx >= 0) {
cum_mask = tree.dci_alloc_tree[parent_node_idx].node.total_mask;
} else {
cum_mask.resize(tree.nof_cces);
}
uint32_t nof_locs = dci_locs.nof_loc[dci_record.aggr_idx];
for (uint32_t i = 0; i < nof_locs; ++i) {
uint32_t startpos = dci_locs.cce_start[dci_record.aggr_idx][i];
if (dci_record.alloc_type == alloc_type_t::DL_DATA and dci_record.user->pucch_sr_collision(tti_tx_dl, startpos)) {
// will cause a collision in the PUCCH
continue;
}
pdcch_mask_t alloc_mask(tree.nof_cces);
alloc_mask.fill(startpos, startpos + (1u << dci_record.aggr_idx));
if ((cum_mask & alloc_mask).any()) {
// there is collision. Try another mask
continue;
}
// Allocation successful
alloc.current_mask = alloc_mask;
alloc.total_mask = cum_mask | alloc_mask;
alloc.dci_pos.ncce = startpos;
// Prune if repetition
uint32_t j = tree.prev_end;
for (; j < tree.dci_alloc_tree.size(); ++j) {
if (tree.dci_alloc_tree[j].node.total_mask == alloc.total_mask) {
break;
}
}
if (j < tree.dci_alloc_tree.size()) {
continue;
}
// Register allocation
tree.dci_alloc_tree.emplace_back(parent_node_idx, alloc);
ret = true;
}
return ret;
}
bool pdcch_sched::set_cfi(uint32_t cfi)
{
if (cfi < cc_cfg->sched_cfg->min_nof_ctrl_symbols or cfi > cc_cfg->sched_cfg->max_nof_ctrl_symbols) {
logger.error("Invalid CFI value. Defaulting to current CFI.");
return false;
}
uint32_t new_cfix = cfi - 1;
if (new_cfix == current_cfix) {
return true;
}
// setup new PDCCH alloc tree
auto& new_tree = alloc_trees[new_cfix];
new_tree.reset();
if (not dci_record_list.empty()) {
// there are already PDCCH allocations
// Rebuild Allocation Tree
bool ret = true;
for (const auto& old_record : dci_record_list) {
ret &= alloc_dci_record(old_record, new_cfix);
}
if (not ret) {
// Failed to rebuild the allocation tree. Go back to the previous CFI
return false;
}
}
current_cfix = new_cfix;
// TODO: The estimation of the number of required prbs in metric depends on CFI. Analyse the consequences
return true;
}
void pdcch_sched::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
{
auto& tree = alloc_trees[current_cfix];
// if alloc tree is empty
if (tree.prev_start == tree.prev_end) {
if (vec != nullptr) {
vec->clear();
}
if (tot_mask != nullptr) {
tot_mask->resize(nof_cces());
tot_mask->reset();
}
return;
}
// set vector of allocations
if (vec != nullptr) {
vec->clear();
size_t i = tree.prev_start + idx;
while (tree.dci_alloc_tree[i].parent_idx >= 0) {
vec->push_back(&tree.dci_alloc_tree[i].node);
i = (size_t)tree.dci_alloc_tree[i].parent_idx;
}
vec->push_back(&tree.dci_alloc_tree[i].node);
std::reverse(vec->begin(), vec->end());
}
// set final cce mask
if (tot_mask != nullptr) {
*tot_mask = tree.dci_alloc_tree[tree.prev_start + idx].node.total_mask;
}
}
std::string pdcch_sched::result_to_string(bool verbose) const
{
auto& tree = alloc_trees[current_cfix];
std::stringstream ss;
ss << "cfi=" << get_cfi() << ", mask_size=" << nof_cces() << ", " << tree.prev_end - tree.prev_start
<< " DCI allocation combinations:\n";
// get all the possible combinations of DCI allocations
uint32_t count = 0;
for (size_t i = tree.prev_start; i < tree.prev_end; ++i) {
alloc_result_t vec;
pdcch_mask_t tot_mask;
get_allocs(&vec, &tot_mask, i - tree.prev_start);
ss << " combination " << count << ": mask=0x" << tot_mask.to_hex().c_str();
if (verbose) {
ss << ", DCI allocs:\n";
for (const auto& dci_alloc : vec) {
char hex[5];
sprintf(hex, "%x", dci_alloc->rnti);
ss << " > rnti=0x" << hex << ": " << dci_alloc->current_mask.to_hex().c_str() << " / "
<< dci_alloc->total_mask.to_hex().c_str() << "\n";
}
} else {
ss << "\n";
}
count++;
}
return ss.str();
}
} // namespace srsenb
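Every allocation round above appends its viable placements as new leaves whose parent_idx points at a leaf of the previous round, and [prev_start, prev_end) brackets the leaves of the latest round, so a full DCI combination is recovered by walking parent indices from a leaf back to a root, which is the traversal get_allocs() performs. A self-contained toy model of that bookkeeping (not the project code; the alloc_t payload is reduced to a bare CCE start index):

#include <cstdio>
#include <vector>

// Toy parent-pointer tree: one node per (combination-so-far, new DCI).
struct node_t {
  int parent_idx; // -1 marks a root (first DCI of a combination)
  int cce_start;  // stand-in for the full alloc_t payload
};

int main()
{
  std::vector<node_t> tree;
  size_t prev_start = 0, prev_end = 0;

  // Round 1: two candidate CCE positions for the first DCI.
  tree.push_back({-1, 0});
  tree.push_back({-1, 4});
  prev_start = 0;
  prev_end   = tree.size(); // round-1 leaves occupy indices [0, 2)

  // Round 2: a second DCI at CCE 8 extends each round-1 leaf.
  tree.push_back({0, 8});
  tree.push_back({1, 8});
  prev_start = prev_end;
  prev_end   = tree.size(); // round-2 leaves occupy indices [2, 4)

  // Recover combination 0 by walking parents, then print it root-first,
  // mirroring pdcch_sched::get_allocs().
  std::vector<int> path;
  for (int i = (int)prev_start; i >= 0; i = tree[i].parent_idx) {
    path.push_back(tree[i].cce_start);
  }
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    printf("cce_start=%d\n", *it); // prints 0, then 8
  }
  return 0;
}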

View File

@@ -45,8 +45,8 @@ int test_pdcch_one_ue()
sched_interface::sched_args_t sched_args{};
TESTASSERT(cell_params[ENB_CC_IDX].set_cfg(ENB_CC_IDX, cell_cfg, sched_args));
pdcch_grid_t pdcch;
sched_ue sched_ue{};
pdcch_sched pdcch;
sched_ue sched_ue{};
sched_ue.init(rnti, cell_params);
sched_ue.set_cfg(ue_cfg);
@@ -66,7 +66,7 @@ int test_pdcch_one_ue()
sched_ue.set_dl_cqi(to_tx_dl(tti_rx), ENB_CC_IDX, dl_cqi);
uint32_t aggr_idx = get_aggr_level(sched_ue, PCell_IDX, cell_params);
uint32_t max_nof_cce_locs =
sched_ue.get_locations(ENB_CC_IDX, pdcch_grid_t::MAX_CFI, to_tx_dl(tti_rx).sf_idx())->nof_loc[aggr_idx];
sched_ue.get_locations(ENB_CC_IDX, pdcch_sched::MAX_CFI, to_tx_dl(tti_rx).sf_idx())->nof_loc[aggr_idx];
// allocate DL user
uint32_t prev_cfi = pdcch.get_cfi();
@@ -88,8 +88,8 @@ int test_pdcch_one_ue()
const uint32_t* dci_locs = dci_cce->cce_start[aggr_idx];
// TEST: Check the first alloc of the pdcch result (e.g. rnti, valid cce mask, etc.)
pdcch_grid_t::alloc_result_t pdcch_result;
pdcch_mask_t pdcch_mask;
pdcch_sched::alloc_result_t pdcch_result;
pdcch_mask_t pdcch_mask;
pdcch.get_allocs(&pdcch_result, &pdcch_mask, 0);
TESTASSERT(pdcch_result.size() == 1);
TESTASSERT(pdcch_result[0]->rnti == sched_ue.get_rnti());