Eliminate old BFS-based PDCCH allocator. Improves the scheduler speed by ~2x

Francisco 2021-03-18 22:35:27 +00:00 committed by Francisco Paisana
parent 6bec92fbc9
commit 2054ad3f3c
5 changed files with 60 additions and 507 deletions
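The removed allocator grew, for each CFI, a tree holding every collision-free combination of DCI positions (one leaf per combination, expanded level by level in add_tree_node_leaves()), so its work scaled with the number of stored combinations. The surviving allocator commits to one position per grant and backtracks on failure (see get_next_dfs()/alloc_dfs_node() below), stopping at the first complete solution. A minimal, self-contained sketch of that depth-first strategy follows; it uses toy types, not the srsenb code:

#include <cstdint>
#include <vector>

using cce_mask_t = uint64_t; // toy stand-in for pdcch_mask_t (up to 64 CCEs)

struct grant_t {
  std::vector<uint32_t> cand_ncce; // candidate starting CCEs for this DCI
  uint32_t              aggr_idx;  // aggregation level index; DCI spans 1 << aggr_idx CCEs
};

// Depth-first placement of grants[i..]: take the first candidate position that does
// not collide with the CCEs already used, recurse, and backtrack when a later grant
// cannot fit. Returns true as soon as one complete solution is found, instead of
// enumerating all solutions as the removed tree-based allocator did.
static bool alloc_dfs(const std::vector<grant_t>& grants, size_t i, cce_mask_t used,
                      std::vector<uint32_t>& ncce_out)
{
  if (i == grants.size()) {
    return true; // all DCIs placed
  }
  for (uint32_t ncce : grants[i].cand_ncce) {
    cce_mask_t m = ((cce_mask_t{1} << (1u << grants[i].aggr_idx)) - 1) << ncce;
    if ((used & m) != 0) {
      continue; // CCE collision; try the next candidate position
    }
    ncce_out[i] = ncce;
    if (alloc_dfs(grants, i + 1, used | m, ncce_out)) {
      return true;
    }
    // backtrack: nothing persistent to undo, just try the next candidate
  }
  return false;
}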


@@ -106,11 +106,11 @@ public:
bool find_ul_alloc(uint32_t L, prb_interval* alloc) const;
// getters
const rbgmask_t& get_dl_mask() const { return dl_mask; }
const prbmask_t& get_ul_mask() const { return ul_mask; }
uint32_t get_cfi() const { return pdcch_alloc.get_cfi(); }
const sf_cch_allocator2& get_pdcch_grid() const { return pdcch_alloc; }
uint32_t get_pucch_width() const { return pucch_nrb; }
const rbgmask_t& get_dl_mask() const { return dl_mask; }
const prbmask_t& get_ul_mask() const { return ul_mask; }
uint32_t get_cfi() const { return pdcch_alloc.get_cfi(); }
const sf_cch_allocator& get_pdcch_grid() const { return pdcch_alloc; }
uint32_t get_pucch_width() const { return pucch_nrb; }
private:
alloc_result alloc_dl(uint32_t aggr_lvl,
@@ -127,7 +127,7 @@ private:
prbmask_t pucch_mask;
// derived
sf_cch_allocator2 pdcch_alloc = {};
sf_cch_allocator pdcch_alloc = {};
// internal state
tti_point tti_rx;
@@ -219,12 +219,12 @@ public:
const sched_cell_params_t* get_cc_cfg() const { return cc_cfg; }
private:
void set_dl_data_sched_result(const sf_cch_allocator2::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result,
sched_ue_list& ue_list);
void set_ul_sched_result(const sf_cch_allocator2::alloc_result_t& dci_result,
sched_interface::ul_sched_res_t* ul_result,
sched_ue_list& ue_list);
void set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result,
sched_ue_list& ue_list);
void set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
sched_interface::ul_sched_res_t* ul_result,
sched_ue_list& ue_list);
// consts
const sched_cell_params_t* cc_cfg = nullptr;


@@ -20,7 +20,8 @@ namespace srsenb {
class sched_ue;
class sf_cch_allocator2
/// Class responsible for managing a PDCCH CCE grid, namely CCE allocs, and avoiding collisions.
class sf_cch_allocator
{
public:
const static uint32_t MAX_CFI = 3;
@@ -34,18 +35,9 @@ public:
pdcch_mask_t total_mask, current_mask;
prbmask_t total_pucch_mask;
};
// struct alloc_t {
// int8_t pucch_n_prb; ///< this PUCCH resource identifier
// uint16_t rnti = 0;
// srslte_dci_location_t dci_pos = {0, 0};
// pdcch_mask_t current_mask; ///< this allocation PDCCH mask
// pdcch_mask_t total_mask; ///< Accumulation of all PDCCH masks for the current solution (tree route)
// prbmask_t total_pucch_mask; ///< Accumulation of all PUCCH masks for the current solution/tree route
// };
using alloc_result_t = srslte::bounded_vector<const tree_node*, 16>;
sf_cch_allocator2() : logger(srslog::fetch_basic_logger("MAC")) {}
sf_cch_allocator() : logger(srslog::fetch_basic_logger("MAC")) {}
void init(const sched_cell_params_t& cell_params_);
void new_tti(tti_point tti_rx_);
@@ -95,101 +87,6 @@ private:
std::vector<alloc_record> dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far
};
/// Class responsible for managing a PDCCH CCE grid, namely CCE allocs, and avoiding collisions.
class sf_cch_allocator
{
public:
const static uint32_t MAX_CFI = 3;
struct alloc_t {
int8_t pucch_n_prb; ///< this PUCCH resource identifier
uint16_t rnti = 0;
srslte_dci_location_t dci_pos = {0, 0};
pdcch_mask_t current_mask; ///< this allocation PDCCH mask
pdcch_mask_t total_mask; ///< Accumulation of all PDCCH masks for the current solution (tree route)
prbmask_t total_pucch_mask; ///< Accumulation of all PUCCH masks for the current solution/tree route
};
using alloc_result_t = srslte::bounded_vector<const alloc_t*, 16>;
sf_cch_allocator() : logger(srslog::fetch_basic_logger("MAC")) {}
void init(const sched_cell_params_t& cell_params_);
void new_tti(tti_point tti_rx_);
/**
* Allocates DCI space in PDCCH and PUCCH, avoiding in the process collisions with other users
* @param alloc_type allocation type (e.g. DL data, UL data, ctrl)
* @param aggr_idx Aggregation level index (0..3)
* @param user UE object or null in case of broadcast/RAR/paging allocation
* @param has_pusch_grant If the UE already has a PUSCH grant allocated for UCI
* @return if the allocation was successful
*/
bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr, bool has_pusch_grant = false);
void rem_last_dci();
// getters
uint32_t get_cfi() const { return current_cfix + 1; }
void get_allocs(alloc_result_t* vec = nullptr, pdcch_mask_t* tot_mask = nullptr, size_t idx = 0) const;
uint32_t nof_cces() const { return cc_cfg->nof_cce_table[current_cfix]; }
size_t nof_allocs() const { return dci_record_list.size(); }
size_t nof_alloc_combinations() const { return get_alloc_tree().nof_leaves(); }
std::string result_to_string(bool verbose = false) const;
private:
/// DCI allocation parameters
struct alloc_record_t {
sched_ue* user;
uint32_t aggr_idx;
alloc_type_t alloc_type;
bool pusch_uci;
};
/// Tree-based data structure to store possible DCI allocation decisions
struct alloc_tree_t {
struct node_t {
int parent_idx;
alloc_t node;
node_t(int i, const alloc_t& a) : parent_idx(i), node(a) {}
};
// args
size_t nof_cces;
const sched_cell_params_t* cc_cfg = nullptr;
srslte_pucch_cfg_t* pucch_cfg_temp = nullptr;
uint32_t cfi;
// state
std::vector<node_t> dci_alloc_tree;
size_t prev_start = 0, prev_end = 0;
explicit alloc_tree_t(uint32_t this_cfi, const sched_cell_params_t& cc_params, srslte_pucch_cfg_t& pucch_cfg);
size_t nof_leaves() const { return prev_end - prev_start; }
void reset();
void get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const;
bool add_tree_node_leaves(int node_idx,
const alloc_record_t& dci_record,
const cce_cfi_position_table& dci_locs,
tti_point tti_rx);
std::string result_to_string(bool verbose) const;
};
const alloc_tree_t& get_alloc_tree() const { return alloc_trees[current_cfix]; }
const cce_cfi_position_table* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const;
// PDCCH allocation algorithm
bool set_cfi(uint32_t cfi);
bool alloc_dci_record(const alloc_record_t& record, uint32_t cfix);
// consts
const sched_cell_params_t* cc_cfg = nullptr;
srslog::basic_logger& logger;
srslte_pucch_cfg_t pucch_cfg_common = {};
// tti vars
tti_point tti_rx;
uint32_t current_cfix = 0;
uint32_t current_max_cfix = 0;
std::vector<alloc_tree_t> alloc_trees; ///< List of PDCCH alloc trees, where index is the cfi index
std::vector<alloc_record_t> dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far
};
// Helper methods
bool is_pucch_sr_collision(const srslte_pucch_cfg_t& ue_pucch_cfg, tti_point tti_tx_dl_ack, uint32_t n1_pucch);
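For orientation, a hypothetical driver for one subframe built only from the interface declared above; cell_params, tti_rx and ue are assumed to be set up elsewhere, and aggregation level index 2 means L = 4 CCEs:

sf_cch_allocator pdcch;
pdcch.init(cell_params); // bind cell config (CCE tables, common PUCCH cfg)
pdcch.new_tti(tti_rx);   // reset per-TTI state

// Place a DL data DCI for `ue`; the allocator also reserves the implicit
// HARQ-ACK PUCCH resource, since no PUSCH grant carries the UCI here.
if (pdcch.alloc_dci(alloc_type_t::DL_DATA, 2, &ue, /* has_pusch_grant = */ false)) {
  sf_cch_allocator::alloc_result_t dci_result; // one tree_node* per placed DCI
  pdcch_mask_t                     used_cces;
  pdcch.get_allocs(&dci_result, &used_cces);
} else {
  // No collision-free CCE position at any allowed CFI; the caller can retry with a
  // different aggregation level or drop the grant (rem_last_dci() is only needed to
  // undo a successful allocation).
}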


@@ -645,9 +645,9 @@ alloc_result sf_sched::alloc_phich(sched_ue* user)
return alloc_result::no_rnti_opportunity;
}
void sf_sched::set_dl_data_sched_result(const sf_cch_allocator2::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result,
sched_ue_list& ue_list)
void sf_sched::set_dl_data_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
sched_interface::dl_sched_res_t* dl_result,
sched_ue_list& ue_list)
{
for (const auto& data_alloc : data_allocs) {
dl_result->data.emplace_back();
@@ -788,9 +788,9 @@ uci_pusch_t is_uci_included(const sf_sched* sf_sched,
}
}
void sf_sched::set_ul_sched_result(const sf_cch_allocator2::alloc_result_t& dci_result,
sched_interface::ul_sched_res_t* ul_result,
sched_ue_list& ue_list)
void sf_sched::set_ul_sched_result(const sf_cch_allocator::alloc_result_t& dci_result,
sched_interface::ul_sched_res_t* ul_result,
sched_ue_list& ue_list)
{
/* Set UL data DCI locs and format */
for (const auto& ul_alloc : ul_data_allocs) {
@@ -902,7 +902,7 @@ void sf_sched::generate_sched_results(sched_ue_list& ue_db)
}
/* Pick one of the possible DCI masks */
sf_cch_allocator2::alloc_result_t dci_result;
sf_cch_allocator::alloc_result_t dci_result;
// tti_alloc.get_pdcch_grid().result_to_string();
tti_alloc.get_pdcch_grid().get_allocs(&dci_result, &cc_result->pdcch_mask);


@@ -24,13 +24,13 @@ bool is_pucch_sr_collision(const srslte_pucch_cfg_t& ue_pucch_cfg, tti_point tti_tx_dl_ack, uint32_t n1_pucch)
return false;
}
void sf_cch_allocator2::init(const sched_cell_params_t& cell_params_)
void sf_cch_allocator::init(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
pucch_cfg_common = cc_cfg->pucch_cfg_common;
}
void sf_cch_allocator2::new_tti(tti_point tti_rx_)
void sf_cch_allocator::new_tti(tti_point tti_rx_)
{
tti_rx = tti_rx_;
@@ -41,17 +41,15 @@ void sf_cch_allocator2::new_tti(tti_point tti_rx_)
}
const cce_cfi_position_table*
sf_cch_allocator2::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const
sf_cch_allocator::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const
{
switch (alloc_type) {
case alloc_type_t::DL_BC:
return &cc_cfg->common_locations[cfix];
case alloc_type_t::DL_PCCH:
return &cc_cfg->common_locations[cfix];
case alloc_type_t::DL_RAR:
return &cc_cfg->rar_locations[to_tx_dl(tti_rx).sf_idx()][cfix];
case alloc_type_t::DL_DATA:
return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx());
case alloc_type_t::UL_DATA:
return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx());
default:
@@ -60,7 +58,7 @@ sf_cch_allocator2::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const
return nullptr;
}
bool sf_cch_allocator2::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user, bool has_pusch_grant)
bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user, bool has_pusch_grant)
{
temp_dci_dfs.clear();
uint32_t start_cfix = current_cfix;
@@ -71,6 +69,22 @@ bool sf_cch_allocator2::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user, bool has_pusch_grant)
record.alloc_type = alloc_type;
record.pusch_uci = has_pusch_grant;
if (is_dl_ctrl_alloc(alloc_type) and nof_allocs() == 0 and cc_cfg->nof_prb() == 6 and
current_max_cfix > current_cfix) {
// Given that CFI is not currently dynamic for ctrl allocs, in case of SIB/RAR alloc and a low number of PRBs,
// start with a CFI that maximizes the number of potential CCE locs
uint32_t nof_locs = 0, lowest_cfix = current_cfix;
for (uint32_t cfix_tmp = current_max_cfix; cfix_tmp > lowest_cfix; --cfix_tmp) {
const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix_tmp);
if ((*dci_locs)[record.aggr_idx].size() > nof_locs) {
nof_locs = (*dci_locs)[record.aggr_idx].size();
current_cfix = cfix_tmp;
} else {
break;
}
}
}
// Try to allocate grant. If it fails, attempt the same grant, but using a different permutation of past grant DCI
// positions
do {
@@ -96,7 +110,7 @@ bool sf_cch_allocator2::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user, bool has_pusch_grant)
return false;
}
bool sf_cch_allocator2::get_next_dfs()
bool sf_cch_allocator::get_next_dfs()
{
do {
uint32_t start_child_idx = 0;
@@ -121,7 +135,7 @@ bool sf_cch_allocator2::get_next_dfs()
return true;
}
bool sf_cch_allocator2::alloc_dfs_node(const alloc_record& record, uint32_t start_dci_idx)
bool sf_cch_allocator::alloc_dfs_node(const alloc_record& record, uint32_t start_dci_idx)
{
// Get DCI Location Table
const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, current_cfix);
@@ -185,7 +199,7 @@ bool sf_cch_allocator2::alloc_dfs_node(const alloc_record& record, uint32_t start_dci_idx)
return false;
}
void sf_cch_allocator2::rem_last_dci()
void sf_cch_allocator::rem_last_dci()
{
assert(not dci_record_list.empty());
@@ -194,7 +208,7 @@ void sf_cch_allocator2::rem_last_dci()
dci_record_list.pop_back();
}
void sf_cch_allocator2::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
void sf_cch_allocator::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
{
if (vec != nullptr) {
vec->clear();
@@ -215,7 +229,7 @@ void sf_cch_allocator2::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
}
}
std::string sf_cch_allocator2::result_to_string(bool verbose) const
std::string sf_cch_allocator::result_to_string(bool verbose) const
{
fmt::basic_memory_buffer<char, 1024> strbuf;
if (dci_record_list.empty()) {
@@ -245,362 +259,4 @@ std::string sf_cch_allocator2::result_to_string(bool verbose) const
return fmt::to_string(strbuf);
}
/////////////////////////
void sf_cch_allocator::init(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
pucch_cfg_common = cc_cfg->pucch_cfg_common;
// init alloc trees
alloc_trees.reserve(cc_cfg->sched_cfg->max_nof_ctrl_symbols);
for (uint32_t i = 0; i < cc_cfg->sched_cfg->max_nof_ctrl_symbols; ++i) {
alloc_trees.emplace_back(i + 1, *cc_cfg, pucch_cfg_common);
}
}
void sf_cch_allocator::new_tti(tti_point tti_rx_)
{
tti_rx = tti_rx_;
// Reset back all CFIs
for (auto& t : alloc_trees) {
t.reset();
}
dci_record_list.clear();
current_cfix = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1;
current_max_cfix = cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1;
}
const cce_cfi_position_table*
sf_cch_allocator::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const
{
switch (alloc_type) {
case alloc_type_t::DL_BC:
return &cc_cfg->common_locations[cfix];
case alloc_type_t::DL_PCCH:
return &cc_cfg->common_locations[cfix];
case alloc_type_t::DL_RAR:
return &cc_cfg->rar_locations[to_tx_dl(tti_rx).sf_idx()][cfix];
case alloc_type_t::DL_DATA:
return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx());
case alloc_type_t::UL_DATA:
return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx());
default:
break;
}
return nullptr;
}
bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user, bool has_pusch_grant)
{
// TODO: Make the alloc tree update lazy
alloc_record_t record{.user = user, .aggr_idx = aggr_idx, .alloc_type = alloc_type, .pusch_uci = has_pusch_grant};
if (is_dl_ctrl_alloc(alloc_type) and nof_allocs() == 0 and current_max_cfix > current_cfix) {
// Given that CFI is not currently dynamic for ctrl allocs, in case of SIB/RAR alloc, start with optimal CFI
// in terms of nof CCE locs
uint32_t nof_locs = 0, lowest_cfix = current_cfix;
for (uint32_t cfix_tmp = current_max_cfix; cfix_tmp > lowest_cfix; --cfix_tmp) {
const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix_tmp);
if ((*dci_locs)[record.aggr_idx].size() > nof_locs) {
nof_locs = (*dci_locs)[record.aggr_idx].size();
current_cfix = cfix_tmp;
} else {
break;
}
}
}
// Try to allocate user in PDCCH for given CFI. If it fails, increment CFI.
uint32_t first_cfi = get_cfi();
bool success;
do {
success = alloc_dci_record(record, get_cfi() - 1);
} while (not success and current_cfix < current_max_cfix and set_cfi(get_cfi() + 1));
if (not success) {
// DCI allocation failed. go back to original CFI
if (get_cfi() != first_cfi and not set_cfi(first_cfi)) {
logger.error("SCHED: Failed to return back to original PDCCH state");
}
return false;
}
// DCI record allocation successful
dci_record_list.push_back(record);
if (is_dl_ctrl_alloc(alloc_type)) {
// Dynamic CFI not yet supported for DL control allocations, as coderate can be exceeded
current_max_cfix = current_cfix;
}
return true;
}
void sf_cch_allocator::rem_last_dci()
{
assert(not dci_record_list.empty());
// Remove DCI record
dci_record_list.pop_back();
// Remove leaves of PDCCH position decisions
auto& tree = alloc_trees[current_cfix];
tree.prev_end = tree.prev_start;
if (dci_record_list.empty()) {
tree.prev_start = 0;
} else {
tree.prev_start = tree.dci_alloc_tree[tree.prev_start].parent_idx;
// Discover other tree nodes with same level
while (tree.prev_start > 0) {
uint32_t count = 1;
int parent_idx = tree.dci_alloc_tree[tree.prev_start - 1].parent_idx;
while (parent_idx >= 0) {
count++;
parent_idx = tree.dci_alloc_tree[parent_idx].parent_idx;
}
if (count == dci_record_list.size()) {
tree.prev_start--;
} else {
break;
}
}
}
tree.dci_alloc_tree.erase(tree.dci_alloc_tree.begin() + tree.prev_end, tree.dci_alloc_tree.end());
}
bool sf_cch_allocator::alloc_dci_record(const alloc_record_t& record, uint32_t cfix)
{
bool ret = false;
auto& tree = alloc_trees[cfix];
// Get DCI Location Table
const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, cfix);
if (dci_locs == nullptr or (*dci_locs)[record.aggr_idx].empty()) {
return ret;
}
if (tree.prev_end > 0) {
for (size_t j = tree.prev_start; j < tree.prev_end; ++j) {
ret |= tree.add_tree_node_leaves((int)j, record, *dci_locs, tti_rx);
}
} else {
ret = tree.add_tree_node_leaves(-1, record, *dci_locs, tti_rx);
}
if (ret) {
tree.prev_start = tree.prev_end;
tree.prev_end = tree.dci_alloc_tree.size();
}
return ret;
}
bool sf_cch_allocator::set_cfi(uint32_t cfi)
{
if (cfi < cc_cfg->sched_cfg->min_nof_ctrl_symbols or cfi > cc_cfg->sched_cfg->max_nof_ctrl_symbols) {
logger.error("Invalid CFI value. Defaulting to current CFI.");
return false;
}
uint32_t new_cfix = cfi - 1;
if (new_cfix == current_cfix) {
return true;
}
// setup new PDCCH alloc tree
auto& new_tree = alloc_trees[new_cfix];
new_tree.reset();
if (not dci_record_list.empty()) {
// there are already PDCCH allocations
// Rebuild Allocation Tree
bool ret = true;
for (const auto& old_record : dci_record_list) {
ret &= alloc_dci_record(old_record, new_cfix);
}
if (not ret) {
// Fail to rebuild allocation tree. Go back to previous CFI
return false;
}
}
current_cfix = new_cfix;
return true;
}
void sf_cch_allocator::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
{
alloc_trees[current_cfix].get_allocs(vec, tot_mask, idx);
}
std::string sf_cch_allocator::result_to_string(bool verbose) const
{
return alloc_trees[current_cfix].result_to_string(verbose);
}
sf_cch_allocator::alloc_tree_t::alloc_tree_t(uint32_t this_cfi,
const sched_cell_params_t& cc_params,
srslte_pucch_cfg_t& pucch_cfg_common) :
cfi(this_cfi), cc_cfg(&cc_params), pucch_cfg_temp(&pucch_cfg_common), nof_cces(cc_params.nof_cce_table[this_cfi - 1])
{
dci_alloc_tree.reserve(8);
}
void sf_cch_allocator::alloc_tree_t::reset()
{
prev_start = 0;
prev_end = 0;
dci_alloc_tree.clear();
}
/// Algorithm to compute a valid PDCCH allocation
bool sf_cch_allocator::alloc_tree_t::add_tree_node_leaves(int parent_node_idx,
const alloc_record_t& dci_record,
const cce_cfi_position_table& dci_locs,
tti_point tti_rx_)
{
bool ret = false;
alloc_t alloc;
alloc.rnti = (dci_record.user != nullptr) ? dci_record.user->get_rnti() : SRSLTE_INVALID_RNTI;
alloc.dci_pos.L = dci_record.aggr_idx;
// get cumulative pdcch & pucch masks
pdcch_mask_t parent_total_mask;
prbmask_t parent_pucch_mask;
if (parent_node_idx >= 0) {
parent_total_mask = dci_alloc_tree[parent_node_idx].node.total_mask;
parent_pucch_mask = dci_alloc_tree[parent_node_idx].node.total_pucch_mask;
} else {
parent_total_mask.resize(nof_cces);
parent_pucch_mask.resize(cc_cfg->nof_prb());
}
for (uint32_t i = 0; i < dci_locs[dci_record.aggr_idx].size(); ++i) {
int8_t pucch_prbidx = -1;
uint32_t ncce_pos = dci_locs[dci_record.aggr_idx][i];
if (dci_record.alloc_type == alloc_type_t::DL_DATA and not dci_record.pusch_uci) {
// The UE needs to allocate space in PUCCH for HARQ-ACK
pucch_cfg_temp->n_pucch = ncce_pos + pucch_cfg_temp->N_pucch_1;
if (is_pucch_sr_collision(
dci_record.user->get_ue_cfg().pucch_cfg, to_tx_dl_ack(tti_rx_), pucch_cfg_temp->n_pucch)) {
// avoid collision of HARQ-ACK with own SR n(1)_pucch
continue;
}
pucch_prbidx = srslte_pucch_n_prb(&cc_cfg->cfg.cell, pucch_cfg_temp, 0);
if (not cc_cfg->sched_cfg->pucch_mux_enabled and parent_pucch_mask.test(pucch_prbidx)) {
// PUCCH allocation would collide with other PUCCH/PUSCH grants. Try another CCE position
continue;
}
}
pdcch_mask_t alloc_mask(nof_cces);
alloc_mask.fill(ncce_pos, ncce_pos + (1u << dci_record.aggr_idx));
if ((parent_total_mask & alloc_mask).any()) {
// there is a PDCCH collision. Try another CCE position
continue;
}
// Allocation successful
alloc.current_mask = alloc_mask;
alloc.total_mask = parent_total_mask | alloc_mask;
alloc.dci_pos.ncce = ncce_pos;
alloc.pucch_n_prb = pucch_prbidx;
alloc.total_pucch_mask = parent_pucch_mask;
if (pucch_prbidx >= 0) {
alloc.total_pucch_mask.set(pucch_prbidx);
}
// Prune if repetition of total_masks
uint32_t j = prev_end;
for (; j < dci_alloc_tree.size(); ++j) {
if (dci_alloc_tree[j].node.total_mask == alloc.total_mask) {
// leave nested for-loop
break;
}
}
if (j < dci_alloc_tree.size()) {
continue;
}
// Register allocation
dci_alloc_tree.emplace_back(parent_node_idx, alloc);
ret = true;
}
return ret;
}
void sf_cch_allocator::alloc_tree_t::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
{
// if alloc tree is empty
if (prev_start == prev_end) {
if (vec != nullptr) {
vec->clear();
}
if (tot_mask != nullptr) {
tot_mask->resize(nof_cces);
tot_mask->reset();
}
return;
}
// set vector of allocations
if (vec != nullptr) {
vec->clear();
size_t i = prev_start + idx;
while (dci_alloc_tree[i].parent_idx >= 0) {
vec->push_back(&dci_alloc_tree[i].node);
i = (size_t)dci_alloc_tree[i].parent_idx;
}
vec->push_back(&dci_alloc_tree[i].node);
std::reverse(vec->begin(), vec->end());
}
// set final cce mask
if (tot_mask != nullptr) {
*tot_mask = dci_alloc_tree[prev_start + idx].node.total_mask;
}
}
std::string sf_cch_allocator::alloc_tree_t::result_to_string(bool verbose) const
{
// get all the possible combinations of DCI pos allocations
fmt::basic_memory_buffer<char, 1024> strbuf;
fmt::format_to(strbuf,
"SCHED: PDCCH allocations cfi={}, nof_cce={}, {} possible combinations:\n",
cfi,
nof_cces,
prev_end - prev_start);
uint32_t count = 0;
for (size_t i = prev_start; i < prev_end; ++i) {
alloc_result_t vec;
pdcch_mask_t tot_mask;
get_allocs(&vec, &tot_mask, i - prev_start);
fmt::format_to(strbuf, "[{}]: total mask=0x{:x}", count, tot_mask);
if (verbose) {
fmt::format_to(strbuf, ", allocations:\n");
for (const auto& dci_alloc : vec) {
fmt::format_to(strbuf,
" > rnti=0x{:0x}: 0x{:x} / 0x{:x}\n",
dci_alloc->rnti,
dci_alloc->current_mask,
dci_alloc->total_mask);
}
} else {
fmt::format_to(strbuf, "\n");
}
count++;
}
return fmt::to_string(strbuf);
}
} // namespace srsenb
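The per-candidate acceptance rules are unchanged by this commit; compare the removed add_tree_node_leaves() with alloc_dfs_node() in the new code. A candidate CCE position is accepted only if (a) the DCI's CCE span does not overlap previously allocated CCEs and (b), for DL data whose UCI is not carried on PUSCH, the implicitly derived HARQ-ACK resource (n1_pucch = n_cce + N_pucch_1, as in the removed code above) does not land on an already-used PUCCH PRB. A condensed, self-contained sketch, with std::bitset standing in for pdcch_mask_t and prbmask_t:

#include <bitset>
#include <cstdint>

using mask_t = std::bitset<128>; // toy stand-in for pdcch_mask_t / prbmask_t

static mask_t cce_span(uint32_t ncce, uint32_t aggr_idx)
{
  mask_t m;
  for (uint32_t i = 0; i < (1u << aggr_idx); ++i) {
    m.set(ncce + i); // CCEs [ncce, ncce + 2^aggr_idx)
  }
  return m;
}

// Acceptance test applied to each candidate CCE position. pucch_prb < 0 means the
// grant needs no PUCCH reservation (e.g. UL grants, or UCI multiplexed on PUSCH).
static bool candidate_ok(uint32_t ncce, uint32_t aggr_idx, const mask_t& used_cces,
                         const mask_t& used_pucch_prbs, int pucch_prb)
{
  if ((used_cces & cce_span(ncce, aggr_idx)).any()) {
    return false; // PDCCH CCE collision with an earlier DCI in this solution
  }
  if (pucch_prb >= 0 && used_pucch_prbs.test((size_t)pucch_prb)) {
    return false; // implicit HARQ-ACK PUCCH PRB already taken
  }
  return true;
}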


@@ -45,8 +45,8 @@ int test_pdcch_one_ue()
sched_interface::sched_args_t sched_args{};
TESTASSERT(cell_params[ENB_CC_IDX].set_cfg(ENB_CC_IDX, cell_cfg, sched_args));
sf_cch_allocator2 pdcch;
sched_ue sched_ue{rnti, cell_params, ue_cfg};
sf_cch_allocator pdcch;
sched_ue sched_ue{rnti, cell_params, ue_cfg};
pdcch.init(cell_params[PCell_IDX]);
TESTASSERT(pdcch.nof_allocs() == 0);
@@ -84,8 +84,8 @@ int test_pdcch_one_ue()
const cce_position_list& dci_locs = (*dci_cce)[aggr_idx];
// TEST: Check the first alloc of the pdcch result (e.g. rnti, valid cce mask, etc.)
sf_cch_allocator2::alloc_result_t pdcch_result;
pdcch_mask_t pdcch_mask;
sf_cch_allocator::alloc_result_t pdcch_result;
pdcch_mask_t pdcch_mask;
pdcch.get_allocs(&pdcch_result, &pdcch_mask, 0);
TESTASSERT(pdcch_result.size() == 1);
TESTASSERT(pdcch_result[0]->rnti == sched_ue.get_rnti());
@@ -141,8 +141,8 @@ int test_pdcch_ue_and_sibs()
sched_interface::sched_args_t sched_args{};
TESTASSERT(cell_params[0].set_cfg(0, cell_cfg, sched_args));
sf_cch_allocator2 pdcch;
sched_ue sched_ue{0x46, cell_params, ue_cfg};
sf_cch_allocator pdcch;
sched_ue sched_ue{0x46, cell_params, ue_cfg};
pdcch.init(cell_params[PCell_IDX]);
TESTASSERT(pdcch.nof_allocs() == 0);
@@ -166,9 +166,9 @@ int test_pdcch_ue_and_sibs()
TESTASSERT(pdcch.nof_allocs() == 2);
// TEST: DCI positions
uint32_t cfi = pdcch.get_cfi();
sf_cch_allocator2::alloc_result_t dci_result;
pdcch_mask_t result_pdcch_mask;
uint32_t cfi = pdcch.get_cfi();
sf_cch_allocator::alloc_result_t dci_result;
pdcch_mask_t result_pdcch_mask;
pdcch.get_allocs(&dci_result, &result_pdcch_mask);
TESTASSERT(dci_result.size() == 2);
const cce_position_list& bc_dci_locs = cell_params[0].common_locations[cfi - 1][2];
@@ -189,10 +189,10 @@ int test_6prbs()
sched_interface::sched_args_t sched_args{};
TESTASSERT(cell_params[0].set_cfg(0, cell_cfg, sched_args));
sf_cch_allocator2 pdcch;
sched_ue sched_ue{0x46, cell_params, ue_cfg}, sched_ue2{0x47, cell_params, ue_cfg};
sf_cch_allocator2::alloc_result_t dci_result;
pdcch_mask_t result_pdcch_mask;
sf_cch_allocator pdcch;
sched_ue sched_ue{0x46, cell_params, ue_cfg}, sched_ue2{0x47, cell_params, ue_cfg};
sf_cch_allocator::alloc_result_t dci_result;
pdcch_mask_t result_pdcch_mask;
pdcch.init(cell_params[PCell_IDX]);
TESTASSERT(pdcch.nof_allocs() == 0);