Implemented a DFS-based PDCCH allocator

This commit is contained in:
Francisco 2021-03-18 21:38:48 +00:00 committed by Francisco Paisana
parent a2e2501424
commit 8347cabe4f
5 changed files with 358 additions and 36 deletions

View File

@ -14,6 +14,7 @@
#define SRSLTE_SCHED_GRID_H
#include "lib/include/srslte/interfaces/sched_interface.h"
#include "sched_phy_ch/sched_result.h"
#include "sched_phy_ch/sf_cch_allocator.h"
#include "sched_ue.h"
#include "srslte/adt/bounded_bitset.h"
@ -37,16 +38,6 @@ enum class alloc_result {
};
const char* to_string(alloc_result res);
//! Result of a Subframe sched computation
// NOTE(review): appears superseded by the cc_sched_result defined in
// sched_phy_ch/sched_result.h (same fields plus tti_rx) — confirm this copy is removed
struct cc_sched_result {
  bool      generated = false;
  rbgmask_t dl_mask   = {}; ///< Accumulation of all DL RBG allocations
  prbmask_t ul_mask   = {}; ///< Accumulation of all UL PRB allocations
  pdcch_mask_t pdcch_mask = {}; ///< Accumulation of all CCE allocations
  sched_interface::dl_sched_res_t dl_sched_result = {};
  sched_interface::ul_sched_res_t ul_sched_result = {};
};
struct sf_sched_result {
tti_point tti_rx;
std::vector<cc_sched_result> enb_cc_list;

View File

@ -0,0 +1,41 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2020 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSLTE_SCHED_RESULT_H
#define SRSLTE_SCHED_RESULT_H
#include "../sched_common.h"
namespace srsenb {
/// Result of a Subframe sched computation
struct cc_sched_result {
  bool generated = false;
  // TTI this result refers to — presumably the rx-side TTI reference used by the
  // scheduler (name-based; confirm against sf_sched_result::tti_rx usage)
  tti_point tti_rx{};
  /// Accumulation of all DL RBG allocations
  rbgmask_t dl_mask = {};
  /// Accumulation of all UL PRB allocations
  prbmask_t ul_mask = {};
  /// Accumulation of all CCE allocations
  pdcch_mask_t pdcch_mask = {};
  /// Individual allocations information
  sched_interface::dl_sched_res_t dl_sched_result = {};
  sched_interface::ul_sched_res_t ul_sched_result = {};
};
} // namespace srsenb
#endif // SRSLTE_SCHED_RESULT_H

View File

@ -11,6 +11,7 @@
*/
#include "../sched_common.h"
#include "sched_result.h"
#ifndef SRSLTE_PDCCH_SCHED_H
#define SRSLTE_PDCCH_SCHED_H
@ -19,6 +20,81 @@ namespace srsenb {
class sched_ue;
/// DFS-based allocator of PDCCH CCE positions (and associated PUCCH HARQ-ACK resources)
/// for one subframe. It keeps a record of every DCI allocation attempted so far
/// (dci_record_list) and the current DFS path of chosen CCE positions (last_dci_dfs);
/// when a new allocation fails, previous choices are revisited — and the CFI possibly
/// increased — via get_next_dfs().
class sf_cch_allocator2
{
public:
  const static uint32_t MAX_CFI = 3;

  /// One node of the DFS path: the DCI position chosen for one allocation record,
  /// plus the cumulative PDCCH/PUCCH masks of the path up to and including this node.
  struct tree_node {
    int8_t   pucch_n_prb = -1; ///< this PUCCH resource identifier
    uint16_t rnti        = SRSLTE_INVALID_RNTI;
    uint32_t record_idx  = 0; // NOTE(review): never assigned in the visible code — confirm intended use
    uint32_t dci_pos_idx = 0; ///< index of the chosen position within the CCE candidate list
    srslte_dci_location_t dci_pos = {0, 0};
    /// Accumulation of all PDCCH masks for the current solution (DFS path)
    pdcch_mask_t total_mask, current_mask;
    prbmask_t    total_pucch_mask;
  };
  //  struct alloc_t {
  //    int8_t pucch_n_prb; ///< this PUCCH resource identifier
  //    uint16_t rnti = 0;
  //    srslte_dci_location_t dci_pos = {0, 0};
  //    pdcch_mask_t current_mask; ///< this allocation PDCCH mask
  //    pdcch_mask_t total_mask; ///< Accumulation of all PDCCH masks for the current solution (tree
  //    route) prbmask_t total_pucch_mask; ///< Accumulation of all PUCCH masks for the current
  //    solution/tree route
  //  };
  using alloc_result_t = srslte::bounded_vector<const tree_node*, 16>;

  sf_cch_allocator2() : logger(srslog::fetch_basic_logger("MAC")) {}

  /// Stores cell parameters; must be called before any other method
  void init(const sched_cell_params_t& cell_params_);
  /// Resets allocation state for a new TTI
  void new_tti(tti_point tti_rx_);
  /**
   * Allocates DCI space in PDCCH and PUCCH, avoiding in the process collisions with other users
   * @param alloc_type allocation type (e.g. DL data, UL data, ctrl)
   * @param aggr_idx Aggregation level index (0..3)
   * @param user UE object or null in case of broadcast/RAR/paging allocation
   * @param has_pusch_grant If the UE has already an PUSCH grant for UCI allocated
   * @return if the allocation was successful
   */
  bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr, bool has_pusch_grant = false);
  /// Undoes the last successful alloc_dci() call
  void rem_last_dci();

  // getters
  uint32_t get_cfi() const { return current_cfix + 1; } // CFI is 1-based, current_cfix is 0-based
  void     get_allocs(alloc_result_t* vec = nullptr, pdcch_mask_t* tot_mask = nullptr, size_t idx = 0) const;
  uint32_t nof_cces() const { return cc_cfg->nof_cce_table[current_cfix]; }
  size_t   nof_allocs() const { return dci_record_list.size(); }
  std::string result_to_string(bool verbose = false) const;

private:
  /// DCI allocation parameters
  struct alloc_record {
    bool         pusch_uci; // true if the UE already has a PUSCH grant carrying UCI (no PUCCH needed)
    uint32_t     aggr_idx;
    alloc_type_t alloc_type;
    sched_ue*    user;
  };
  /// Returns the CCE candidate table for the given allocation type / UE / CFI index (nullptr if N/A)
  const cce_cfi_position_table* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const;

  // PDCCH allocation algorithm
  bool alloc_dfs_node(const alloc_record& record, uint32_t start_child_idx);
  bool get_next_dfs();

  // consts
  const sched_cell_params_t* cc_cfg = nullptr;
  srslog::basic_logger&      logger;
  srslte_pucch_cfg_t         pucch_cfg_common = {};

  // tti vars
  tti_point tti_rx;
  uint32_t  current_cfix     = 0; ///< 0-based CFI index currently being tried
  uint32_t  current_max_cfix = 0; ///< maximum 0-based CFI index allowed for this TTI
  std::vector<tree_node>    last_dci_dfs, temp_dci_dfs;
  std::vector<alloc_record> dci_record_list; ///< Keeps a record of all the PDCCH allocations done so far
};
/// Class responsible for managing a PDCCH CCE grid, namely CCE allocs, and avoid collisions.
class sf_cch_allocator
{

View File

@ -16,6 +16,231 @@
namespace srsenb {
/// Returns true if the HARQ-ACK PUCCH resource n1_pucch would collide with the UE's own
/// SR PUCCH resource in the given DL-ACK TTI.
bool is_pucch_sr_collision(const srslte_pucch_cfg_t& ue_pucch_cfg, tti_point tti_tx_dl_ack, uint32_t n1_pucch)
{
  // A collision is only possible when SR is configured and an SR opportunity falls on this TTI
  const bool sr_opportunity =
      ue_pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&ue_pucch_cfg, tti_tx_dl_ack.to_uint());
  return sr_opportunity and n1_pucch == ue_pucch_cfg.n_pucch_sr;
}
void sf_cch_allocator2::init(const sched_cell_params_t& cell_params_)
{
  // Store cell configuration and cache a local copy of the cell common PUCCH config.
  // The copy is needed because alloc_dfs_node() mutates its n_pucch field per candidate.
  cc_cfg           = &cell_params_;
  pucch_cfg_common = cc_cfg->pucch_cfg_common;
}
void sf_cch_allocator2::new_tti(tti_point tti_rx_)
{
  tti_rx = tti_rx_;
  // Discard all allocation records and the DFS path of the previous TTI
  dci_record_list.clear();
  last_dci_dfs.clear();
  // cfix is the 0-based CFI index (CFI - 1). Start at the configured minimum number of
  // control symbols; get_next_dfs() may grow it up to the configured maximum.
  current_cfix     = cc_cfg->sched_cfg->min_nof_ctrl_symbols - 1;
  current_max_cfix = cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1;
}
/// Returns the table of CCE candidate positions for the given allocation type, UE and
/// 0-based CFI index, or nullptr when the type has no search space.
const cce_cfi_position_table*
sf_cch_allocator2::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const
{
  switch (alloc_type) {
    // Broadcast and Paging DCIs use the common search space
    case alloc_type_t::DL_BC:
    case alloc_type_t::DL_PCCH:
      return &cc_cfg->common_locations[cfix];
    case alloc_type_t::DL_RAR:
      return &cc_cfg->rar_locations[to_tx_dl(tti_rx).sf_idx()][cfix];
    // UE data DCIs use the UE-dedicated search space (get_locations takes a 1-based CFI)
    case alloc_type_t::DL_DATA:
    case alloc_type_t::UL_DATA:
      return user->get_locations(cc_cfg->enb_cc_idx, cfix + 1, to_tx_dl(tti_rx).sf_idx());
    default:
      break;
  }
  return nullptr;
}
/// Allocates a DCI for the given parameters. On failure, it backtracks over the positions of
/// previously allocated DCIs (and higher CFIs) via get_next_dfs(); if no combination fits,
/// the DFS path and CFI are restored to their state before the call.
bool sf_cch_allocator2::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user, bool has_pusch_grant)
{
  temp_dci_dfs.clear();
  // Save the CFI so it can be restored if this allocation ultimately fails
  uint32_t start_cfix = current_cfix;

  alloc_record record;
  record.user       = user;
  record.aggr_idx   = aggr_idx;
  record.alloc_type = alloc_type;
  record.pusch_uci  = has_pusch_grant;

  // Try to allocate grant. If it fails, attempt the same grant, but using a different permutation of past grant DCI
  // positions
  do {
    bool success = alloc_dfs_node(record, 0);
    if (success) {
      // DCI record allocation successful
      dci_record_list.push_back(record);
      return true;
    }
    if (temp_dci_dfs.empty()) {
      // First failure: back up the current (valid) DFS path before get_next_dfs() mutates it
      temp_dci_dfs = last_dci_dfs;
    }
  } while (get_next_dfs());

  // Revert steps to initial state, before dci record allocation was attempted
  last_dci_dfs.swap(temp_dci_dfs);
  current_cfix = start_cfix;
  return false;
}
// bool sf_cch_allocator2::get_next_dfs()
//{
// if (last_dci_dfs.empty()) {
// // If we reach root, increase CFI
// if (current_cfix < cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1) {
// current_cfix++;
// return true;
// }
// return false;
// }
//
// uint32_t dfs_level = last_dci_dfs.size() - 1;
// uint32_t start_child_idx = last_dci_dfs.back().dci_pos_idx + 1;
// last_dci_dfs.pop_back();
// while (not alloc_dfs_node(dci_record_list[dfs_level], start_child_idx)) {
// start_child_idx = 0;
// // If failed to allocate record, go one level lower in DFS
// if (not get_next_dfs()) {
// // If no more options in DFS, return false
// return false;
// }
// }
//}
bool sf_cch_allocator2::get_next_dfs()
{
do {
uint32_t start_child_idx = 0;
if (last_dci_dfs.empty()) {
// If we reach root, increase CFI
current_cfix++;
if (current_cfix > cc_cfg->sched_cfg->max_nof_ctrl_symbols - 1) {
return false;
}
} else {
// Attempt to re-add last tree node, but with a higher node child index
start_child_idx = last_dci_dfs.back().dci_pos_idx + 1;
last_dci_dfs.pop_back();
}
while (last_dci_dfs.size() < dci_record_list.size() and
alloc_dfs_node(dci_record_list[last_dci_dfs.size()], start_child_idx)) {
start_child_idx = 0;
}
} while (last_dci_dfs.size() < dci_record_list.size());
// Finished computation of next DFS node
return true;
}
/// Tries to allocate a collision-free DCI position (and, for DL data without PUSCH-carried
/// UCI, a PUCCH HARQ-ACK PRB) for the given record, searching the CCE candidate list from
/// start_dci_idx onwards. On success a new tree_node is pushed onto last_dci_dfs.
bool sf_cch_allocator2::alloc_dfs_node(const alloc_record& record, uint32_t start_dci_idx)
{
  // Get DCI Location Table
  const cce_cfi_position_table* dci_locs = get_cce_loc_table(record.alloc_type, record.user, current_cfix);
  if (dci_locs == nullptr or (*dci_locs)[record.aggr_idx].empty()) {
    // No search space for this alloc type / aggregation level at the current CFI
    return false;
  }
  const cce_position_list& dci_pos_list = (*dci_locs)[record.aggr_idx];
  if (start_dci_idx >= dci_pos_list.size()) {
    return false;
  }

  tree_node node;
  node.dci_pos_idx = start_dci_idx;
  node.dci_pos.L   = record.aggr_idx;
  node.rnti        = record.user != nullptr ? record.user->get_rnti() : SRSLTE_INVALID_RNTI;
  node.current_mask.resize(nof_cces());
  // get cumulative pdcch & pucch masks
  if (not last_dci_dfs.empty()) {
    // Inherit the accumulated masks from the parent node in the DFS path
    node.total_mask       = last_dci_dfs.back().total_mask;
    node.total_pucch_mask = last_dci_dfs.back().total_pucch_mask;
  } else {
    // Root node: start from empty masks sized to the current CFI / cell bandwidth
    node.total_mask.resize(nof_cces());
    node.total_pucch_mask.resize(cc_cfg->nof_prb());
  }

  for (; node.dci_pos_idx < dci_pos_list.size(); ++node.dci_pos_idx) {
    node.dci_pos.ncce = dci_pos_list[node.dci_pos_idx];

    if (record.alloc_type == alloc_type_t::DL_DATA and not record.pusch_uci) {
      // The UE needs to allocate space in PUCCH for HARQ-ACK
      // NOTE: mutates the member pucch_cfg_common (n_pucch) for the srslte_pucch_n_prb call below
      pucch_cfg_common.n_pucch = node.dci_pos.ncce + pucch_cfg_common.N_pucch_1;

      if (is_pucch_sr_collision(record.user->get_ue_cfg().pucch_cfg, to_tx_dl_ack(tti_rx), pucch_cfg_common.n_pucch)) {
        // avoid collision of HARQ-ACK with own SR n(1)_pucch
        continue;
      }
      node.pucch_n_prb = srslte_pucch_n_prb(&cc_cfg->cfg.cell, &pucch_cfg_common, 0);
      if (not cc_cfg->sched_cfg->pucch_mux_enabled and node.total_pucch_mask.test(node.pucch_n_prb)) {
        // PUCCH allocation would collide with other PUCCH/PUSCH grants. Try another CCE position
        continue;
      }
    }

    // CCEs used by this candidate: 2^aggr_idx consecutive CCEs starting at ncce
    node.current_mask.reset();
    node.current_mask.fill(node.dci_pos.ncce, node.dci_pos.ncce + (1U << record.aggr_idx));
    if ((node.total_mask & node.current_mask).any()) {
      // there is a PDCCH collision. Try another CCE position
      continue;
    }

    // Allocation successful
    node.total_mask |= node.current_mask;
    if (node.pucch_n_prb >= 0) {
      node.total_pucch_mask.set(node.pucch_n_prb);
    }
    last_dci_dfs.push_back(node);
    return true;
  }

  return false;
}
/// Reverts the last successful alloc_dci() call, removing both its allocation record and
/// its node in the current DFS path.
void sf_cch_allocator2::rem_last_dci()
{
  // last_dci_dfs and dci_record_list are kept in lockstep (alloc_dci pushes onto both);
  // pop_back() on an empty vector is UB, so assert both before popping
  assert(not dci_record_list.empty() and not last_dci_dfs.empty());
  // Remove DCI record
  last_dci_dfs.pop_back();
  dci_record_list.pop_back();
}
/// Retrieves the current allocation result: the list of tree-node pointers along the
/// active DFS path (if vec != nullptr) and/or the accumulated PDCCH CCE mask
/// (if tot_mask != nullptr).
void sf_cch_allocator2::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
{
  if (vec != nullptr) {
    // Rebuild the pointer list from the current DFS path, one entry per allocation
    vec->clear();
    for (const tree_node& node : last_dci_dfs) {
      vec->push_back(&node);
    }
  }
  if (tot_mask != nullptr) {
    if (not last_dci_dfs.empty()) {
      // The deepest node carries the accumulated mask of the whole path
      *tot_mask = last_dci_dfs.back().total_mask;
    } else {
      // No allocations yet: return an all-zero mask sized to the current CFI
      tot_mask->resize(nof_cces());
      tot_mask->reset();
    }
  }
}
/// Textual dump of the current PDCCH allocation result.
/// TODO: stub — not implemented for the DFS allocator yet; the verbose flag is currently unused
std::string sf_cch_allocator2::result_to_string(bool verbose) const
{
  return "";
}
/////////////////////////
void sf_cch_allocator::init(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
@ -226,14 +451,6 @@ void sf_cch_allocator::alloc_tree_t::reset()
dci_alloc_tree.clear();
}
bool is_pucch_sr_collision(const srslte_pucch_cfg_t& ue_pucch_cfg, tti_point tti_tx_dl_ack, uint32_t n1_pucch)
{
if (ue_pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&ue_pucch_cfg, tti_tx_dl_ack.to_uint())) {
return n1_pucch == ue_pucch_cfg.n_pucch_sr;
}
return false;
}
/// Algorithm to compute a valid PDCCH allocation
bool sf_cch_allocator::alloc_tree_t::add_tree_node_leaves(int parent_node_idx,
const alloc_record_t& dci_record,

View File

@ -45,11 +45,10 @@ int test_pdcch_one_ue()
sched_interface::sched_args_t sched_args{};
TESTASSERT(cell_params[ENB_CC_IDX].set_cfg(ENB_CC_IDX, cell_cfg, sched_args));
sf_cch_allocator pdcch;
sched_ue sched_ue{rnti, cell_params, ue_cfg};
sf_cch_allocator2 pdcch;
sched_ue sched_ue{rnti, cell_params, ue_cfg};
pdcch.init(cell_params[PCell_IDX]);
TESTASSERT(pdcch.nof_alloc_combinations() == 0);
TESTASSERT(pdcch.nof_allocs() == 0);
uint32_t tti_counter = 0;
@ -85,8 +84,8 @@ int test_pdcch_one_ue()
const cce_position_list& dci_locs = (*dci_cce)[aggr_idx];
// TEST: Check the first alloc of the pdcch result (e.g. rnti, valid cce mask, etc.)
sf_cch_allocator::alloc_result_t pdcch_result;
pdcch_mask_t pdcch_mask;
sf_cch_allocator2::alloc_result_t pdcch_result;
pdcch_mask_t pdcch_mask;
pdcch.get_allocs(&pdcch_result, &pdcch_mask, 0);
TESTASSERT(pdcch_result.size() == 1);
TESTASSERT(pdcch_result[0]->rnti == sched_ue.get_rnti());
@ -142,11 +141,10 @@ int test_pdcch_ue_and_sibs()
sched_interface::sched_args_t sched_args{};
TESTASSERT(cell_params[0].set_cfg(0, cell_cfg, sched_args));
sf_cch_allocator pdcch;
sched_ue sched_ue{0x46, cell_params, ue_cfg};
sf_cch_allocator2 pdcch;
sched_ue sched_ue{0x46, cell_params, ue_cfg};
pdcch.init(cell_params[PCell_IDX]);
TESTASSERT(pdcch.nof_alloc_combinations() == 0);
TESTASSERT(pdcch.nof_allocs() == 0);
tti_point tti_rx{std::uniform_int_distribution<uint32_t>(0, 9)(get_rand_gen())};
@ -154,10 +152,10 @@ int test_pdcch_ue_and_sibs()
pdcch.new_tti(tti_rx);
TESTASSERT(pdcch.nof_cces() == cell_params[0].nof_cce_table[0]);
TESTASSERT(pdcch.get_cfi() == 1); // Start at CFI=1
TESTASSERT(pdcch.nof_alloc_combinations() == 0);
TESTASSERT(pdcch.nof_allocs() == 0);
TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_BC, 2));
TESTASSERT(pdcch.nof_alloc_combinations() == 4);
TESTASSERT(pdcch.nof_allocs() == 1);
TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_RAR, 2));
TESTASSERT(pdcch.nof_allocs() == 2);
TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, 2, &sched_ue, false));
@ -168,9 +166,9 @@ int test_pdcch_ue_and_sibs()
TESTASSERT(pdcch.nof_allocs() == 2);
// TEST: DCI positions
uint32_t cfi = pdcch.get_cfi();
sf_cch_allocator::alloc_result_t dci_result;
pdcch_mask_t result_pdcch_mask;
uint32_t cfi = pdcch.get_cfi();
sf_cch_allocator2::alloc_result_t dci_result;
pdcch_mask_t result_pdcch_mask;
pdcch.get_allocs(&dci_result, &result_pdcch_mask);
TESTASSERT(dci_result.size() == 2);
const cce_position_list& bc_dci_locs = cell_params[0].common_locations[cfi - 1][2];
@ -191,13 +189,12 @@ int test_6prbs()
sched_interface::sched_args_t sched_args{};
TESTASSERT(cell_params[0].set_cfg(0, cell_cfg, sched_args));
sf_cch_allocator pdcch;
sched_ue sched_ue{0x46, cell_params, ue_cfg}, sched_ue2{0x47, cell_params, ue_cfg};
sf_cch_allocator::alloc_result_t dci_result;
pdcch_mask_t result_pdcch_mask;
sf_cch_allocator2 pdcch;
sched_ue sched_ue{0x46, cell_params, ue_cfg}, sched_ue2{0x47, cell_params, ue_cfg};
sf_cch_allocator2::alloc_result_t dci_result;
pdcch_mask_t result_pdcch_mask;
pdcch.init(cell_params[PCell_IDX]);
TESTASSERT(pdcch.nof_alloc_combinations() == 0);
TESTASSERT(pdcch.nof_allocs() == 0);
uint32_t opt_cfi = 3;