Implementation of a scheduler algorithm to avoid PUCCH collisions

- The cch_allocator now accounts for a fixed PUCCH region dedicated to CQI, SR, and HARQ ACKs
- At the moment, PUCCH multiplexing is not considered
- The PUCCH width was increased to accommodate the possible PUCCH-ACK positions (a minimal sketch of the band-edge region follows below)
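
A minimal sketch, not the committed implementation, of the fixed band-edge PUCCH region the bullets refer to: in LTE the PUCCH sits in the PRBs at both edges of the UL bandwidth, so reserving pucch_nrb PRBs per edge keeps PUSCH allocations clear of the CQI/SR/HARQ-ACK resources. The mask type below is a plain stand-in for srsLTE's prbmask_t.

#include <cstdint>
#include <vector>

// Mark the 'pucch_nrb' PRBs at each UL band edge as reserved for PUCCH.
std::vector<bool> make_pucch_region_mask(uint32_t nof_prb, uint32_t pucch_nrb)
{
  std::vector<bool> mask(nof_prb, false);
  for (uint32_t i = 0; i < pucch_nrb; ++i) {
    mask[i]               = true; // lower band edge
    mask[nof_prb - 1 - i] = true; // upper band edge
  }
  return mask;
}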
Francisco 2021-02-19 14:52:04 +00:00 committed by Andre Puschmann
parent 8ae8b31ba2
commit 34e39a9835
9 changed files with 209 additions and 153 deletions

View File

@@ -102,7 +102,7 @@ public:
void init(const sched_cell_params_t& cell_params_);
void new_tti(tti_point tti_rx);
dl_ctrl_alloc_t alloc_dl_ctrl(uint32_t aggr_lvl, alloc_type_t alloc_type);
alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask);
alloc_outcome_t alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant);
bool reserve_dl_rbgs(uint32_t start_rbg, uint32_t end_rbg);
alloc_outcome_t alloc_ul_data(sched_ue* user, prb_interval alloc, bool needs_pdcch);
alloc_outcome_t reserve_ul_prbs(const prbmask_t& prbmask, bool strict);
@@ -117,7 +117,11 @@ public:
uint32_t get_pucch_width() const { return pucch_nrb; }
private:
alloc_outcome_t alloc_dl(uint32_t aggr_lvl, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user = nullptr);
alloc_outcome_t alloc_dl(uint32_t aggr_lvl,
alloc_type_t alloc_type,
rbgmask_t alloc_mask,
sched_ue* user = nullptr,
bool has_pusch_grant = false);
// consts
const sched_cell_params_t* cc_cfg = nullptr;

View File

@@ -25,10 +25,12 @@ class sf_cch_allocator
public:
const static uint32_t MAX_CFI = 3;
struct alloc_t {
int8_t pucch_n_prb; ///< PRB index of the allocated PUCCH resource (-1 if no PUCCH is needed)
uint16_t rnti = 0;
srslte_dci_location_t dci_pos = {0, 0};
pdcch_mask_t current_mask; ///< this PDCCH alloc mask
pdcch_mask_t total_mask; ///< Accumulation of all PDCCH masks for the current solution (tree route)
pdcch_mask_t current_mask; ///< this allocation PDCCH mask
pdcch_mask_t total_mask; ///< Accumulation of all PDCCH masks for the current solution (tree route)
prbmask_t total_pucch_mask; ///< Accumulation of all PUCCH masks for the current solution/tree route
};
using alloc_result_t = std::vector<const alloc_t*>;
@@ -36,8 +38,15 @@ public:
void init(const sched_cell_params_t& cell_params_);
void new_tti(tti_point tti_rx_);
bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr);
bool set_cfi(uint32_t cfi);
/**
* Allocates DCI space in the PDCCH and, when needed, a PUCCH resource, avoiding collisions with other users in the process
* @param alloc_type allocation type (e.g. DL data, UL data, ctrl)
* @param aggr_idx aggregation level index (0..3)
* @param user UE object, or nullptr in the case of a broadcast/RAR/paging allocation
* @param has_pusch_grant whether the UE already has a PUSCH grant on which UCI can be carried
* @return true if the allocation was successful
*/
bool alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user = nullptr, bool has_pusch_grant = false);
// getters
uint32_t get_cfi() const { return current_cfix + 1; }
@@ -48,41 +57,52 @@ public:
std::string result_to_string(bool verbose = false) const;
private:
/// DCI allocation parameters
struct alloc_record_t {
sched_ue* user;
uint32_t aggr_idx;
alloc_type_t alloc_type;
bool pusch_uci;
};
/// Tree-based data structure to store possible DCI allocation decisions
struct alloc_tree_t {
struct node_t {
int parent_idx;
alloc_t node;
node_t(int i, const alloc_t& a) : parent_idx(i), node(a) {}
};
// args
size_t nof_cces;
const sched_cell_params_t* cc_cfg = nullptr;
srslte_pucch_cfg_t* pucch_cfg = nullptr;
uint32_t cfi;
// state
size_t nof_cces;
std::vector<node_t> dci_alloc_tree;
size_t prev_start = 0, prev_end = 0;
explicit alloc_tree_t(size_t nof_cces_) : nof_cces(nof_cces_) {}
size_t nof_leaves() const { return prev_end - prev_start; }
void reset();
};
struct alloc_record_t {
sched_ue* user;
uint32_t aggr_idx;
alloc_type_t alloc_type;
explicit alloc_tree_t(uint32_t this_cfi, const sched_cell_params_t& cc_params, srslte_pucch_cfg_t& pucch_cfg);
size_t nof_leaves() const { return prev_end - prev_start; }
void reset();
void get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const;
bool add_tree_node_leaves(int node_idx,
const alloc_record_t& dci_record,
const cce_cfi_position_table& dci_locs,
tti_point tti_rx);
std::string result_to_string(bool verbose) const;
};
const alloc_tree_t& get_alloc_tree() const { return alloc_trees[current_cfix]; }
const cce_cfi_position_table* get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uint32_t cfix) const;
// PDCCH allocation algorithm
bool alloc_dci_record(const alloc_record_t& record, uint32_t cfix);
static bool add_tree_node_leaves(alloc_tree_t& tree,
int node_idx,
const alloc_record_t& dci_record,
const cce_cfi_position_table& dci_locs,
tti_point tti_tx_dl);
bool set_cfi(uint32_t cfi);
bool alloc_dci_record(const alloc_record_t& record, uint32_t cfix);
// consts
const sched_cell_params_t* cc_cfg = nullptr;
srslog::basic_logger& logger;
srslte_pucch_cfg_t pucch_cfg_common = {};
// tti vars
tti_point tti_rx;
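
What couples the PDCCH and PUCCH allocations in the alloc_dci() interface above is the standard LTE mapping n(1)_PUCCH = n_CCE + N(1)_PUCCH (TS 36.213, Sec. 10.1): picking a CCE position for a DL DCI implicitly fixes the UE's HARQ-ACK PUCCH resource. A hedged sketch of the resulting SR-collision check, with simplified stand-in types:

#include <cstdint>

struct pucch_cfg_sketch {
  uint32_t N_pucch_1;     // RRC-configured offset for dynamic HARQ-ACK resources
  bool     sr_configured; // whether the UE has an SR resource configured
  uint32_t n_pucch_sr;    // the UE's SR PUCCH resource index
};

// True if placing the DL DCI at 'n_cce' yields a HARQ-ACK PUCCH resource that
// would collide with the UE's own SR resource in an SR-opportunity TTI.
bool harq_ack_collides_with_sr(const pucch_cfg_sketch& cfg, uint32_t n_cce, bool is_sr_tti)
{
  const uint32_t n1_pucch = n_cce + cfg.N_pucch_1;
  return cfg.sr_configured && is_sr_tti && (n1_pucch == cfg.n_pucch_sr);
}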

View File

@@ -135,7 +135,6 @@ public:
uint32_t get_max_retx();
bool pucch_sr_collision(tti_point tti_tx_dl, uint32_t n_cce);
bool pdsch_enabled(tti_point tti_rx, uint32_t enb_cc_idx) const;
bool pusch_enabled(tti_point tti_rx, uint32_t enb_cc_idx, bool needs_pdcch) const;

View File

@@ -1103,16 +1103,6 @@ int set_derived_args(all_args_t* args_, rrc_cfg_t* rrc_cfg_, phy_cfg_t* phy_cfg_
rrc_cfg_->sibs[1].sib2().rr_cfg_common.prach_cfg.prach_cfg_info.prach_freq_offset = 0;
phy_cfg_->prach_cnfg.prach_cfg_info.prach_freq_offset = 0;
}
if (nrb_pucch > 1) {
fprintf(stderr,
"ERROR: Invalid PUCCH configuration - \"cqi_report_cnfg=%d\" and \"sched_request_cnfg.nof_prb=%d\""
" in rr.conf for 6 PRBs.\n Consider decreasing these values to 1 to leave enough space for the "
"transmission of Msg3.\n",
rrc_cfg_->cqi_cfg.nof_prb,
rrc_cfg_->sr_cfg.nof_prb);
rrc_cfg_->cqi_cfg.nof_prb = 1;
rrc_cfg_->sr_cfg.nof_prb = 1;
}
}
// Patch certain args that are not exposed yet

View File

@@ -167,7 +167,11 @@ void sf_grid_t::new_tti(tti_point tti_rx_)
}
//! Allocates CCEs and RBs for the given mask and allocation type (e.g. data, BC, RAR, paging)
alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_idx, alloc_type_t alloc_type, rbgmask_t alloc_mask, sched_ue* user)
alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_idx,
alloc_type_t alloc_type,
rbgmask_t alloc_mask,
sched_ue* user,
bool has_pusch_grant)
{
// Check RBG collision
if ((dl_mask & alloc_mask).any()) {
@@ -175,7 +179,7 @@ alloc_outcome_t sf_grid_t::alloc_dl(uint32_t aggr_idx, alloc_type_t alloc_type,
}
// Allocate DCI in PDCCH
if (not pdcch_alloc.alloc_dci(alloc_type, aggr_idx, user)) {
if (not pdcch_alloc.alloc_dci(alloc_type, aggr_idx, user, has_pusch_grant)) {
if (user != nullptr) {
if (logger.debug.enabled()) {
logger.debug("No space in PDCCH for rnti=0x%x DL tx. Current PDCCH allocation: %s",
@@ -216,12 +220,12 @@ sf_grid_t::dl_ctrl_alloc_t sf_grid_t::alloc_dl_ctrl(uint32_t aggr_idx, alloc_typ
}
//! Allocates CCEs and RBs for a user DL data alloc.
alloc_outcome_t sf_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask)
alloc_outcome_t sf_grid_t::alloc_dl_data(sched_ue* user, const rbgmask_t& user_mask, bool has_pusch_grant)
{
srslte_dci_format_t dci_format = user->get_dci_format();
uint32_t nof_bits = srslte_dci_format_sizeof(&cc_cfg->cfg.cell, nullptr, nullptr, dci_format);
uint32_t aggr_idx = user->get_aggr_level(cc_cfg->enb_cc_idx, nof_bits);
alloc_outcome_t ret = alloc_dl(aggr_idx, alloc_type_t::DL_DATA, user_mask, user);
alloc_outcome_t ret = alloc_dl(aggr_idx, alloc_type_t::DL_DATA, user_mask, user, has_pusch_grant);
return ret;
}
@@ -551,13 +555,14 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
return alloc_outcome_t::INVALID_PRBMASK;
}
bool has_pusch_grant = is_ul_alloc(user->get_rnti()) or cc_results->is_ul_alloc(user->get_rnti());
// Check if there is space in the PUCCH for HARQ ACKs
const sched_interface::ue_cfg_t& ue_cfg = user->get_ue_cfg();
std::bitset<SRSLTE_MAX_CARRIERS> scells = user->scell_activation_mask();
uint32_t ue_cc_idx = cc->get_ue_cc_idx();
if (user->nof_carriers_configured() > 1 and (ue_cc_idx == 0 or scells[ue_cc_idx]) and
is_periodic_cqi_expected(ue_cfg, get_tti_tx_ul())) {
bool has_pusch_grant = is_ul_alloc(user->get_rnti()) or cc_results->is_ul_alloc(user->get_rnti());
if (not has_pusch_grant) {
// Try to allocate a small PUSCH grant, if no PUSCH grant has been allocated for this TTI yet
prb_interval alloc = {};
@@ -572,7 +577,7 @@ alloc_outcome_t sf_sched::alloc_dl_user(sched_ue* user, const rbgmask_t& user_ma
}
// Try to allocate RBGs and DCI
alloc_outcome_t ret = tti_alloc.alloc_dl_data(user, user_mask);
alloc_outcome_t ret = tti_alloc.alloc_dl_data(user, user_mask, has_pusch_grant);
if (ret != alloc_outcome_t::SUCCESS) {
return ret;
}
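
The hunk above decides where the UCI travels: if the UE already holds (or can be given) a PUSCH grant in this TTI, the HARQ-ACK/CQI are multiplexed onto PUSCH and no PUCCH HARQ-ACK resource needs to be reserved; otherwise has_pusch_grant=false is passed down so that the PDCCH allocator also finds a collision-free PUCCH resource. A condensed sketch of the gating condition, with illustrative helper names:

#include <cstdint>

// Whether a small PUSCH grant should be pre-allocated so that multi-carrier
// UCI (the periodic CQI) is carried on PUSCH rather than on PUCCH.
bool needs_small_pusch_grant(uint32_t nof_carriers_configured,
                             bool     is_pcell_or_active_scell,
                             bool     periodic_cqi_expected,
                             bool     has_pusch_grant)
{
  return nof_carriers_configured > 1 and is_pcell_or_active_scell and
         periodic_cqi_expected and not has_pusch_grant;
}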

View File

@@ -12,24 +12,19 @@
#include "srsenb/hdr/stack/mac/sched_phy_ch/sf_cch_allocator.h"
#include "srsenb/hdr/stack/mac/sched_grid.h"
#include "srslte/srslog/bundled/fmt/format.h"
namespace srsenb {
void sf_cch_allocator::alloc_tree_t::reset()
{
prev_start = 0;
prev_end = 0;
dci_alloc_tree.clear();
}
void sf_cch_allocator::init(const sched_cell_params_t& cell_params_)
{
cc_cfg = &cell_params_;
cc_cfg = &cell_params_;
pucch_cfg_common = cc_cfg->pucch_cfg_common;
// init alloc trees
alloc_trees.reserve(cc_cfg->sched_cfg->max_nof_ctrl_symbols);
for (uint32_t i = 0; i < cc_cfg->sched_cfg->max_nof_ctrl_symbols; ++i) {
alloc_trees.emplace_back(cc_cfg->nof_cce_table[i]);
alloc_trees.emplace_back(i + 1, *cc_cfg, pucch_cfg_common);
}
}
@@ -65,10 +60,10 @@ sf_cch_allocator::get_cce_loc_table(alloc_type_t alloc_type, sched_ue* user, uin
return nullptr;
}
bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user)
bool sf_cch_allocator::alloc_dci(alloc_type_t alloc_type, uint32_t aggr_idx, sched_ue* user, bool has_pusch_grant)
{
// TODO: Make the alloc tree update lazy
alloc_record_t record{.user = user, .aggr_idx = aggr_idx, .alloc_type = alloc_type};
alloc_record_t record{.user = user, .aggr_idx = aggr_idx, .alloc_type = alloc_type, .pusch_uci = has_pusch_grant};
// Try to allocate user in PDCCH for given CFI. If it fails, increment CFI.
uint32_t first_cfi = get_cfi();
@@ -103,10 +98,10 @@ bool sf_cch_allocator::alloc_dci_record(const alloc_record_t& record, uint32_t c
if (tree.prev_end > 0) {
for (size_t j = tree.prev_start; j < tree.prev_end; ++j) {
ret |= add_tree_node_leaves(tree, (int)j, record, *dci_locs, to_tx_dl(tti_rx));
ret |= tree.add_tree_node_leaves((int)j, record, *dci_locs, tti_rx);
}
} else {
ret = add_tree_node_leaves(tree, -1, record, *dci_locs, to_tx_dl(tti_rx));
ret = tree.add_tree_node_leaves(-1, record, *dci_locs, tti_rx);
}
if (ret) {
@@ -117,66 +112,6 @@ bool sf_cch_allocator::alloc_dci_record(const alloc_record_t& record, uint32_t c
return ret;
}
//! Algorithm to compute a valid PDCCH allocation
bool sf_cch_allocator::add_tree_node_leaves(alloc_tree_t& tree,
int parent_node_idx,
const alloc_record_t& dci_record,
const cce_cfi_position_table& dci_locs,
tti_point tti_tx_dl)
{
bool ret = false;
alloc_t alloc;
alloc.rnti = (dci_record.user != nullptr) ? dci_record.user->get_rnti() : (uint16_t)0u;
alloc.dci_pos.L = dci_record.aggr_idx;
// get cumulative pdcch mask
pdcch_mask_t cum_mask;
if (parent_node_idx >= 0) {
cum_mask = tree.dci_alloc_tree[parent_node_idx].node.total_mask;
} else {
cum_mask.resize(tree.nof_cces);
}
for (uint32_t i = 0; i < dci_locs[dci_record.aggr_idx].size(); ++i) {
uint32_t startpos = dci_locs[dci_record.aggr_idx][i];
if (dci_record.alloc_type == alloc_type_t::DL_DATA and dci_record.user->pucch_sr_collision(tti_tx_dl, startpos)) {
// will cause a collision in the PUCCH
continue;
}
pdcch_mask_t alloc_mask(tree.nof_cces);
alloc_mask.fill(startpos, startpos + (1u << dci_record.aggr_idx));
if ((cum_mask & alloc_mask).any()) {
// there is collision. Try another mask
continue;
}
// Allocation successful
alloc.current_mask = alloc_mask;
alloc.total_mask = cum_mask | alloc_mask;
alloc.dci_pos.ncce = startpos;
// Prune if repetition
uint32_t j = tree.prev_end;
for (; j < tree.dci_alloc_tree.size(); ++j) {
if (tree.dci_alloc_tree[j].node.total_mask == alloc.total_mask) {
break;
}
}
if (j < tree.dci_alloc_tree.size()) {
continue;
}
// Register allocation
tree.dci_alloc_tree.emplace_back(parent_node_idx, alloc);
ret = true;
}
return ret;
}
bool sf_cch_allocator::set_cfi(uint32_t cfi)
{
if (cfi < cc_cfg->sched_cfg->min_nof_ctrl_symbols or cfi > cc_cfg->sched_cfg->max_nof_ctrl_symbols) {
@@ -209,20 +144,132 @@ bool sf_cch_allocator::set_cfi(uint32_t cfi)
}
current_cfix = new_cfix;
// TODO: The estimation of the number of required PRBs in the metric depends on the CFI. Analyse the consequences
return true;
}
void sf_cch_allocator::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
{
auto& tree = alloc_trees[current_cfix];
alloc_trees[current_cfix].get_allocs(vec, tot_mask, idx);
}
std::string sf_cch_allocator::result_to_string(bool verbose) const
{
return alloc_trees[current_cfix].result_to_string(verbose);
}
sf_cch_allocator::alloc_tree_t::alloc_tree_t(uint32_t this_cfi,
const sched_cell_params_t& cc_params,
srslte_pucch_cfg_t& pucch_cfg) :
cfi(this_cfi), cc_cfg(&cc_params), pucch_cfg(&pucch_cfg), nof_cces(cc_params.nof_cce_table[this_cfi - 1])
{
dci_alloc_tree.reserve(8);
}
void sf_cch_allocator::alloc_tree_t::reset()
{
prev_start = 0;
prev_end = 0;
dci_alloc_tree.clear();
}
bool is_pucch_sr_collision(const srslte_pucch_cfg_t& pucch_cfg, tti_point tti_tx_dl, uint32_t n1_pucch)
{
if (pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&pucch_cfg, tti_tx_dl.to_uint())) {
return n1_pucch == pucch_cfg.n_pucch_sr;
}
return false;
}
/// Algorithm to compute a valid PDCCH and PUCCH allocation
bool sf_cch_allocator::alloc_tree_t::add_tree_node_leaves(int parent_node_idx,
const alloc_record_t& dci_record,
const cce_cfi_position_table& dci_locs,
tti_point tti_rx_)
{
bool ret = false;
alloc_t alloc;
alloc.rnti = (dci_record.user != nullptr) ? dci_record.user->get_rnti() : SRSLTE_INVALID_RNTI;
alloc.dci_pos.L = dci_record.aggr_idx;
// get cumulative pdcch & pucch masks
pdcch_mask_t parent_total_mask;
prbmask_t parent_pucch_mask;
if (parent_node_idx >= 0) {
parent_total_mask = dci_alloc_tree[parent_node_idx].node.total_mask;
parent_pucch_mask = dci_alloc_tree[parent_node_idx].node.total_pucch_mask;
} else {
parent_total_mask.resize(nof_cces);
parent_pucch_mask.resize(cc_cfg->nof_prb());
}
for (uint32_t i = 0; i < dci_locs[dci_record.aggr_idx].size(); ++i) {
int8_t pucch_prbidx = -1;
uint32_t ncce_pos = dci_locs[dci_record.aggr_idx][i];
if (dci_record.alloc_type == alloc_type_t::DL_DATA and not dci_record.pusch_uci) {
// The UE will need a PUCCH resource for the HARQ-ACK, since there is no PUSCH to carry the UCI
pucch_cfg->n_pucch = ncce_pos + pucch_cfg->N_pucch_1;
if (is_pucch_sr_collision(*pucch_cfg, to_tx_dl_ack(tti_rx_), pucch_cfg->n_pucch)) {
// avoid collision of HARQ-ACK with own SR n(1)_pucch
continue;
}
pucch_prbidx = srslte_pucch_n_prb(&cc_cfg->cfg.cell, pucch_cfg, 0);
if (parent_pucch_mask.test(pucch_prbidx)) {
// PUCCH allocation would collide with other PUCCH/PUSCH grants. Try another CCE position
continue;
}
}
pdcch_mask_t alloc_mask(nof_cces);
alloc_mask.fill(ncce_pos, ncce_pos + (1u << dci_record.aggr_idx));
if ((parent_total_mask & alloc_mask).any()) {
// there is a PDCCH collision. Try another CCE position
continue;
}
// Allocation successful
alloc.current_mask = alloc_mask;
alloc.total_mask = parent_total_mask | alloc_mask;
alloc.dci_pos.ncce = ncce_pos;
alloc.pucch_n_prb = pucch_prbidx;
alloc.total_pucch_mask = parent_pucch_mask;
if (pucch_prbidx >= 0) {
alloc.total_pucch_mask.set(pucch_prbidx);
}
// Prune if repetition of total_masks
uint32_t j = prev_end;
for (; j < dci_alloc_tree.size(); ++j) {
if (dci_alloc_tree[j].node.total_mask == alloc.total_mask) {
// leave nested for-loop
break;
}
}
if (j < dci_alloc_tree.size()) {
continue;
}
// Register allocation
dci_alloc_tree.emplace_back(parent_node_idx, alloc);
ret = true;
}
return ret;
}
void sf_cch_allocator::alloc_tree_t::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, size_t idx) const
{
// if alloc tree is empty
if (tree.prev_start == tree.prev_end) {
if (prev_start == prev_end) {
if (vec != nullptr) {
vec->clear();
}
if (tot_mask != nullptr) {
tot_mask->resize(nof_cces());
tot_mask->resize(nof_cces);
tot_mask->reset();
}
return;
@@ -231,50 +278,52 @@ void sf_cch_allocator::get_allocs(alloc_result_t* vec, pdcch_mask_t* tot_mask, s
// set vector of allocations
if (vec != nullptr) {
vec->clear();
size_t i = tree.prev_start + idx;
while (tree.dci_alloc_tree[i].parent_idx >= 0) {
vec->push_back(&tree.dci_alloc_tree[i].node);
i = (size_t)tree.dci_alloc_tree[i].parent_idx;
size_t i = prev_start + idx;
while (dci_alloc_tree[i].parent_idx >= 0) {
vec->push_back(&dci_alloc_tree[i].node);
i = (size_t)dci_alloc_tree[i].parent_idx;
}
vec->push_back(&tree.dci_alloc_tree[i].node);
vec->push_back(&dci_alloc_tree[i].node);
std::reverse(vec->begin(), vec->end());
}
// set final cce mask
if (tot_mask != nullptr) {
*tot_mask = tree.dci_alloc_tree[tree.prev_start + idx].node.total_mask;
*tot_mask = dci_alloc_tree[prev_start + idx].node.total_mask;
}
}
std::string sf_cch_allocator::result_to_string(bool verbose) const
std::string sf_cch_allocator::alloc_tree_t::result_to_string(bool verbose) const
{
auto& tree = alloc_trees[current_cfix];
std::stringstream ss;
ss << "cfi=" << get_cfi() << ", mask_size=" << nof_cces() << ", " << tree.prev_end - tree.prev_start
<< " DCI allocation combinations:\n";
// get all the possible combinations of DCI allocations
// get all the possible combinations of DCI pos allocations
fmt::basic_memory_buffer<char, 1024> strbuf;
fmt::format_to(strbuf,
"SCHED: PDCCH allocations cfi={}, nof_cce={}, {} possible combinations:\n",
cfi,
nof_cces,
prev_end - prev_start);
uint32_t count = 0;
for (size_t i = tree.prev_start; i < tree.prev_end; ++i) {
for (size_t i = prev_start; i < prev_end; ++i) {
alloc_result_t vec;
pdcch_mask_t tot_mask;
get_allocs(&vec, &tot_mask, i - tree.prev_start);
get_allocs(&vec, &tot_mask, i - prev_start);
ss << " combination " << count << ": mask=0x" << tot_mask.to_hex().c_str();
fmt::format_to(strbuf, "[{}]: total mask=0x{}", count, tot_mask.to_hex().c_str());
if (verbose) {
ss << ", DCI allocs:\n";
fmt::format_to(strbuf, ", allocations:\n");
for (const auto& dci_alloc : vec) {
char hex[5];
sprintf(hex, "%x", dci_alloc->rnti);
ss << " > rnti=0x" << hex << ": " << dci_alloc->current_mask.to_hex().c_str() << " / "
<< dci_alloc->total_mask.to_hex().c_str() << "\n";
fmt::format_to(strbuf,
" > rnti=0x{:0x}: 0x{} / 0x{}\n",
dci_alloc->rnti,
dci_alloc->current_mask.to_hex().c_str(),
dci_alloc->total_mask.to_hex().c_str());
}
} else {
ss << "\n";
fmt::format_to(strbuf, "\n");
}
count++;
}
return ss.str();
return fmt::to_string(strbuf);
}
} // namespace srsenb
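
For reference, the alloc tree above encodes each candidate DCI combination as a chain of parent indices: leaves created in the previous allocation round span [prev_start, prev_end), and a full solution is recovered by walking from a leaf back to the root and reversing, exactly as get_allocs() does. A minimal sketch of that walk over a simplified node type:

#include <algorithm>
#include <cstddef>
#include <vector>

struct node_sketch {
  int parent_idx; // -1 marks a root node
  int payload;    // stands in for alloc_t
};

// Recover the root-to-leaf chain of allocations for one solution.
std::vector<int> walk_to_root(const std::vector<node_sketch>& tree, size_t leaf)
{
  std::vector<int> path;
  size_t i = leaf;
  while (tree[i].parent_idx >= 0) {
    path.push_back(tree[i].payload);
    i = (size_t)tree[i].parent_idx;
  }
  path.push_back(tree[i].payload); // include the root
  std::reverse(path.begin(), path.end());
  return path;
}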

View File

@@ -176,17 +176,6 @@ void sched_ue::unset_sr()
sr = false;
}
bool sched_ue::pucch_sr_collision(tti_point tti_tx_dl, uint32_t n_cce)
{
if (!phy_config_dedicated_enabled) {
return false;
}
if (cfg.pucch_cfg.sr_configured && srslte_ue_ul_sr_send_tti(&cfg.pucch_cfg, tti_tx_dl.to_uint())) {
return (n_cce + cfg.pucch_cfg.N_pucch_1) == cfg.pucch_cfg.n_pucch_sr;
}
return false;
}
tti_point prev_meas_gap_start(tti_point tti, uint32_t period, uint32_t offset)
{
return tti_point{static_cast<uint32_t>(floor(static_cast<float>((tti - offset).to_uint()) / period)) * period +

View File

@@ -88,7 +88,7 @@ uint32_t sched_time_pf::try_dl_alloc(ue_ctxt& ue_ctxt, sched_ue& ue, sf_sched* t
}
}
if (code == alloc_outcome_t::DCI_COLLISION) {
logger.info("SCHED: Couldn't find space in PDCCH for DL tx for rnti=0x%x", ue.get_rnti());
logger.info("SCHED: Couldn't find space in PDCCH/PUCCH for DL tx for rnti=0x%x", ue.get_rnti());
}
return 0;
}

View File

@@ -71,7 +71,7 @@ int test_pdcch_one_ue()
const cce_cfi_position_table* dci_cce = sched_ue.get_locations(ENB_CC_IDX, prev_cfi, to_tx_dl(tti_rx).sf_idx());
uint32_t prev_nof_cce_locs = (*dci_cce)[aggr_idx].size();
TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, aggr_idx, &sched_ue));
TESTASSERT(pdcch.alloc_dci(alloc_type_t::DL_DATA, aggr_idx, &sched_ue, true));
TESTASSERT(pdcch.nof_allocs() == 1);
if (prev_nof_cce_locs == pdcch.nof_allocs() - 1) {
// CFI must be increased
@@ -102,7 +102,7 @@ int test_pdcch_one_ue()
}
prev_nof_cce_locs = dci_locs.size();
prev_cfi = pdcch.get_cfi();
TESTASSERT(pdcch.alloc_dci(alloc_type_t::UL_DATA, aggr_idx, &sched_ue));
TESTASSERT(pdcch.alloc_dci(alloc_type_t::UL_DATA, aggr_idx, &sched_ue, true));
TESTASSERT(pdcch.nof_allocs() == 2);
if (prev_nof_cce_locs == pdcch.nof_allocs() - 1) {
// CFI must be increased