Changes on srsENB mutexing logic to fix general race condition and deadlock issues (#229)

* fixed some issues with the UL scheduling

* Hack to fix UL scheduler

* minor fix

* Cleaned up code and fixed issue with the update_allocation function

* fixed the console printing in the enb

* log/console fix

* fixed the log print

* added a normalization factor

* RLC: entity uses dynamic instances. Simplified stop/reset/reestablish procedure. Added non-blocking interface

* Limit decimals in metrics stdout

* Changed mutexes to rwlock in RLC/RRC/MAC/PDCP to fix race conditions when removing users

* Fix deadlock bug for MIMO

* Remove headers

* Fix missing unlock causing overflows

* Do not decrease CQI when PUCCH (this is a temporal fix, requires to reduce the maximum MCS)

* Fix mutex unlock in worker

* Configurable RLC tx buffer. Default to 512 for enodeb

* Check NULL SDU in write_sdu()

* Protect RLC objects and tx_queue from being destroyed while using it

* Remove superfluous code

* Disable SIB logging

* Fix block_queue for enb
This commit is contained in:
Ismael Gomez 2018-07-05 11:00:19 +02:00 committed by GitHub
parent 546b631c93
commit 589e569ce9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
30 changed files with 1075 additions and 649 deletions

View File

@ -42,6 +42,8 @@
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <strings.h>
namespace srslte {
@ -63,6 +65,28 @@ public:
pthread_cond_init(&cv_full, NULL);
this->capacity = capacity;
mutexed_callback = NULL;
enable = true;
num_threads = 0;
}
~block_queue() {
  // Wake up ALL threads blocked in push()/pop(). Broadcast is required:
  // signal() wakes a single waiter and any additional blocked thread
  // would sleep forever, deadlocking this destructor.
  pthread_mutex_lock(&mutex);
  enable = false;
  pthread_cond_broadcast(&cv_full);
  pthread_cond_broadcast(&cv_empty);
  pthread_mutex_unlock(&mutex);
  // Wait for threads still inside push_()/pop_() to leave. num_threads is
  // written under the mutex, so read it under the mutex too (the previous
  // unlocked busy-wait was a data race).
  bool busy = true;
  while (busy) {
    pthread_mutex_lock(&mutex);
    busy = num_threads > 0;
    pthread_mutex_unlock(&mutex);
    if (busy) {
      usleep(100);
    }
  }
  // No thread can touch the queue anymore: destroy condition variables and mutex
  pthread_cond_destroy(&cv_full);
  pthread_cond_destroy(&cv_empty);
  pthread_mutex_destroy(&mutex);
}
void set_mutexed_itf(call_mutexed_itf *itf) {
mutexed_callback = itf;
@ -70,26 +94,6 @@ public:
void resize(int new_capacity) {
capacity = new_capacity;
}
// Push an element. If block is true, waits while the queue is full;
// otherwise fails immediately when full. Returns true if pushed.
bool push_(const myobj& value, bool block) {
  pthread_mutex_lock(&mutex);
  if (capacity > 0) {
    if (block) {
      // Wait while full. Must compare with >=, not >: otherwise the queue
      // could grow to capacity+1 elements.
      while (q.size() >= (uint32_t) capacity) {
        pthread_cond_wait(&cv_full, &mutex);
      }
    } else if (q.size() >= (uint32_t) capacity) {
      // Non-blocking push fails only when the queue is actually full.
      // (Previously it failed unconditionally whenever capacity was set.)
      pthread_mutex_unlock(&mutex);
      return false;
    }
  }
  q.push(value);
  if (mutexed_callback) {
    mutexed_callback->pushing(value);
  }
  pthread_cond_signal(&cv_empty);
  pthread_mutex_unlock(&mutex);
  return true;
}
void push(const myobj& value) {
push_(value, true);
@ -99,36 +103,14 @@ public:
return push_(value, false);
}
// Non-blocking pop: returns false if the queue is empty or disabled.
// (A bad merge left two definitions of this method concatenated here;
// the single delegating implementation is kept.)
bool try_pop(myobj *value) {
  return pop_(value, false);
}
// Blocking pop: waits until an element is available. If the queue is
// disabled (destructor running), the zero-initialized value is returned.
// (A bad merge left the old inline body fused with the new delegating
// body; the delegating implementation is kept.)
myobj wait_pop() { // blocking pop
  myobj value;
  // NOTE(review): bzero is only safe if myobj is trivially copyable
  // (the queue is used with plain pointers in this codebase) — confirm.
  bzero(&value, sizeof(myobj));
  pop_(&value, true);
  return value;
}
@ -153,12 +135,77 @@ public:
}
private:
// Pop an element. If block is true, waits until the queue has data or is
// disabled. Returns true if an element was popped.
bool pop_(myobj *value, bool block) {
  pthread_mutex_lock(&mutex);
  // enable is written by the destructor: it must be read under the mutex
  // (the previous unlocked read was a data race).
  if (!enable) {
    pthread_mutex_unlock(&mutex);
    return false;
  }
  num_threads++;
  bool ret = false;
  if (q.empty() && !block) {
    goto exit;
  }
  while (q.empty() && enable) {
    pthread_cond_wait(&cv_empty, &mutex);
  }
  if (!enable) {
    goto exit;
  }
  if (value) {
    *value = q.front();
    q.pop();
    if (mutexed_callback) {
      // Notify only when a value was actually popped: calling
      // popping(*value) with value==NULL dereferenced a NULL pointer.
      mutexed_callback->popping(*value);
    }
  }
  ret = true;
  pthread_cond_signal(&cv_full);
exit:
  num_threads--;
  pthread_mutex_unlock(&mutex);
  return ret;
}
// Push an element. If block is true, waits while the queue is full or
// until it is disabled; otherwise fails immediately when full.
// Returns true if the element was pushed.
bool push_(const myobj& value, bool block) {
  pthread_mutex_lock(&mutex);
  // enable is written by the destructor: it must be read under the mutex
  // (the previous unlocked read was a data race).
  if (!enable) {
    pthread_mutex_unlock(&mutex);
    return false;
  }
  num_threads++;
  bool ret = false;
  if (capacity > 0) {
    if (block) {
      while (q.size() >= (uint32_t) capacity && enable) {
        pthread_cond_wait(&cv_full, &mutex);
      }
      if (!enable) {
        goto exit;
      }
    } else if (q.size() >= (uint32_t) capacity) {
      goto exit;
    }
  }
  q.push(value);
  ret = true;
  if (mutexed_callback) {
    mutexed_callback->pushing(value);
  }
  pthread_cond_signal(&cv_empty);
exit:
  num_threads--;
  pthread_mutex_unlock(&mutex);
  return ret;
}
std::queue<myobj> q;
pthread_mutex_t mutex;
pthread_cond_t cv_empty;
pthread_cond_t cv_full;
call_mutexed_itf *mutexed_callback;
int capacity;
bool enable;
uint32_t num_threads;
};
}

View File

@ -173,8 +173,7 @@ public:
class rlc_interface_rrc
{
public:
virtual void reset(uint16_t rnti) = 0;
virtual void clear_buffer(uint16_t rnti) = 0;
virtual void clear_buffer(uint16_t rnti) = 0;
virtual void add_user(uint16_t rnti) = 0;
virtual void rem_user(uint16_t rnti) = 0;
virtual void add_bearer(uint16_t rnti, uint32_t lcid) = 0;

View File

@ -56,13 +56,15 @@ public:
srsue::ue_interface *ue_,
log *rlc_log_,
mac_interface_timers *mac_timers_,
uint32_t lcid_);
uint32_t lcid_,
int buffer_size = -1); // -1 to use default buffer sizes
void stop();
void get_metrics(rlc_metrics_t &m);
// PDCP interface
void write_sdu(uint32_t lcid, byte_buffer_t *sdu);
void write_sdu_nb(uint32_t lcid, byte_buffer_t *sdu);
void write_sdu_mch(uint32_t lcid, byte_buffer_t *sdu);
bool rb_is_um(uint32_t lcid);
@ -99,6 +101,7 @@ private:
srslte::rlc_entity rlc_array[SRSLTE_N_RADIO_BEARERS];
srslte::rlc_um rlc_array_mrb[SRSLTE_N_MCH_LCIDS];
uint32_t default_lcid;
int buffer_size;
long ul_tput_bytes[SRSLTE_N_RADIO_BEARERS];
long dl_tput_bytes[SRSLTE_N_RADIO_BEARERS];

View File

@ -70,7 +70,7 @@ class rlc_am
:public rlc_common
{
public:
rlc_am();
rlc_am(uint32_t queue_len = 16);
~rlc_am();
void init(log *rlc_entity_log_,
uint32_t lcid_,
@ -78,9 +78,9 @@ public:
srsue::rrc_interface_rlc *rrc_,
mac_interface_timers *mac_timers);
void configure(srslte_rlc_config_t cnfg);
void reset();
void reestablish();
void stop();
void empty_queue();
rlc_mode_t get_mode();
@ -88,6 +88,7 @@ public:
// PDCP interface
void write_sdu(byte_buffer_t *sdu);
void write_sdu_nb(byte_buffer_t *sdu);
// MAC interface
uint32_t get_buffer_state();
@ -122,6 +123,7 @@ private:
// Mutexes
pthread_mutex_t mutex;
bool tx_enabled;
bool poll_received;
bool do_status;
rlc_status_pdu_t status;

View File

@ -151,13 +151,13 @@ struct rlc_status_pdu_t{
class rlc_common
{
public:
virtual ~rlc_common() {}
virtual void init(srslte::log *rlc_entity_log_,
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
srsue::rrc_interface_rlc *rrc_,
srslte::mac_interface_timers *mac_timers_) = 0;
virtual void configure(srslte_rlc_config_t cnfg) = 0;
virtual void reset() = 0;
virtual void stop() = 0;
virtual void empty_queue() = 0;
@ -166,6 +166,7 @@ public:
// PDCP interface
virtual void write_sdu(byte_buffer_t *sdu) = 0;
virtual void write_sdu_nb(byte_buffer_t *sdu) = 0;
// MAC interface
virtual uint32_t get_buffer_state() = 0;

View File

@ -52,10 +52,10 @@ public:
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
srsue::rrc_interface_rlc *rrc_,
mac_interface_timers *mac_timers_);
mac_interface_timers *mac_timers_,
int buffer_size = -1); // use -1 for default buffer sizes
void configure(srslte_rlc_config_t cnfg);
void reset();
void reestablish();
void stop();
void empty_queue();
@ -66,6 +66,7 @@ public:
// PDCP interface
void write_sdu(byte_buffer_t *sdu);
void write_sdu_nb(byte_buffer_t *sdu);
// MAC interface
uint32_t get_buffer_state();
@ -75,10 +76,8 @@ public:
private:
rlc_tm tm;
rlc_um um;
rlc_am am;
rlc_mode_t mode;
uint32_t lcid;
rlc_common *rlc;
};

View File

@ -40,14 +40,14 @@ class rlc_tm
:public rlc_common
{
public:
rlc_tm();
rlc_tm(uint32_t queue_len = 16);
~rlc_tm();
void init(log *rlc_entity_log_,
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
srsue::rrc_interface_rlc *rrc_,
mac_interface_timers *mac_timers);
void configure(srslte_rlc_config_t cnfg);
void reset();
void stop();
void empty_queue();
@ -56,6 +56,7 @@ public:
// PDCP interface
void write_sdu(byte_buffer_t *sdu);
void write_sdu_nb(byte_buffer_t *sdu);
// MAC interface
uint32_t get_buffer_state();
@ -71,6 +72,8 @@ private:
srsue::pdcp_interface_rlc *pdcp;
srsue::rrc_interface_rlc *rrc;
bool tx_enabled;
// Thread-safe queues for MAC messages
rlc_tx_queue ul_queue;
};

View File

@ -44,7 +44,7 @@ namespace srslte {
class rlc_tx_queue : public block_queue<byte_buffer_t*>::call_mutexed_itf
{
public:
rlc_tx_queue(uint32_t capacity = 128) : queue((int) capacity) {
rlc_tx_queue(int capacity = 128) : queue(capacity) {
unread_bytes = 0;
queue.set_mutexed_itf(this);
}
@ -64,6 +64,11 @@ public:
queue.push(msg);
}
bool try_write(byte_buffer_t *msg)
{
return queue.try_push(msg);
}
void read(byte_buffer_t **msg)
{
byte_buffer_t *m = queue.wait_pop();

View File

@ -49,8 +49,7 @@ class rlc_um
,public rlc_common
{
public:
rlc_um();
rlc_um(uint32_t queue_len = 32);
~rlc_um();
void init(log *rlc_entity_log_,
uint32_t lcid_,
@ -58,7 +57,6 @@ public:
srsue::rrc_interface_rlc *rrc_,
mac_interface_timers *mac_timers_);
void configure(srslte_rlc_config_t cnfg);
void reset();
void stop();
void empty_queue();
bool is_mrb();
@ -68,6 +66,7 @@ public:
// PDCP interface
void write_sdu(byte_buffer_t *sdu);
void write_sdu_nb(byte_buffer_t *sdu);
// MAC interface
uint32_t get_buffer_state();
@ -131,6 +130,7 @@ private:
srslte::timers::timer *reordering_timer;
uint32_t reordering_timer_id;
bool tx_enabled;
bool pdu_lost;
int build_data_pdu(uint8_t *payload, uint32_t nof_bytes);

View File

@ -51,7 +51,8 @@ void rlc::init(srsue::pdcp_interface_rlc *pdcp_,
srsue::ue_interface *ue_,
log *rlc_log_,
mac_interface_timers *mac_timers_,
uint32_t lcid_)
uint32_t lcid_,
int buffer_size_)
{
pdcp = pdcp_;
rrc = rrc_;
@ -59,11 +60,12 @@ void rlc::init(srsue::pdcp_interface_rlc *pdcp_,
rlc_log = rlc_log_;
mac_timers = mac_timers_;
default_lcid = lcid_;
buffer_size = buffer_size_;
gettimeofday(&metrics_time[1], NULL);
reset_metrics();
rlc_array[0].init(RLC_MODE_TM, rlc_log, default_lcid, pdcp, rrc, mac_timers); // SRB0
rlc_array[0].init(RLC_MODE_TM, rlc_log, default_lcid, pdcp, rrc, mac_timers, buffer_size); // SRB0
}
void rlc::reset_metrics()
@ -75,8 +77,9 @@ void rlc::reset_metrics()
// Stop every active RLC entity.
// (A bad merge duplicated the active() guard line; a single guarded call remains.)
void rlc::stop()
{
  for(uint32_t i=0; i<SRSLTE_N_RADIO_BEARERS; i++) {
    if(rlc_array[i].active()) {
      rlc_array[i].stop();
    }
  }
}
@ -114,6 +117,7 @@ void rlc::get_metrics(rlc_metrics_t &m)
reset_metrics();
}
// A call to reestablish stops all lcids but does not delete the instances. The mapping lcid to rlc mode can not change
void rlc::reestablish() {
for(uint32_t i=0; i<SRSLTE_N_RADIO_BEARERS; i++) {
if(rlc_array[i].active()) {
@ -122,14 +126,16 @@ void rlc::reestablish() {
}
}
// Resetting the RLC layer returns the object to the state after the call to init(): all lcids are stopped and
// the default lcid=0 is created
void rlc::reset()
{
  // Stop every active entity. (Merge residue left both the old reset() call
  // and an unconditional stop() here: stop() ran even for inactive entities.)
  for(uint32_t i=0; i<SRSLTE_N_RADIO_BEARERS; i++) {
    if(rlc_array[i].active()) {
      rlc_array[i].stop();
    }
  }
  // Re-create SRB0 so the layer is usable again after the reset
  rlc_array[0].init(RLC_MODE_TM, rlc_log, default_lcid, pdcp, rrc, mac_timers, buffer_size); // SRB0
}
void rlc::empty_queue()
@ -149,6 +155,12 @@ void rlc::write_sdu(uint32_t lcid, byte_buffer_t *sdu)
rlc_array[lcid].write_sdu(sdu);
}
}
// Non-blocking SDU write: forwards to the RLC entity only for a valid lcid.
void rlc::write_sdu_nb(uint32_t lcid, byte_buffer_t *sdu)
{
  if (!valid_lcid(lcid)) {
    return;
  }
  rlc_array[lcid].write_sdu_nb(sdu);
}
void rlc::write_sdu_mch(uint32_t lcid, byte_buffer_t *sdu)
{
if(valid_lcid_mrb(lcid)) {
@ -307,16 +319,16 @@ void rlc::add_bearer(uint32_t lcid, srslte_rlc_config_t cnfg)
switch(cnfg.rlc_mode)
{
case LIBLTE_RRC_RLC_MODE_AM:
rlc_array[lcid].init(RLC_MODE_AM, rlc_log, lcid, pdcp, rrc, mac_timers);
rlc_array[lcid].init(RLC_MODE_AM, rlc_log, lcid, pdcp, rrc, mac_timers, buffer_size);
break;
case LIBLTE_RRC_RLC_MODE_UM_BI:
rlc_array[lcid].init(RLC_MODE_UM, rlc_log, lcid, pdcp, rrc, mac_timers);
rlc_array[lcid].init(RLC_MODE_UM, rlc_log, lcid, pdcp, rrc, mac_timers, buffer_size);
break;
case LIBLTE_RRC_RLC_MODE_UM_UNI_DL:
rlc_array[lcid].init(RLC_MODE_UM, rlc_log, lcid, pdcp, rrc, mac_timers);
rlc_array[lcid].init(RLC_MODE_UM, rlc_log, lcid, pdcp, rrc, mac_timers, buffer_size);
break;
case LIBLTE_RRC_RLC_MODE_UM_UNI_UL:
rlc_array[lcid].init(RLC_MODE_UM, rlc_log, lcid, pdcp, rrc, mac_timers);
rlc_array[lcid].init(RLC_MODE_UM, rlc_log, lcid, pdcp, rrc, mac_timers, buffer_size);
break;
default:
rlc_log->error("Cannot add RLC entity - invalid mode\n");

View File

@ -36,7 +36,7 @@
namespace srslte {
rlc_am::rlc_am() : tx_sdu_queue(16)
rlc_am::rlc_am(uint32_t queue_len) : tx_sdu_queue(queue_len)
{
log = NULL;
pdcp = NULL;
@ -68,19 +68,13 @@ rlc_am::rlc_am() : tx_sdu_queue(16)
do_status = false;
}
// Warning: must call stop() to properly deallocate all buffers
rlc_am::~rlc_am()
{
// reset RLC and dealloc SDUs
stop();
if(rx_sdu) {
pool->deallocate(rx_sdu);
}
if(tx_sdu) {
pool->deallocate(tx_sdu);
}
pthread_mutex_destroy(&mutex);
pool = NULL;
}
void rlc_am::init(srslte::log *log_,
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
@ -91,6 +85,7 @@ void rlc_am::init(srslte::log *log_,
lcid = lcid_;
pdcp = pdcp_;
rrc = rrc_;
tx_enabled = true;
}
void rlc_am::configure(srslte_rlc_config_t cfg_)
@ -106,21 +101,16 @@ void rlc_am::configure(srslte_rlc_config_t cfg_)
void rlc_am::empty_queue() {
// Drop all messages in TX SDU queue
byte_buffer_t *buf;
while(tx_sdu_queue.size() > 0) {
tx_sdu_queue.read(&buf);
while(tx_sdu_queue.try_read(&buf)) {
pool->deallocate(buf);
}
}
void rlc_am::stop()
{
reset();
pthread_mutex_destroy(&mutex);
}
void rlc_am::reset()
{
// Empty tx_sdu_queue before locking the mutex
tx_enabled = false;
usleep(100);
empty_queue();
pthread_mutex_lock(&mutex);
@ -198,8 +188,34 @@ uint32_t rlc_am::get_bearer()
// Blocking SDU write from PDCP. Drops the SDU if the entity is stopped.
// (Merge residue queued and logged the SDU once more BEFORE the
// tx_enabled/NULL checks; those stale lines are removed.)
void rlc_am::write_sdu(byte_buffer_t *sdu)
{
  // NULL check first: the pool must never be asked to deallocate NULL
  if (!sdu) {
    log->warning("NULL SDU pointer in write_sdu()\n");
    return;
  }
  if (!tx_enabled) {
    // Entity stopped: return the buffer to the pool instead of queueing it
    byte_buffer_pool::get_instance()->deallocate(sdu);
    return;
  }
  tx_sdu_queue.write(sdu);
  log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU (%d B, tx_sdu_queue_len=%d)", rrc->get_rb_name(lcid).c_str(), sdu->N_bytes, tx_sdu_queue.size());
}
// Non-blocking SDU write from PDCP: drops the SDU if the queue is full.
void rlc_am::write_sdu_nb(byte_buffer_t *sdu)
{
  // NULL check first: the pool must never be asked to deallocate NULL
  if (!sdu) {
    log->warning("NULL SDU pointer in write_sdu_nb()\n");
    return;
  }
  if (!tx_enabled) {
    byte_buffer_pool::get_instance()->deallocate(sdu);
    return;
  }
  if (tx_sdu_queue.try_write(sdu)) {
    log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU (%d B, tx_sdu_queue_len=%d)", rrc->get_rb_name(lcid).c_str(), sdu->N_bytes, tx_sdu_queue.size());
  } else {
    // Queue full: non-blocking semantics require dropping the SDU
    log->warning_hex(sdu->msg, sdu->N_bytes, "[Dropped SDU] %s Tx SDU (%d B, tx_sdu_queue_len=%d)", rrc->get_rb_name(lcid).c_str(), sdu->N_bytes, tx_sdu_queue.size());
    pool->deallocate(sdu);
  }
}
/****************************************************************************

View File

@ -33,34 +33,49 @@ rlc_entity::rlc_entity()
{
}
void rlc_entity::init(rlc_mode_t mode,
void rlc_entity::init(rlc_mode_t mode_,
log *rlc_entity_log_,
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
srsue::rrc_interface_rlc *rrc_,
mac_interface_timers *mac_timers_)
mac_interface_timers *mac_timers_,
int buffer_size)
{
tm.reset();
um.reset();
am.reset();
switch(mode)
{
case RLC_MODE_TM:
rlc = &tm;
break;
case RLC_MODE_UM:
rlc = &um;
break;
case RLC_MODE_AM:
rlc = &am;
break;
default:
rlc_entity_log_->error("Invalid RLC mode - defaulting to TM\n");
rlc = &tm;
break;
if (buffer_size <= 0) {
buffer_size = 16;
}
// Create the RLC instance the first time init() is called.
// If called to reestablished, the entity is stopped but not destroyed
// Next call to init() must use same mode
if (rlc == NULL) {
switch(mode_)
{
case RLC_MODE_TM:
rlc = new rlc_tm((uint32_t) buffer_size);
break;
case RLC_MODE_UM:
rlc = new rlc_um((uint32_t) buffer_size);
break;
case RLC_MODE_AM:
rlc = new rlc_am((uint32_t) buffer_size);
break;
default:
rlc_entity_log_->error("Invalid RLC mode - defaulting to TM\n");
rlc = new rlc_tm((uint32_t) buffer_size);
break;
}
lcid = lcid_;
mode = mode_;
} else {
if (lcid != lcid_) {
rlc_entity_log_->warning("Reestablishing RLC instance. LCID changed from %d to %d\n", lcid, lcid_);
lcid = lcid_;
}
if (mode != mode_) {
rlc_entity_log_->console("Error reestablishing RLC instance. Mode changed from %d to %d. \n", mode, mode_);
}
}
rlc->init(rlc_entity_log_, lcid_, pdcp_, rrc_, mac_timers_);
}
@ -70,19 +85,16 @@ void rlc_entity::configure(srslte_rlc_config_t cnfg)
rlc->configure(cnfg);
}
// Reestablishment stops the entity but does not destroy it. Mode will not change
void rlc_entity::reestablish() {
rlc->reset();
}
void rlc_entity::reset()
{
rlc->reset();
rlc = NULL;
rlc->stop();
}
// A call to stop() stops the entity and deletes the instance. Afterwards this entity can be reused with another mode.
void rlc_entity::stop()
{
rlc->stop();
delete rlc;
rlc = NULL;
}
@ -119,6 +131,12 @@ void rlc_entity::write_sdu(byte_buffer_t *sdu)
rlc->write_sdu(sdu);
}
void rlc_entity::write_sdu_nb(byte_buffer_t *sdu)
{
if(rlc)
rlc->write_sdu_nb(sdu);
}
// MAC interface
uint32_t rlc_entity::get_buffer_state()
{

View File

@ -29,7 +29,7 @@
namespace srslte {
rlc_tm::rlc_tm() : ul_queue(16)
rlc_tm::rlc_tm(uint32_t queue_len) : ul_queue(queue_len)
{
log = NULL;
pdcp = NULL;
@ -38,6 +38,11 @@ rlc_tm::rlc_tm() : ul_queue(16)
pool = byte_buffer_pool::get_instance();
}
// Warning: must call stop() to properly deallocate all buffers
rlc_tm::~rlc_tm() {
pool = NULL;
}
void rlc_tm::init(srslte::log *log_,
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
@ -48,6 +53,7 @@ void rlc_tm::init(srslte::log *log_,
lcid = lcid_;
pdcp = pdcp_;
rrc = rrc_;
tx_enabled = true;
}
void rlc_tm::configure(srslte_rlc_config_t cnfg)
@ -64,14 +70,10 @@ void rlc_tm::empty_queue()
}
}
void rlc_tm::reset()
{
empty_queue();
}
void rlc_tm::stop()
{
reset();
tx_enabled = false;
empty_queue();
}
rlc_mode_t rlc_tm::get_mode()
@ -87,11 +89,37 @@ uint32_t rlc_tm::get_bearer()
// PDCP interface
// Blocking SDU write from PDCP. Drops the SDU if the entity is stopped.
// (Merge residue dereferenced sdu in a log call BEFORE the NULL check;
// those stale lines are removed.)
void rlc_tm::write_sdu(byte_buffer_t *sdu)
{
  // NULL check first: the pool must never be asked to deallocate NULL
  if (!sdu) {
    log->warning("NULL SDU pointer in write_sdu()\n");
    return;
  }
  if (!tx_enabled) {
    byte_buffer_pool::get_instance()->deallocate(sdu);
    return;
  }
  ul_queue.write(sdu);
  log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU, queue size=%d, bytes=%d",
                rrc->get_rb_name(lcid).c_str(), ul_queue.size(), ul_queue.size_bytes());
}
// Non-blocking SDU write from PDCP: drops the SDU if the queue is full.
void rlc_tm::write_sdu_nb(byte_buffer_t *sdu)
{
  // NULL check first: the pool must never be asked to deallocate NULL
  if (!sdu) {
    log->warning("NULL SDU pointer in write_sdu_nb()\n");
    return;
  }
  if (!tx_enabled) {
    byte_buffer_pool::get_instance()->deallocate(sdu);
    return;
  }
  if (ul_queue.try_write(sdu)) {
    log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU, queue size=%d, bytes=%d",
                  rrc->get_rb_name(lcid).c_str(), ul_queue.size(), ul_queue.size_bytes());
  } else {
    // Queue full: non-blocking semantics require dropping the SDU
    log->warning_hex(sdu->msg, sdu->N_bytes, "[Dropped SDU] %s Tx SDU, queue size=%d, bytes=%d",
                     rrc->get_rb_name(lcid).c_str(), ul_queue.size(), ul_queue.size_bytes());
    pool->deallocate(sdu);
  }
}
// MAC interface

View File

@ -33,7 +33,7 @@
namespace srslte {
rlc_um::rlc_um() : tx_sdu_queue(32)
rlc_um::rlc_um(uint32_t queue_len) : tx_sdu_queue(queue_len)
{
log = NULL;
pdcp = NULL;
@ -62,10 +62,13 @@ rlc_um::rlc_um() : tx_sdu_queue(32)
pdu_lost = false;
}
// Warning: must call stop() to properly deallocate all buffers
rlc_um::~rlc_um()
{
stop();
pthread_mutex_destroy(&mutex);
pool = NULL;
}
void rlc_um::init(srslte::log *log_,
uint32_t lcid_,
srsue::pdcp_interface_rlc *pdcp_,
@ -79,6 +82,7 @@ void rlc_um::init(srslte::log *log_,
mac_timers = mac_timers_;
reordering_timer_id = mac_timers->timer_get_unique_id();
reordering_timer = mac_timers->timer_get(reordering_timer_id);
tx_enabled = true;
}
void rlc_um::configure(srslte_rlc_config_t cnfg_)
@ -115,8 +119,7 @@ void rlc_um::configure(srslte_rlc_config_t cnfg_)
void rlc_um::empty_queue() {
// Drop all messages in TX SDU queue
byte_buffer_t *buf;
while(tx_sdu_queue.size() > 0) {
tx_sdu_queue.read(&buf);
while(tx_sdu_queue.try_read(&buf)) {
pool->deallocate(buf);
}
}
@ -128,16 +131,8 @@ bool rlc_um::is_mrb()
void rlc_um::stop()
{
reset();
if (mac_timers && reordering_timer) {
mac_timers->timer_release_id(reordering_timer_id);
reordering_timer = NULL;
}
}
void rlc_um::reset()
{
// Empty tx_sdu_queue before locking the mutex
tx_enabled = false;
empty_queue();
pthread_mutex_lock(&mutex);
@ -159,7 +154,7 @@ void rlc_um::reset()
if(reordering_timer) {
reordering_timer->stop();
}
// Drop all messages in RX window
std::map<uint32_t, rlc_umd_pdu_t>::iterator it;
for(it = rx_window.begin(); it != rx_window.end(); it++) {
@ -167,6 +162,11 @@ void rlc_um::reset()
}
rx_window.clear();
pthread_mutex_unlock(&mutex);
if (mac_timers && reordering_timer) {
mac_timers->timer_release_id(reordering_timer_id);
reordering_timer = NULL;
}
}
rlc_mode_t rlc_um::get_mode()
@ -182,11 +182,36 @@ uint32_t rlc_um::get_bearer()
/****************************************************************************
* PDCP interface
***************************************************************************/
// Blocking SDU write from PDCP. Drops the SDU if the entity is stopped.
// (Merge residue queued and logged the SDU once more BEFORE the
// tx_enabled/NULL checks; those stale lines are removed.)
void rlc_um::write_sdu(byte_buffer_t *sdu)
{
  // NULL check first: the pool must never be asked to deallocate NULL
  if (!sdu) {
    log->warning("NULL SDU pointer in write_sdu()\n");
    return;
  }
  if (!tx_enabled) {
    byte_buffer_pool::get_instance()->deallocate(sdu);
    return;
  }
  tx_sdu_queue.write(sdu);
  log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU (%d B ,tx_sdu_queue_len=%d)", rrc->get_rb_name(lcid).c_str(), sdu->N_bytes, tx_sdu_queue.size());
}
// Non-blocking SDU write from PDCP: drops the SDU if the queue is full.
void rlc_um::write_sdu_nb(byte_buffer_t *sdu)
{
  // NULL check first: the pool must never be asked to deallocate NULL
  if (!sdu) {
    log->warning("NULL SDU pointer in write_sdu_nb()\n");
    return;
  }
  if (!tx_enabled) {
    byte_buffer_pool::get_instance()->deallocate(sdu);
    return;
  }
  if (tx_sdu_queue.try_write(sdu)) {
    log->info_hex(sdu->msg, sdu->N_bytes, "%s Tx SDU (%d B ,tx_sdu_queue_len=%d)", rrc->get_rb_name(lcid).c_str(), sdu->N_bytes, tx_sdu_queue.size());
  } else {
    // Queue full: non-blocking semantics require dropping the SDU
    log->warning_hex(sdu->msg, sdu->N_bytes, "[Dropped SDU] %s Tx SDU (%d B ,tx_sdu_queue_len=%d)", rrc->get_rb_name(lcid).c_str(), sdu->N_bytes, tx_sdu_queue.size());
    pool->deallocate(sdu);
  }
}
/****************************************************************************

View File

@ -1408,7 +1408,7 @@ void reset_test()
pdu_bufs.N_bytes = len;
// reset RLC1
rlc1.reset();
rlc1.stop();
// read another PDU segment from RLC1
len = rlc1.read_pdu(pdu_bufs.msg, 4);

View File

@ -124,7 +124,8 @@ private:
static const int MAC_PDU_THREAD_PRIO = 60;
// We use a rwlock in MAC to allow multiple workers to access MAC simultaneously. No conflicts will happen since access for different TTIs
pthread_rwlock_t rwlock;
// Interaction with PHY
phy_interface_mac *phy_h;

View File

@ -36,8 +36,15 @@
#include <pthread.h>
namespace srsenb {
/* Caution: User addition (ue_cfg) and removal (ue_rem) are not thread-safe
* Rest of operations are thread-safe
*
* The subclass sched_ue is thread-safe so that access to shared variables like buffer states
* from scheduler thread and other threads is protected for each individual user.
*/
class sched : public sched_interface
{
@ -89,7 +96,7 @@ public:
void set_sched_cfg(sched_args_t *sched_cfg);
int reset();
int ue_cfg(uint16_t rnti, ue_cfg_t *ue_cfg);
int ue_cfg(uint16_t rnti, ue_cfg_t *ue_cfg);
int ue_rem(uint16_t rnti);
bool ue_exists(uint16_t rnti);
@ -216,10 +223,7 @@ private:
uint32_t current_cfi;
bool configured;
pthread_mutex_t mutex, mutex2;
};

View File

@ -35,6 +35,12 @@
namespace srsenb {
/** This class is designed to be thread-safe because it is called from workers through scheduler thread and from
* higher layers and mac threads.
*
* 1 mutex is created for every user and only access to same user variables are mutexed
*/
class sched_ue {
public:
@ -56,6 +62,7 @@ public:
*
************************************************************/
sched_ue();
~sched_ue();
void reset();
void phy_config_enabled(uint32_t tti, bool enabled);
void set_cfg(uint16_t rnti, sched_interface::ue_cfg_t* cfg, sched_interface::cell_cfg_t *cell_cfg,
@ -101,6 +108,7 @@ public:
uint32_t get_pending_dl_new_data(uint32_t tti);
uint32_t get_pending_ul_new_data(uint32_t tti);
uint32_t get_pending_ul_old_data();
uint32_t get_pending_dl_new_data_total(uint32_t tti);
dl_harq_proc *get_pending_dl_harq(uint32_t tti);
@ -129,8 +137,6 @@ public:
bool get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2]);
bool pucch_sr_collision(uint32_t current_tti, uint32_t n_cce);
uint32_t get_pending_ul_old_data();
private:
typedef struct {
@ -152,11 +158,21 @@ private:
static bool bearer_is_ul(ue_bearer_t *lch);
static bool bearer_is_dl(ue_bearer_t *lch);
uint32_t get_pending_dl_new_data_unlocked(uint32_t tti);
uint32_t get_pending_ul_old_data_unlocked();
uint32_t get_pending_ul_new_data_unlocked(uint32_t tti);
bool needs_cqi_unlocked(uint32_t tti, bool will_send = false);
int generate_format2a_unlocked(dl_harq_proc *h, sched_interface::dl_sched_data_t *data, uint32_t tti, uint32_t cfi);
bool is_first_dl_tx();
sched_interface::ue_cfg_t cfg;
srslte_cell_t cell;
srslte::log* log_h;
pthread_mutex_t mutex;
/* Buffer states */
bool sr;

View File

@ -103,9 +103,13 @@ private:
user_interface_gtpu gtpu_itf;
user_interface_rrc rrc_itf;
srslte::pdcp *pdcp;
};
};
void clear_user(user_interface *ue);
std::map<uint32_t,user_interface> users;
std::map<uint32_t,user_interface> users;
pthread_rwlock_t rwlock;
rlc_interface_pdcp *rlc;
rrc_interface_pdcp *rrc;

View File

@ -52,7 +52,6 @@ public:
void stop();
// rlc_interface_rrc
void reset(uint16_t rnti);
void clear_buffer(uint16_t rnti);
void add_user(uint16_t rnti);
void rem_user(uint16_t rnti);
@ -91,8 +90,14 @@ private:
srsenb::rrc_interface_rlc *rrc;
srslte::rlc *rlc;
srsenb::rlc *parent;
};
};
void clear_user(user_interface *ue);
const static int RLC_TX_QUEUE_LEN = 512;
pthread_rwlock_t rwlock;
std::map<uint32_t,user_interface> users;
std::vector<mch_service_t> mch_services;

View File

@ -123,7 +123,6 @@ public:
bzero(&cqi_sched, sizeof(cqi_sched));
bzero(&cfg, sizeof(cfg));
bzero(&sib2, sizeof(sib2));
bzero(&user_mutex, sizeof(user_mutex));
bzero(&paging_mutex, sizeof(paging_mutex));
}
@ -350,6 +349,7 @@ private:
srslte::byte_buffer_t* pdu;
}rrc_pdu;
const static uint32_t LCID_EXIT = 0xffff0000;
const static uint32_t LCID_REM_USER = 0xffff0001;
const static uint32_t LCID_REL_USER = 0xffff0002;
const static uint32_t LCID_RLF_USER = 0xffff0003;

View File

@ -96,96 +96,112 @@ bool mac::init(mac_args_t *args_, srslte_cell_t *cell_, phy_interface_mac *phy,
reset();
started = true;
pthread_rwlock_init(&rwlock, NULL);
started = true;
}
return started;
return started;
}
void mac::stop()
{
pthread_rwlock_wrlock(&rwlock);
for (uint32_t i=0;i<ue_db.size();i++) {
delete ue_db[i];
}
for (int i=0;i<NOF_BCCH_DLSCH_MSG;i++) {
srslte_softbuffer_tx_free(&bcch_softbuffer_tx[i]);
}
}
srslte_softbuffer_tx_free(&pcch_softbuffer_tx);
srslte_softbuffer_tx_free(&rar_softbuffer_tx);
started = false;
started = false;
timers_thread.stop();
pdu_process_thread.stop();
pthread_rwlock_unlock(&rwlock);
pthread_rwlock_destroy(&rwlock);
}
// Implement Section 5.9: reset MAC state and the scheduler.
// (Merge residue duplicated the tti/last_rnti assignments.)
void mac::reset()
{
  Info("Resetting MAC\n");
  timers_db.stop_all();
  tti = 0;
  last_rnti = 70;
  /* Setup scheduler */
  scheduler.reset();
}
// Store the pcap handle and propagate it to all existing UEs so their UL
// messages are captured. (Merge residue duplicated the assignment and left
// a stray closing brace.)
void mac::start_pcap(srslte::mac_pcap* pcap_)
{
  pcap = pcap_;
  // Set pcap in all UEs for UL messages
  for (std::map<uint16_t, ue*>::iterator iter = ue_db.begin(); iter != ue_db.end(); ++iter) {
    ue *u = iter->second;
    u->start_pcap(pcap);
  }
}
/********************************************************
*
* RLC interface
* RLC interface
*
*******************************************************/
// Report RLC buffer state to the scheduler (or the MTCH table for MRNTI).
// Returns 0 on success, -1 if the RNTI is unknown.
// (Merge residue left early `return` statements that exited while still
// holding the read lock, leaking it; all paths now reach the unlock.)
int mac::rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t retx_queue)
{
  int ret = -1;
  pthread_rwlock_rdlock(&rwlock);
  if (ue_db.count(rnti)) {
    if (rnti != SRSLTE_MRNTI) {
      ret = scheduler.dl_rlc_buffer_state(rnti, lc_id, tx_queue, retx_queue);
    } else {
      // MRNTI: update the buffer size of the matching MTCH entry
      for (uint32_t i = 0; i < mch.num_mtch_sched; i++) {
        if (lc_id == mch.mtch_sched[i].lcid) {
          mch.mtch_sched[i].lcid_buffer_size = tx_queue;
        }
      }
      ret = 0;
    }
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
  return ret;
}
// Configure a bearer for a UE. Returns the scheduler result, -1 if the
// RNTI is unknown. (A stale early `return` exited while holding the read
// lock, leaking it; the result now flows through the unlock.)
int mac::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg)
{
  int ret = -1;
  pthread_rwlock_rdlock(&rwlock);
  if (ue_db.count(rnti)) {
    // configure BSR group in UE
    ue_db[rnti]->set_lcg(lc_id, (uint32_t) cfg->group);
    ret = scheduler.bearer_ue_cfg(rnti, lc_id, cfg);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
  return ret;
}
// Remove a bearer from a UE. Returns the scheduler result, -1 if the RNTI
// is unknown. (Merge residue duplicated the ue_db guard and left early
// `return` statements that leaked the read lock.)
int mac::bearer_ue_rem(uint16_t rnti, uint32_t lc_id)
{
  int ret = -1;
  pthread_rwlock_rdlock(&rwlock);
  if (ue_db.count(rnti)) {
    ret = scheduler.bearer_ue_rem(rnti, lc_id);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
  return ret;
}
void mac::phy_config_enabled(uint16_t rnti, bool enabled)
@ -193,58 +209,74 @@ void mac::phy_config_enabled(uint16_t rnti, bool enabled)
scheduler.phy_config_enabled(rnti, enabled);
}
// Update UE configuration: register the RNTI with PHY (once) and push the
// new configuration to the scheduler. Returns 0 on success, -1 on error.
// (Merge residue duplicated several lines and left `return` statements
// that exited while still holding the read lock.)
int mac::ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t* cfg)
{
  int ret = -1;
  pthread_rwlock_rdlock(&rwlock);
  if (ue_db.count(rnti)) {
    // Add RNTI to the PHY (pregenerate signals) now instead of after PRACH
    if (!ue_db[rnti]->is_phy_added) {
      ue_db[rnti]->is_phy_added = true;
      Info("Registering rnti=0x%x to PHY...\n", rnti);
      // Register new user in PHY
      if (phy_h->add_rnti(rnti)) {
        Error("Registering new ue rnti=0x%x to PHY\n", rnti);
      }
      Info("Done registering rnti=0x%x to PHY...\n", rnti);
    }
    // Update Scheduler configuration
    if (scheduler.ue_cfg(rnti, cfg)) {
      Error("Registering new UE rnti=0x%x to SCHED\n", rnti);
    } else {
      ret = 0;
    }
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
  return ret;
}
// Removes UE from DB in two phases. Phase 1 (read lock): deregister from
// scheduler and PHY while other readers may still access the UE. Phase 2
// (write lock): delete the UE object exclusively. Returns 0 on success.
// (Merge residue deleted the UE under the READ lock and returned while
// still holding it — both a race and a lock leak.)
int mac::ue_rem(uint16_t rnti)
{
  int ret = -1;
  pthread_rwlock_rdlock(&rwlock);
  if (ue_db.count(rnti)) {
    scheduler.ue_rem(rnti);
    phy_h->rem_rnti(rnti);
    ret = 0;
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
  if (ret) {
    return ret;
  }
  pthread_rwlock_wrlock(&rwlock);
  if (ue_db.count(rnti)) {
    delete ue_db[rnti];
    ue_db.erase(rnti);
    Info("User rnti=0x%x removed from MAC/PHY\n", rnti);
  } else {
    Error("User rnti=0x%x already removed\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
  return 0;
}
// Keep a local copy of the cell configuration, then forward it to the
// scheduler. (Merge residue duplicated the return statement.)
int mac::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
{
  memcpy(&this->cell_config, cell_cfg, sizeof(sched_interface::cell_cfg_t));
  return scheduler.cell_cfg(cell_cfg);
}
// Copy per-UE metrics into the caller's array under the read lock so the
// UE map cannot change while it is traversed.
// (Merge residue left a duplicated closing brace.)
void mac::get_metrics(mac_metrics_t metrics[ENB_METRICS_MAX_USERS])
{
  pthread_rwlock_rdlock(&rwlock);
  int cnt = 0;
  for (std::map<uint16_t, ue*>::iterator iter = ue_db.begin(); iter != ue_db.end(); ++iter) {
    ue *u = iter->second;
    u->metrics_read(&metrics[cnt]);
    cnt++;
  }
  pthread_rwlock_unlock(&rwlock);
}
/********************************************************
*
* PHY interface
* PHY interface
*
*******************************************************/
void mac::rl_failure(uint16_t rnti)
{
if (ue_db.count(rnti)) {
uint32_t nof_fails = ue_db[rnti]->rl_failure();
if (nof_fails >= (uint32_t) args.link_failure_nof_err && args.link_failure_nof_err > 0) {
pthread_rwlock_rdlock(&rwlock);
if (ue_db.count(rnti)) {
uint32_t nof_fails = ue_db[rnti]->rl_failure();
if (nof_fails >= (uint32_t) args.link_failure_nof_err && args.link_failure_nof_err > 0) {
Info("Detected Uplink failure for rnti=0x%x\n", rnti);
rrc_h->rl_failure(rnti);
ue_db[rnti]->rl_failure_reset();
@ -274,141 +308,166 @@ void mac::rl_failure(uint16_t rnti)
} else {
Error("User rnti=0x%x not found\n", rnti);
}
pthread_rwlock_unlock(&rwlock);
}
/// Clears the radio-link failure counter for a user (uplink recovered).
/// @param rnti  RNTI of the user; logs an error if unknown
void mac::rl_ok(uint16_t rnti)
{
  // rdlock keeps the ue object alive while we touch it
  pthread_rwlock_rdlock(&rwlock);
  if (ue_db.count(rnti)) {
    ue_db[rnti]->rl_failure_reset();
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
}
/// Forwards a DL HARQ ACK/NACK to the scheduler and updates per-UE metrics.
/// Large acknowledged PDUs (>64 bytes, i.e. not just RLC status reports)
/// also refresh the RRC user-activity timer.
/// @return always 0
int mac::ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack)
{
  pthread_rwlock_rdlock(&rwlock);
  log_h->step(tti);
  uint32_t nof_bytes = scheduler.dl_ack_info(tti, rnti, tb_idx, ack);
  // Guard the map access: operator[] on an unknown rnti would insert a NULL ue*
  if (ue_db.count(rnti)) {
    ue_db[rnti]->metrics_tx(ack, nof_bytes);
    if (ack) {
      if (nof_bytes > 64) { // do not count RLC status messages only
        rrc_h->set_activity_user(rnti);
        log_h->info("DL activity rnti=0x%x, n_bytes=%d\n", rnti, nof_bytes);
      }
    }
  }
  pthread_rwlock_unlock(&rwlock);
  return 0;
}
/// Handles a PUSCH CRC result: updates RX metrics, queues the PDU for
/// processing when the CRC passed (or frees it otherwise) and informs the
/// scheduler so it can manage UL HARQ retransmissions.
/// @return scheduler's ul_crc_info() result, or -1 if the user is unknown
int mac::crc_info(uint32_t tti, uint16_t rnti, uint32_t nof_bytes, bool crc)
{
  log_h->step(tti);
  int ret = -1;
  pthread_rwlock_rdlock(&rwlock);
  if (ue_db.count(rnti)) {
    ue_db[rnti]->set_tti(tti);
    ue_db[rnti]->metrics_rx(crc, nof_bytes);
    // push the pdu through the queue if received correctly
    if (crc) {
      ue_db[rnti]->push_pdu(tti, nof_bytes);
      pdu_process_thread.notify();
    } else {
      ue_db[rnti]->deallocate_pdu(tti);
    }
    ret = scheduler.ul_crc_info(tti, rnti, crc);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
  return ret;
}
/// Forwards the dedicated DL antenna-info configuration to the scheduler.
/// @param rnti         target user
/// @param dl_ant_info  RRC antenna-info-dedicated structure
/// @return 0 on success, -1 if the user does not exist
int mac::set_dl_ant_info(uint16_t rnti, LIBLTE_RRC_ANTENNA_INFO_DEDICATED_STRUCT *dl_ant_info) {
  // NOTE: no TTI argument here, so no log_h->step() call in this function
  int ret = -1;
  pthread_rwlock_rdlock(&rwlock);
  if (ue_db.count(rnti)) {
    scheduler.dl_ant_info(rnti, dl_ant_info);
    ret = 0;
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
  return ret;
}
/// Forwards a DL rank-indicator report to the scheduler and metrics.
/// @return 0 on success, -1 if the user does not exist
int mac::ri_info(uint32_t tti, uint16_t rnti, uint32_t ri_value)
{
  log_h->step(tti);
  int ret = -1;
  pthread_rwlock_rdlock(&rwlock);
  if (ue_db.count(rnti)) {
    scheduler.dl_ri_info(tti, rnti, ri_value);
    ue_db[rnti]->metrics_dl_ri(ri_value);
    ret = 0;
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
  return ret;
}
/// Forwards a DL precoding-matrix-indicator report to the scheduler and metrics.
/// @return 0 on success, -1 if the user does not exist
int mac::pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value)
{
  log_h->step(tti);
  int ret = -1;
  pthread_rwlock_rdlock(&rwlock);
  if (ue_db.count(rnti)) {
    scheduler.dl_pmi_info(tti, rnti, pmi_value);
    ue_db[rnti]->metrics_dl_pmi(pmi_value);
    ret = 0;
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
  return ret;
}
/// Forwards a DL CQI report to the scheduler and metrics.
/// @return 0 on success, -1 if the user does not exist
int mac::cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
{
  log_h->step(tti);
  int ret = -1;
  pthread_rwlock_rdlock(&rwlock);
  if (ue_db.count(rnti)) {
    scheduler.dl_cqi_info(tti, rnti, cqi_value);
    ue_db[rnti]->metrics_dl_cqi(cqi_value);
    ret = 0;
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
  return ret;
}
/// Converts an UL SNR measurement to a CQI value and reports it to the
/// scheduler as an UL channel-quality indication.
/// @return 0 on success, -1 if the user does not exist
int mac::snr_info(uint32_t tti, uint16_t rnti, float snr)
{
  log_h->step(tti);
  int ret = -1;
  pthread_rwlock_rdlock(&rwlock);
  if (ue_db.count(rnti)) {
    uint32_t cqi = srslte_cqi_from_snr(snr);
    scheduler.ul_cqi_info(tti, rnti, cqi, 0);
    ret = 0;
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
  return ret;
}
/// Reports a detected scheduling request (SR) to the scheduler.
/// @return 0 on success, -1 if the user does not exist
int mac::sr_detected(uint32_t tti, uint16_t rnti)
{
  log_h->step(tti);
  int ret = -1;
  pthread_rwlock_rdlock(&rwlock);
  if (ue_db.count(rnti)) {
    scheduler.ul_sr_info(tti, rnti);
    ret = 0;
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_rwlock_unlock(&rwlock);
  return ret;
}
int mac::rach_detected(uint32_t tti, uint32_t preamble_idx, uint32_t time_adv)
{
log_h->step(tti);
int ret = -1;
pthread_rwlock_rdlock(&rwlock);
// Find empty slot for pending rars
uint32_t ra_id=0;
while(pending_rars[ra_id].temp_crnti && ra_id<MAX_PENDING_RARS-1) {
@ -416,49 +475,53 @@ int mac::rach_detected(uint32_t tti, uint32_t preamble_idx, uint32_t time_adv)
}
if (ra_id == MAX_PENDING_RARS) {
Error("Maximum number of pending RARs exceeded (%d)\n", MAX_PENDING_RARS);
return -1;
goto unlock;
}
// Create new UE
ue_db[last_rnti] = new ue;
// Create new UE
ue_db[last_rnti] = new ue;
ue_db[last_rnti]->config(last_rnti, cell.nof_prb, &scheduler, rrc_h, rlc_h, log_h);
// Set PCAP if available
// Set PCAP if available
if (pcap) {
ue_db[last_rnti]->start_pcap(pcap);
}
// Save RA info
pending_rars[ra_id].preamble_idx = preamble_idx;
pending_rars[ra_id].preamble_idx = preamble_idx;
pending_rars[ra_id].ta_cmd = 2*time_adv;
pending_rars[ra_id].temp_crnti = last_rnti;
pending_rars[ra_id].temp_crnti = last_rnti;
// Add new user to the scheduler so that it can RX/TX SRB0
sched_interface::ue_cfg_t uecfg;
sched_interface::ue_cfg_t uecfg;
bzero(&uecfg, sizeof(sched_interface::ue_cfg_t));
uecfg.ue_bearers[0].direction = srsenb::sched_interface::ue_bearer_cfg_t::BOTH;
uecfg.ue_bearers[0].direction = srsenb::sched_interface::ue_bearer_cfg_t::BOTH;
if (scheduler.ue_cfg(last_rnti, &uecfg)) {
// Release pending RAR
bzero(&pending_rars[ra_id], sizeof(pending_rar_t));
Error("Registering new user rnti=0x%x to SCHED\n", last_rnti);
return -1;
goto unlock;
}
// Register new user in RRC
// Register new user in RRC
rrc_h->add_user(last_rnti);
// Trigger scheduler RACH
scheduler.dl_rach_info(tti, ra_id, last_rnti, 7);
log_h->info("RACH: tti=%d, preamble=%d, offset=%d, temp_crnti=0x%x\n",
tti, preamble_idx, time_adv, last_rnti);
log_h->console("RACH: tti=%d, preamble=%d, offset=%d, temp_crnti=0x%x\n",
tti, preamble_idx, time_adv, last_rnti);
// Increae RNTI counter
// Trigger scheduler RACH
scheduler.dl_rach_info(tti, ra_id, last_rnti, 7);
log_h->info("RACH: tti=%d, preamble=%d, offset=%d, temp_crnti=0x%x\n",
tti, preamble_idx, time_adv, last_rnti);
log_h->console("RACH: tti=%d, preamble=%d, offset=%d, temp_crnti=0x%x\n",
tti, preamble_idx, time_adv, last_rnti);
// Increae RNTI counter
last_rnti++;
if (last_rnti >= 60000) {
last_rnti = 70;
last_rnti = 70;
}
return 0;
ret = 0;
unlock:
pthread_rwlock_unlock(&rwlock);
return ret;
}
int mac::get_dl_sched(uint32_t tti, dl_sched_t *dl_sched_res)
@ -468,76 +531,85 @@ int mac::get_dl_sched(uint32_t tti, dl_sched_t *dl_sched_res)
if (!started) {
return 0;
}
if (!dl_sched_res) {
return SRSLTE_ERROR_INVALID_INPUTS;
return SRSLTE_ERROR_INVALID_INPUTS;
}
// Run scheduler with current info
sched_interface::dl_sched_res_t sched_result;
// Run scheduler with current info
sched_interface::dl_sched_res_t sched_result;
bzero(&sched_result, sizeof(sched_interface::dl_sched_res_t));
if (scheduler.dl_sched(tti, &sched_result) < 0) {
Error("Running scheduler\n");
return SRSLTE_ERROR;
}
int n = 0;
// Copy data grants
int n = 0;
pthread_rwlock_rdlock(&rwlock);
// Copy data grants
for (uint32_t i=0;i<sched_result.nof_data_elems;i++) {
// Get UE
// Get UE
uint16_t rnti = sched_result.data[i].rnti;
// Copy grant info
dl_sched_res->sched_grants[n].rnti = rnti;
dl_sched_res->sched_grants[n].dci_format = sched_result.data[i].dci_format;
memcpy(&dl_sched_res->sched_grants[n].grant, &sched_result.data[i].dci, sizeof(srslte_ra_dl_dci_t));
memcpy(&dl_sched_res->sched_grants[n].location, &sched_result.data[i].dci_location, sizeof(srslte_dci_location_t));
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
dl_sched_res->sched_grants[n].softbuffers[tb] =
ue_db[rnti]->get_tx_softbuffer(sched_result.data[i].dci.harq_process, tb);
if (ue_db.count(rnti)) {
// Copy grant info
dl_sched_res->sched_grants[n].rnti = rnti;
dl_sched_res->sched_grants[n].dci_format = sched_result.data[i].dci_format;
memcpy(&dl_sched_res->sched_grants[n].grant, &sched_result.data[i].dci, sizeof(srslte_ra_dl_dci_t));
memcpy(&dl_sched_res->sched_grants[n].location, &sched_result.data[i].dci_location, sizeof(srslte_dci_location_t));
if (sched_result.data[i].nof_pdu_elems[tb] > 0) {
/* Get PDU if it's a new transmission */
dl_sched_res->sched_grants[n].data[tb] = ue_db[rnti]->generate_pdu(tb,
sched_result.data[i].pdu[tb],
sched_result.data[i].nof_pdu_elems[tb],
sched_result.data[i].tbs[tb]);
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
dl_sched_res->sched_grants[n].softbuffers[tb] =
ue_db[rnti]->get_tx_softbuffer(sched_result.data[i].dci.harq_process, tb);
if (!dl_sched_res->sched_grants[n].data[tb]) {
Error("Error! PDU was not generated (rnti=0x%04x, tb=%d)\n", rnti, tb);
sched_result.data[i].dci.tb_en[tb] = false;
if (sched_result.data[i].nof_pdu_elems[tb] > 0) {
/* Get PDU if it's a new transmission */
dl_sched_res->sched_grants[n].data[tb] = ue_db[rnti]->generate_pdu(tb,
sched_result.data[i].pdu[tb],
sched_result.data[i].nof_pdu_elems[tb],
sched_result.data[i].tbs[tb]);
if (!dl_sched_res->sched_grants[n].data[tb]) {
Error("Error! PDU was not generated (rnti=0x%04x, tb=%d)\n", rnti, tb);
sched_result.data[i].dci.tb_en[tb] = false;
}
if (pcap) {
pcap->write_dl_crnti(dl_sched_res->sched_grants[n].data[tb], sched_result.data[i].tbs[tb], rnti, true, tti);
}
} else {
/* TB not enabled OR no data to send: set pointers to NULL */
dl_sched_res->sched_grants[n].data[tb] = NULL;
}
if (pcap) {
pcap->write_dl_crnti(dl_sched_res->sched_grants[n].data[tb], sched_result.data[i].tbs[tb], rnti, true, tti);
}
} else {
/* TB not enabled OR no data to send: set pointers to NULL */
dl_sched_res->sched_grants[n].data[tb] = NULL;
}
n++;
} else {
Warning("Invalid DL scheduling result. User 0x%x does not exist\n", rnti);
}
n++;
}
// Copy RAR grants
// No more uses of shared ue_db beyond here
pthread_rwlock_unlock(&rwlock);
// Copy RAR grants
for (uint32_t i=0;i<sched_result.nof_rar_elems;i++) {
// Copy grant info
// Copy grant info
dl_sched_res->sched_grants[n].rnti = sched_result.rar[i].rarnti;
dl_sched_res->sched_grants[n].dci_format = SRSLTE_DCI_FORMAT1A; // Force Format 1A
memcpy(&dl_sched_res->sched_grants[n].grant, &sched_result.rar[i].dci, sizeof(srslte_ra_dl_dci_t));
memcpy(&dl_sched_res->sched_grants[n].location, &sched_result.rar[i].dci_location, sizeof(srslte_dci_location_t));
memcpy(&dl_sched_res->sched_grants[n].location, &sched_result.rar[i].dci_location, sizeof(srslte_dci_location_t));
// Set softbuffer (there are no retx in RAR but a softbuffer is required)
dl_sched_res->sched_grants[n].softbuffers[0] = &rar_softbuffer_tx;
// Assemble PDU
// Assemble PDU
dl_sched_res->sched_grants[n].data[0] = assemble_rar(sched_result.rar[i].grants, sched_result.rar[i].nof_grants, i, sched_result.rar[i].tbs);
if (pcap) {
pcap->write_dl_ranti(dl_sched_res->sched_grants[n].data[0], sched_result.rar[i].tbs, dl_sched_res->sched_grants[n].rnti, true, tti);
}
@ -545,15 +617,15 @@ int mac::get_dl_sched(uint32_t tti, dl_sched_t *dl_sched_res)
n++;
}
// Copy SI and Paging grants
// Copy SI and Paging grants
for (uint32_t i=0;i<sched_result.nof_bc_elems;i++) {
// Copy grant info
// Copy grant info
dl_sched_res->sched_grants[n].rnti = (sched_result.bc[i].type == sched_interface::dl_sched_bc_t::BCCH ) ? SRSLTE_SIRNTI : SRSLTE_PRNTI;
dl_sched_res->sched_grants[n].dci_format = SRSLTE_DCI_FORMAT1A; // Force Format 1A
memcpy(&dl_sched_res->sched_grants[n].grant, &sched_result.bc[i].dci, sizeof(srslte_ra_dl_dci_t));
memcpy(&dl_sched_res->sched_grants[n].location, &sched_result.bc[i].dci_location, sizeof(srslte_dci_location_t));
// Set softbuffer
memcpy(&dl_sched_res->sched_grants[n].location, &sched_result.bc[i].dci_location, sizeof(srslte_dci_location_t));
// Set softbuffer
if (sched_result.bc[i].type == sched_interface::dl_sched_bc_t::BCCH) {
dl_sched_res->sched_grants[n].softbuffers[0] = &bcch_softbuffer_tx[sched_result.bc[i].index];
dl_sched_res->sched_grants[n].data[0] = assemble_si(sched_result.bc[i].index);
@ -566,20 +638,20 @@ int mac::get_dl_sched(uint32_t tti, dl_sched_t *dl_sched_res)
dl_sched_res->sched_grants[n].softbuffers[0] = &pcch_softbuffer_tx;
dl_sched_res->sched_grants[n].data[0] = pcch_payload_buffer;
rlc_h->read_pdu_pcch(pcch_payload_buffer, pcch_payload_buffer_len);
if (pcap) {
pcap->write_dl_pch(dl_sched_res->sched_grants[n].data[0], sched_result.bc[i].tbs, true, tti);
}
}
n++;
}
dl_sched_res->nof_grants = n;
// Number of CCH symbols
dl_sched_res->cfi = sched_result.cfi;
dl_sched_res->nof_grants = n;
// Number of CCH symbols
dl_sched_res->cfi = sched_result.cfi;
return SRSLTE_SUCCESS;
}
@ -587,12 +659,12 @@ void mac::build_mch_sched(uint32_t tbs)
{
int sfs_per_sched_period = mcch.pmch_infolist_r9[0].pmch_config_r9.sf_alloc_end_r9;
int bytes_per_sf = tbs/8 - 6;// leave 6 bytes for header
int total_space_avail_bytes = sfs_per_sched_period*bytes_per_sf;
int total_bytes_to_tx = 0;
int total_bytes_to_tx = 0;
// calculate total bytes to be scheduled
for (uint32_t i = 0; i < mch.num_mtch_sched; i++) {
total_bytes_to_tx += mch.mtch_sched[i].lcid_buffer_size;
@ -600,34 +672,34 @@ void mac::build_mch_sched(uint32_t tbs)
}
int last_mtch_stop = 0;
if(total_bytes_to_tx >= total_space_avail_bytes){
for(uint32_t i = 0; i < mch.num_mtch_sched;i++){
double ratio = mch.mtch_sched[i].lcid_buffer_size/total_bytes_to_tx;
float assigned_sfs = floor(sfs_per_sched_period*ratio);
mch.mtch_sched[i].stop = last_mtch_stop + (uint32_t)assigned_sfs;
last_mtch_stop = mch.mtch_sched[i].stop;
}
}
}else {
for(uint32_t i = 0; i < mch.num_mtch_sched;i++){
float assigned_sfs = ceil(((float)mch.mtch_sched[i].lcid_buffer_size)/((float)bytes_per_sf));
mch.mtch_sched[i].stop = last_mtch_stop + (uint32_t)assigned_sfs;
last_mtch_stop = mch.mtch_sched[i].stop;
last_mtch_stop = mch.mtch_sched[i].stop;
}
}
}
int mac::get_mch_sched(bool is_mcch, dl_sched_t *dl_sched_res)
{
srslte_ra_mcs_t mcs;
srslte_ra_mcs_t mcs_data;
mcs.idx = this->sib13.mbsfn_area_info_list_r9[0].signalling_mcs_r9;
mcs_data.idx = this->mcch.pmch_infolist_r9[0].pmch_config_r9.datamcs_r9;
srslte_dl_fill_ra_mcs(&mcs, this->cell_config.cell.nof_prb);
srslte_dl_fill_ra_mcs(&mcs_data, this->cell_config.cell.nof_prb);
if(is_mcch){
if(is_mcch){
build_mch_sched(mcs_data.tbs);
mch.mcch_payload = mcch_payload_buffer;
mch.current_sf_allocation_num = 1;
@ -636,17 +708,17 @@ int mac::get_mch_sched(bool is_mcch, dl_sched_t *dl_sched_res)
mch.pdu[i].lcid = srslte::sch_subh::MCH_SCHED_INFO;
// mch.mtch_sched[i].lcid = 1+i;
}
mch.pdu[mch.num_mtch_sched].lcid = 0;
mch.pdu[mch.num_mtch_sched].nbytes = current_mcch_length;
dl_sched_res->sched_grants[0].rnti = SRSLTE_MRNTI;
dl_sched_res->sched_grants[0].data[0] = ue_db[SRSLTE_MRNTI]->generate_mch_pdu(mch, mch.num_mtch_sched + 1, mcs.tbs/8);
} else {
uint32_t current_lcid = 1;
uint32_t mtch_index = 0;
uint32_t mtch_stop = mch.mtch_sched[mch.num_mtch_sched -1].stop;
for(uint32_t i = 0;i < mch.num_mtch_sched;i++) {
if(mch.current_sf_allocation_num <= mch.mtch_sched[i].stop){
current_lcid = mch.mtch_sched[i].lcid;
@ -669,9 +741,9 @@ int mac::get_mch_sched(bool is_mcch, dl_sched_t *dl_sched_res)
}
mch.current_sf_allocation_num++;
}
return SRSLTE_SUCCESS;
}
}
uint8_t* mac::assemble_rar(sched_interface::dl_sched_rar_grant_t* grants, uint32_t nof_grants, int rar_idx, uint32_t pdu_len)
{
@ -684,10 +756,10 @@ uint8_t* mac::assemble_rar(sched_interface::dl_sched_rar_grant_t* grants, uint32
if (pdu->new_subh()) {
/* Search pending RAR */
int idx = grants[i].ra_id;
pdu->get()->set_rapid(pending_rars[idx].preamble_idx);
pdu->get()->set_rapid(pending_rars[idx].preamble_idx);
pdu->get()->set_ta_cmd(pending_rars[idx].ta_cmd);
pdu->get()->set_temp_crnti(pending_rars[idx].temp_crnti);
pdu->get()->set_sched_grant(grant_buffer);
pdu->get()->set_temp_crnti(pending_rars[idx].temp_crnti);
pdu->get()->set_sched_grant(grant_buffer);
bzero(&pending_rars[idx], sizeof(pending_rar_t));
}
}
@ -695,76 +767,85 @@ uint8_t* mac::assemble_rar(sched_interface::dl_sched_rar_grant_t* grants, uint32
return rar_payload[rar_idx];
} else {
Error("Assembling RAR: pdu_len > rar_payload_len (%d>%d)\n", pdu_len, rar_payload_len);
return NULL;
return NULL;
}
}
/// Reads the BCCH DL-SCH PDU for the given SI message index from RLC into
/// the shared bcch_dlsch_payload buffer and returns a pointer to it.
/// @param index  SI message index
/// @return pointer to the (member) payload buffer — valid until the next call
uint8_t* mac::assemble_si(uint32_t index)
{
  rlc_h->read_pdu_bcch_dlsch(index, bcch_dlsch_payload);
  return bcch_dlsch_payload;
}
int mac::get_ul_sched(uint32_t tti, ul_sched_t *ul_sched_res)
int mac::get_ul_sched(uint32_t tti, ul_sched_t *ul_sched_res)
{
log_h->step(tti);
if (!started) {
return 0;
return 0;
}
if (!ul_sched_res) {
return SRSLTE_ERROR_INVALID_INPUTS;
return SRSLTE_ERROR_INVALID_INPUTS;
}
// Run scheduler with current info
sched_interface::ul_sched_res_t sched_result;
// Run scheduler with current info
sched_interface::ul_sched_res_t sched_result;
bzero(&sched_result, sizeof(sched_interface::ul_sched_res_t));
if (scheduler.ul_sched(tti, &sched_result)<0) {
Error("Running scheduler\n");
return SRSLTE_ERROR;
return SRSLTE_ERROR;
}
// Copy DCI grants
ul_sched_res->nof_grants = 0;
int n = 0;
pthread_rwlock_rdlock(&rwlock);
// Copy DCI grants
ul_sched_res->nof_grants = 0;
int n = 0;
for (uint32_t i=0;i<sched_result.nof_dci_elems;i++) {
if (sched_result.pusch[i].tbs > 0) {
// Get UE
// Get UE
uint16_t rnti = sched_result.pusch[i].rnti;
// Copy grant info
ul_sched_res->sched_grants[n].rnti = rnti;
ul_sched_res->sched_grants[n].current_tx_nb = sched_result.pusch[i].current_tx_nb;
ul_sched_res->sched_grants[n].needs_pdcch = sched_result.pusch[i].needs_pdcch;
memcpy(&ul_sched_res->sched_grants[n].grant, &sched_result.pusch[i].dci, sizeof(srslte_ra_ul_dci_t));
memcpy(&ul_sched_res->sched_grants[n].location, &sched_result.pusch[i].dci_location, sizeof(srslte_dci_location_t));
ul_sched_res->sched_grants[n].softbuffer = ue_db[rnti]->get_rx_softbuffer(tti);
if (sched_result.pusch[n].current_tx_nb == 0) {
srslte_softbuffer_rx_reset_tbs(ul_sched_res->sched_grants[n].softbuffer, sched_result.pusch[i].tbs*8);
if (ue_db.count(rnti)) {
// Copy grant info
ul_sched_res->sched_grants[n].rnti = rnti;
ul_sched_res->sched_grants[n].current_tx_nb = sched_result.pusch[i].current_tx_nb;
ul_sched_res->sched_grants[n].needs_pdcch = sched_result.pusch[i].needs_pdcch;
memcpy(&ul_sched_res->sched_grants[n].grant, &sched_result.pusch[i].dci, sizeof(srslte_ra_ul_dci_t));
memcpy(&ul_sched_res->sched_grants[n].location, &sched_result.pusch[i].dci_location, sizeof(srslte_dci_location_t));
ul_sched_res->sched_grants[n].softbuffer = ue_db[rnti]->get_rx_softbuffer(tti);
if (sched_result.pusch[n].current_tx_nb == 0) {
srslte_softbuffer_rx_reset_tbs(ul_sched_res->sched_grants[n].softbuffer, sched_result.pusch[i].tbs*8);
}
ul_sched_res->sched_grants[n].data = ue_db[rnti]->request_buffer(tti, sched_result.pusch[i].tbs);
ul_sched_res->nof_grants++;
n++;
} else {
Warning("Invalid DL scheduling result. User 0x%x does not exist\n", rnti);
}
ul_sched_res->sched_grants[n].data = ue_db[rnti]->request_buffer(tti, sched_result.pusch[i].tbs);
ul_sched_res->nof_grants++;
n++;
} else {
Warning("Grant %d for rnti=0x%x has zero TBS\n", i, sched_result.pusch[i].rnti);
}
}
// No more uses of ue_db beyond here
pthread_rwlock_unlock(&rwlock);
// Copy PHICH actions
for (uint32_t i=0;i<sched_result.nof_phich_elems;i++) {
ul_sched_res->phich[i].ack = sched_result.phich[i].phich == sched_interface::ul_sched_phich_t::ACK;
ul_sched_res->phich[i].rnti = sched_result.phich[i].rnti;
ul_sched_res->phich[i].rnti = sched_result.phich[i].rnti;
}
ul_sched_res->nof_phich = sched_result.nof_phich_elems;
return SRSLTE_SUCCESS;
return SRSLTE_SUCCESS;
}
void mac::tti_clock()
{
timers_thread.tti_clock();
@ -799,7 +880,7 @@ srslte::timers::timer* mac::timer_get(uint32_t timer_id)
*******************************************************/
void mac::timer_thread::run_thread()
{
running=true;
running=true;
ttisync.set_producer_cntr(0);
ttisync.resync();
while(running) {
@ -829,34 +910,34 @@ void mac::timer_thread::tti_clock()
*
*******************************************************/
/// PDU-processing worker: initializes the mutex/condvar pair used by
/// notify()/stop() and launches the thread at MAC_PDU_THREAD_PRIO.
/// @param h  handler whose process_pdus() is called from the worker loop
mac::pdu_process::pdu_process(pdu_process_handler *h) : running(false) {
  handler = h;
  pthread_mutex_init(&mutex, NULL);
  pthread_cond_init(&cvar, NULL);
  have_data = false;
  start(MAC_PDU_THREAD_PRIO);
}
/// Stops the PDU-processing thread: clears the running flag under the
/// mutex, wakes the worker if it is waiting on the condvar, and joins it.
void mac::pdu_process::stop()
{
  pthread_mutex_lock(&mutex);
  running = false;
  pthread_cond_signal(&cvar);
  pthread_mutex_unlock(&mutex);

  wait_thread_finish();
}
/// Wakes the PDU-processing thread: sets have_data under the mutex and
/// signals the condvar the worker waits on.
void mac::pdu_process::notify()
{
  pthread_mutex_lock(&mutex);
  have_data = true;
  pthread_cond_signal(&cvar);
  pthread_mutex_unlock(&mutex);
}
void mac::pdu_process::run_thread()
{
running = true;
running = true;
while(running) {
have_data = handler->process_pdus();
if (!have_data) {
@ -871,13 +952,15 @@ void mac::pdu_process::run_thread()
/// Processes pending PDUs of every user. The read lock keeps ue entries
/// alive for the whole iteration (ue_rem() takes the write lock to erase).
/// @return true if any user still had PDUs to process
bool mac::process_pdus()
{
  pthread_rwlock_rdlock(&rwlock);
  bool ret = false;
  for (std::map<uint16_t, ue*>::iterator iter = ue_db.begin(); iter != ue_db.end(); ++iter) {
    ue *u = iter->second;
    ret = ret | u->process_pdus();
  }
  pthread_rwlock_unlock(&rwlock);
  return ret;
}

View File

@ -57,22 +57,16 @@ sched::sched() : bc_aggr_level(0), rar_aggr_level(0), avail_rbg(0), P(0), start_
bzero(&sched_cfg, sizeof(sched_cfg));
bzero(&common_locations, sizeof(common_locations));
bzero(&pdsch_re, sizeof(pdsch_re));
bzero(&mutex, sizeof(mutex));
for (int i = 0; i < 3; i++) {
bzero(rar_locations[i], sizeof(sched_ue::sched_dci_cce_t) * 10);
}
pthread_mutex_init(&mutex, NULL);
pthread_mutex_init(&mutex2, NULL);
reset();
}
// Destructor: frees the REGs structure allocated by cell_cfg() and destroys
// both mutexes initialized in the constructor (mutex protects ue_db/scheduling
// state; mutex2 serializes ue removal against the unlocked buffer-state calls).
sched::~sched()
{
  srslte_regs_free(&regs);
  pthread_mutex_destroy(&mutex);
  pthread_mutex_destroy(&mutex2);
}
void sched::init(rrc_interface_mac *rrc_, srslte::log* log)
@ -118,8 +112,6 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
return -1;
}
pthread_mutex_lock(&mutex);
memcpy(&cfg, cell_cfg, sizeof(sched_interface::cell_cfg_t));
// Get DCI locations
@ -147,9 +139,7 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
}
configured = true;
pthread_mutex_unlock(&mutex);
return 0;
return 0;
}
@ -161,31 +151,24 @@ int sched::cell_cfg(sched_interface::cell_cfg_t* cell_cfg)
/// Adds a new user or reconfigures an existing one in the scheduler,
/// applying the MCS limits from the scheduler configuration.
/// @return always 0
int sched::ue_cfg(uint16_t rnti, sched_interface::ue_cfg_t *ue_cfg)
{
  pthread_mutex_lock(&mutex);

  // Add or config user: operator[] creates the sched_ue entry if missing
  ue_db[rnti].set_cfg(rnti, ue_cfg, &cfg, &regs, log_h);
  ue_db[rnti].set_max_mcs(sched_cfg.pusch_max_mcs, sched_cfg.pdsch_max_mcs);
  ue_db[rnti].set_fixed_mcs(sched_cfg.pusch_mcs, sched_cfg.pdsch_mcs);

  pthread_mutex_unlock(&mutex);
  return 0;
}
/// Removes a user from the scheduler. Takes both mutexes: mutex guards the
/// scheduling loop, mutex2 guards the otherwise-unmutexed RLC buffer-state
/// callbacks, so the entry cannot be erased between their count() check and
/// map access.
/// @return 0 on success, -1 if the user does not exist
int sched::ue_rem(uint16_t rnti)
{
  pthread_mutex_lock(&mutex);
  pthread_mutex_lock(&mutex2);
  int ret = 0;
  if (ue_db.count(rnti)) {
    ue_db.erase(rnti);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
    ret = -1;
  }
  pthread_mutex_unlock(&mutex2);
  pthread_mutex_unlock(&mutex);
  return ret;
}
bool sched::ue_exists(uint16_t rnti)
@ -195,33 +178,27 @@ bool sched::ue_exists(uint16_t rnti)
/// Enables/disables the dedicated PHY configuration of a user as of the
/// current TTI. Logs an error if the user is unknown.
void sched::phy_config_enabled(uint16_t rnti, bool enabled)
{
  pthread_mutex_lock(&mutex);
  if (ue_db.count(rnti)) {
    ue_db[rnti].phy_config_enabled(current_tti, enabled);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_mutex_unlock(&mutex);
}
/// Configures a logical-channel bearer of an existing user.
/// @return 0 on success, -1 if the user does not exist
int sched::bearer_ue_cfg(uint16_t rnti, uint32_t lc_id, sched_interface::ue_bearer_cfg_t *cfg)
{
  pthread_mutex_lock(&mutex);
  int ret = 0;
  if (ue_db.count(rnti)) {
    ue_db[rnti].set_bearer_cfg(lc_id, cfg);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
    ret = -1;
  }
  pthread_mutex_unlock(&mutex);
  return ret;
}
int sched::bearer_ue_rem(uint16_t rnti, uint32_t lc_id)
{
pthread_mutex_lock(&mutex);
pthread_mutex_lock(&mutex2);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].rem_bearer(lc_id);
@ -229,49 +206,33 @@ int sched::bearer_ue_rem(uint16_t rnti, uint32_t lc_id)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex2);
pthread_mutex_unlock(&mutex);
return ret;
return ret;
}
/// Returns the pending new DL data (bytes) of a user for the current TTI,
/// or 0 if the user does not exist.
uint32_t sched::get_dl_buffer(uint16_t rnti)
{
  pthread_mutex_lock(&mutex);
  uint32_t ret = 0;
  if (ue_db.count(rnti)) {
    ret = ue_db[rnti].get_pending_dl_new_data(current_tti);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_mutex_unlock(&mutex);
  return ret;
}
/// Returns the pending new UL data (bytes) of a user for the current TTI,
/// or 0 if the user does not exist.
uint32_t sched::get_ul_buffer(uint16_t rnti)
{
  pthread_mutex_lock(&mutex);
  uint32_t ret = 0;
  if (ue_db.count(rnti)) {
    ret = ue_db[rnti].get_pending_ul_new_data(current_tti);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
  }
  pthread_mutex_unlock(&mutex);
  return ret;
}
/* \Warning: This function is not mutexed because it can produce late changes on the buffer state while
* the scheduler is already allocating data, resulting in empty grants.
* Ideally we would like the scheduler to query the RLC for buffer states in order to get the most updated
* buffer state with the minimum overhead. However, the current architecture is designed to be compliant
* with the FAPI interface
*
* We add a new mutex used only in ue_rem to avoid the UE being removed in between the access to
* ue_db.count() and the access to the std::map.
*/
int sched::dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t retx_queue)
{
pthread_mutex_lock(&mutex2);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].dl_buffer_state(lc_id, tx_queue, retx_queue);
@ -279,14 +240,11 @@ int sched::dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue,
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex2);
return ret;
}
/* \Warning Read comment in dl_rlc_buffer_state() */
int sched::dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code)
{
pthread_mutex_lock(&mutex2);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].mac_buffer_state(ce_code);
@ -294,12 +252,10 @@ int sched::dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex2);
return ret;
}
int sched::dl_ant_info(uint16_t rnti, LIBLTE_RRC_ANTENNA_INFO_DEDICATED_STRUCT *dl_ant_info) {
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].set_dl_ant_info(dl_ant_info);
@ -307,55 +263,47 @@ int sched::dl_ant_info(uint16_t rnti, LIBLTE_RRC_ANTENNA_INFO_DEDICATED_STRUCT *
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
/// Records a DL HARQ ACK/NACK for a user.
/// @return the user's set_ack_info() result (acked bytes), or -1 if unknown
int sched::dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t tb_idx, bool ack)
{
  pthread_mutex_lock(&mutex);
  int ret = 0;
  if (ue_db.count(rnti)) {
    ret = ue_db[rnti].set_ack_info(tti, tb_idx, ack);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
    ret = -1;
  }
  pthread_mutex_unlock(&mutex);
  return ret;
}
/// Records the PUSCH CRC result of a user for UL HARQ management.
/// @return 0 on success, -1 if the user does not exist
int sched::ul_crc_info(uint32_t tti, uint16_t rnti, bool crc)
{
  pthread_mutex_lock(&mutex);
  int ret = 0;
  if (ue_db.count(rnti)) {
    ue_db[rnti].set_ul_crc(tti, crc);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
    ret = -1;
  }
  pthread_mutex_unlock(&mutex);
  return ret;
}
/// Records a DL rank-indicator report for a user.
/// @return 0 on success, -1 if the user does not exist
int sched::dl_ri_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
{
  pthread_mutex_lock(&mutex);
  int ret = 0;
  if (ue_db.count(rnti)) {
    ue_db[rnti].set_dl_ri(tti, cqi_value);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
    ret = -1;
  }
  pthread_mutex_unlock(&mutex);
  return ret;
}
int sched::dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value)
{
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].set_dl_pmi(tti, pmi_value);
@ -363,13 +311,11 @@ int sched::dl_pmi_info(uint32_t tti, uint16_t rnti, uint32_t pmi_value)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
{
pthread_mutex_lock(&mutex);
int ret = 0;
if (ue_db.count(rnti)) {
ue_db[rnti].set_dl_cqi(tti, cqi_value);
@ -377,7 +323,6 @@ int sched::dl_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi_value)
Error("User rnti=0x%x not found\n", rnti);
ret = -1;
}
pthread_mutex_unlock(&mutex);
return ret;
}
@ -399,72 +344,62 @@ int sched::dl_rach_info(uint32_t tti, uint32_t ra_id, uint16_t rnti, uint32_t es
/// Records an UL channel-quality report for a user.
/// @return 0 on success, -1 if the user does not exist
int sched::ul_cqi_info(uint32_t tti, uint16_t rnti, uint32_t cqi, uint32_t ul_ch_code)
{
  pthread_mutex_lock(&mutex);
  int ret = 0;
  if (ue_db.count(rnti)) {
    ue_db[rnti].set_ul_cqi(tti, cqi, ul_ch_code);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
    ret = -1;
  }
  pthread_mutex_unlock(&mutex);
  return ret;
}
/// Updates the UL buffer-status report of a logical channel.
/// @param set_value  true to overwrite the buffer state, false to accumulate
/// @return 0 on success, -1 if the user does not exist
int sched::ul_bsr(uint16_t rnti, uint32_t lcid, uint32_t bsr, bool set_value)
{
  pthread_mutex_lock(&mutex);
  int ret = 0;
  if (ue_db.count(rnti)) {
    ue_db[rnti].ul_buffer_state(lcid, bsr, set_value);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
    ret = -1;
  }
  pthread_mutex_unlock(&mutex);
  return ret;
}
/// Notifies the scheduler of bytes received on an UL logical channel so
/// the pending buffer state can be decreased.
/// @return 0 on success, -1 if the user does not exist
int sched::ul_recv_len(uint16_t rnti, uint32_t lcid, uint32_t len)
{
  pthread_mutex_lock(&mutex);
  int ret = 0;
  if (ue_db.count(rnti)) {
    ue_db[rnti].ul_recv_len(lcid, len);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
    ret = -1;
  }
  pthread_mutex_unlock(&mutex);
  return ret;
}
/// Records a power-headroom report for a user.
/// @return 0 on success, -1 if the user does not exist
int sched::ul_phr(uint16_t rnti, int phr)
{
  pthread_mutex_lock(&mutex);
  int ret = 0;
  if (ue_db.count(rnti)) {
    ue_db[rnti].ul_phr(phr);
  } else {
    Error("User rnti=0x%x not found\n", rnti);
    ret = -1;
  }
  pthread_mutex_unlock(&mutex);
  return ret;
}
/// Flags a pending scheduling request for a user.
/// @return 0 on success, -1 if the user does not exist
int sched::ul_sr_info(uint32_t tti, uint16_t rnti)
{
  pthread_mutex_lock(&mutex);
  int ret = 0;
  if (ue_db.count(rnti)) {
    ue_db[rnti].set_sr();
  } else {
    Error("User rnti=0x%x not found\n", rnti);
    ret = -1;
  }
  pthread_mutex_unlock(&mutex);
  return ret;
}
void sched::tpc_inc(uint16_t rnti)
@ -756,7 +691,6 @@ int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result)
if (!configured) {
return 0;
}
pthread_mutex_lock(&mutex);
/* If ul_sched() not yet called this tti, reset CCE state */
if (current_tti != tti) {
@ -786,8 +720,7 @@ int sched::dl_sched(uint32_t tti, sched_interface::dl_sched_res_t* sched_result)
/* Set CFI */
sched_result->cfi = current_cfi;
pthread_mutex_unlock(&mutex);
return 0;
return 0;
}
// Uplink sched
@ -798,8 +731,6 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
return 0;
}
pthread_mutex_lock(&mutex);
/* If dl_sched() not yet called this tti (this tti is +4ms advanced), reset CCE state */
if (TTI_TX(current_tti) != tti) {
bzero(used_cce, MAX_CCE*sizeof(bool));
@ -858,14 +789,8 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
pucch.L = (uint32_t) cfg.nrb_pucch;
if(!ul_metric->update_allocation(pucch)) {
log_h->warning("SCHED: Failed to allocate PUCCH\n");
}
for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
sched_ue *user = (sched_ue *) &iter->second;
uint16_t rnti = (uint16_t) iter->first;
uint32_t prb_idx[2] = {0, 0};
if(user->get_pucch_sched(current_tti, prb_idx)) {
user->has_pucch = true;
}
} else {
log_h->debug("Allocating PUCCH (%d,%d)\n", pucch.RB_start, pucch.RB_start+pucch.L);
}
} else {
for(it_t iter=ue_db.begin(); iter!=ue_db.end(); ++iter) {
@ -888,6 +813,10 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
ul_harq_proc::ul_alloc_t prach = {cfg.prach_freq_offset, 6};
if(!ul_metric->update_allocation(prach)) {
log_h->warning("SCHED: Failed to allocate PRACH RBs within (%d,%d)\n", prach.RB_start, prach.RB_start + prach.L);
if (prach.RB_start + prach.L > cfg.cell.nof_prb) {
fprintf(stderr, "Invalid PRACH configuration: frequency offset=%d outside bandwidth limits\n", cfg.prach_freq_offset);
return -1;
}
}
else {
log_h->debug("SCHED: Allocated PRACH RBs within (%d,%d)\n", prach.RB_start, prach.RB_start + prach.L);
@ -999,8 +928,6 @@ int sched::ul_sched(uint32_t tti, srsenb::sched_interface::ul_sched_res_t* sched
sched_result->nof_dci_elems = nof_dci_elems;
sched_result->nof_phich_elems = nof_phich_elems;
pthread_mutex_unlock(&mutex);
return SRSLTE_SUCCESS;
}

View File

@ -60,15 +60,24 @@ sched_ue::sched_ue() : dl_next_alloc(NULL), ul_next_alloc(NULL), has_pucch(false
bzero(&dl_harq, sizeof(dl_harq));
bzero(&ul_harq, sizeof(ul_harq));
bzero(&dl_ant_info, sizeof(dl_ant_info));
pthread_mutex_init(&mutex, NULL);
reset();
}
sched_ue::~sched_ue() {
pthread_mutex_lock(&mutex);
pthread_mutex_unlock(&mutex);
pthread_mutex_destroy(&mutex);
}
void sched_ue::set_cfg(uint16_t rnti_, sched_interface::ue_cfg_t *cfg_, sched_interface::cell_cfg_t *cell_cfg,
srslte_regs_t *regs, srslte::log *log_h_)
{
reset();
rnti = rnti_;
pthread_mutex_lock(&mutex);
rnti = rnti_;
log_h = log_h_;
memcpy(&cell, &cell_cfg->cell, sizeof(srslte_cell_t));
P = srslte_ra_type0_P(cell.nof_prb);
@ -81,11 +90,7 @@ void sched_ue::set_cfg(uint16_t rnti_, sched_interface::ue_cfg_t *cfg_, sched_in
}
Info("SCHED: Added user rnti=0x%x\n", rnti);
for (int i=0;i<sched_interface::MAX_LC;i++) {
set_bearer_cfg(i, &cfg.ue_bearers[i]);
}
// Config HARQ processes
// Config HARQ processes
for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
dl_harq[i].config(i, cfg.maxharq_tx, log_h);
ul_harq[i].config(i, cfg.maxharq_tx, log_h);
@ -96,11 +101,18 @@ void sched_ue::set_cfg(uint16_t rnti_, sched_interface::ue_cfg_t *cfg_, sched_in
for (int sf_idx=0;sf_idx<10;sf_idx++) {
sched::generate_cce_location(regs, &dci_locations[cfi][sf_idx], cfi+1, sf_idx, rnti);
}
}
}
pthread_mutex_unlock(&mutex);
for (int i=0;i<sched_interface::MAX_LC;i++) {
set_bearer_cfg(i, &cfg.ue_bearers[i]);
}
}
void sched_ue::reset()
{
pthread_mutex_lock(&mutex);
bzero(&cfg, sizeof(sched_interface::ue_cfg_t));
sr = false;
next_tpc_pusch = 1;
@ -123,17 +135,22 @@ void sched_ue::reset()
ul_harq[i].reset(tb);
}
}
pthread_mutex_unlock(&mutex);
for (int i=0;i<sched_interface::MAX_LC; i++) {
rem_bearer(i);
}
}
void sched_ue::set_fixed_mcs(int mcs_ul, int mcs_dl) {
fixed_mcs_ul = mcs_ul;
fixed_mcs_dl = mcs_dl;
pthread_mutex_lock(&mutex);
fixed_mcs_ul = mcs_ul;
fixed_mcs_dl = mcs_dl;
pthread_mutex_unlock(&mutex);
}
void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl) {
pthread_mutex_lock(&mutex);
if (mcs_ul < 0) {
max_mcs_ul = 28;
} else {
@ -144,6 +161,7 @@ void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl) {
} else {
max_mcs_dl = mcs_dl;
}
pthread_mutex_unlock(&mutex);
}
@ -155,6 +173,7 @@ void sched_ue::set_max_mcs(int mcs_ul, int mcs_dl) {
void sched_ue::set_bearer_cfg(uint32_t lc_id, sched_interface::ue_bearer_cfg_t* cfg)
{
pthread_mutex_lock(&mutex);
if (lc_id < sched_interface::MAX_LC) {
memcpy(&lch[lc_id].cfg, cfg, sizeof(sched_interface::ue_bearer_cfg_t));
lch[lc_id].buf_tx = 0;
@ -163,13 +182,16 @@ void sched_ue::set_bearer_cfg(uint32_t lc_id, sched_interface::ue_bearer_cfg_t*
Info("SCHED: Set bearer config lc_id=%d, direction=%d\n", lc_id, (int) lch[lc_id].cfg.direction);
}
}
pthread_mutex_unlock(&mutex);
}
void sched_ue::rem_bearer(uint32_t lc_id)
{
pthread_mutex_lock(&mutex);
if (lc_id < sched_interface::MAX_LC) {
bzero(&lch[lc_id], sizeof(ue_bearer_t));
}
pthread_mutex_unlock(&mutex);
}
void sched_ue::phy_config_enabled(uint32_t tti, bool enabled)
@ -180,6 +202,7 @@ void sched_ue::phy_config_enabled(uint32_t tti, bool enabled)
void sched_ue::ul_buffer_state(uint8_t lc_id, uint32_t bsr, bool set_value)
{
pthread_mutex_lock(&mutex);
if (lc_id < sched_interface::MAX_LC) {
if (set_value) {
lch[lc_id].bsr = bsr;
@ -189,25 +212,30 @@ void sched_ue::ul_buffer_state(uint8_t lc_id, uint32_t bsr, bool set_value)
}
Debug("SCHED: bsr=%d, lcid=%d, bsr={%d,%d,%d,%d}\n", bsr, lc_id,
lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
pthread_mutex_unlock(&mutex);
}
void sched_ue::ul_phr(int phr)
{
power_headroom= phr;
power_headroom = phr;
}
void sched_ue::dl_buffer_state(uint8_t lc_id, uint32_t tx_queue, uint32_t retx_queue)
{
pthread_mutex_lock(&mutex);
if (lc_id < sched_interface::MAX_LC) {
lch[lc_id].buf_retx = retx_queue;
lch[lc_id].buf_tx = tx_queue;
Debug("SCHED: DL lcid=%d buffer_state=%d,%d\n", lc_id, tx_queue, retx_queue);
}
pthread_mutex_unlock(&mutex);
}
void sched_ue::mac_buffer_state(uint32_t ce_code)
{
buf_mac++;
pthread_mutex_lock(&mutex);
buf_mac++;
pthread_mutex_unlock(&mutex);
}
void sched_ue::set_sr()
@ -222,42 +250,59 @@ void sched_ue::unset_sr()
bool sched_ue::pucch_sr_collision(uint32_t current_tti, uint32_t n_cce)
{
bool ret = false;
pthread_mutex_lock(&mutex);
uint32_t n_pucch_sr, n_pucch_nosr;
srslte_pucch_sched_t pucch_sched;
bool has_sr;
if (!phy_config_dedicated_enabled) {
return false;
goto unlock;
}
srslte_pucch_sched_t pucch_sched;
pucch_sched.sps_enabled = false;
pucch_sched.n_pucch_sr = cfg.sr_N_pucch;
pucch_sched.n_pucch_2 = cfg.n_pucch_cqi;
pucch_sched.N_pucch_1 = cfg.pucch_cfg.n1_pucch_an;
bool has_sr = cfg.sr_enabled && srslte_ue_ul_sr_send_tti(cfg.sr_I, current_tti);
has_sr = cfg.sr_enabled && srslte_ue_ul_sr_send_tti(cfg.sr_I, current_tti);
if (!has_sr) {
return false;
goto unlock;
}
uint32_t n_pucch_sr = srslte_pucch_get_npucch(n_cce, SRSLTE_PUCCH_FORMAT_1A, true, &pucch_sched);
uint32_t n_pucch_nosr = srslte_pucch_get_npucch(n_cce, SRSLTE_PUCCH_FORMAT_1A, false, &pucch_sched);
n_pucch_sr = srslte_pucch_get_npucch(n_cce, SRSLTE_PUCCH_FORMAT_1A, true, &pucch_sched);
n_pucch_nosr = srslte_pucch_get_npucch(n_cce, SRSLTE_PUCCH_FORMAT_1A, false, &pucch_sched);
if (srslte_pucch_n_prb(&cfg.pucch_cfg, SRSLTE_PUCCH_FORMAT_1A, n_pucch_sr, cell.nof_prb, cell.cp, 0) ==
srslte_pucch_n_prb(&cfg.pucch_cfg, SRSLTE_PUCCH_FORMAT_1A, n_pucch_nosr, cell.nof_prb, cell.cp, 0))
{
return true;
ret = true;
} else {
return false;
ret = false;
}
unlock:
pthread_mutex_unlock(&mutex);
return ret;
}
bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
{
bool ret = false;
bool has_sr;
pthread_mutex_lock(&mutex);
if (!phy_config_dedicated_enabled) {
return false;
goto unlock;
}
srslte_pucch_sched_t pucch_sched;
pucch_sched.sps_enabled = false;
pucch_sched.n_pucch_sr = cfg.sr_N_pucch;
pucch_sched.n_pucch_2 = cfg.n_pucch_cqi;
pucch_sched.N_pucch_1 = cfg.pucch_cfg.n1_pucch_an;
bool has_sr = cfg.sr_enabled && srslte_ue_ul_sr_send_tti(cfg.sr_I, current_tti);
has_sr = cfg.sr_enabled && srslte_ue_ul_sr_send_tti(cfg.sr_I, current_tti);
// First check if it has pending ACKs
for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
@ -270,7 +315,8 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
Debug("SCHED: Reserved Format1A PUCCH for rnti=0x%x, n_prb=%d,%d, n_pucch=%d, ncce=%d, has_sr=%d, n_pucch_1=%d\n",
rnti, prb_idx[0], prb_idx[1], n_pucch, dl_harq[i].get_n_cce(), has_sr, pucch_sched.N_pucch_1);
}
return true;
ret = true;
goto unlock;
}
}
// If there is no Format1A/B, then check if it's expecting Format1
@ -281,7 +327,8 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
}
}
Debug("SCHED: Reserved Format1 PUCCH for rnti=0x%x, n_prb=%d,%d, n_pucch=%d\n", rnti, prb_idx[0], prb_idx[1], cfg.sr_N_pucch);
return true;
ret = true;
goto unlock;
}
// Finally check Format2 (periodic CQI)
if (cfg.cqi_enabled && srslte_cqi_send(cfg.cqi_idx, current_tti)) {
@ -292,27 +339,41 @@ bool sched_ue::get_pucch_sched(uint32_t current_tti, uint32_t prb_idx[2])
Debug("SCHED: Reserved Format2 PUCCH for rnti=0x%x, n_prb=%d,%d, n_pucch=%d, pmi_idx=%d\n",
rnti, prb_idx[0], prb_idx[1], cfg.cqi_pucch, cfg.cqi_idx);
}
return true;
ret = true;
goto unlock;
}
return false;
ret = false;
unlock:
pthread_mutex_unlock(&mutex);
return ret;
}
int sched_ue::set_ack_info(uint32_t tti, uint32_t tb_idx, bool ack)
{
pthread_mutex_lock(&mutex);
int ret = -1;
for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
if (TTI_TX(dl_harq[i].get_tti()) == tti) {
Debug("SCHED: Set ACK=%d for rnti=0x%x, pid=%d.%d, tti=%d\n", ack, rnti, i, tb_idx, tti);
dl_harq[i].set_ack(tb_idx, ack);
return dl_harq[i].get_tbs(tb_idx);
ret = dl_harq[i].get_tbs(tb_idx);
goto unlock;
}
}
Warning("SCHED: Received ACK info for unknown TTI=%d\n", tti);
return -1;
ret = -1;
unlock:
pthread_mutex_unlock(&mutex);
return ret;
}
void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
{
pthread_mutex_lock(&mutex);
// Remove PDCP header??
if (len > 4) {
len -= 4;
@ -328,54 +389,72 @@ void sched_ue::ul_recv_len(uint32_t lcid, uint32_t len)
}
Debug("SCHED: recv_len=%d, lcid=%d, bsr={%d,%d,%d,%d}\n", len, lcid,
lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
pthread_mutex_unlock(&mutex);
}
void sched_ue::set_ul_crc(uint32_t tti, bool crc_res)
{
pthread_mutex_lock(&mutex);
get_ul_harq(tti)->set_ack(0, crc_res);
pthread_mutex_unlock(&mutex);
}
void sched_ue::set_dl_ri(uint32_t tti, uint32_t ri)
{
pthread_mutex_lock(&mutex);
dl_ri = ri;
dl_ri_tti = tti;
pthread_mutex_unlock(&mutex);
}
void sched_ue::set_dl_pmi(uint32_t tti, uint32_t pmi)
{
pthread_mutex_lock(&mutex);
dl_pmi = pmi;
dl_pmi_tti = tti;
pthread_mutex_unlock(&mutex);
}
void sched_ue::set_dl_cqi(uint32_t tti, uint32_t cqi)
{
pthread_mutex_lock(&mutex);
dl_cqi = cqi;
dl_cqi_tti = tti;
pthread_mutex_unlock(&mutex);
}
void sched_ue::set_dl_ant_info(LIBLTE_RRC_ANTENNA_INFO_DEDICATED_STRUCT *d)
{
pthread_mutex_lock(&mutex);
memcpy(&dl_ant_info, d, sizeof(LIBLTE_RRC_ANTENNA_INFO_DEDICATED_STRUCT));
pthread_mutex_unlock(&mutex);
}
void sched_ue::set_ul_cqi(uint32_t tti, uint32_t cqi, uint32_t ul_ch_code)
{
ul_cqi = cqi;
ul_cqi_tti = tti;
pthread_mutex_lock(&mutex);
ul_cqi = cqi;
ul_cqi_tti = tti;
pthread_mutex_unlock(&mutex);
}
void sched_ue::tpc_inc() {
pthread_mutex_lock(&mutex);
if (power_headroom > 0) {
next_tpc_pusch = 3;
next_tpc_pucch = 3;
}
log_h->info("SCHED: Set TCP=%d for rnti=0x%x\n", next_tpc_pucch, rnti);
pthread_mutex_unlock(&mutex);
}
void sched_ue::tpc_dec() {
pthread_mutex_lock(&mutex);
next_tpc_pusch = 0;
next_tpc_pucch = 0;
log_h->info("SCHED: Set TCP=%d for rnti=0x%x\n", next_tpc_pucch, rnti);
pthread_mutex_unlock(&mutex);
}
/*******************************************************
@ -391,6 +470,8 @@ int sched_ue::generate_format1(dl_harq_proc *h,
uint32_t tti,
uint32_t cfi)
{
pthread_mutex_lock(&mutex);
srslte_ra_dl_dci_t *dci = &data->dci;
bzero(dci, sizeof(srslte_ra_dl_dci_t));
@ -410,7 +491,7 @@ int sched_ue::generate_format1(dl_harq_proc *h,
}
if (h->is_empty(0)) {
uint32_t req_bytes = get_pending_dl_new_data(tti);
uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
uint32_t nof_prb = format1_count_prb(h->get_rbgmask(), cell.nof_prb);
srslte_ra_dl_grant_t grant;
@ -462,8 +543,9 @@ int sched_ue::generate_format1(dl_harq_proc *h,
data->tbs[1] = 0;
dci->tb_en[0] = true;
dci->tb_en[1] = false;
}
return tbs;
}
pthread_mutex_unlock(&mutex);
return tbs;
}
// Generates a Format2a grant
@ -471,8 +553,21 @@ int sched_ue::generate_format2a(dl_harq_proc *h,
sched_interface::dl_sched_data_t *data,
uint32_t tti,
uint32_t cfi)
{
pthread_mutex_lock(&mutex);
int ret = generate_format2a_unlocked(h, data, tti, cfi);
pthread_mutex_unlock(&mutex);
return ret;
}
// Generates a Format2a grant
int sched_ue::generate_format2a_unlocked(dl_harq_proc *h,
sched_interface::dl_sched_data_t *data,
uint32_t tti,
uint32_t cfi)
{
bool tb_en[SRSLTE_MAX_TB] = {false};
srslte_ra_dl_dci_t *dci = &data->dci;
bzero(dci, sizeof(srslte_ra_dl_dci_t));
@ -512,7 +607,7 @@ int sched_ue::generate_format2a(dl_harq_proc *h,
}
for (uint32_t tb = 0; tb < SRSLTE_MAX_TB; tb++) {
uint32_t req_bytes = get_pending_dl_new_data(tti);
uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
int mcs = 0;
int tbs = 0;
@ -566,17 +661,22 @@ int sched_ue::generate_format2a(dl_harq_proc *h,
dci->tpc_pucch = (uint8_t) next_tpc_pucch;
next_tpc_pucch = 1;
return data->tbs[0] + data->tbs[1];
int ret = data->tbs[0] + data->tbs[1];
return ret;
}
// Generates a Format2 grant
int sched_ue::generate_format2(dl_harq_proc *h,
sched_interface::dl_sched_data_t *data,
uint32_t tti,
uint32_t cfi)
{
pthread_mutex_lock(&mutex);
/* Call Format 2a (common) */
int ret = generate_format2a(h, data, tti, cfi);
int ret = generate_format2a_unlocked(h, data, tti, cfi);
/* Compute precoding information */
if (SRSLTE_RA_DL_GRANT_NOF_TB(&data->dci) == 1) {
@ -585,6 +685,8 @@ int sched_ue::generate_format2(dl_harq_proc *h,
data->dci.pinfo = (uint8_t) (dl_pmi & 1);
}
pthread_mutex_unlock(&mutex);
return ret;
}
@ -594,7 +696,9 @@ int sched_ue::generate_format0(ul_harq_proc *h,
uint32_t tti,
bool cqi_request)
{
srslte_ra_ul_dci_t *dci = &data->dci;
pthread_mutex_lock(&mutex);
srslte_ra_ul_dci_t *dci = &data->dci;
bzero(dci, sizeof(srslte_ra_ul_dci_t));
int mcs = 0;
@ -608,7 +712,7 @@ int sched_ue::generate_format0(ul_harq_proc *h,
h->new_tx(tti, mcs, tbs);
} else if (h->is_empty(0)) {
uint32_t req_bytes = get_pending_ul_new_data(tti);
uint32_t req_bytes = get_pending_ul_new_data_unlocked(tti);
uint32_t N_srs = 0;
uint32_t nof_re = (2*(SRSLTE_CP_NSYMB(cell.cp)-1) - N_srs)*allocation.L*SRSLTE_NRE;
@ -645,8 +749,10 @@ int sched_ue::generate_format0(ul_harq_proc *h,
dci->tpc_pusch = next_tpc_pusch;
next_tpc_pusch = 1;
}
return tbs;
pthread_mutex_unlock(&mutex);
return tbs;
}
/*******************************************************
@ -678,37 +784,43 @@ bool sched_ue::is_first_dl_tx()
}
bool sched_ue::needs_cqi(uint32_t tti, bool will_be_sent)
{
bool ret = false;
if (phy_config_dedicated_enabled &&
cfg.aperiodic_cqi_period &&
get_pending_dl_new_data(tti) > 0)
{
pthread_mutex_lock(&mutex);
bool ret = needs_cqi_unlocked(tti, will_be_sent);
pthread_mutex_unlock(&mutex);
return ret;
}
// Private lock-free implemenentation
bool sched_ue::needs_cqi_unlocked(uint32_t tti, bool will_be_sent)
{
bool ret = false;
if (phy_config_dedicated_enabled &&
cfg.aperiodic_cqi_period &&
get_pending_dl_new_data_unlocked(tti) > 0)
{
uint32_t interval = srslte_tti_interval(tti, dl_cqi_tti);
uint32_t interval = srslte_tti_interval(tti, dl_cqi_tti);
bool needscqi = interval >= cfg.aperiodic_cqi_period;
if (needscqi) {
uint32_t interval_sent = srslte_tti_interval(tti, cqi_request_tti);
if (needscqi) {
uint32_t interval_sent = srslte_tti_interval(tti, cqi_request_tti);
if (interval_sent >= 16) {
if (will_be_sent) {
cqi_request_tti = tti;
cqi_request_tti = tti;
}
Debug("SCHED: Needs_cqi, last_sent=%d, will_be_sent=%d\n", cqi_request_tti, will_be_sent);
ret = true;
}
ret = true;
}
}
}
return ret;
return ret;
}
uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
{
uint32_t pending_data = 0;
for (int i=0;i<sched_interface::MAX_LC;i++) {
if (bearer_is_dl(&lch[i])) {
pending_data += lch[i].buf_retx + lch[i].buf_tx;
}
}
return pending_data;
pthread_mutex_lock(&mutex);
uint32_t pending_data = get_pending_dl_new_data_unlocked(tti);
pthread_mutex_unlock(&mutex);
return pending_data;
}
/// Use this function in the dl-metric to get the bytes to be scheduled. It accounts for the UE data,
@ -717,46 +829,78 @@ uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
/// \return number of bytes to be allocated
uint32_t sched_ue::get_pending_dl_new_data_total(uint32_t tti)
{
uint32_t req_bytes = get_pending_dl_new_data(tti);
pthread_mutex_lock(&mutex);
uint32_t req_bytes = get_pending_dl_new_data_unlocked(tti);
if(req_bytes>0) {
req_bytes += (req_bytes < 128) ? 2 : 3; // consider the header
if(is_first_dl_tx()) {
req_bytes += 6; // count for RAR
}
}
pthread_mutex_unlock(&mutex);
return req_bytes;
}
// Private lock-free implementation
uint32_t sched_ue::get_pending_dl_new_data_unlocked(uint32_t tti)
{
uint32_t pending_data = 0;
for (int i=0;i<sched_interface::MAX_LC;i++) {
if (bearer_is_dl(&lch[i])) {
pending_data += lch[i].buf_retx + lch[i].buf_tx;
}
}
return pending_data;
}
uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
{
uint32_t pending_data = 0;
pthread_mutex_lock(&mutex);
uint32_t pending_data = get_pending_ul_new_data_unlocked(tti);
pthread_mutex_unlock(&mutex);
return pending_data;
}
uint32_t sched_ue::get_pending_ul_old_data()
{
pthread_mutex_lock(&mutex);
uint32_t pending_data = get_pending_ul_old_data_unlocked();
pthread_mutex_unlock(&mutex);
return pending_data;
}
// Private lock-free implementation
uint32_t sched_ue::get_pending_ul_new_data_unlocked(uint32_t tti)
{
uint32_t pending_data = 0;
for (int i=0;i<sched_interface::MAX_LC;i++) {
if (bearer_is_ul(&lch[i])) {
pending_data += lch[i].bsr;
}
}
if (!pending_data && is_sr_triggered()) {
return 512;
return 512;
}
if (!pending_data && needs_cqi(tti)) {
return 128;
if (!pending_data && needs_cqi_unlocked(tti)) {
return 128;
}
uint32_t pending_ul_data = get_pending_ul_old_data();
uint32_t pending_ul_data = get_pending_ul_old_data_unlocked();
if (pending_data > pending_ul_data) {
pending_data -= pending_ul_data;
pending_data -= pending_ul_data;
} else {
pending_data = 0;
pending_data = 0;
}
if (pending_data) {
Debug("SCHED: pending_data=%d, pending_ul_data=%d, bsr={%d,%d,%d,%d}\n", pending_data,pending_ul_data,
lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
lch[0].bsr, lch[1].bsr, lch[2].bsr, lch[3].bsr);
}
return pending_data;
return pending_data;
}
uint32_t sched_ue::get_pending_ul_old_data()
// Private lock-free implementation
uint32_t sched_ue::get_pending_ul_old_data_unlocked()
{
uint32_t pending_data = 0;
uint32_t pending_data = 0;
for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
pending_data += ul_harq[i].get_pending_data();
}
@ -775,6 +919,8 @@ uint32_t sched_ue::rgb_to_prb(uint32_t nof_rbg)
uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols)
{
pthread_mutex_lock(&mutex);
int mcs = 0;
uint32_t nof_re = 0;
int tbs = 0;
@ -791,10 +937,12 @@ uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_sym
if (tbs > 0) {
nbytes = tbs;
} else if (tbs < 0) {
pthread_mutex_unlock(&mutex);
return 0;
}
}
pthread_mutex_unlock(&mutex);
return n;
}
@ -803,13 +951,15 @@ uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
int mcs = 0;
int tbs = 0;
uint32_t nbytes = 0;
uint32_t N_srs = 0;
uint32_t n = 0;
uint32_t N_srs = 0;
uint32_t n = 0;
if (req_bytes == 0) {
return 0;
}
pthread_mutex_lock(&mutex);
for (n=1;n<cell.nof_prb && nbytes < req_bytes + 4;n++) {
uint32_t nof_re = (2*(SRSLTE_CP_NSYMB(cell.cp)-1) - N_srs)*n*SRSLTE_NRE;
int tbs = 0;
@ -826,7 +976,9 @@ uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
while (!srslte_dft_precoding_valid_prb(n) && n<=cell.nof_prb) {
n++;
}
pthread_mutex_unlock(&mutex);
return n;
}
@ -839,7 +991,10 @@ bool sched_ue::is_sr_triggered()
dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
{
#if ASYNC_DL_SCHED
int oldest_idx=-1;
pthread_mutex_lock(&mutex);
int oldest_idx=-1;
uint32_t oldest_tti = 0;
for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
if (dl_harq[i].has_pending_retx(0, tti) || dl_harq[i].has_pending_retx(1, tti)) {
@ -850,11 +1005,15 @@ dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
}
}
}
dl_harq_proc *h = NULL;
if (oldest_idx >= 0) {
return &dl_harq[oldest_idx];
} else {
return NULL;
h = &dl_harq[oldest_idx];
}
pthread_mutex_unlock(&mutex);
return h;
#else
return &dl_harq[tti%SCHED_MAX_HARQ_PROC];
#endif
@ -862,12 +1021,16 @@ dl_harq_proc* sched_ue::get_pending_dl_harq(uint32_t tti)
dl_harq_proc* sched_ue::get_empty_dl_harq()
{
for (int i=0;i<SCHED_MAX_HARQ_PROC;i++) {
pthread_mutex_lock(&mutex);
dl_harq_proc *h = NULL;
for (int i=0;i<SCHED_MAX_HARQ_PROC && !h;i++) {
if (dl_harq[i].is_empty(0) && dl_harq[i].is_empty(1)) {
return &dl_harq[i];
h = &dl_harq[i];
}
}
return NULL;
pthread_mutex_unlock(&mutex);
return h;
}
ul_harq_proc* sched_ue::get_ul_harq(uint32_t tti)
@ -908,6 +1071,7 @@ srslte_dci_format_t sched_ue::get_dci_format() {
/* Find lowest DCI aggregation level supported by the UE spectral efficiency */
uint32_t sched_ue::get_aggr_level(uint32_t nof_bits)
{
pthread_mutex_lock(&mutex);
uint32_t l=0;
float max_coderate = srslte_cqi_to_coderate(dl_cqi);
float coderate = 99;
@ -922,7 +1086,8 @@ uint32_t sched_ue::get_aggr_level(uint32_t nof_bits)
l++;
} while(l<l_max && factor*coderate > max_coderate);
Debug("SCHED: CQI=%d, l=%d, nof_bits=%d, coderate=%.2f, max_coderate=%.2f\n", dl_cqi, l, nof_bits, coderate, max_coderate);
return l;
pthread_mutex_unlock(&mutex);
return l;
}
sched_ue::sched_dci_cce_t* sched_ue::get_locations(uint32_t cfi, uint32_t sf_idx)
@ -955,7 +1120,7 @@ int sched_ue::alloc_pdu(int tbs_bytes, sched_interface::dl_sched_pdu_t* pdu)
pdu->nbytes = x;
Debug("SCHED: Allocated lcid=%d, nbytes=%d, tbs_bytes=%d\n", pdu->lcid, pdu->nbytes, tbs_bytes);
}
return x;
return x;
}
uint32_t sched_ue::format1_count_prb(uint32_t bitmask, uint32_t cell_nof_prb) {
@ -1029,18 +1194,15 @@ int sched_ue::alloc_tbs(uint32_t nof_prb,
uint32_t max_Qm = is_ul?4:6; // Allow 16-QAM in PUSCH Only
// TODO: Compute real spectral efficiency based on PUSCH-UCI configuration
if (has_pucch && is_ul) {
cqi-=3;
}
int tbs = cqi_to_tbs(cqi, nof_prb, nof_re, max_mcs, max_Qm, &sel_mcs)/8;
/* If less bytes are requested, lower the MCS */
if (tbs > (int) req_bytes && req_bytes > 0) {
uint32_t req_tbs_idx = srslte_ra_tbs_to_table_idx(req_bytes*8, nof_prb);
uint32_t req_mcs = srslte_ra_mcs_from_tbs_idx(req_tbs_idx);
if (req_mcs < sel_mcs) {
sel_mcs = req_mcs;
sel_mcs = req_mcs;
tbs = srslte_ra_tbs_from_idx(req_tbs_idx, nof_prb)/8;
}
}

View File

@ -115,8 +115,7 @@ srslte_softbuffer_tx_t* ue::get_tx_softbuffer(uint32_t harq_process, uint32_t tb
uint8_t* ue::request_buffer(uint32_t tti, uint32_t len)
{
uint8_t *ret = NULL;
pthread_mutex_lock(&mutex);
if (len > 0) {
if (len > 0) {
if (!pending_buffers[tti%NOF_HARQ_PROCESSES]) {
ret = pdus.request(len);
pending_buffers[tti%NOF_HARQ_PROCESSES] = ret;
@ -127,8 +126,7 @@ uint8_t* ue::request_buffer(uint32_t tti, uint32_t len)
} else {
log_h->warning("Requesting buffer for zero bytes\n");
}
pthread_mutex_unlock(&mutex);
return ret;
return ret;
}
bool ue::process_pdus()

View File

@ -41,6 +41,8 @@ using namespace std;
namespace srsenb{
#define MAX(a,b) (a>b?a:b)
char const * const prefixes[2][9] =
{
{ "", "m", "u", "n", "p", "f", "a", "z", "y", },
@ -125,25 +127,41 @@ void metrics_stdout::print_metrics()
}
cout << std::hex << metrics.mac[i].rnti << " ";
cout << float_to_string(metrics.mac[i].dl_cqi, 2);
cout << float_to_string(MAX(0.1,metrics.mac[i].dl_cqi), 2);
cout << float_to_string(metrics.mac[i].dl_ri, 1);
cout << float_to_string(metrics.phy[i].dl.mcs, 2);
cout << float_to_eng_string((float) metrics.mac[i].tx_brate/metrics_report_period, 2);
if(not isnan(metrics.phy[i].dl.mcs)) {
cout << float_to_string(MAX(0.1,metrics.phy[i].dl.mcs), 2);
} else {
cout << float_to_string(0,2);
}
if (metrics.mac[i].tx_brate > 0 && metrics_report_period) {
cout << float_to_eng_string(MAX(0.1,(float) metrics.mac[i].tx_brate/metrics_report_period), 2);
} else {
cout << float_to_string(0, 2) << "";
}
if (metrics.mac[i].tx_pkts > 0 && metrics.mac[i].tx_errors) {
cout << float_to_string((float) 100*metrics.mac[i].tx_errors/metrics.mac[i].tx_pkts, 1) << "%";
cout << float_to_string(MAX(0.1,(float) 100*metrics.mac[i].tx_errors/metrics.mac[i].tx_pkts), 1) << "%";
} else {
cout << float_to_string(0, 1) << "%";
}
cout << float_to_string(metrics.phy[i].ul.sinr, 2);
if(not isnan(metrics.phy[i].ul.sinr)) {
cout << float_to_string(MAX(0.1,metrics.phy[i].ul.sinr), 2);
} else {
cout << float_to_string(0,2);
}
cout << float_to_string(metrics.mac[i].phr, 2);
cout << float_to_string(metrics.phy[i].ul.mcs, 2);
if(not isnan(metrics.phy[i].ul.mcs)) {
cout << float_to_string(MAX(0.1,metrics.phy[i].ul.mcs), 2);
} else {
cout << float_to_string(0,2);
}
if (metrics.mac[i].rx_brate > 0 && metrics_report_period) {
cout << float_to_eng_string((float) metrics.mac[i].rx_brate/metrics_report_period, 2);
cout << float_to_eng_string(MAX(0.1,(float) metrics.mac[i].rx_brate/metrics_report_period), 2);
} else {
cout << " " << float_to_string(0, 2);
cout << float_to_string(0, 2) << "";
}
if (metrics.mac[i].rx_pkts > 0 && metrics.mac[i].rx_errors > 0) {
cout << float_to_string((float) 100*metrics.mac[i].rx_errors/metrics.mac[i].rx_pkts, 1) << "%";
cout << float_to_string(MAX(0.1,(float) 100*metrics.mac[i].rx_errors/metrics.mac[i].rx_pkts), 1) << "%";
} else {
cout << float_to_string(0, 1) << "%";
}

View File

@ -39,10 +39,10 @@
using namespace std;
// Enable this to log SI
#define LOG_THIS(a) 1
//#define LOG_THIS(a) 1
// Enable this one to skip SI-RNTI
//#define LOG_THIS(rnti) (rnti != 0xFFFF)
#define LOG_THIS(rnti) (rnti != 0xFFFF)
/* Define GUI-related things */
@ -185,11 +185,11 @@ void phch_worker::stop()
free(signal_buffer_tx[p]);
}
}
pthread_mutex_unlock(&mutex);
pthread_mutex_destroy(&mutex);
} else {
printf("Warning could not stop properly PHY\n");
}
pthread_mutex_unlock(&mutex);
pthread_mutex_destroy(&mutex);
}
void phch_worker::reset()
{
@ -372,9 +372,10 @@ void phch_worker::work_imp()
subframe_cfg_t sf_cfg;
phy->get_sf_config(&sf_cfg, tti_tx_dl);// TODO difference between tti_tx_dl and t_tx_dl
pthread_mutex_lock(&mutex);
is_worker_running = true;
mac_interface_phy::ul_sched_t *ul_grants = phy->ul_grants;
mac_interface_phy::dl_sched_t *dl_grants = phy->dl_grants;
mac_interface_phy *mac = phy->mac;
@ -398,7 +399,7 @@ void phch_worker::work_imp()
decode_pucch();
// Get DL scheduling for the TX TTI from MAC
if(sf_cfg.sf_type == SUBFRAME_TYPE_REGULAR) {
if (mac->get_dl_sched(tti_tx_dl, &dl_grants[t_tx_dl]) < 0) {
Error("Getting DL scheduling from MAC\n");

View File

@ -37,18 +37,24 @@ void pdcp::init(rlc_interface_pdcp* rlc_, rrc_interface_pdcp* rrc_, gtpu_interfa
log_h = pdcp_log_;
pool = srslte::byte_buffer_pool::get_instance();
pthread_rwlock_init(&rwlock, NULL);
}
void pdcp::stop()
{
pthread_rwlock_wrlock(&rwlock);
for(std::map<uint32_t, user_interface>::iterator iter=users.begin(); iter!=users.end(); ++iter) {
rem_user((uint32_t) iter->first);
clear_user(&iter->second);
}
users.clear();
pthread_rwlock_unlock(&rwlock);
pthread_rwlock_destroy(&rwlock);
}
void pdcp::add_user(uint16_t rnti)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti) == 0) {
srslte::pdcp *obj = new srslte::pdcp;
obj->init(&users[rnti].rlc_itf, &users[rnti].rrc_itf, &users[rnti].gtpu_itf, log_h, RB_ID_SRB0, SECURITY_DIRECTION_DOWNLINK);
@ -61,20 +67,30 @@ void pdcp::add_user(uint16_t rnti)
users[rnti].gtpu_itf.gtpu = gtpu;
users[rnti].pdcp = obj;
}
pthread_rwlock_unlock(&rwlock);
}
// Private unlocked deallocation of user
void pdcp::clear_user(user_interface *ue)
{
ue->pdcp->stop();
delete ue->pdcp;
ue->pdcp = NULL;
}
void pdcp::rem_user(uint16_t rnti)
{
pthread_rwlock_wrlock(&rwlock);
if (users.count(rnti)) {
users[rnti].pdcp->stop();
delete users[rnti].pdcp;
users[rnti].pdcp = NULL;
clear_user(&users[rnti]);
users.erase(rnti);
}
pthread_rwlock_unlock(&rwlock);
}
void pdcp::add_bearer(uint16_t rnti, uint32_t lcid, srslte::srslte_pdcp_config_t cfg)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
if(rnti != SRSLTE_MRNTI){
users[rnti].pdcp->add_bearer(lcid, cfg);
@ -82,37 +98,45 @@ void pdcp::add_bearer(uint16_t rnti, uint32_t lcid, srslte::srslte_pdcp_config_t
users[rnti].pdcp->add_bearer_mrb(lcid, cfg);
}
}
pthread_rwlock_unlock(&rwlock);
}
void pdcp::reset(uint16_t rnti)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].pdcp->reset();
}
pthread_rwlock_unlock(&rwlock);
}
void pdcp::config_security(uint16_t rnti, uint32_t lcid, uint8_t* k_rrc_enc_, uint8_t* k_rrc_int_,
srslte::CIPHERING_ALGORITHM_ID_ENUM cipher_algo_,
srslte::INTEGRITY_ALGORITHM_ID_ENUM integ_algo_)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].pdcp->config_security(lcid, k_rrc_enc_, k_rrc_int_, cipher_algo_, integ_algo_);
users[rnti].pdcp->enable_integrity(lcid);
users[rnti].pdcp->enable_encryption(lcid);
}
pthread_rwlock_unlock(&rwlock);
}
void pdcp::write_pdu(uint16_t rnti, uint32_t lcid, srslte::byte_buffer_t* sdu)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].pdcp->write_pdu(lcid, sdu);
} else {
pool->deallocate(sdu);
}
pthread_rwlock_unlock(&rwlock);
}
void pdcp::write_sdu(uint16_t rnti, uint32_t lcid, srslte::byte_buffer_t* sdu)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
if(rnti != SRSLTE_MRNTI){
users[rnti].pdcp->write_sdu(lcid, sdu);
@ -122,6 +146,7 @@ void pdcp::write_sdu(uint16_t rnti, uint32_t lcid, srslte::byte_buffer_t* sdu)
} else {
pool->deallocate(sdu);
}
pthread_rwlock_unlock(&rwlock);
}
void pdcp::user_interface_gtpu::write_pdu(uint32_t lcid, srslte::byte_buffer_t *pdu)

View File

@ -40,48 +40,57 @@ void rlc::init(pdcp_interface_rlc* pdcp_, rrc_interface_rlc* rrc_, mac_interface
pool = srslte::byte_buffer_pool::get_instance();
pthread_rwlock_init(&rwlock, NULL);
}
void rlc::stop()
{
pthread_rwlock_wrlock(&rwlock);
for(std::map<uint32_t, user_interface>::iterator iter=users.begin(); iter!=users.end(); ++iter) {
rem_user((uint32_t) iter->first);
clear_user(&iter->second);
}
users.clear();
pthread_rwlock_unlock(&rwlock);
pthread_rwlock_destroy(&rwlock);
}
void rlc::add_user(uint16_t rnti)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti) == 0) {
srslte::rlc *obj = new srslte::rlc;
obj->init(&users[rnti], &users[rnti], &users[rnti], log_h, mac_timers, RB_ID_SRB0);
obj->init(&users[rnti], &users[rnti], &users[rnti], log_h, mac_timers, RB_ID_SRB0, RLC_TX_QUEUE_LEN);
users[rnti].rnti = rnti;
users[rnti].pdcp = pdcp;
users[rnti].rrc = rrc;
users[rnti].rlc = obj;
users[rnti].parent = this;
}
pthread_rwlock_unlock(&rwlock);
}
// Private unlocked deallocation of user
void rlc::clear_user(user_interface *ue)
{
ue->rlc->stop();
delete ue->rlc;
ue->rlc = NULL;
}
void rlc::rem_user(uint16_t rnti)
{
pthread_rwlock_wrlock(&rwlock);
if (users.count(rnti)) {
users[rnti].rlc->stop();
delete users[rnti].rlc;
users[rnti].rlc = NULL;
clear_user(&users[rnti]);
users.erase(rnti);
} else {
log_h->error("Removing rnti=0x%x. Already removed\n", rnti);
}
}
void rlc::reset(uint16_t rnti)
{
if (users.count(rnti)) {
users[rnti].rlc->reset();
}
pthread_rwlock_unlock(&rwlock);
}
void rlc::clear_buffer(uint16_t rnti)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].rlc->empty_queue();
for (int i=0;i<SRSLTE_N_RADIO_BEARERS;i++) {
@ -89,27 +98,34 @@ void rlc::clear_buffer(uint16_t rnti)
}
log_h->info("Cleared buffer rnti=0x%x\n", rnti);
}
pthread_rwlock_unlock(&rwlock);
}
void rlc::add_bearer(uint16_t rnti, uint32_t lcid)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].rlc->add_bearer(lcid);
}
pthread_rwlock_unlock(&rwlock);
}
void rlc::add_bearer(uint16_t rnti, uint32_t lcid, srslte::srslte_rlc_config_t cnfg)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].rlc->add_bearer(lcid, cnfg);
}
pthread_rwlock_unlock(&rwlock);
}
void rlc::add_bearer_mrb(uint16_t rnti, uint32_t lcid)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].rlc->add_bearer_mrb_enb(lcid);
}
pthread_rwlock_unlock(&rwlock);
}
void rlc::read_pdu_pcch(uint8_t* payload, uint32_t buffer_size)
@ -121,11 +137,13 @@ int rlc::read_pdu(uint16_t rnti, uint32_t lcid, uint8_t* payload, uint32_t nof_b
{
int ret;
uint32_t tx_queue;
if(users.count(rnti)){
if(rnti != SRSLTE_MRNTI){
pthread_rwlock_rdlock(&rwlock);
if(users.count(rnti)) {
if(rnti != SRSLTE_MRNTI) {
ret = users[rnti].rlc->read_pdu(lcid, payload, nof_bytes);
tx_queue = users[rnti].rlc->get_total_buffer_state(lcid);
}else{
} else {
ret = users[rnti].rlc->read_pdu_mch(lcid, payload, nof_bytes);
tx_queue = users[rnti].rlc->get_total_mch_buffer_state(lcid);
}
@ -135,15 +153,16 @@ int rlc::read_pdu(uint16_t rnti, uint32_t lcid, uint8_t* payload, uint32_t nof_b
uint32_t retx_queue = 0;
log_h->debug("Buffer state PDCP: rnti=0x%x, lcid=%d, tx_queue=%d\n", rnti, lcid, tx_queue);
mac->rlc_buffer_state(rnti, lcid, tx_queue, retx_queue);
return ret;
}else{
return SRSLTE_ERROR;
ret = SRSLTE_ERROR;
}
pthread_rwlock_unlock(&rwlock);
return ret;
}
void rlc::write_pdu(uint16_t rnti, uint32_t lcid, uint8_t* payload, uint32_t nof_bytes)
{
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
users[rnti].rlc->write_pdu(lcid, payload, nof_bytes);
@ -154,6 +173,7 @@ void rlc::write_pdu(uint16_t rnti, uint32_t lcid, uint8_t* payload, uint32_t nof
log_h->debug("Buffer state PDCP: rnti=0x%x, lcid=%d, tx_queue=%d\n", rnti, lcid, tx_queue);
mac->rlc_buffer_state(rnti, lcid, tx_queue, retx_queue);
}
pthread_rwlock_unlock(&rwlock);
}
void rlc::read_pdu_bcch_dlsch(uint32_t sib_index, uint8_t *payload)
@ -166,9 +186,11 @@ void rlc::write_sdu(uint16_t rnti, uint32_t lcid, srslte::byte_buffer_t* sdu)
{
uint32_t tx_queue;
pthread_rwlock_rdlock(&rwlock);
if (users.count(rnti)) {
if(rnti != SRSLTE_MRNTI){
users[rnti].rlc->write_sdu(lcid, sdu);
users[rnti].rlc->write_sdu_nb(lcid, sdu);
tx_queue = users[rnti].rlc->get_total_buffer_state(lcid);
}else {
users[rnti].rlc->write_sdu_mch(lcid, sdu);
@ -183,14 +205,17 @@ void rlc::write_sdu(uint16_t rnti, uint32_t lcid, srslte::byte_buffer_t* sdu)
} else {
pool->deallocate(sdu);
}
pthread_rwlock_unlock(&rwlock);
}
// Query whether the given bearer runs in RLC UM mode.
// Returns false for an unknown RNTI.
//
// Fixes a lock leak: the previous version returned from inside the
// critical section on both branches, so pthread_rwlock_unlock() was
// never reached and the read lock was held forever (deadlocking the
// next writer); it also contained dead code after "return false;".
// The single-exit form below guarantees the unlock always runs.
bool rlc::rb_is_um(uint16_t rnti, uint32_t lcid) {
  bool ret = false;
  pthread_rwlock_rdlock(&rwlock);
  if (users.count(rnti)) {
    ret = users[rnti].rlc->rb_is_um(lcid);
  }
  pthread_rwlock_unlock(&rwlock);
  return ret;
}
void rlc::user_interface::max_retx_attempted()

View File

@ -84,7 +84,8 @@ void rrc::stop()
{
if(running) {
running = false;
thread_cancel();
rrc_pdu p = {0, LCID_EXIT, NULL};
rx_pdu_queue.push(p);
wait_thread_finish();
}
act_monitor.stop();
@ -585,8 +586,7 @@ void rrc::process_release_complete(uint16_t rnti)
// There is no RRCReleaseComplete message from UE thus wait ~50 subframes for tx
usleep(50000);
}
// Save to call rem_user() directly without thread, because calling from private function
rem_user(rnti);
rem_user_thread(rnti);
} else {
rrc_log->error("Received ReleaseComplete for unknown rnti=0x%x\n", rnti);
}
@ -594,6 +594,7 @@ void rrc::process_release_complete(uint16_t rnti)
void rrc::rem_user(uint16_t rnti)
{
pthread_mutex_lock(&user_mutex);
if (users.count(rnti) == 1) {
rrc_log->console("Disconnecting rnti=0x%x.\n", rnti);
rrc_log->info("Disconnecting rnti=0x%x.\n", rnti);
@ -603,11 +604,6 @@ void rrc::rem_user(uint16_t rnti)
mac->ue_rem(rnti); // MAC handles PHY
gtpu->rem_user(rnti);
// Wait enough time
pthread_mutex_unlock(&user_mutex);
usleep(50000);
pthread_mutex_lock(&user_mutex);
// Now remove RLC and PDCP
rlc->rem_user(rnti);
pdcp->rem_user(rnti);
@ -615,11 +611,13 @@ void rrc::rem_user(uint16_t rnti)
// And deallocate resources from RRC
users[rnti].sr_free();
users[rnti].cqi_free();
users.erase(rnti);
rrc_log->info("Removed user rnti=0x%x\n", rnti);
} else {
rrc_log->error("Removing user rnti=0x%x (does not exist)\n", rnti);
}
pthread_mutex_unlock(&user_mutex);
}
void rrc::config_mac()
@ -778,7 +776,6 @@ void rrc::run_thread()
}
// Mutex these calls even though it's a private function
pthread_mutex_lock(&user_mutex);
if (users.count(p.rnti) == 1) {
switch(p.lcid)
{
@ -803,6 +800,9 @@ void rrc::run_thread()
users[p.rnti].set_activity();
}
break;
case LCID_EXIT:
rrc_log->info("Exiting thread\n");
break;
default:
rrc_log->error("Rx PDU with invalid bearer id: %d", p.lcid);
break;
@ -810,7 +810,6 @@ void rrc::run_thread()
} else {
rrc_log->warning("Discarding PDU for removed rnti=0x%x\n", p.rnti);
}
pthread_mutex_unlock(&user_mutex);
}
}
@ -863,7 +862,7 @@ void rrc::activity_monitor::run_thread()
parent->s1ap->user_release(rem_rnti, LIBLTE_S1AP_CAUSERADIONETWORK_USER_INACTIVITY);
} else {
if(rem_rnti != SRSLTE_MRNTI)
parent->rem_user(rem_rnti);
parent->rem_user_thread(rem_rnti);
}
}
pthread_mutex_unlock(&parent->user_mutex);