diff --git a/lib/include/srslte/common/block_queue.h b/lib/include/srslte/common/block_queue.h
index 1d17e9339..3df22b40d 100644
--- a/lib/include/srslte/common/block_queue.h
+++ b/lib/include/srslte/common/block_queue.h
@@ -49,11 +49,23 @@ template<typename myobj>
 class block_queue {
 
 public:
+
+  // Callback functions for mutexed operations inside pop/push methods
+  class call_mutexed_itf {
+  public:
+    virtual void popping(myobj obj) = 0;
+    virtual void pushing(myobj obj) = 0;
+  };
+
   block_queue(int capacity = -1) {
     pthread_mutex_init(&mutex, NULL);
     pthread_cond_init(&cv_empty, NULL);
     pthread_cond_init(&cv_full, NULL);
     this->capacity = capacity;
+    mutexed_callback = NULL;
+  }
+  void set_mutexed_itf(call_mutexed_itf *itf) {
+    mutexed_callback = itf;
   }
   void resize(int new_capacity) {
     capacity = new_capacity;
@@ -71,6 +83,9 @@ public:
       }
     }
     q.push(value);
+    if (mutexed_callback) {
+      mutexed_callback->pushing(value);
+    }
     pthread_cond_signal(&cv_empty);
     pthread_mutex_unlock(&mutex);
     return true;
@@ -94,6 +109,9 @@ public:
       *value = q.front();
       q.pop();
     }
+    if (mutexed_callback) {
+      mutexed_callback->popping(*value);
+    }
     pthread_cond_signal(&cv_full);
     pthread_mutex_unlock(&mutex);
     return true;
@@ -106,6 +124,9 @@ public:
     }
     myobj value = q.front();
     q.pop();
+    if (mutexed_callback) {
+      mutexed_callback->popping(value);
+    }
     pthread_cond_signal(&cv_full);
     pthread_mutex_unlock(&mutex);
     return value;
@@ -136,6 +157,7 @@ private:
   pthread_mutex_t mutex;
   pthread_cond_t cv_empty;
   pthread_cond_t cv_full;
+  call_mutexed_itf *mutexed_callback;
   int capacity;
 };
 
diff --git a/lib/include/srslte/interfaces/ue_interfaces.h b/lib/include/srslte/interfaces/ue_interfaces.h
index 9545d9424..a4fb2b2cb 100644
--- a/lib/include/srslte/interfaces/ue_interfaces.h
+++ b/lib/include/srslte/interfaces/ue_interfaces.h
@@ -529,6 +529,8 @@ typedef struct {
   float cfo_loop_bw_ref;
   float cfo_loop_ref_min;
   float cfo_loop_pss_tol;
+  float sfo_ema;
+  uint32_t sfo_correct_period;
   uint32_t cfo_loop_pss_conv;
   uint32_t cfo_ref_mask;
   bool average_subframe_enabled;
diff --git a/lib/include/srslte/phy/ue/ue_sync.h b/lib/include/srslte/phy/ue/ue_sync.h
index 3d9807aea..8eb18f636 100644
--- a/lib/include/srslte/phy/ue/ue_sync.h
+++ b/lib/include/srslte/phy/ue/ue_sync.h
@@ -61,6 +61,8 @@
 #include "srslte/phy/common/timestamp.h"
 #include "srslte/phy/io/filesource.h"
 
+#define DEFAULT_SAMPLE_OFFSET_CORRECT_PERIOD 10
+#define DEFAULT_SFO_EMA_COEFF 0.1
 #define DEFAULT_CFO_BW_PSS 0.05
 #define DEFAULT_CFO_PSS_MIN 400  // typical bias of PSS estimation.
@@ -140,8 +142,7 @@ typedef struct SRSLTE_API {
   int next_rf_sample_offset;
   int last_sample_offset;
   float mean_sample_offset;
-  float mean_sfo;
-  uint32_t sample_offset_correct_period;
+  uint32_t sample_offset_correct_period;
   float sfo_ema;
@@ -248,8 +249,11 @@ SRSLTE_API float srslte_ue_sync_get_sfo(srslte_ue_sync_t *q);
 
 SRSLTE_API int srslte_ue_sync_get_last_sample_offset(srslte_ue_sync_t *q);
 
-SRSLTE_API void srslte_ue_sync_set_sample_offset_correct_period(srslte_ue_sync_t *q,
-                                                                uint32_t nof_subframes);
+SRSLTE_API void srslte_ue_sync_set_sfo_correct_period(srslte_ue_sync_t *q,
+                                                      uint32_t nof_subframes);
+
+SRSLTE_API void srslte_ue_sync_set_sfo_ema(srslte_ue_sync_t *q,
+                                           float ema_coefficient);
 
 SRSLTE_API void srslte_ue_sync_get_last_timestamp(srslte_ue_sync_t *q,
                                                   srslte_timestamp_t *timestamp);
diff --git a/lib/include/srslte/upper/rlc_tx_queue.h b/lib/include/srslte/upper/rlc_tx_queue.h
index 13dd04e75..08edae0fe 100644
--- a/lib/include/srslte/upper/rlc_tx_queue.h
+++ b/lib/include/srslte/upper/rlc_tx_queue.h
@@ -41,42 +41,38 @@
 
 namespace srslte {
 
-class rlc_tx_queue
+class rlc_tx_queue : public block_queue<byte_buffer_t*>::call_mutexed_itf
 {
 public:
   rlc_tx_queue(uint32_t capacity = 128) : queue((int) capacity)
   {
     unread_bytes = 0;
+    queue.set_mutexed_itf(this);
+  }
+  // increase/decrease unread_bytes inside push/pop mutexed operations
+  void pushing(byte_buffer_t *msg) {
+    unread_bytes += msg->N_bytes;
+  }
+  void popping(byte_buffer_t *msg) {
+    if (unread_bytes > msg->N_bytes) {
+      unread_bytes -= msg->N_bytes;
+    } else {
+      unread_bytes = 0;
+    }
   }
   void write(byte_buffer_t *msg)
   {
     queue.push(msg);
-
-    unread_bytes += msg->N_bytes;
   }
 
   void read(byte_buffer_t **msg)
   {
     byte_buffer_t *m = queue.wait_pop();
     *msg = m;
-    if (unread_bytes > (*msg)->N_bytes) {
-      unread_bytes -= (*msg)->N_bytes;
-    } else {
-      unread_bytes = 0;
-    }
   }
 
   bool try_read(byte_buffer_t **msg)
   {
-    if (queue.try_pop(msg)) {
-      if (unread_bytes > (*msg)->N_bytes) {
-        unread_bytes -= (*msg)->N_bytes;
-      } else {
-        unread_bytes = 0;
-      }
-      return true;
-    } else {
-      return false;
-    }
+    return queue.try_pop(msg);
   }
 
   void resize(uint32_t capacity)
@@ -110,10 +106,10 @@ public:
   }
 
 private:
-  bool is_empty() { return queue.empty(); }
+  bool is_empty() { return queue.empty(); }
 
   block_queue<byte_buffer_t*> queue;
-  uint32_t unread_bytes;
+  uint32_t unread_bytes;
 };
 
 } // namespace srslte
diff --git a/lib/src/phy/rf/rf_uhd_imp.c b/lib/src/phy/rf/rf_uhd_imp.c
index 1c09b7990..e3476b219 100644
--- a/lib/src/phy/rf/rf_uhd_imp.c
+++ b/lib/src/phy/rf/rf_uhd_imp.c
@@ -60,7 +60,9 @@ typedef struct {
   float current_master_clock;
 
   bool async_thread_running;
-  pthread_t async_thread;
+  pthread_t async_thread;
+
+  pthread_mutex_t tx_mutex;
 } rf_uhd_handler_t;
 
 void suppress_handler(const char *x)
@@ -374,6 +376,8 @@ int rf_uhd_open_multi(char *args, void **h, uint32_t nof_channels)
     }
     handler->devname = NULL;
 
+    pthread_mutex_init(&handler->tx_mutex, NULL);
+
     // Initialize handler
     handler->uhd_error_handler = NULL;
 
@@ -819,9 +823,13 @@ int rf_uhd_send_timed_multi(void *h,
                             bool has_time_spec,
                             bool blocking,
                             bool is_start_of_burst,
-                            bool is_end_of_burst) {
+                            bool is_end_of_burst)
+{
   rf_uhd_handler_t* handler = (rf_uhd_handler_t*) h;
-
+
+  pthread_mutex_lock(&handler->tx_mutex);
+  int ret = -1;
+
   /* Resets the USRP time FIXME: this might cause problems for burst transmissions */
   if (!has_time_spec && is_start_of_burst && handler->nof_tx_channels > 1) {
     uhd_usrp_set_time_now(handler->usrp, 0, 0, 0);
@@ -866,15 +874,18 @@ int rf_uhd_send_timed_multi(void *h,
                                              tx_samples, &handler->tx_md, 3.0,
                                              &txd_samples);
       if (error) {
         fprintf(stderr, "Error sending to UHD: %d\n", error);
-        return -1;
+        goto unlock;
       }
       // Increase time spec
       uhd_tx_metadata_add_time_spec(&handler->tx_md, txd_samples/handler->tx_rate);
       n += txd_samples;
       trials++;
     } while (n < nsamples && trials < 100);
-    return nsamples;
+
+    ret = nsamples;
+
   } else {
+
     const void *buffs_ptr[4];
     for (int i = 0; i < 4; i++) {
       buffs_ptr[i] = data[i];
     }
@@ -885,9 +896,14 @@ int rf_uhd_send_timed_multi(void *h,
     uhd_error error = uhd_tx_streamer_send(handler->tx_stream, buffs_ptr, nsamples, &handler->tx_md, 3.0, &txd_samples);
     if (error) {
       fprintf(stderr, "Error sending to UHD: %d\n", error);
-      return -1;
+      goto unlock;
     }
-    return txd_samples;
+
+    ret = txd_samples;
+  }
+unlock:
+  pthread_mutex_unlock(&handler->tx_mutex);
+  return ret;
 }
diff --git a/lib/src/phy/ue/ue_sync.c b/lib/src/phy/ue/ue_sync.c
index 38355af92..a0ae46990 100644
--- a/lib/src/phy/ue/ue_sync.c
+++ b/lib/src/phy/ue/ue_sync.c
@@ -41,11 +41,9 @@
 #define MAX_TIME_OFFSET 128
 
-#define TRACK_MAX_LOST 100
+#define TRACK_MAX_LOST 10
 #define TRACK_FRAME_SIZE 32
 #define FIND_NOF_AVG_FRAMES 4
 
-#define DEFAULT_SAMPLE_OFFSET_CORRECT_PERIOD 0
-#define DEFAULT_SFO_EMA_COEFF 0.1
 
 cf_t dummy_buffer0[15*2048/2];
@@ -386,7 +384,7 @@ int srslte_ue_sync_set_cell(srslte_ue_sync_t *q, srslte_cell_t cell)
     srslte_sync_set_em_alpha(&q->sfind, 1);
     srslte_sync_set_threshold(&q->sfind, 3.0);
 
-    srslte_sync_set_em_alpha(&q->strack, 0.2);
+    srslte_sync_set_em_alpha(&q->strack, 0.0);
     srslte_sync_set_threshold(&q->strack, 1.2);
   }
@@ -464,14 +462,14 @@ void srslte_ue_sync_set_cfo_tol(srslte_ue_sync_t *q, float cfo_tol) {
 }
 
 float srslte_ue_sync_get_sfo(srslte_ue_sync_t *q) {
-  return q->mean_sfo/5e-3;
+  return q->mean_sample_offset/5e-3;
 }
 
 int srslte_ue_sync_get_last_sample_offset(srslte_ue_sync_t *q) {
   return q->last_sample_offset;
 }
 
-void srslte_ue_sync_set_sample_offset_correct_period(srslte_ue_sync_t *q, uint32_t nof_subframes) {
+void srslte_ue_sync_set_sfo_correct_period(srslte_ue_sync_t *q, uint32_t nof_subframes) {
   q->sample_offset_correct_period = nof_subframes;
 }
@@ -563,7 +561,7 @@ static int track_peak_ok(srslte_ue_sync_t *q, uint32_t track_idx) {
   uint32_t frame_idx = 0;
   if (q->sample_offset_correct_period) {
     frame_idx = q->frame_ok_cnt%q->sample_offset_correct_period;
-    q->mean_sample_offset += (float) q->last_sample_offset/q->sample_offset_correct_period;
+    q->mean_sample_offset = SRSLTE_VEC_EMA((float) q->last_sample_offset, q->mean_sample_offset, q->sfo_ema);
   } else {
     q->mean_sample_offset = q->last_sample_offset;
   }
@@ -589,23 +587,12 @@ static int track_peak_ok(srslte_ue_sync_t *q, uint32_t track_idx) {
     if (!frame_idx) {
       // Adjust RF sampling time based on the mean sampling offset
       q->next_rf_sample_offset = (int) round(q->mean_sample_offset);
-
-      // Reset PSS averaging if correcting every a period longer than 1
-      if (q->sample_offset_correct_period > 1) {
-        srslte_sync_reset(&q->strack);
-      }
-
-      // Compute SFO based on mean sample offset
-      if (q->sample_offset_correct_period) {
-        q->mean_sample_offset /= q->sample_offset_correct_period;
-      }
-      q->mean_sfo = SRSLTE_VEC_EMA(q->mean_sample_offset, q->mean_sfo, q->sfo_ema);
 
       if (q->next_rf_sample_offset) {
-        INFO("Time offset adjustment: %d samples (%.2f), mean SFO: %.2f Hz, %.5f samples/5-sf, ema=%f, length=%d\n",
+        INFO("Time offset adjustment: %d samples (%.2f), mean SFO: %.2f Hz, ema=%f, length=%d\n",
              q->next_rf_sample_offset, q->mean_sample_offset,
             srslte_ue_sync_get_sfo(q),
-            q->mean_sfo, q->sfo_ema, q->sample_offset_correct_period);
+            q->sfo_ema, q->sample_offset_correct_period);
       }
       q->mean_sample_offset = 0;
     }
diff --git a/lib/src/upper/rlc_um.cc b/lib/src/upper/rlc_um.cc
index 7094ecd6b..f31492207 100644
--- a/lib/src/upper/rlc_um.cc
+++ b/lib/src/upper/rlc_um.cc
@@ -577,9 +577,16 @@ void rlc_um::reassemble_rx_sdus()
         break;
       }
 
+      // Check available space in SDU
+      if ((uint32_t)len > rx_sdu->get_tailroom()) {
+        log->error("Dropping PDU %d due to buffer mis-alignment (current segment len %d B, received %d B)\n", vr_ur, rx_sdu->N_bytes, len);
+        rx_sdu->reset();
+        goto clean_up_rx_window;
+      }
+
       log->debug("Concatenating %d bytes in to current length %d. rx_window remaining bytes=%d, vr_ur_in_rx_sdu=%d, vr_ur=%d, rx_mod=%d, last_mod=%d\n",
                  len, rx_sdu->N_bytes, rx_window[vr_ur].buf->N_bytes, vr_ur_in_rx_sdu, vr_ur, cfg.rx_mod, (vr_ur_in_rx_sdu+1)%cfg.rx_mod);
-      memmove(&rx_sdu->msg[rx_sdu->N_bytes], rx_window[vr_ur].buf->msg, len);
+      memcpy(&rx_sdu->msg[rx_sdu->N_bytes], rx_window[vr_ur].buf->msg, len);
       rx_sdu->N_bytes += len;
       rx_window[vr_ur].buf->msg += len;
       rx_window[vr_ur].buf->N_bytes -= len;
diff --git a/lib/test/upper/rlc_stress_test.cc b/lib/test/upper/rlc_stress_test.cc
index b138b2b90..3a6f40a99 100644
--- a/lib/test/upper/rlc_stress_test.cc
+++ b/lib/test/upper/rlc_stress_test.cc
@@ -279,8 +279,10 @@ private:
     while(run_enable) {
       byte_buffer_t *pdu = byte_buffer_pool::get_instance()->allocate("rlc_tester::run_thread");
       if (!pdu) {
-        printf("Fatal Error: Could not allocate PDU in rlc_tester::run_thread\n");
-        exit(-1);
+        printf("Error: Could not allocate PDU in rlc_tester::run_thread\n\n\n");
+        // backoff for a bit
+        usleep(1000);
+        continue;
       }
       for (uint32_t i = 0; i < SDU_SIZE; i++) {
         pdu->msg[i] = sn;
diff --git a/srsenb/hdr/mac/scheduler_metric.h b/srsenb/hdr/mac/scheduler_metric.h
index 4685f932a..9c40eda5c 100644
--- a/srsenb/hdr/mac/scheduler_metric.h
+++ b/srsenb/hdr/mac/scheduler_metric.h
@@ -34,7 +34,7 @@ namespace srsenb {
   class dl_metric_rr : public sched::metric_dl
   {
   public:
-    void new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rb, uint32_t nof_rb, uint32_t nof_ctrl_symbols, uint32_t tti);
+    void new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rbg, uint32_t nof_rbg, uint32_t nof_ctrl_symbols, uint32_t tti);
     dl_harq_proc* get_user_allocation(sched_ue *user);
 
   private:
@@ -49,14 +49,14 @@
     uint32_t count_rbg(uint32_t mask);
     uint32_t calc_rbg_mask(bool mask[25]);
 
-    bool used_rb[MAX_RBG];
+    bool used_rbg[MAX_RBG];
 
     uint32_t current_tti;
-    uint32_t total_rb;
-    uint32_t used_rb_mask;
+    uint32_t total_rbg;
+    uint32_t used_rbg_mask;
     uint32_t nof_ctrl_symbols;
-    uint32_t available_rb;
+    uint32_t available_rbg;
 };
 
 class ul_metric_rr : public sched::metric_ul
diff --git a/srsenb/hdr/mac/scheduler_ue.h b/srsenb/hdr/mac/scheduler_ue.h
index aaa4b49b7..aafad89a6 100644
--- a/srsenb/hdr/mac/scheduler_ue.h
+++ b/srsenb/hdr/mac/scheduler_ue.h
@@ -93,11 +93,15 @@ public:
    * Functions used by scheduler metric objects
    *******************************************************/
-  uint32_t get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols);
-  uint32_t get_required_prb_ul(uint32_t req_bytes);
+  uint32_t get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols);
+  uint32_t get_required_prb_ul(uint32_t req_bytes);
+  uint32_t prb_to_rbg(uint32_t nof_prb);
+  uint32_t rgb_to_prb(uint32_t nof_rbg);
+
   uint32_t get_pending_dl_new_data(uint32_t tti);
   uint32_t get_pending_ul_new_data(uint32_t tti);
+  uint32_t get_pending_dl_new_data_total(uint32_t tti);
   dl_harq_proc *get_pending_dl_harq(uint32_t tti);
   dl_harq_proc *get_empty_dl_harq();
@@ -147,11 +151,10 @@ private:
   static bool bearer_is_ul(ue_bearer_t *lch);
   static bool bearer_is_dl(ue_bearer_t *lch);
-
+
   bool is_first_dl_tx();
-
-
-  sched_interface::ue_cfg_t cfg;
+
+  sched_interface::ue_cfg_t cfg;
   srslte_cell_t cell;
 
   srslte::log* log_h;
@@ -175,7 +178,8 @@ private:
   uint32_t max_mcs_dl;
   uint32_t max_mcs_ul;
   int fixed_mcs_ul;
-  int fixed_mcs_dl;
+  int fixed_mcs_dl;
+  uint32_t P;
 
   int next_tpc_pusch;
   int next_tpc_pucch;
diff --git a/srsenb/src/mac/scheduler.cc b/srsenb/src/mac/scheduler.cc
index c8f99246c..2c3dee85a 100644
--- a/srsenb/src/mac/scheduler.cc
+++ b/srsenb/src/mac/scheduler.cc
@@ -610,7 +610,7 @@ int sched::dl_sched_rar(dl_sched_rar_t rar[MAX_RAR_LIST])
   int nof_rar_elems = 0;
   for (uint32_t i=0;i
-    if (pending_rar[i].buf_rar > 0 && avail_rbg >= rar_n_rb)
+    if (pending_rar[i].buf_rar > 0 && avail_rbg >= (uint32_t)ceil((float)rar_n_rb/P))
     {
       /* Check if we are still within the RAR window, otherwise discard it */
      if (current_tti <= (pending_rar[i].rar_tti + cfg.prach_rar_window + 3)%10240 && current_tti >= pending_rar[i].rar_tti + 3)
@@ -664,8 +664,8 @@ int sched::dl_sched_rar(dl_sched_rar_t rar[MAX_RAR_LIST])
         if (generate_format1a(start_rbg*P, rar_n_rb, buf_rar, 0, &rar[nof_rar_elems].dci) >= 0) {
           rar[nof_rar_elems].tbs = buf_rar;
           nof_rar_elems++;
-          avail_rbg -= rar_n_rb;
-          start_rbg += rar_n_rb;
+          avail_rbg -= (uint32_t)ceil((float)rar_n_rb/P);
+          start_rbg += (uint32_t)ceil((float)rar_n_rb/P);
         } else {
           Error("SCHED: Allocating Format1A grant\n");
         }
diff --git a/srsenb/src/mac/scheduler_metric.cc b/srsenb/src/mac/scheduler_metric.cc
index f6ec18555..ff12d6641 100644
--- a/srsenb/src/mac/scheduler_metric.cc
+++ b/srsenb/src/mac/scheduler_metric.cc
@@ -47,9 +47,9 @@ uint32_t dl_metric_rr::calc_rbg_mask(bool mask[MAX_RBG]) {
   // Build RBG bitmask
   uint32_t rbg_bitmask = 0;
-  for (uint32_t n=0;nget_rbgmask());
   }
   uint32_t pending_data = user->get_pending_dl_new_data(current_tti);
-  return user->get_required_prb_dl(pending_data, nof_ctrl_symbols);
+  return user->prb_to_rbg(user->get_required_prb_dl(pending_data, nof_ctrl_symbols));
 }
 
-void dl_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rb, uint32_t nof_rb, uint32_t nof_ctrl_symbols_, uint32_t tti)
+void dl_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rbg, uint32_t nof_rbg, uint32_t nof_ctrl_symbols_, uint32_t tti)
 {
-  total_rb = start_rb+nof_rb;
-  for (uint32_t i=0;i 0;i++) {
-    if (used_rb[i]) {
       mask_bit[i] = false;
     } else {
       mask_bit[i] = true;
+  for (uint32_t i=0;i 0;i++) {
+    if (used_rbg[i]) {
       mask_bit[i] = false;
     } else {
       mask_bit[i] = true;
@@ -126,24 +126,24 @@ bool dl_metric_rr::new_allocation(uint32_t nof_rbg, uint32_t *rbgmask) {
 }
 
 void dl_metric_rr::update_allocation(uint32_t new_mask) {
-  used_rb_mask |= new_mask;
-  for (uint32_t n=0;nget_pending_dl_new_data(current_tti);
   dl_harq_proc *h = user->get_pending_dl_harq(current_tti);
+  uint32_t req_bytes = user->get_pending_dl_new_data_total(current_tti);
 
   // Schedule retx if we have space
 #if ASYNC_DL_SCHED
@@ -160,7 +160,7 @@ dl_harq_proc* dl_metric_rr::apply_user_allocation(sched_ue *user) {
       // If not, try to find another mask in the current tti
       uint32_t nof_rbg = count_rbg(retx_mask);
-      if (nof_rbg < available_rb) {
+      if (nof_rbg < available_rbg) {
        if (new_allocation(nof_rbg, &retx_mask)) {
          update_allocation(retx_mask);
          h->set_rbgmask(retx_mask);
@@ -176,10 +176,10 @@ dl_harq_proc* dl_metric_rr::apply_user_allocation(sched_ue *user) {
   if (h && h->is_empty()) {
 #endif
     // Allocate resources based on pending data
-    if (pending_data) {
-      uint32_t pending_rb = user->get_required_prb_dl(pending_data, nof_ctrl_symbols);
+    if (req_bytes) {
+      uint32_t pending_rbg = user->prb_to_rbg(user->get_required_prb_dl(req_bytes, nof_ctrl_symbols));
       uint32_t newtx_mask = 0;
-      new_allocation(pending_rb, &newtx_mask);
+      new_allocation(pending_rbg, &newtx_mask);
       if (newtx_mask) {
         update_allocation(newtx_mask);
         h->set_rbgmask(newtx_mask);
diff --git a/srsenb/src/mac/scheduler_ue.cc b/srsenb/src/mac/scheduler_ue.cc
index f9245ac9b..2eacf06f0 100644
--- a/srsenb/src/mac/scheduler_ue.cc
+++ b/srsenb/src/mac/scheduler_ue.cc
@@ -71,6 +71,7 @@ void sched_ue::set_cfg(uint16_t rnti_, sched_interface::ue_cfg_t *cfg_, sched_in
   rnti  = rnti_;
   log_h = log_h_;
   memcpy(&cell, &cell_cfg->cell, sizeof(srslte_cell_t));
+  P = srslte_ra_type0_P(cell.nof_prb);
 
   max_mcs_dl = 28;
   max_mcs_ul = 28;
@@ -710,6 +711,22 @@ uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
   return pending_data;
 }
 
+/// Use this function in the dl-metric to get the bytes to be scheduled. It accounts for the UE data,
+/// the RAR resources, and headers
+/// \param tti
+/// \return number of bytes to be allocated
+uint32_t sched_ue::get_pending_dl_new_data_total(uint32_t tti)
+{
+  uint32_t req_bytes = get_pending_dl_new_data(tti);
+  if(req_bytes>0) {
+    req_bytes += (req_bytes < 128) ? 2 : 3; // consider the header
+    if(is_first_dl_tx()) {
+      req_bytes += 6; // count for RAR
+    }
+  }
+  return req_bytes;
+}
+
 uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
 {
   uint32_t pending_data = 0;
@@ -746,32 +763,39 @@ uint32_t sched_ue::get_pending_ul_old_data()
   return pending_data;
 }
 
-
-uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols)
+uint32_t sched_ue::prb_to_rbg(uint32_t nof_prb)
 {
-  int mcs = 0;
-  uint32_t nbytes = 0;
-  uint32_t n = 0;
-  if (req_bytes == 0) {
-    return 0;
-  }
-
-  uint32_t nof_re = 0;
-  int tbs = 0;
-  for (n=1;n<=cell.nof_prb && nbytes < req_bytes;n++) {
-    nof_re = srslte_ra_dl_approx_nof_re(cell, n, nof_ctrl_symbols);
-    if (fixed_mcs_dl < 0) {
-      tbs = alloc_tbs_dl(n, nof_re, 0, &mcs);
+  return (uint32_t) ceil((float) nof_prb / P);
+}
+
+uint32_t sched_ue::rgb_to_prb(uint32_t nof_rbg)
+{
+  return P*nof_rbg;
+}
+
+uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols)
+{
+  int mcs = 0;
+  uint32_t nof_re = 0;
+  int tbs = 0;
+
+  uint32_t nbytes = 0;
+  uint32_t n;
+  for (n=0; n < cell.nof_prb && nbytes < req_bytes; ++n) {
+    nof_re = srslte_ra_dl_approx_nof_re(cell, n+1, nof_ctrl_symbols);
+    if(fixed_mcs_dl < 0) {
+      tbs = alloc_tbs_dl(n+1, nof_re, 0, &mcs);
     } else {
-      tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_dl), n)/8;
+      tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_dl), n+1)/8;
     }
     if (tbs > 0) {
-      nbytes = tbs;
+      nbytes = tbs;
     } else if (tbs < 0) {
-      return 0;
+      return 0;
    }
   }
-  return n;
+
+  return n;
 }
 
 uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
diff --git a/srsue/hdr/phy/phch_common.h b/srsue/hdr/phy/phch_common.h
index 82da05e7d..239fafa10 100644
--- a/srsue/hdr/phy/phch_common.h
+++ b/srsue/hdr/phy/phch_common.h
@@ -96,8 +96,8 @@ typedef struct {
   float avg_rssi_dbm;
   float last_radio_rssi;
   float rx_gain_offset;
-  float avg_snr_db_cqi;
-  float avg_snr_db_sync;
+  float avg_snr_db_cqi;
+  float avg_snr_db_sync;
   float avg_noise;
 
   bool pcell_meas_enabled;
@@ -232,11 +232,7 @@ typedef struct {
   bool is_mch_subframe(subframe_cfg_t *cfg, uint32_t phy_tti);
   bool is_mcch_subframe(subframe_cfg_t *cfg, uint32_t phy_tti);
-
-  };
-
-
-
+  };
 } // namespace srsue
 
 #endif // SRSUE_PDCH_COMMON_H
diff --git a/srsue/src/main.cc b/srsue/src/main.cc
index a5306cc99..e4b0d3e4e 100644
--- a/srsue/src/main.cc
+++ b/srsue/src/main.cc
@@ -191,6 +191,14 @@ void parse_args(all_args_t *args, int argc, char *argv[]) {
      bpo::value<int>(&args->expert.phy.cqi_fixed)->default_value(-1),
      "Fixes the reported CQI to a constant value. Default disabled.")
 
+    ("expert.sfo_correct_period",
+     bpo::value<uint32_t>(&args->expert.phy.sfo_correct_period)->default_value(DEFAULT_SAMPLE_OFFSET_CORRECT_PERIOD),
+     "Period in ms to correct sample time")
+
+    ("expert.sfo_ema",
+     bpo::value<float>(&args->expert.phy.sfo_ema)->default_value(DEFAULT_SFO_EMA_COEFF),
+     "EMA coefficient to average sample offsets used to compute SFO")
+
     ("expert.snr_ema_coeff",
      bpo::value<float>(&args->expert.phy.snr_ema_coeff)->default_value(0.1),
      "Sets the SNR exponential moving average coefficient (Default 0.1)")
diff --git a/srsue/src/phy/phch_common.cc b/srsue/src/phy/phch_common.cc
index a1ba50dca..ae0fdb75d 100644
--- a/srsue/src/phy/phch_common.cc
+++ b/srsue/src/phy/phch_common.cc
@@ -347,9 +347,7 @@ void phch_common::reset() {
   sr_last_tx_tti = -1;
   cur_pusch_power = 0;
   avg_snr_db_cqi = 0;
-  avg_snr_db_sync = 0;
   avg_rsrp = 0;
-  avg_rsrp_cqi = 0;
   avg_rsrp_dbm = 0;
   avg_rsrq_db = 0;
diff --git a/srsue/src/phy/phch_recv.cc b/srsue/src/phy/phch_recv.cc
index cbbf5c18c..ddeccd183 100644
--- a/srsue/src/phy/phch_recv.cc
+++ b/srsue/src/phy/phch_recv.cc
@@ -686,6 +686,10 @@ void phch_recv::set_ue_sync_opts(srslte_ue_sync_t *q, float cfo)
     srslte_sync_set_cfo_cp_enable(&q->sfind, false, 0);
   }
 
+  // Set SFO ema and correct period
+  srslte_ue_sync_set_sfo_correct_period(q, worker_com->args->sfo_correct_period);
+  srslte_ue_sync_set_sfo_ema(q, worker_com->args->sfo_ema);
+
   sss_alg_t sss_alg = SSS_FULL;
   if (!worker_com->args->sss_algorithm.compare("diff")) {
     sss_alg = SSS_DIFF;
diff --git a/srsue/src/phy/phch_worker.cc b/srsue/src/phy/phch_worker.cc
index 1b75c9ddf..dafd04ab3 100644
--- a/srsue/src/phy/phch_worker.cc
+++ b/srsue/src/phy/phch_worker.cc
@@ -479,13 +479,13 @@ void phch_worker::work_imp()
   }
 
   if (chest_ok) {
-    if (phy->avg_rsrp_sync_dbm > -130.0 && phy->avg_snr_db_sync > -10.0) {
+    if (phy->avg_rsrp_dbm > -130.0 && phy->avg_snr_db_cqi > -6.0) {
       log_h->debug("SNR=%.1f dB, RSRP=%.1f dBm sync=in-sync from channel estimator\n",
-                   phy->avg_snr_db_sync, phy->avg_rsrp_sync_dbm);
+                   phy->avg_snr_db_cqi, phy->avg_rsrp_dbm);
       chest_loop->in_sync();
     } else {
       log_h->warning("SNR=%.1f dB RSRP=%.1f dBm, sync=out-of-sync from channel estimator\n",
-                     phy->avg_snr_db_sync, phy->avg_rsrp_sync_dbm);
+                     phy->avg_snr_db_cqi, phy->avg_rsrp_dbm);
       chest_loop->out_of_sync();
     }
   }
@@ -1577,7 +1577,7 @@ void phch_worker::update_measurements()
     }
 
     // Average RSRP taken from CRS
-    float rsrp_lin = srslte_chest_dl_get_rsrp_neighbour(&ue_dl.chest);
+    float rsrp_lin = srslte_chest_dl_get_rsrp(&ue_dl.chest);
     if (isnormal(rsrp_lin)) {
       if (!phy->avg_rsrp) {
         phy->avg_rsrp = SRSLTE_VEC_EMA(rsrp_lin, phy->avg_rsrp, snr_ema_coeff);
@@ -1617,28 +1617,7 @@ void phch_worker::update_measurements()
       }
     }
 
-    // To compute CQI use RSRP measurements from resource elements in RS since is more robust to time offset
-    float rsrp_lin_cqi = srslte_chest_dl_get_rsrp(&ue_dl.chest);
-    if (isnormal(rsrp_lin_cqi)) {
-      if (!phy->avg_rsrp_cqi) {
-        phy->avg_rsrp_cqi = SRSLTE_VEC_EMA(rsrp_lin_cqi, phy->avg_rsrp_cqi, snr_ema_coeff);
-      } else {
-        phy->avg_rsrp_cqi = rsrp_lin_cqi;
-      }
-    }
-    float rsrp_sync_dbm = 10*log10(rsrp_lin_cqi) + 30 - phy->rx_gain_offset;
-    if (isnormal(rsrp_sync_dbm)) {
-      if (!phy->avg_rsrp_sync_dbm) {
-        phy->avg_rsrp_sync_dbm = rsrp_sync_dbm;
-      } else {
-        phy->avg_rsrp_sync_dbm = SRSLTE_VEC_EMA(rsrp_sync_dbm, phy->avg_rsrp_sync_dbm, snr_ema_coeff);
-      }
-    }
-
-    // We compute 2 SNR metrics, 1 for deciding in-sync/out-of-sync and another for CQI measurements
-    phy->avg_snr_db_cqi = 10*log10(phy->avg_rsrp_cqi/phy->avg_noise); // this for CQI
-    phy->avg_snr_db_sync = 10*log10(phy->avg_rsrp/phy->avg_noise); // this for sync
-
+    phy->avg_snr_db_cqi = 10*log10(phy->avg_rsrp/phy->avg_noise);
 
     // Store metrics
     dl_metrics.n = phy->avg_noise;
diff --git a/srsue/ue.conf.example b/srsue/ue.conf.example
index ec2c132fe..0c7b1b75d 100644
--- a/srsue/ue.conf.example
+++ b/srsue/ue.conf.example
@@ -153,7 +153,9 @@ enable = false
 #                      good for long channels. For best performance at highest SNR reduce it to 1.
 # sfo_correct_disable: Disables phase correction before channel estimation to compensate for
 #                      sampling frequency offset. Default is enabled.
-# sss_algorithm:       Selects the SSS estimation algorithm. Can choose between
+# sfo_ema:             EMA coefficient to average sample offsets used to compute SFO
+# sfo_correct_period:  Period in ms to correct sample time to adjust for SFO
+# sss_algorithm:       Selects the SSS estimation algorithm. Can choose between
 #                      {full, partial, diff}.
 # estimator_fil_auto:  The channel estimator smooths the channel estimate with an adaptative filter.
 # estimator_fil_stddev: Sets the channel estimator smooth gaussian filter standard deviation.
@@ -211,6 +213,8 @@ enable = false
 #equalizer_mode = mmse
 #time_correct_period = 5
 #sfo_correct_disable = false
+#sfo_ema = 0.1
+#sfo_correct_period = 10
 #sss_algorithm = full
 #estimator_fil_auto = false
 #estimator_fil_stddev = 1.0
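The mutexed-callback hook added to block_queue.h can also be exercised outside of rlc_tx_queue. Below is a minimal sketch assuming the patched srslte/common/block_queue.h is on the include path and that block_queue lives in the srslte namespace (as the unqualified use in rlc_tx_queue.h suggests); the item_counter class is a hypothetical example, mirroring how rlc_tx_queue keeps unread_bytes in step with the queue contents.

```cpp
#include <cstdio>
#include "srslte/common/block_queue.h"

// Hypothetical consumer of the new call_mutexed_itf hook: keep a running
// count of queued items, updated under the same mutex as the push/pop
// itself (the pattern rlc_tx_queue now uses for unread_bytes).
class item_counter : public srslte::block_queue<int>::call_mutexed_itf {
public:
  item_counter() : count(0) {}
  void pushing(int obj) { count++; } // called from push() with the queue mutex held
  void popping(int obj) { count--; } // called from pop()/wait_pop() with the queue mutex held
  int count;
};

int main() {
  srslte::block_queue<int> q(16);
  item_counter counter;
  q.set_mutexed_itf(&counter);

  q.push(1);
  q.push(2);
  printf("queued: %d\n", counter.count);                   // 2

  int front = q.wait_pop();
  printf("popped %d, left: %d\n", front, counter.count);   // popped 1, left: 1
  return 0;
}
```

Because pushing()/popping() run inside the queue's own lock, the counter can never drift from the actual queue occupancy, which is exactly the race the rlc_tx_queue change removes.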