From 6a11030ae14e8ff461bc20da7257e3619527ad7d Mon Sep 17 00:00:00 2001
From: Andre Puschmann
Date: Wed, 3 Jul 2019 15:58:39 +0200
Subject: [PATCH] enb: fix race condition in workers overwriting the DL tx buffer

this adds a separate tx buffer for each HARQ PID
---
 srsenb/hdr/stack/mac/ue.h   | 14 +++++++++-----
 srsenb/src/stack/mac/mac.cc | 15 +++++++++++----
 srsenb/src/stack/mac/ue.cc  | 20 +++++++++++++-------
 3 files changed, 33 insertions(+), 16 deletions(-)
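The core of the change (ue.h hunk below) is to replace the single per-TB payload buffer, which all HARQ processes shared, with one buffer per (HARQ PID, transport block) pair, so PHY workers that build PDUs for different HARQ processes in parallel never write into the same memory. With the old layout, the worker preparing TTI n+1 could clear and refill the buffer while the worker for TTI n was still handing the same pointer to the PHY, which is the overwrite named in the subject line. The standalone sketch below is illustrative only and not srsENB code; NOF_HARQ, MAX_TB, byte_buffer and build_pdu() are stand-ins for SRSLTE_FDD_NOF_HARQ, SRSLTE_MAX_TB, srslte::byte_buffer_t and ue::generate_pdu().

/*
 * Illustrative sketch only, not srsENB code: why one tx buffer per HARQ PID
 * removes the race. NOF_HARQ, MAX_TB and build_pdu() stand in for
 * SRSLTE_FDD_NOF_HARQ, SRSLTE_MAX_TB and ue::generate_pdu().
 */
#include <cstddef>
#include <cstdint>
#include <thread>
#include <vector>

constexpr uint32_t NOF_HARQ = 8; // DL HARQ processes in FDD
constexpr uint32_t MAX_TB   = 2; // transport blocks per grant

struct byte_buffer {
  std::vector<uint8_t> data;
  void clear() { data.clear(); }
};

// Before the patch: one buffer per TB index, shared by all HARQ processes.
// Two workers building PDUs for different TTIs (and thus different HARQ PIDs)
// would both clear() and refill buffer_shared[tb], corrupting each other's payload.
byte_buffer buffer_shared[MAX_TB];

// After the patch: one buffer per (HARQ PID, TB) pair, so workers operating on
// different HARQ processes never touch the same memory.
byte_buffer buffer_per_pid[NOF_HARQ][MAX_TB];

void build_pdu(uint32_t harq_pid, uint32_t tb_idx, uint8_t fill, std::size_t len)
{
  byte_buffer& buf = buffer_per_pid[harq_pid][tb_idx];
  buf.clear();
  buf.data.assign(len, fill); // stands in for mac_msg_dl.init_tx()/write_packet()
}

int main()
{
  // Two PHY workers handling consecutive TTIs use different HARQ PIDs and
  // therefore disjoint buffers.
  std::thread w0(build_pdu, 0u, 0u, uint8_t{0xAA}, std::size_t{128});
  std::thread w1(build_pdu, 1u, 0u, uint8_t{0xBB}, std::size_t{256});
  w0.join();
  w1.join();
  return 0;
}

For unicast grants the buffer index comes directly from the DCI HARQ PID that the scheduler already assigns, as the mac.cc hunk below shows.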
diff --git a/srsenb/hdr/stack/mac/ue.h b/srsenb/hdr/stack/mac/ue.h
index 394fcf2de..5546fb676 100644
--- a/srsenb/hdr/stack/mac/ue.h
+++ b/srsenb/hdr/stack/mac/ue.h
@@ -46,10 +46,14 @@ public:
   void set_tti(uint32_t tti);
   void config(uint16_t rnti, uint32_t nof_prb, sched_interface *sched, rrc_interface_mac *rrc_, rlc_interface_mac *rlc, srslte::log *log_h);
-  uint8_t* generate_pdu(uint32_t tb_idx, sched_interface::dl_sched_pdu_t pdu[sched_interface::MAX_RLC_PDU_LIST],
-                        uint32_t nof_pdu_elems, uint32_t grant_size);
-  uint8_t* generate_mch_pdu(sched_interface::dl_pdu_mch_t sched, uint32_t nof_pdu_elems, uint32_t grant_size);
-
+  uint8_t* generate_pdu(uint32_t harq_pid,
+                        uint32_t tb_idx,
+                        sched_interface::dl_sched_pdu_t pdu[sched_interface::MAX_RLC_PDU_LIST],
+                        uint32_t nof_pdu_elems,
+                        uint32_t grant_size);
+  uint8_t*
+  generate_mch_pdu(uint32_t harq_pid, sched_interface::dl_pdu_mch_t sched, uint32_t nof_pdu_elems, uint32_t grant_size);
+
   srslte_softbuffer_tx_t* get_tx_softbuffer(uint32_t harq_process, uint32_t tb_idx);
   srslte_softbuffer_rx_t* get_rx_softbuffer(uint32_t tti);

@@ -107,7 +111,7 @@ private:
   uint8_t* pending_buffers[NOF_RX_HARQ_PROCESSES];

   // For DL there are two buffers, one for each Transport block
-  srslte::byte_buffer_t tx_payload_buffer[SRSLTE_MAX_TB];
+  srslte::byte_buffer_t tx_payload_buffer[SRSLTE_FDD_NOF_HARQ][SRSLTE_MAX_TB];

   // For UL there are multiple buffers per PID and are managed by pdu_queue
   srslte::pdu_queue pdus;

diff --git a/srsenb/src/stack/mac/mac.cc b/srsenb/src/stack/mac/mac.cc
index dac91ee88..96b32435e 100644
--- a/srsenb/src/stack/mac/mac.cc
+++ b/srsenb/src/stack/mac/mac.cc
@@ -570,8 +570,11 @@ int mac::get_dl_sched(uint32_t tti, dl_sched_t *dl_sched_res)
         if (sched_result.data[i].nof_pdu_elems[tb] > 0) {
           /* Get PDU if it's a new transmission */
-          dl_sched_res->pdsch[n].data[tb] = ue_db[rnti]->generate_pdu(
-              tb, sched_result.data[i].pdu[tb], sched_result.data[i].nof_pdu_elems[tb], sched_result.data[i].tbs[tb]);
+          dl_sched_res->pdsch[n].data[tb] = ue_db[rnti]->generate_pdu(sched_result.data[i].dci.pid,
+                                                                      tb,
+                                                                      sched_result.data[i].pdu[tb],
+                                                                      sched_result.data[i].nof_pdu_elems[tb],
+                                                                      sched_result.data[i].tbs[tb]);

           if (!dl_sched_res->pdsch[n].data[tb]) {
             Error("Error! PDU was not generated (rnti=0x%04x, tb=%d)\n", rnti, tb);
@@ -711,7 +714,10 @@ int mac::get_mch_sched(uint32_t tti, bool is_mcch, dl_sched_t* dl_sched_res)
     mch.pdu[mch.num_mtch_sched].lcid   = 0;
     mch.pdu[mch.num_mtch_sched].nbytes = current_mcch_length;
     dl_sched_res->pdsch[0].dci.rnti    = SRSLTE_MRNTI;
-    dl_sched_res->pdsch[0].data[0] = ue_db[SRSLTE_MRNTI]->generate_mch_pdu(mch, mch.num_mtch_sched + 1, mcs.tbs / 8);
+
+    // we use TTI % HARQ to make sure we use different buffers for consecutive TTIs to avoid races between PHY workers
+    dl_sched_res->pdsch[0].data[0] =
+        ue_db[SRSLTE_MRNTI]->generate_mch_pdu(tti % SRSLTE_FDD_NOF_HARQ, mch, mch.num_mtch_sched + 1, mcs.tbs / 8);
   } else {
     uint32_t current_lcid = 1;
@@ -733,7 +739,8 @@ int mac::get_mch_sched(uint32_t tti, bool is_mcch, dl_sched_t* dl_sched_res)
       mch.mtch_sched[0].mtch_payload  = mtch_payload_buffer;
       dl_sched_res->pdsch[0].dci.rnti = SRSLTE_MRNTI;
       if (bytes_received) {
-        dl_sched_res->pdsch[0].data[0] = ue_db[SRSLTE_MRNTI]->generate_mch_pdu(mch, 1, mcs_data.tbs / 8);
+        dl_sched_res->pdsch[0].data[0] =
+            ue_db[SRSLTE_MRNTI]->generate_mch_pdu(tti % SRSLTE_FDD_NOF_HARQ, mch, 1, mcs_data.tbs / 8);
       }
     } else {
       dl_sched_res->pdsch[0].dci.rnti = 0;

diff --git a/srsenb/src/stack/mac/ue.cc b/srsenb/src/stack/mac/ue.cc
index edbbb70db..4d77fd677 100644
--- a/srsenb/src/stack/mac/ue.cc
+++ b/srsenb/src/stack/mac/ue.cc
@@ -409,14 +409,17 @@ void ue::allocate_ce(srslte::sch_pdu *pdu, uint32_t lcid)
   }
 }

-uint8_t* ue::generate_pdu(uint32_t tb_idx, sched_interface::dl_sched_pdu_t pdu[sched_interface::MAX_RLC_PDU_LIST],
-                          uint32_t nof_pdu_elems, uint32_t grant_size)
+uint8_t* ue::generate_pdu(uint32_t harq_pid,
+                          uint32_t tb_idx,
+                          sched_interface::dl_sched_pdu_t pdu[sched_interface::MAX_RLC_PDU_LIST],
+                          uint32_t nof_pdu_elems,
+                          uint32_t grant_size)
 {
   uint8_t *ret = NULL;
   pthread_mutex_lock(&mutex);
   if (rlc) {
-    tx_payload_buffer->clear();
-    mac_msg_dl.init_tx(&tx_payload_buffer[tb_idx], grant_size, false);
+    tx_payload_buffer[harq_pid][tb_idx].clear();
+    mac_msg_dl.init_tx(&tx_payload_buffer[harq_pid][tb_idx], grant_size, false);
     for (uint32_t i=0;i<nof_pdu_elems;i++) {
@@ ... @@
-uint8_t* ue::generate_mch_pdu(sched_interface::dl_pdu_mch_t sched, uint32_t nof_pdu_elems, uint32_t grant_size)
+uint8_t* ue::generate_mch_pdu(uint32_t harq_pid,
+                              sched_interface::dl_pdu_mch_t sched,
+                              uint32_t nof_pdu_elems,
+                              uint32_t grant_size)
 {
   uint8_t* ret = NULL;
   pthread_mutex_lock(&mutex);
-  tx_payload_buffer->clear();
-  mch_mac_msg_dl.init_tx(&tx_payload_buffer[0], grant_size);
+  tx_payload_buffer[harq_pid][0].clear();
+  mch_mac_msg_dl.init_tx(&tx_payload_buffer[harq_pid][0], grant_size);

   for (uint32_t i = 0; i < nof_pdu_elems; i++) {
     if (sched.pdu[i].lcid == srslte::mch_subh::MCH_SCHED_INFO) {
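The MCH path in get_mch_sched() has no scheduler-assigned HARQ PID to use, so the patch derives the buffer index from the TTI instead, as the added in-line comment states: consecutive TTIs, which are exactly the ones that different PHY workers may process concurrently, always select different buffers. A minimal sketch of that mapping follows, assuming SRSLTE_FDD_NOF_HARQ is the 8 DL HARQ processes of FDD; NOF_HARQ and pick_mch_buffer_index() are hypothetical stand-ins, not srsENB code.

/*
 * Illustrative sketch only, not srsENB code: the MCH grant carries no HARQ PID,
 * so the buffer index is derived from the TTI. NOF_HARQ stands in for
 * SRSLTE_FDD_NOF_HARQ and pick_mch_buffer_index() is a hypothetical helper.
 */
#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr unsigned NOF_HARQ = 8;

unsigned pick_mch_buffer_index(uint32_t tti)
{
  return tti % NOF_HARQ; // consecutive TTIs always land on different indices
}

int main()
{
  // The racing case is two PHY workers processing consecutive TTIs in parallel;
  // the modulo mapping guarantees they select distinct MCH tx buffers.
  for (uint32_t tti = 0; tti < 10240; ++tti) { // 10240 = LTE TTI wrap-around
    assert(pick_mch_buffer_index(tti) != pick_mch_buffer_index(tti + 1));
  }
  std::printf("tti %% %u keeps consecutive TTIs on distinct buffers\n", NOF_HARQ);
  return 0;
}

Reusing SRSLTE_FDD_NOF_HARQ as the modulus also keeps the MCH path on the same tx_payload_buffer array introduced in ue.h, so a single per-UE allocation serves both the unicast and the MCH case.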