enb: fix race condition in workers overwriting the DL tx buffer

This adds a dedicated tx buffer for each HARQ PID.
Author: Andre Puschmann
Date:   2019-07-03 15:58:39 +02:00
Parent: 92b4b168fe
Commit: 6a11030ae1
3 changed files with 33 additions and 16 deletions
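For context: srsENB runs several PHY workers concurrently, each encoding a different TTI. With a single DL tx buffer per transport block, the MAC could rebuild the payload for TTI n+1 while the worker for TTI n was still reading it. A simplified sketch of the buffer layout before and after (the buffer type and array sizes here are assumptions, not the real srsLTE definitions):

    #include <cstdint>

    struct byte_buffer_t { uint8_t msg[8192]; };   // simplified stand-in
    static const uint32_t SRSLTE_FDD_NOF_HARQ = 8; // assumed FDD value
    static const uint32_t SRSLTE_MAX_TB       = 2;

    // Before: one buffer per transport block, shared by all HARQ processes.
    byte_buffer_t tx_payload_buffer_before[SRSLTE_MAX_TB];

    // After: one buffer per (HARQ PID, transport block) pair; a PDU stays
    // untouched until the same HARQ process is scheduled again.
    byte_buffer_t tx_payload_buffer_after[SRSLTE_FDD_NOF_HARQ][SRSLTE_MAX_TB];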


@@ -46,10 +46,14 @@ public:
   void set_tti(uint32_t tti);
   void config(uint16_t rnti, uint32_t nof_prb, sched_interface *sched, rrc_interface_mac *rrc_, rlc_interface_mac *rlc, srslte::log *log_h);
-  uint8_t* generate_pdu(uint32_t tb_idx, sched_interface::dl_sched_pdu_t pdu[sched_interface::MAX_RLC_PDU_LIST],
-                        uint32_t nof_pdu_elems, uint32_t grant_size);
-  uint8_t* generate_mch_pdu(sched_interface::dl_pdu_mch_t sched, uint32_t nof_pdu_elems, uint32_t grant_size);
+  uint8_t* generate_pdu(uint32_t harq_pid,
+                        uint32_t tb_idx,
+                        sched_interface::dl_sched_pdu_t pdu[sched_interface::MAX_RLC_PDU_LIST],
+                        uint32_t nof_pdu_elems,
+                        uint32_t grant_size);
+  uint8_t*
+  generate_mch_pdu(uint32_t harq_pid, sched_interface::dl_pdu_mch_t sched, uint32_t nof_pdu_elems, uint32_t grant_size);
   srslte_softbuffer_tx_t* get_tx_softbuffer(uint32_t harq_process, uint32_t tb_idx);
   srslte_softbuffer_rx_t* get_rx_softbuffer(uint32_t tti);
@@ -107,7 +111,7 @@ private:
   uint8_t* pending_buffers[NOF_RX_HARQ_PROCESSES];
   // For DL there are two buffers, one for each Transport block
-  srslte::byte_buffer_t tx_payload_buffer[SRSLTE_MAX_TB];
+  srslte::byte_buffer_t tx_payload_buffer[SRSLTE_FDD_NOF_HARQ][SRSLTE_MAX_TB];
   // For UL there are multiple buffers per PID and are managed by pdu_queue
   srslte::pdu_queue pdus;
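The extra dimension just declared costs little memory; a back-of-the-envelope sketch, assuming 8 FDD HARQ processes and 2 transport blocks:

    #include <cstdint>

    static const uint32_t SRSLTE_FDD_NOF_HARQ = 8; // assumed FDD value
    static const uint32_t SRSLTE_MAX_TB       = 2;

    // Before: 2 DL payload buffers per UE. After: 8 * 2 = 16 per UE, one
    // per (HARQ PID, TB) pair, which is what removes the buffer sharing
    // between consecutive TTIs.
    static const uint32_t dl_buffers_per_ue = SRSLTE_FDD_NOF_HARQ * SRSLTE_MAX_TB;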


@@ -570,8 +570,11 @@ int mac::get_dl_sched(uint32_t tti, dl_sched_t *dl_sched_res)
         if (sched_result.data[i].nof_pdu_elems[tb] > 0) {
           /* Get PDU if it's a new transmission */
-          dl_sched_res->pdsch[n].data[tb] = ue_db[rnti]->generate_pdu(
-              tb, sched_result.data[i].pdu[tb], sched_result.data[i].nof_pdu_elems[tb], sched_result.data[i].tbs[tb]);
+          dl_sched_res->pdsch[n].data[tb] = ue_db[rnti]->generate_pdu(sched_result.data[i].dci.pid,
+                                                                      tb,
+                                                                      sched_result.data[i].pdu[tb],
+                                                                      sched_result.data[i].nof_pdu_elems[tb],
+                                                                      sched_result.data[i].tbs[tb]);
           if (!dl_sched_res->pdsch[n].data[tb]) {
             Error("Error! PDU was not generated (rnti=0x%04x, tb=%d)\n", rnti, tb);
@@ -711,7 +714,10 @@ int mac::get_mch_sched(uint32_t tti, bool is_mcch, dl_sched_t* dl_sched_res)
       mch.pdu[mch.num_mtch_sched].lcid = 0;
       mch.pdu[mch.num_mtch_sched].nbytes = current_mcch_length;
       dl_sched_res->pdsch[0].dci.rnti = SRSLTE_MRNTI;
-      dl_sched_res->pdsch[0].data[0] = ue_db[SRSLTE_MRNTI]->generate_mch_pdu(mch, mch.num_mtch_sched + 1, mcs.tbs / 8);
+      // we use TTI % HARQ to make sure we use different buffers for consecutive TTIs to avoid races between PHY workers
+      dl_sched_res->pdsch[0].data[0] =
+          ue_db[SRSLTE_MRNTI]->generate_mch_pdu(tti % SRSLTE_FDD_NOF_HARQ, mch, mch.num_mtch_sched + 1, mcs.tbs / 8);
     } else {
       uint32_t current_lcid = 1;
@@ -733,7 +739,8 @@ int mac::get_mch_sched(uint32_t tti, bool is_mcch, dl_sched_t* dl_sched_res)
         mch.mtch_sched[0].mtch_payload = mtch_payload_buffer;
         dl_sched_res->pdsch[0].dci.rnti = SRSLTE_MRNTI;
         if (bytes_received) {
-          dl_sched_res->pdsch[0].data[0] = ue_db[SRSLTE_MRNTI]->generate_mch_pdu(mch, 1, mcs_data.tbs / 8);
+          dl_sched_res->pdsch[0].data[0] =
+              ue_db[SRSLTE_MRNTI]->generate_mch_pdu(tti % SRSLTE_FDD_NOF_HARQ, mch, 1, mcs_data.tbs / 8);
         }
       } else {
         dl_sched_res->pdsch[0].dci.rnti = 0;
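The two call sites above select the buffer index differently: unicast PDSCH reuses the HARQ PID the scheduler put into the DCI, while MCH has no per-UE HARQ process and falls back to the TTI. A small sketch of both rules (helper names invented, SRSLTE_FDD_NOF_HARQ assumed to be 8):

    #include <cstdint>

    static const uint32_t SRSLTE_FDD_NOF_HARQ = 8; // assumed FDD value

    // Unicast: the scheduler result already names the HARQ process.
    uint32_t unicast_buf_idx(uint32_t dci_pid) { return dci_pid; }

    // MCH: the TTI picks the slot; the modulo guarantees consecutive TTIs
    // never share a buffer (e.g. tti 100 -> 4, 101 -> 5, ...).
    uint32_t mch_buf_idx(uint32_t tti) { return tti % SRSLTE_FDD_NOF_HARQ; }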


@@ -409,14 +409,17 @@ void ue::allocate_ce(srslte::sch_pdu *pdu, uint32_t lcid)
   }
 }
 
-uint8_t* ue::generate_pdu(uint32_t tb_idx, sched_interface::dl_sched_pdu_t pdu[sched_interface::MAX_RLC_PDU_LIST],
-                          uint32_t nof_pdu_elems, uint32_t grant_size)
+uint8_t* ue::generate_pdu(uint32_t harq_pid,
+                          uint32_t tb_idx,
+                          sched_interface::dl_sched_pdu_t pdu[sched_interface::MAX_RLC_PDU_LIST],
+                          uint32_t nof_pdu_elems,
+                          uint32_t grant_size)
 {
   uint8_t *ret = NULL;
   pthread_mutex_lock(&mutex);
   if (rlc) {
-    tx_payload_buffer->clear();
-    mac_msg_dl.init_tx(&tx_payload_buffer[tb_idx], grant_size, false);
+    tx_payload_buffer[harq_pid][tb_idx].clear();
+    mac_msg_dl.init_tx(&tx_payload_buffer[harq_pid][tb_idx], grant_size, false);
     for (uint32_t i=0;i<nof_pdu_elems;i++) {
       if (pdu[i].lcid <= srslte::sch_subh::PHR_REPORT) {
         allocate_sdu(&mac_msg_dl, pdu[i].lcid, pdu[i].nbytes);
@@ -435,12 +438,15 @@ uint8_t* ue::generate_pdu(uint32_t tb_idx, sched_interface::dl_sched_pdu_t pdu[s
   return ret;
 }
 
-uint8_t* ue::generate_mch_pdu(sched_interface::dl_pdu_mch_t sched, uint32_t nof_pdu_elems, uint32_t grant_size)
+uint8_t* ue::generate_mch_pdu(uint32_t harq_pid,
+                              sched_interface::dl_pdu_mch_t sched,
+                              uint32_t nof_pdu_elems,
+                              uint32_t grant_size)
 {
   uint8_t *ret = NULL;
   pthread_mutex_lock(&mutex);
-  tx_payload_buffer->clear();
-  mch_mac_msg_dl.init_tx(&tx_payload_buffer[0], grant_size);
+  tx_payload_buffer[harq_pid][0].clear();
+  mch_mac_msg_dl.init_tx(&tx_payload_buffer[harq_pid][0], grant_size);
   for (uint32_t i = 0; i < nof_pdu_elems; i++) {
     if (sched.pdu[i].lcid == srslte::mch_subh::MCH_SCHED_INFO) {
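Note that the mutex in both generators only serializes PDU assembly; the returned pointer is consumed by a PHY worker after the lock is released, so its validity rests entirely on the per-PID slot. A hypothetical sketch of that contract (names invented, not the srsLTE API):

    #include <cstdint>
    #include <pthread.h>

    struct byte_buffer_t { uint8_t msg[8192]; };
    static const uint32_t NOF_HARQ = 8; // assumed FDD value
    static byte_buffer_t tx_buf[NOF_HARQ][2];
    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

    // MAC side: assemble under the lock, hand out a raw pointer.
    uint8_t* build_pdu(uint32_t harq_pid, uint32_t tb_idx)
    {
      pthread_mutex_lock(&mtx);
      // ... write the MAC PDU into tx_buf[harq_pid][tb_idx].msg ...
      uint8_t* ret = tx_buf[harq_pid][tb_idx].msg;
      pthread_mutex_unlock(&mtx);
      return ret;
    }

    // PHY side: encodes from ret *after* the unlock. This is safe only
    // because build_pdu() will not touch the same (harq_pid, tb_idx) slot
    // again until that HARQ process is rescheduled, several TTIs later.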