Merge branch 'next' into agpl_next

Codebot 2021-10-26 10:32:37 +02:00 committed by Your Name
commit 2cda208936
108 changed files with 984 additions and 453 deletions

View File

@ -1,6 +1,6 @@
<!--
Before filing an issue, search for solutions here:
- srsRAN users mailing list (http://www.softwareradiosystems.com/mailman/listinfo/srsran-users)
- srsRAN users mailing list (https://lists.srsran.com/mailman/listinfo/srsran-users)
-->
## Issue Description ##

View File

@ -75,10 +75,10 @@ option(ENABLE_SOAPYSDR "Enable SoapySDR" ON)
option(ENABLE_SKIQ "Enable Sidekiq SDK" ON)
option(ENABLE_ZEROMQ "Enable ZeroMQ" ON)
option(ENABLE_HARDSIM "Enable support for SIM cards" ON)
option(ENABLE_TTCN3 "Enable TTCN3 test binaries" OFF)
option(ENABLE_ZMQ_TEST "Enable ZMQ based E2E tests" OFF)
option(BUILD_STATIC "Attempt to statically link external deps" OFF)
option(RPATH "Enable RPATH" OFF)
option(ENABLE_ASAN "Enable gcc/clang address sanitizer" OFF)
@ -86,12 +86,12 @@ option(ENABLE_GCOV "Enable gcc/clang address sanitizer" OFF)
option(ENABLE_MSAN "Enable clang memory sanitizer" OFF)
option(ENABLE_TSAN "Enable clang thread sanitizer" OFF)
option(ENABLE_TIDY "Enable clang tidy" OFF)
option(USE_LTE_RATES "Use standard LTE sampling rates" OFF)
option(USE_MKL "Use MKL instead of fftw" OFF)
option(ENABLE_TIMEPROF "Enable time profiling" ON)
option(FORCE_32BIT "Add flags to force 32 bit compilation" OFF)
option(ENABLE_SRSLOG_TRACING "Enable event tracing using srslog" OFF)
@ -105,7 +105,7 @@ option(ENABLE_ALL_TEST "Enable all unit/component test" OFF)
# it automatically so it is necessary to use the gcc wrappers of the compiler
# (gcc-ar, gcc-nm, ...).
option(BUILD_WITH_LTO "Enable LTO (experimental)" OFF)
if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
set(GCC_ARCH armv8-a CACHE STRING "GCC compile for specific architecture.")
message(STATUS "Detected aarch64 processor")
@ -390,7 +390,7 @@ if(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
ADD_C_COMPILER_FLAG_IF_AVAILABLE("-march=${GCC_ARCH}" HAVE_MARCH_${GCC_ARCH})
ADD_CXX_COMPILER_FLAG_IF_AVAILABLE("-march=${GCC_ARCH}" HAVE_MARCH_${GCC_ARCH})
if (HAVE_AVX2)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpmath=sse -mavx2 -DLV_HAVE_AVX2 -DLV_HAVE_AVX -DLV_HAVE_SSE")
else (HAVE_AVX2)
@ -549,6 +549,11 @@ if(CMAKE_C_COMPILER_ID MATCHES "GNU")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --param large-function-growth=1600")
endif(CMAKE_C_COMPILER_ID MATCHES "GNU")
if (EXTRA_TERM_TIMEOUT_S)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSRSRAN_TERM_TIMEOUT_S=${EXTRA_TERM_TIMEOUT_S}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSRSRAN_TERM_TIMEOUT_S=${EXTRA_TERM_TIMEOUT_S}")
endif (EXTRA_TERM_TIMEOUT_S)
message(STATUS "CMAKE_C_FLAGS is ${CMAKE_C_FLAGS}")
message(STATUS "CMAKE_CXX_FLAGS is ${CMAKE_CXX_FLAGS}")

View File

@ -21,4 +21,4 @@ For license details, see LICENSE file.
Support
=======
Mailing list: http://www.srs.io/mailman/listinfo/srslte-users
Mailing list: https://lists.srsran.com/mailman/listinfo/srsran-users

View File

@ -27,6 +27,7 @@
#include "srsran/common/rwlock_guard.h"
#include "srsran/srslog/srslog.h"
#include <map>
#include <unordered_map>
#include <stdint.h>
namespace srsran {
@ -159,7 +160,7 @@ public:
private:
srslog::basic_logger& logger;
srsenb::rnti_map_t<srsran::detail::ue_bearer_manager_impl> users_map;
std::unordered_map<uint16_t, srsran::detail::ue_bearer_manager_impl> users_map;
};
} // namespace srsenb
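The per-UE container switches here from srsenb::rnti_map_t to a plain std::unordered_map keyed by RNTI (the GTPU header further down makes the same switch for ue_teidin_db). A minimal sketch of the find/emplace pattern this enables, standard C++ only, with a stand-in value type; the enb_bearer_manager::add_eps_bearer hunk later in this commit follows the same shape:

#include <cstdint>
#include <unordered_map>

struct bearer_state {}; // stand-in for srsran::detail::ue_bearer_manager_impl

void add_user_if_missing(std::unordered_map<uint16_t, bearer_state>& users, uint16_t rnti)
{
  auto it = users.find(rnti);
  if (it == users.end()) {
    // emplace() returns std::pair<iterator, bool>; .second is false if the key already existed
    auto p = users.emplace(rnti, bearer_state{});
    if (!p.second) {
      return; // insertion failed
    }
    it = p.first;
  }
  // it->second now refers to the per-UE state, whether found or just created
}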

View File

@ -85,6 +85,7 @@ public:
bool bind_addr(const char* bind_addr_str, int port);
bool connect_to(const char* dest_addr_str, int dest_port, sockaddr_in* dest_sockaddr = nullptr);
bool start_listen();
bool open_socket(net_utils::addr_family ip, net_utils::socket_type socket_type, net_utils::protocol_type protocol);
int get_socket() const { return sockfd; };
@ -95,8 +96,7 @@ protected:
namespace net_utils {
bool sctp_init_client(unique_socket* socket, net_utils::socket_type socktype, const char* bind_addr_str, int bind_port);
bool sctp_init_server(unique_socket* socket, net_utils::socket_type socktype, const char* bind_addr_str, int port);
bool sctp_init_socket(unique_socket* socket, net_utils::socket_type socktype, const char* bind_addr_str, int bind_port);
} // namespace net_utils

View File

@ -59,7 +59,9 @@ const char* __tsan_default_suppressions()
// Lock order inversion issues in these functions, ignore it as it uses rw locks in read mode
"deadlock:srsenb::mac::rlc_buffer_state\n"
"deadlock:srsenb::mac::snr_info\n"
"deadlock:srsenb::mac::ack_info\n";
"deadlock:srsenb::mac::ack_info\n"
"deadlock:srsenb::rlc::rb_is_um\n"
"deadlock:srsenb::mac::sr_detected\n";
}
#ifdef __cplusplus

View File

@ -40,7 +40,7 @@ public:
class pdcp_interface_rrc
{
public:
virtual void set_enabled(uint16_t rnti, uint32_t lcid, bool enable) = 0;
virtual void set_enabled(uint16_t rnti, uint32_t lcid, bool enable) = 0;
virtual void reset(uint16_t rnti) = 0;
virtual void add_user(uint16_t rnti) = 0;
virtual void rem_user(uint16_t rnti) = 0;

View File

@ -50,6 +50,7 @@ public:
virtual void discard_sdu(uint16_t rnti, uint32_t lcid, uint32_t sn) = 0;
virtual bool rb_is_um(uint16_t rnti, uint32_t lcid) = 0;
virtual bool sdu_queue_is_full(uint16_t rnti, uint32_t lcid) = 0;
virtual bool is_suspended(uint16_t rnti, uint32_t lcid) = 0;
};
// RLC interface for RRC
@ -65,6 +66,7 @@ public:
virtual void write_sdu(uint16_t rnti, uint32_t lcid, srsran::unique_byte_buffer_t sdu) = 0;
virtual bool has_bearer(uint16_t rnti, uint32_t lcid) = 0;
virtual bool suspend_bearer(uint16_t rnti, uint32_t lcid) = 0;
virtual bool is_suspended(uint16_t rnti, uint32_t lcid) = 0;
virtual bool resume_bearer(uint16_t rnti, uint32_t lcid) = 0;
virtual void reestablish(uint16_t rnti) = 0;
};
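The new is_suspended() query complements the existing suspend_bearer()/resume_bearer() pair, and the PDCP hunk further down adds the matching write_sdu() guard that drops SDUs while a bearer is suspended. A hedged sketch of the intended RRC-side call order; the enclosing interface name (rlc_interface_rrc) and namespace lie outside the hunk and are assumed here:

// illustrative re-establishment sequence, assuming the eNB-side rlc_interface_rrc shown above
void reestablish_bearer(srsenb::rlc_interface_rrc& rlc, uint16_t rnti, uint32_t lcid)
{
  rlc.suspend_bearer(rnti, lcid); // is_suspended() now returns true; PDCP drops new SDUs
  // ... apply the new RLC/PDCP configuration ...
  rlc.resume_bearer(rnti, lcid);  // normal SDU flow resumes
}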

View File

@ -37,12 +37,12 @@ struct scell_cfg_t {
// Cell to measure for Handover
struct meas_cell_cfg_t {
uint32_t earfcn;
uint16_t pci;
uint32_t eci;
float q_offset;
uint32_t allowed_meas_bw;
bool direct_forward_path_available;
uint32_t earfcn;
uint16_t pci;
uint32_t eci;
asn1::rrc::q_offset_range_e cell_individual_offset;
uint32_t allowed_meas_bw;
bool direct_forward_path_available;
};
// neigh measurement Cell info

View File

@ -203,7 +203,7 @@ class mac_interface_phy_nr
{
public:
const static int MAX_SSB = 4;
const static int MAX_GRANTS = 64;
const static int MAX_GRANTS = 32;
const static int MAX_PUCCH_MSG = 64;
const static int MAX_PUCCH_CANDIDATES = 2;
const static int MAX_NZP_CSI_RS = 4;

View File

@ -60,6 +60,8 @@ public:
///< Allow PDCP to query SDU queue status
virtual bool sdu_queue_is_full(uint32_t lcid) = 0;
virtual bool is_suspended(const uint32_t lcid) = 0;
};
class rlc_interface_mac : public srsran::read_pdu_interface

View File

@ -137,6 +137,10 @@ typedef struct SRSRAN_API {
uint32_t count; ///< Number of resources in the set
} srsran_csi_rs_zp_set_t;
SRSRAN_API bool srsran_csi_rs_resource_mapping_is_valid(const srsran_csi_rs_resource_mapping_t *res);
SRSRAN_API bool srsran_csi_rs_resource_mapping_is_valid(const srsran_csi_rs_resource_mapping_t* res);
SRSRAN_API uint32_t srsran_csi_rs_resource_mapping_info(const srsran_csi_rs_resource_mapping_t* res,
char* str,
uint32_t str_len);
#endif // SRSRAN_CSI_RS_CFG_H

View File

@ -48,7 +48,7 @@ srsran_csi_new_nzp_csi_rs_measurement(const srsran_csi_hl_resource_cfg_t csi_res
*/
SRSRAN_API int srsran_csi_reports_generate(const srsran_csi_hl_cfg_t* cfg,
const srsran_slot_cfg_t* slot_cfg,
srsran_csi_report_cfg_t report_cfg[SRSRAN_CSI_MAX_NOF_REPORT]);
srsran_csi_report_cfg_t report_cfg[SRSRAN_CSI_SLOT_MAX_NOF_REPORT]);
/**
* @brief Quantifies a given set of CSI reports from the given set of measurements
@ -58,9 +58,9 @@ SRSRAN_API int srsran_csi_reports_generate(const srsran_csi_hl_cfg_t* cfg,
* @return The number CSI reports for transmission if the provided data is valid, SRSRAN_ERROR code otherwise
*/
SRSRAN_API int
srsran_csi_reports_quantify(const srsran_csi_report_cfg_t reports[SRSRAN_CSI_MAX_NOF_REPORT],
srsran_csi_reports_quantify(const srsran_csi_report_cfg_t reports[SRSRAN_CSI_SLOT_MAX_NOF_REPORT],
const srsran_csi_channel_measurements_t measurements[SRSRAN_CSI_MAX_NOF_RESOURCES],
srsran_csi_report_value_t report_value[SRSRAN_CSI_MAX_NOF_REPORT]);
srsran_csi_report_value_t report_value[SRSRAN_CSI_SLOT_MAX_NOF_REPORT]);
/**
* @brief Compute number of CSI bits necessary to transmit all the CSI reports for a PUCCH transmission

View File

@ -31,6 +31,11 @@
*/
#define SRSRAN_CSI_MAX_NOF_REPORT 48
/**
* @brief Maximum number of supported simultaneous CSI reports in a single slot transmission
*/
#define SRSRAN_CSI_SLOT_MAX_NOF_REPORT 2
/**
* @brief Maximum number of CSI-RS resources defined in TS 38.331 maxNrofCSI-ResourceConfigurations
*/
@ -202,13 +207,4 @@ typedef struct SRSRAN_API {
};
} srsran_csi_report_value_t;
/**
* @brief Complete report configuration and value
*/
typedef struct SRSRAN_API {
srsran_csi_report_cfg_t cfg[SRSRAN_CSI_MAX_NOF_REPORT]; ///< Configuration ready for encoding
srsran_csi_report_value_t value[SRSRAN_CSI_MAX_NOF_REPORT]; ///< Quantified values
uint32_t nof_reports; ///< Total number of reports to transmit
} srsran_csi_reports_t;
#endif // SRSRAN_CSI_CFG_H
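SRSRAN_CSI_SLOT_MAX_NOF_REPORT bounds the per-slot report arrays that srsran_csi_reports_generate() now fills (its prototype is shown in the csi.h hunk above). A minimal usage sketch, assuming csi_hl_cfg and slot_cfg are already populated configuration structs:

srsran_csi_report_cfg_t report_cfg[SRSRAN_CSI_SLOT_MAX_NOF_REPORT] = {};

// Returns the number of reports scheduled in this slot (<= SRSRAN_CSI_SLOT_MAX_NOF_REPORT),
// or SRSRAN_ERROR if the configured reports exceed the per-slot maximum.
int n = srsran_csi_reports_generate(&csi_hl_cfg, &slot_cfg, report_cfg);
if (n < SRSRAN_SUCCESS) {
  // handle error
}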

View File

@ -77,11 +77,11 @@ typedef struct {
*/
typedef struct SRSRAN_API {
/// Common Parameters
srsran_harq_ack_cfg_t ack; ///< HARQ-ACK configuration
uint32_t o_sr; ///< Number of SR bits
bool sr_positive_present; ///< Set to true if there is at least one positive SR
srsran_csi_report_cfg_t csi[SRSRAN_CSI_MAX_NOF_REPORT]; ///< CSI report configuration
uint32_t nof_csi; ///< Number of CSI reports
srsran_harq_ack_cfg_t ack; ///< HARQ-ACK configuration
uint32_t o_sr; ///< Number of SR bits
bool sr_positive_present; ///< Set to true if there is at least one positive SR
srsran_csi_report_cfg_t csi[SRSRAN_CSI_SLOT_MAX_NOF_REPORT]; ///< CSI report configuration
uint32_t nof_csi; ///< Number of CSI reports
union {
srsran_uci_nr_pucch_cfg_t pucch; ///< Configuration for transmission in PUCCH
srsran_uci_nr_pusch_cfg_t pusch; ///< Configuration for transmission in PUSCH
@ -92,9 +92,9 @@ typedef struct SRSRAN_API {
* @brief Uplink Control Information (UCI) message packed information
*/
typedef struct SRSRAN_API {
uint8_t ack[SRSRAN_HARQ_ACK_MAX_NOF_BITS]; ///< HARQ ACK feedback bits
uint32_t sr; ///< Number of positive SR
srsran_csi_report_value_t csi[SRSRAN_CSI_MAX_NOF_REPORT]; ///< Packed CSI report values
uint8_t ack[SRSRAN_HARQ_ACK_MAX_NOF_BITS]; ///< HARQ ACK feedback bits
uint32_t sr; ///< Number of positive SR
srsran_csi_report_value_t csi[SRSRAN_CSI_SLOT_MAX_NOF_REPORT]; ///< Packed CSI report values
bool valid; ///< Indicates whether the message has been decoded successfully, ignored in the transmitter
} srsran_uci_value_nr_t;

View File

@ -182,6 +182,9 @@ private:
*/
bool tx_dev(const uint32_t& device_idx, rf_buffer_interface& buffer, const srsran_timestamp_t& tx_time_);
// private unprotected tx_end implementation
void tx_end_nolock();
/**
* Helper method for receiving over a single RF device. This function maps automatically the logical receive buffers
* to the physical RF buffers for the given device.

View File

@ -68,6 +68,7 @@ public:
// MAC interface
bool has_data_locked(const uint32_t lcid);
uint32_t get_buffer_state(const uint32_t lcid);
void get_buffer_state(uint32_t lcid, uint32_t& tx_queue, uint32_t& prio_tx_queue);
uint32_t get_total_mch_buffer_state(uint32_t lcid);
uint32_t read_pdu(uint32_t lcid, uint8_t* payload, uint32_t nof_bytes);
uint32_t read_pdu_mch(uint32_t lcid, uint8_t* payload, uint32_t nof_bytes);

View File

@ -355,6 +355,7 @@ public:
// MAC interface
bool has_data();
uint32_t get_buffer_state();
void get_buffer_state(uint32_t& tx_queue, uint32_t& prio_tx_queue);
uint32_t read_pdu(uint8_t* payload, uint32_t nof_bytes);
void write_pdu(uint8_t* payload, uint32_t nof_bytes);
@ -384,6 +385,7 @@ private:
bool has_data();
uint32_t get_buffer_state();
void get_buffer_state(uint32_t& new_tx, uint32_t& prio_tx);
// Timeout callback interface
void timer_expired(uint32_t timeout_id);
@ -394,6 +396,8 @@ private:
void set_bsr_callback(bsr_callback_t callback);
private:
void stop_nolock();
int build_status_pdu(uint8_t* payload, uint32_t nof_bytes);
int build_retx_pdu(uint8_t* payload, uint32_t nof_bytes);
int build_segment(uint8_t* payload, uint32_t nof_bytes, rlc_amd_retx_t retx);

View File

@ -281,9 +281,10 @@ public:
// MAC interface
virtual bool has_data() = 0;
bool is_suspended() { return suspended; };
virtual uint32_t get_buffer_state() = 0;
virtual uint32_t read_pdu(uint8_t* payload, uint32_t nof_bytes) = 0;
virtual void write_pdu(uint8_t* payload, uint32_t nof_bytes) = 0;
virtual uint32_t get_buffer_state() = 0;
virtual void get_buffer_state(uint32_t& tx_queue, uint32_t& prio_tx_queue) = 0;
virtual uint32_t read_pdu(uint8_t* payload, uint32_t nof_bytes) = 0;
virtual void write_pdu(uint8_t* payload, uint32_t nof_bytes) = 0;
virtual void set_bsr_callback(bsr_callback_t callback) = 0;

View File

@ -63,6 +63,7 @@ public:
// MAC interface
bool has_data() override;
uint32_t get_buffer_state() override;
void get_buffer_state(uint32_t& newtx_queue, uint32_t& prio_tx_queue) override;
uint32_t read_pdu(uint8_t* payload, uint32_t nof_bytes) override;
void write_pdu(uint8_t* payload, uint32_t nof_bytes) override;

View File

@ -67,6 +67,7 @@ public:
// MAC interface
bool has_data();
uint32_t get_buffer_state();
void get_buffer_state(uint32_t& newtx_queue, uint32_t& prio_tx_queue);
uint32_t read_pdu(uint8_t* payload, uint32_t nof_bytes);
void write_pdu(uint8_t* payload, uint32_t nof_bytes);
int get_increment_sequence_num();

View File

@ -127,12 +127,13 @@ void enb_bearer_manager::add_eps_bearer(uint16_t rnti, uint8_t eps_bearer_id, sr
auto user_it = users_map.find(rnti);
if (user_it == users_map.end()) {
// add empty bearer map
auto p = users_map.insert(rnti, srsran::detail::ue_bearer_manager_impl{});
if (!p) {
// users_map.emplace() returns pair<iterator,bool>
auto p = users_map.emplace(rnti, srsran::detail::ue_bearer_manager_impl{});
if (!p.second) {
logger.error("Bearers: Unable to add a new bearer map for rnti=0x%x", rnti);
return;
}
user_it = p.value();
user_it = p.first;
}
if (user_it->second.add_eps_bearer(eps_bearer_id, rat, lcid)) {

View File

@ -204,6 +204,8 @@ bool bind_addr(int fd, const sockaddr_in& addr_in)
perror("bind()");
return false;
}
srslog::fetch_basic_logger(LOGSERVICE)
.debug("Successfully bound to address %s:%d", get_ip(addr_in).c_str(), get_port(addr_in));
return true;
}
@ -215,7 +217,11 @@ bool bind_addr(int fd, const char* bind_addr_str, int port, sockaddr_in* addr_re
.error("Failed to convert IP address (%s) to sockaddr_in struct", bind_addr_str);
return false;
}
bind_addr(fd, addr_tmp);
if (not bind_addr(fd, addr_tmp)) {
return false;
}
if (addr_result != nullptr) {
*addr_result = addr_tmp;
}
@ -245,6 +251,22 @@ bool connect_to(int fd, const char* dest_addr_str, int dest_port, sockaddr_in* d
return true;
}
bool start_listen(int fd)
{
if (fd < 0) {
srslog::fetch_basic_logger(LOGSERVICE).error("Tried to listen for connections with an invalid socket.");
return false;
}
// Listen for connections
if (listen(fd, SOMAXCONN) != 0) {
srslog::fetch_basic_logger(LOGSERVICE).error("Failed to listen to incoming SCTP connections");
perror("listen()");
return false;
}
return true;
}
} // namespace net_utils
/********************************************
@ -269,6 +291,18 @@ unique_socket& unique_socket::operator=(unique_socket&& other) noexcept
return *this;
}
bool unique_socket::open_socket(net_utils::addr_family ip_type,
net_utils::socket_type socket_type,
net_utils::protocol_type protocol)
{
if (is_open()) {
srslog::fetch_basic_logger(LOGSERVICE).error("Socket is already open.");
return false;
}
sockfd = net_utils::open_socket(ip_type, socket_type, protocol);
return is_open();
}
void unique_socket::close()
{
if (sockfd >= 0) {
@ -288,16 +322,9 @@ bool unique_socket::connect_to(const char* dest_addr_str, int dest_port, sockadd
return net_utils::connect_to(sockfd, dest_addr_str, dest_port, dest_sockaddr);
}
bool unique_socket::open_socket(net_utils::addr_family ip_type,
net_utils::socket_type socket_type,
net_utils::protocol_type protocol)
bool unique_socket::start_listen()
{
if (is_open()) {
srslog::fetch_basic_logger(LOGSERVICE).error("Socket is already open.");
return false;
}
sockfd = net_utils::open_socket(ip_type, socket_type, protocol);
return is_open();
return net_utils::start_listen(sockfd);
}
/***********************************************************************
@ -306,36 +333,18 @@ bool unique_socket::open_socket(net_utils::addr_family ip_type,
namespace net_utils {
bool sctp_init_socket(unique_socket* socket, net_utils::socket_type socktype, const char* bind_addr_str, int port)
bool sctp_init_socket(unique_socket* socket, net_utils::socket_type socktype, const char* bind_addr_str, int bind_port)
{
if (not socket->open_socket(net_utils::addr_family::ipv4, socktype, net_utils::protocol_type::SCTP)) {
return false;
}
if (not socket->bind_addr(bind_addr_str, port)) {
if (not socket->bind_addr(bind_addr_str, bind_port)) {
socket->close();
return false;
}
return true;
}
bool sctp_init_client(unique_socket* socket, net_utils::socket_type socktype, const char* bind_addr_str, int bind_port)
{
return sctp_init_socket(socket, socktype, bind_addr_str, bind_port);
}
bool sctp_init_server(unique_socket* socket, net_utils::socket_type socktype, const char* bind_addr_str, int port)
{
if (not sctp_init_socket(socket, socktype, bind_addr_str, port)) {
return false;
}
// Listen for connections
if (listen(socket->fd(), SOMAXCONN) != 0) {
srslog::fetch_basic_logger(LOGSERVICE).error("Failed to listen to incoming SCTP connections");
return false;
}
return true;
}
} // namespace net_utils
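sctp_init_client() and sctp_init_server() collapse into the single sctp_init_socket(); a server now additionally calls start_listen() on the unique_socket, exactly as the updated socket test further down does. A minimal sketch of both sides, with illustrative addresses and ports:

srsran::unique_socket server, client;
using namespace srsran::net_utils;

// server side: open + bind, then listen explicitly
if (sctp_init_socket(&server, socket_type::seqpacket, "127.0.0.1", 36412) && server.start_listen()) {
  // ready to accept SCTP associations
}

// client side: open + bind to an ephemeral port, then connect
if (sctp_init_socket(&client, socket_type::seqpacket, "127.0.0.1", 0)) {
  client.connect_to("127.0.0.1", 36412);
}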
/***************************************************************

View File

@ -281,10 +281,14 @@ void phy_cfg_nr_default_t::make_pusch_default(srsran_sch_hl_cfg_nr_t& pusch)
// Setup PUSCH DMRS type A position
pusch.typeA_pos = srsran_dmrs_sch_typeA_pos_2;
pusch.scaling = 1.0f;
pusch.beta_offsets.fix_ack = 12.625f;
pusch.beta_offsets.fix_csi1 = 2.25f;
pusch.beta_offsets.fix_csi2 = 2.25f;
pusch.scaling = 1.0f;
pusch.beta_offsets.ack_index1 = 9;
pusch.beta_offsets.ack_index2 = 9;
pusch.beta_offsets.ack_index3 = 9;
pusch.beta_offsets.csi1_index1 = 6;
pusch.beta_offsets.csi1_index2 = 6;
pusch.beta_offsets.csi2_index1 = 6;
pusch.beta_offsets.csi2_index2 = 6;
}
void phy_cfg_nr_default_t::make_pucch_custom_one(srsran_pucch_nr_hl_cfg_t& pucch)

View File

@ -148,6 +148,11 @@ void pdcp_entity_lte::write_sdu(unique_byte_buffer_t sdu, int upper_sn)
return;
}
if (rlc->is_suspended(lcid)) {
logger.warning("Trying to send SDU while re-establishment is in progress. Dropping SDU. LCID=%d", lcid);
return;
}
if (rlc->sdu_queue_is_full(lcid)) {
logger.info(sdu->msg, sdu->N_bytes, "Dropping %s SDU due to full queue", rrc->get_rb_name(lcid));
return;
@ -175,7 +180,6 @@ void pdcp_entity_lte::write_sdu(unique_byte_buffer_t sdu, int upper_sn)
return;
}
}
// check for pending security config in transmit direction
if (enable_security_tx_sn != -1 && enable_security_tx_sn == static_cast<int32_t>(tx_count)) {
enable_integrity(DIRECTION_TX);

View File

@ -37,6 +37,13 @@
*/
#define CSI_RS_MAX_SYMBOLS_SLOT 4
#define RESOURCE_ERROR(R) \
do { \
char res_info_str[256]; \
srsran_csi_rs_resource_mapping_info(R, res_info_str, (uint32_t)sizeof(res_info_str)); \
ERROR("Unhandled configuration %s", res_info_str); \
} while (false)
static int csi_rs_location_f(const srsran_csi_rs_resource_mapping_t* resource, uint32_t i)
{
uint32_t count = 0;
@ -69,7 +76,8 @@ static int csi_rs_location_f(const srsran_csi_rs_resource_mapping_t* resource, u
}
}
ERROR("Unhandled configuration");
// Inform about an unhandled configuration
RESOURCE_ERROR(resource);
return SRSRAN_ERROR;
}
@ -122,7 +130,8 @@ static int csi_rs_location_get_k_list(const srsran_csi_rs_resource_mapping_t* re
}
}
ERROR("Unhandled configuration");
// Inform about an unhandled configuration
RESOURCE_ERROR(resource);
return SRSRAN_ERROR;
}
@ -172,7 +181,8 @@ static int csi_rs_location_get_l_list(const srsran_csi_rs_resource_mapping_t* re
}
}
ERROR("Unhandled configuration");
// Inform about an unhandled configuration
RESOURCE_ERROR(resource);
return SRSRAN_ERROR;
}
@ -225,7 +235,8 @@ static int csi_rs_nof_cdm_groups(const srsran_csi_rs_resource_mapping_t* resourc
return 2;
}
ERROR("Unhandled configuration");
// Inform about an unhandled configuration
RESOURCE_ERROR(resource);
return SRSRAN_ERROR;
}
@ -252,6 +263,89 @@ bool srsran_csi_rs_resource_mapping_is_valid(const srsran_csi_rs_resource_mappin
return true;
}
uint32_t srsran_csi_rs_resource_mapping_info(const srsran_csi_rs_resource_mapping_t* res, char* str, uint32_t str_len)
{
uint32_t len = 0;
const char* row_str = "invalid";
uint32_t nof_freq_domain = 0;
switch (res->row) {
case srsran_csi_rs_resource_mapping_row_1:
row_str = "1";
nof_freq_domain = SRSRAN_CSI_RS_NOF_FREQ_DOMAIN_ALLOC_ROW1;
break;
case srsran_csi_rs_resource_mapping_row_2:
row_str = "2";
nof_freq_domain = SRSRAN_CSI_RS_NOF_FREQ_DOMAIN_ALLOC_ROW2;
break;
case srsran_csi_rs_resource_mapping_row_4:
row_str = "4";
nof_freq_domain = SRSRAN_CSI_RS_NOF_FREQ_DOMAIN_ALLOC_ROW4;
break;
case srsran_csi_rs_resource_mapping_row_other:
row_str = "other";
nof_freq_domain = SRSRAN_CSI_RS_NOF_FREQ_DOMAIN_ALLOC_OTHER;
break;
}
const char* cdm_str = "invalid";
switch (res->cdm) {
case srsran_csi_rs_cdm_nocdm:
cdm_str = "nocdm";
break;
case srsran_csi_rs_cdm_fd_cdm2:
cdm_str = "FD-CDM2";
break;
case srsran_csi_rs_cdm_cdm4_fd2_td2:
cdm_str = "CDM4-FD2-TD2";
break;
case srsran_csi_rs_cdm_cdm8_fd2_td4:
cdm_str = "CDM8-FD2-TD4";
break;
}
const char* density_str = "invalid";
switch (res->density) {
case srsran_csi_rs_resource_mapping_density_three:
density_str = "3";
break;
case srsran_csi_rs_resource_mapping_density_dot5_even:
density_str = ".5 (even)";
break;
case srsran_csi_rs_resource_mapping_density_dot5_odd:
density_str = ".5 (odd)";
break;
case srsran_csi_rs_resource_mapping_density_one:
density_str = "1";
break;
case srsran_csi_rs_resource_mapping_density_spare:
density_str = "spare";
break;
}
char frequency_domain_alloc[SRSRAN_CSI_RS_NOF_FREQ_DOMAIN_ALLOC_MAX + 1];
srsran_vec_sprint_bin(frequency_domain_alloc,
SRSRAN_CSI_RS_NOF_FREQ_DOMAIN_ALLOC_MAX + 1,
(uint8_t*)res->frequency_domain_alloc,
nof_freq_domain);
len = srsran_print_check(str,
str_len,
len,
"row=%s freq=%s nof_ports=%d fist_symb=%d fist_symb2=%d cdm=%s density=%s rb=(%d:%d)",
row_str,
frequency_domain_alloc,
res->nof_ports,
res->first_symbol_idx,
res->first_symbol_idx2,
cdm_str,
density_str,
res->freq_band.start_rb,
res->freq_band.start_rb + res->freq_band.nof_rb - 1);
return len;
}
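srsran_csi_rs_resource_mapping_info() is what the RESOURCE_ERROR macro above wraps; it can also be called directly when logging a resource mapping. A small usage sketch, assuming res is a filled srsran_csi_rs_resource_mapping_t:

char info_str[256] = {};
srsran_csi_rs_resource_mapping_info(&res, info_str, (uint32_t)sizeof(info_str));
printf("CSI-RS resource: %s\n", info_str);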
uint32_t csi_rs_count(srsran_csi_rs_density_t density, uint32_t nprb)
{
switch (density) {
@ -677,6 +771,7 @@ int srsran_csi_rs_nzp_measure_trs(const srsran_carrier_nr_t* carrier,
int ret = csi_rs_nzp_measure_set(carrier, slot_cfg, set, grid, measurements);
if (ret < SRSRAN_SUCCESS) {
ERROR("Error performing measurements");
return SRSRAN_ERROR;
}
uint32_t count = (uint32_t)ret;
@ -772,6 +867,7 @@ int srsran_csi_rs_nzp_measure_channel(const srsran_carrier_nr_t* carrier
int ret = csi_rs_nzp_measure_set(carrier, slot_cfg, set, grid, measurements);
if (ret < SRSRAN_SUCCESS) {
ERROR("Error performing measurements");
return SRSRAN_ERROR;
}
uint32_t count = (uint32_t)ret;

View File

@ -188,7 +188,7 @@ int srsran_csi_new_nzp_csi_rs_measurement(
int srsran_csi_reports_generate(const srsran_csi_hl_cfg_t* cfg,
const srsran_slot_cfg_t* slot_cfg,
srsran_csi_report_cfg_t report_cfg[SRSRAN_CSI_MAX_NOF_REPORT])
srsran_csi_report_cfg_t report_cfg[SRSRAN_CSI_SLOT_MAX_NOF_REPORT])
{
uint32_t count = 0;
@ -198,7 +198,7 @@ int srsran_csi_reports_generate(const srsran_csi_hl_cfg_t* cfg,
}
// Make sure report configuration is initialised to zero
SRSRAN_MEM_ZERO(report_cfg, srsran_csi_report_cfg_t, SRSRAN_CSI_MAX_NOF_REPORT);
SRSRAN_MEM_ZERO(report_cfg, srsran_csi_report_cfg_t, SRSRAN_CSI_SLOT_MAX_NOF_REPORT);
// Iterate every possible configured CSI report
for (uint32_t i = 0; i < SRSRAN_CSI_MAX_NOF_REPORT; i++) {
@ -207,6 +207,13 @@ int srsran_csi_reports_generate(const srsran_csi_hl_cfg_t* cfg,
continue;
}
if (count >= SRSRAN_CSI_SLOT_MAX_NOF_REPORT) {
ERROR("The number of CSI reports in the slot (%d) exceeds the maximum (%d)",
count++,
SRSRAN_CSI_SLOT_MAX_NOF_REPORT);
return SRSRAN_ERROR;
}
// Configure report
report_cfg[count].cfg = cfg->reports[i];
report_cfg[count].nof_ports = 1;
@ -218,9 +225,9 @@ int srsran_csi_reports_generate(const srsran_csi_hl_cfg_t* cfg,
return (int)count;
}
int srsran_csi_reports_quantify(const srsran_csi_report_cfg_t reports[SRSRAN_CSI_MAX_NOF_REPORT],
int srsran_csi_reports_quantify(const srsran_csi_report_cfg_t reports[SRSRAN_CSI_SLOT_MAX_NOF_REPORT],
const srsran_csi_channel_measurements_t measurements[SRSRAN_CSI_MAX_NOF_RESOURCES],
srsran_csi_report_value_t report_value[SRSRAN_CSI_MAX_NOF_REPORT])
srsran_csi_report_value_t report_value[SRSRAN_CSI_SLOT_MAX_NOF_REPORT])
{
uint32_t count = 0;
@ -230,7 +237,7 @@ int srsran_csi_reports_quantify(const srsran_csi_report_cfg_t reports[
}
// Iterate every possible configured CSI report
for (uint32_t i = 0; i < SRSRAN_CSI_MAX_NOF_REPORT; i++) {
for (uint32_t i = 0; i < SRSRAN_CSI_SLOT_MAX_NOF_REPORT; i++) {
// If the report is the last one, break
if (reports->cfg.type == SRSRAN_CSI_REPORT_TYPE_NONE) {
break;

View File

@ -387,13 +387,11 @@ static int pusch_nr_gen_mux_uci(srsran_pusch_nr_t* q, const srsran_uci_cfg_nr_t*
// the number of reserved resource elements for potential HARQ-ACK transmission is calculated according to Clause
// 6.3.2.4.2.1, by setting O_ACK = 2 ;
G_ack_rvd = srsran_uci_nr_pusch_ack_nof_bits(&cfg->pusch, 2);
// Disable non reserved HARQ-ACK bits
G_ack = 0;
}
// Counters
uint32_t m_ack_count = 0;
uint32_t m_rvd_count = 0;
uint32_t m_csi1_count = 0;
uint32_t m_csi2_count = 0;
uint32_t m_ulsch_count = 0;
@ -420,15 +418,26 @@ static int pusch_nr_gen_mux_uci(srsran_pusch_nr_t* q, const srsran_uci_cfg_nr_t*
// Compute HARQ-ACK bits multiplexing
uint32_t ack_d = 0;
uint32_t ack_m_re_count = 0;
uint32_t rvd_d = 0;
uint32_t rvd_m_re_count = 0;
if (l >= l1) {
if (cfg->ack.count <= 2 && m_ack_count < G_ack_rvd) {
ack_d = 1;
ack_m_re_count = M_ulsch_sc;
if (G_ack_rvd - m_ack_count < M_uci_sc * Nl * Qm) {
ack_d = (M_uci_sc * Nl * Qm) / (G_ack_rvd - m_ack_count);
ack_m_re_count = SRSRAN_CEIL(G_ack_rvd - m_ack_count, Nl * Qm);
if (cfg->ack.count <= 2 && m_rvd_count < G_ack_rvd) {
rvd_d = 1;
rvd_m_re_count = M_ulsch_sc;
if (G_ack_rvd - m_rvd_count < M_uci_sc * Nl * Qm) {
rvd_d = (M_uci_sc * Nl * Qm) / (G_ack_rvd - m_rvd_count);
rvd_m_re_count = SRSRAN_CEIL(G_ack_rvd - m_rvd_count, Nl * Qm);
}
M_uci_rvd = rvd_m_re_count;
if (m_ack_count < G_ack) {
ack_d = 1;
ack_m_re_count = M_uci_rvd;
if (G_ack - m_ack_count < M_uci_rvd * Nl * Qm) {
ack_d = (M_uci_rvd * Nl * Qm) / (G_ack - m_ack_count);
ack_m_re_count = SRSRAN_CEIL(G_ack - m_ack_count, Nl * Qm);
}
}
M_uci_rvd = ack_m_re_count;
} else if (m_ack_count < G_ack) {
ack_d = 1;
ack_m_re_count = M_ulsch_sc;
@ -469,14 +478,14 @@ static int pusch_nr_gen_mux_uci(srsran_pusch_nr_t* q, const srsran_uci_cfg_nr_t*
// Leave the rest for UL-SCH
uint32_t ulsch_m_re_count = M_uci_sc;
for (uint32_t i = 0, csi1_i = 0, csi2_i = 0; i < cfg->pusch.M_pusch_sc[l]; i++) {
// Check if RE is reserved for ACK
for (uint32_t i = 0, csi1_i = 0, csi2_i = 0, rvd_i = 0; i < cfg->pusch.M_pusch_sc[l]; i++) {
// Check if RE is reserved
bool reserved = false;
if (ack_m_re_count != 0 && i % ack_d == 0 && m_ack_count < G_ack_rvd) {
if (rvd_m_re_count != 0 && i % rvd_d == 0 && m_rvd_count < G_ack_rvd) {
reserved = true;
}
if (ack_m_re_count != 0 && i % ack_d == 0 && m_ack_count < G_ack) {
if (G_ack_rvd == 0 && ack_m_re_count != 0 && i % ack_d == 0 && m_ack_count < G_ack) {
for (uint32_t j = 0; j < Nl * Qm; j++) {
pos_ack[m_ack_count++] = m_all_count + j;
}
@ -507,14 +516,15 @@ static int pusch_nr_gen_mux_uci(srsran_pusch_nr_t* q, const srsran_uci_cfg_nr_t*
// Set reserved bits only if there are ACK bits
if (reserved) {
if (cfg->ack.count > 0) {
if (ack_m_re_count != 0 && rvd_i % ack_d == 0 && m_ack_count < G_ack) {
for (uint32_t j = 0; j < Nl * Qm; j++) {
pos_ack[m_ack_count++] = m_all_count + j;
}
} else {
m_ack_count += Nl * Qm;
ack_m_re_count--;
}
ack_m_re_count--;
m_rvd_count += Nl * Qm;
rvd_m_re_count--;
rvd_i++;
}
// Increment all bit counter
@ -540,8 +550,8 @@ static int pusch_nr_gen_mux_uci(srsran_pusch_nr_t* q, const srsran_uci_cfg_nr_t*
q->G_ulsch = m_ulsch_count;
// Assert Number of bits
if (G_ack_rvd != 0 && G_ack_rvd != m_ack_count && cfg->ack.count > 0) {
ERROR("Not matched %d!=%d", G_ack_rvd, m_ack_count);
if (G_ack_rvd != 0 && G_ack_rvd != m_rvd_count && cfg->ack.count <= 2) {
ERROR("Not matched %d!=%d", G_ack_rvd, m_rvd_count);
}
if (G_ack != 0 && G_ack != m_ack_count) {
ERROR("Not matched %d!=%d", G_ack, m_ack_count);
@ -1063,11 +1073,12 @@ uint32_t srsran_pusch_nr_rx_info(const srsran_pusch_nr_t* q,
len += pusch_nr_grant_info(q, cfg, grant, res, &str[len], str_len - len);
if (res != NULL && srsran_uci_nr_total_bits(&cfg->uci) > 0) {
len = srsran_print_check(str, str_len, len, "UCI: ");
srsran_uci_data_nr_t uci_data = {};
uci_data.cfg = cfg->uci;
uci_data.value = res->uci;
len += srsran_uci_nr_info(&uci_data, &str[len], str_len - len);
len = srsran_print_check(str, str_len, len, "valid=%c ", res->uci.valid ? 'y' : 'n');
}
if (q->meas_time_en) {

View File

@ -103,6 +103,14 @@ int srsran_rf_open_devname(srsran_rf_t* rf, const char* devname, char* args, uin
{
rf->thread_gain_run = false;
bool no_rf_devs_detected = true;
printf("Available RF device list:");
for (unsigned int i = 0; available_devices[i]; i++) {
no_rf_devs_detected = false;
printf(" %s ", available_devices[i]->name);
}
printf("%s\n", no_rf_devs_detected ? " <none>" : "");
// Try to open the device if name is provided
if (devname && devname[0] != '\0') {
int i = 0;
@ -113,21 +121,30 @@ int srsran_rf_open_devname(srsran_rf_t* rf, const char* devname, char* args, uin
}
i++;
}
ERROR("RF device '%s' not found. Please check the available srsRAN CMAKE options to verify if this device is being "
"detected in your system",
devname);
// provided device not found, abort
return SRSRAN_ERROR;
} else {
// auto-mode, try to open in order of appearance in available_devices[] array
int i = 0;
while (available_devices[i] != NULL) {
if (!available_devices[i]->srsran_rf_open_multi(args, &rf->handler, nof_channels)) {
rf->dev = available_devices[i];
return SRSRAN_SUCCESS;
}
i++;
}
}
ERROR("No compatible RF frontend found");
// auto-mode, try to open in order of appearance in available_devices[] array
int i = 0;
while (available_devices[i] != NULL) {
printf("Trying to open RF device '%s'\n", available_devices[i]->name);
if (!available_devices[i]->srsran_rf_open_multi(args, &rf->handler, nof_channels)) {
rf->dev = available_devices[i];
printf("RF device '%s' successfully opened\n", available_devices[i]->name);
return SRSRAN_SUCCESS;
}
printf("Unable to open RF device '%s'\n", available_devices[i]->name);
i++;
}
ERROR(
"Failed to open a RF frontend device. Please check the available srsRAN CMAKE options to verify what RF frontend "
"devices have been detected in your system");
return SRSRAN_ERROR;
}
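With this change the list of compiled-in RF drivers is printed up front and the auto-detect loop logs every attempt. A hedged sketch of how the function is typically driven; the handle, args string and channel count are illustrative, and a NULL or empty devname selects the auto-detect path shown above:

srsran_rf_t rf   = {};
char        args[] = ""; // device-specific args, e.g. UHD/ZMQ options (illustrative)
if (srsran_rf_open_devname(&rf, NULL, args, 1) != SRSRAN_SUCCESS) {
  // no compatible RF frontend could be opened
}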

View File

@ -287,7 +287,9 @@ int rf_zmq_open_multi(char* args, void** h, uint32_t nof_channels)
}
}
} else {
fprintf(stderr, "[zmq] Error: RF device args are required for ZMQ no-RF module\n");
fprintf(stderr,
"[zmq] Error: No device 'args' option has been set. Please make sure to set this option to be able to "
"use the ZMQ no-RF module\n");
goto clean_exit;
}

View File

@ -548,7 +548,7 @@ bool radio::tx_dev(const uint32_t& device_idx, rf_buffer_interface& buffer, cons
// if the gap is bigger than TX_MAX_GAP_ZEROS, stop burst
if (fabs(srsran_timestamp_real(&ts_overlap)) > tx_max_gap_zeros) {
logger.info("Detected RF gap of %.1f us. Sending end-of-burst.", srsran_timestamp_real(&ts_overlap) * 1.0e6);
tx_end();
tx_end_nolock();
} else {
logger.debug("Detected RF gap of %.1f us. Tx'ing zeroes.", srsran_timestamp_real(&ts_overlap) * 1.0e6);
// Otherwise, transmit zeros
@ -601,6 +601,12 @@ bool radio::tx_dev(const uint32_t& device_idx, rf_buffer_interface& buffer, cons
}
void radio::tx_end()
{
std::unique_lock<std::mutex> lock(tx_mutex);
tx_end_nolock();
}
void radio::tx_end_nolock()
{
if (!is_initialized) {
return;
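tx_end() now just takes tx_mutex and delegates to tx_end_nolock(), so callers that already hold the lock (such as tx_dev() above) can end the burst without re-locking. The same public-locking/private-_nolock split is applied to rlc_am_lte_tx::stop()/stop_nolock() further down. A generic sketch of the idiom, not tied to the radio class:

#include <mutex>

class burst_ctrl
{
public:
  void tx_end()
  {
    std::unique_lock<std::mutex> lock(tx_mutex);
    tx_end_nolock(); // safe: the lock is held here
  }

private:
  void tx_end_nolock()
  {
    // ... work that assumes tx_mutex is already held by the caller ...
  }
  std::mutex tx_mutex;
};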

View File

@ -248,20 +248,24 @@ bool rlc::has_data_locked(const uint32_t lcid)
return has_data(lcid);
}
uint32_t rlc::get_buffer_state(uint32_t lcid)
void rlc::get_buffer_state(uint32_t lcid, uint32_t& tx_queue, uint32_t& prio_tx_queue)
{
uint32_t ret = 0;
rwlock_read_guard lock(rwlock);
if (valid_lcid(lcid)) {
if (rlc_array.at(lcid)->is_suspended()) {
ret = 0;
tx_queue = 0;
prio_tx_queue = 0;
} else {
ret = rlc_array.at(lcid)->get_buffer_state();
rlc_array.at(lcid)->get_buffer_state(tx_queue, prio_tx_queue);
}
}
}
return ret;
uint32_t rlc::get_buffer_state(uint32_t lcid)
{
uint32_t tx_queue = 0, prio_tx_queue = 0;
get_buffer_state(lcid, tx_queue, prio_tx_queue);
return tx_queue + prio_tx_queue;
}
uint32_t rlc::get_total_mch_buffer_state(uint32_t lcid)
@ -601,9 +605,9 @@ bool rlc::valid_lcid_mrb(uint32_t lcid)
void rlc::update_bsr(uint32_t lcid)
{
if (bsr_callback) {
uint32_t tx_queue = get_buffer_state(lcid);
uint32_t retx_queue = 0; // todo: separate tx_queue and retx_queue
bsr_callback(lcid, tx_queue, retx_queue);
uint32_t tx_queue = 0, prio_tx_queue = 0;
get_buffer_state(lcid, tx_queue, prio_tx_queue);
bsr_callback(lcid, tx_queue, prio_tx_queue);
}
}
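get_buffer_state() is split so callers can distinguish new-transmission bytes from high-priority bytes (status PDUs and retransmissions); the single-value overload is kept as the sum of both. A hedged sketch of how a stack component might forward the split state to the scheduler, assuming the dl_rlc_buffer_state() signature shown in the scheduler interface hunks below and illustrative rlc/sched/rnti/lcid variables:

// report per-bearer DL buffer occupancy to the MAC scheduler
uint32_t tx_queue = 0, prio_tx_queue = 0;
rlc.get_buffer_state(lcid, tx_queue, prio_tx_queue);
sched->dl_rlc_buffer_state(rnti, lcid, tx_queue, prio_tx_queue);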

View File

@ -267,6 +267,11 @@ uint32_t rlc_am_lte::get_buffer_state()
return tx.get_buffer_state();
}
void rlc_am_lte::get_buffer_state(uint32_t& tx_queue, uint32_t& prio_tx_queue)
{
tx.get_buffer_state(tx_queue, prio_tx_queue);
}
uint32_t rlc_am_lte::read_pdu(uint8_t* payload, uint32_t nof_bytes)
{
uint32_t read_bytes = tx.read_pdu(payload, nof_bytes);
@ -347,7 +352,11 @@ bool rlc_am_lte::rlc_am_lte_tx::configure(const rlc_config_t& cfg_)
void rlc_am_lte::rlc_am_lte_tx::stop()
{
std::lock_guard<std::mutex> lock(mutex);
stop_nolock();
}
void rlc_am_lte::rlc_am_lte_tx::stop_nolock()
{
empty_queue_nolock();
tx_enabled = false;
@ -400,7 +409,8 @@ void rlc_am_lte::rlc_am_lte_tx::empty_queue_nolock()
void rlc_am_lte::rlc_am_lte_tx::reestablish()
{
stop();
std::lock_guard<std::mutex> lock(mutex);
stop_nolock();
tx_enabled = true;
}
@ -445,9 +455,18 @@ void rlc_am_lte::rlc_am_lte_tx::check_sn_reached_max_retx(uint32_t sn)
uint32_t rlc_am_lte::rlc_am_lte_tx::get_buffer_state()
{
uint32_t new_tx_queue = 0, prio_tx_queue = 0;
get_buffer_state(new_tx_queue, prio_tx_queue);
return new_tx_queue + prio_tx_queue;
}
void rlc_am_lte::rlc_am_lte_tx::get_buffer_state(uint32_t& n_bytes_newtx, uint32_t& n_bytes_prio)
{
n_bytes_newtx = 0;
n_bytes_prio = 0;
uint32_t n_sdus = 0;
std::lock_guard<std::mutex> lock(mutex);
uint32_t n_bytes = 0;
uint32_t n_sdus = 0;
logger.debug("%s Buffer state - do_status=%s, status_prohibit_running=%s (%d/%d)",
RB_NAME,
@ -458,8 +477,8 @@ uint32_t rlc_am_lte::rlc_am_lte_tx::get_buffer_state()
// Bytes needed for status report
if (do_status() && not status_prohibit_timer.is_running()) {
n_bytes += parent->rx.get_status_pdu_length();
logger.debug("%s Buffer state - total status report: %d bytes", RB_NAME, n_bytes);
n_bytes_prio += parent->rx.get_status_pdu_length();
logger.debug("%s Buffer state - total status report: %d bytes", RB_NAME, n_bytes_prio);
}
// Bytes needed for retx
@ -477,8 +496,8 @@ uint32_t rlc_am_lte::rlc_am_lte_tx::get_buffer_state()
logger.error("In get_buffer_state(): Removing retx.sn=%d from queue", retx.sn);
retx_queue.pop();
} else {
n_bytes += req_bytes;
logger.debug("Buffer state - retx: %d bytes", n_bytes);
n_bytes_prio += req_bytes;
logger.debug("Buffer state - retx: %d bytes", n_bytes_prio);
}
}
}
@ -486,25 +505,23 @@ uint32_t rlc_am_lte::rlc_am_lte_tx::get_buffer_state()
// Bytes needed for tx SDUs
if (tx_window.size() < 1024) {
n_sdus = tx_sdu_queue.get_n_sdus();
n_bytes += tx_sdu_queue.size_bytes();
n_bytes_newtx += tx_sdu_queue.size_bytes();
if (tx_sdu != NULL) {
n_sdus++;
n_bytes += tx_sdu->N_bytes;
n_bytes_newtx += tx_sdu->N_bytes;
}
}
// Room needed for header extensions? (integer rounding)
if (n_sdus > 1) {
n_bytes += ((n_sdus - 1) * 1.5) + 0.5;
n_bytes_newtx += ((n_sdus - 1) * 1.5) + 0.5;
}
// Room needed for fixed header of data PDUs
if (n_bytes > 0 && n_sdus > 0) {
n_bytes += 2; // Two bytes for fixed header with SN length = 10
logger.debug("%s Total buffer state - %d SDUs (%d B)", RB_NAME, n_sdus, n_bytes);
if (n_bytes_newtx > 0 && n_sdus > 0) {
n_bytes_newtx += 2; // Two bytes for fixed header with SN length = 10
logger.debug("%s Total buffer state - %d SDUs (%d B)", RB_NAME, n_sdus, n_bytes_newtx);
}
return n_bytes;
}
int rlc_am_lte::rlc_am_lte_tx::write_sdu(unique_byte_buffer_t sdu)
@ -622,7 +639,9 @@ void rlc_am_lte::rlc_am_lte_tx::timer_expired(uint32_t timeout_id)
lock.unlock();
if (bsr_callback) {
bsr_callback(parent->lcid, get_buffer_state(), 0);
uint32_t new_tx_queue = 0, prio_tx_queue = 0;
get_buffer_state(new_tx_queue, prio_tx_queue);
bsr_callback(parent->lcid, new_tx_queue, prio_tx_queue);
}
}

View File

@ -133,6 +133,12 @@ uint32_t rlc_tm::get_buffer_state()
return ul_queue.size_bytes();
}
void rlc_tm::get_buffer_state(uint32_t& newtx_queue, uint32_t& prio_tx_queue)
{
newtx_queue = get_buffer_state();
prio_tx_queue = 0;
}
rlc_bearer_metrics_t rlc_tm::get_metrics()
{
std::lock_guard<std::mutex> lock(metrics_mutex);

View File

@ -143,6 +143,12 @@ uint32_t rlc_um_base::get_buffer_state()
return 0;
}
void rlc_um_base::get_buffer_state(uint32_t& newtx_queue, uint32_t& prio_tx_queue)
{
newtx_queue = get_buffer_state();
prio_tx_queue = 0;
}
uint32_t rlc_um_base::read_pdu(uint8_t* payload, uint32_t nof_bytes)
{
if (tx && tx_enabled) {

View File

@ -26,7 +26,9 @@
#include <cstdio>
#include <unistd.h>
#ifndef SRSRAN_TERM_TIMEOUT_S
#define SRSRAN_TERM_TIMEOUT_S (5)
#endif
/// Handler called after the user interrupts the program.
static std::atomic<srsran_signal_hanlder> user_handler;
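The #ifndef guard gives SRSRAN_TERM_TIMEOUT_S a default of 5 s while letting the new EXTRA_TERM_TIMEOUT_S CMake option override it via -DSRSRAN_TERM_TIMEOUT_S=<n> (see the CMakeLists hunk near the top). A sketch of how such a compile-time override is typically consumed; arm_term_watchdog() is hypothetical and not part of this diff, and alarm() comes from the <unistd.h> already included in this file:

#ifndef SRSRAN_TERM_TIMEOUT_S
#define SRSRAN_TERM_TIMEOUT_S (5) // seconds; overridable at build time
#endif

// hypothetical helper: force termination if a graceful shutdown hangs
static void arm_term_watchdog()
{
  alarm(SRSRAN_TERM_TIMEOUT_S); // delivers SIGALRM after the configured timeout
}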

View File

@ -61,11 +61,12 @@ int test_socket_handler()
const char* server_addr = "127.0.100.1";
using namespace srsran::net_utils;
TESTASSERT(sctp_init_server(&server_socket, socket_type::seqpacket, server_addr, server_port));
TESTASSERT(sctp_init_socket(&server_socket, socket_type::seqpacket, server_addr, server_port));
TESTASSERT(server_socket.start_listen());
logger.info("Listening from fd=%d", server_socket.fd());
TESTASSERT(sctp_init_client(&client_socket, socket_type::seqpacket, "127.0.0.1", 0));
TESTASSERT(sctp_init_client(&client_socket2, socket_type::seqpacket, "127.0.0.2", 0));
TESTASSERT(sctp_init_socket(&client_socket, socket_type::seqpacket, "127.0.0.1", 0));
TESTASSERT(sctp_init_socket(&client_socket2, socket_type::seqpacket, "127.0.0.2", 0));
TESTASSERT(client_socket.connect_to(server_addr, server_port));
TESTASSERT(client_socket2.connect_to(server_addr, server_port));
@ -123,6 +124,18 @@ int test_socket_handler()
return 0;
}
int test_sctp_bind_error()
{
srsran::unique_socket sock;
TESTASSERT(not srsran::net_utils::sctp_init_socket(
&sock, srsran::net_utils::socket_type::seqpacket, "1.1.1.1", 8000)); // Bogus IP address
// should not be able to bind
TESTASSERT(srsran::net_utils::sctp_init_socket(
&sock, srsran::net_utils::socket_type::seqpacket, "127.0.0.1", 8000)); // Good IP address
// should be able to bind
return SRSRAN_SUCCESS;
}
int main()
{
auto& logger = srslog::fetch_basic_logger("S1AP", false);
@ -132,6 +145,7 @@ int main()
srslog::init();
TESTASSERT(test_socket_handler() == 0);
TESTASSERT(test_sctp_bind_error() == 0);
return 0;
}

View File

@ -64,6 +64,8 @@ public:
logger.info("Discard_count=%" PRIu64 "", discard_count);
}
bool is_suspended(uint32_t lcid) { return false; }
uint64_t rx_count = 0;
uint64_t discard_count = 0;

View File

@ -69,7 +69,7 @@ private:
int encode_pdsch(stack_interface_phy_lte::dl_sched_grant_t* grants, uint32_t nof_grants);
int encode_pmch(stack_interface_phy_lte::dl_sched_grant_t* grant, srsran_mbsfn_cfg_t* mbsfn_cfg);
void decode_pusch_rnti(stack_interface_phy_lte::ul_sched_grant_t& ul_grant,
bool decode_pusch_rnti(stack_interface_phy_lte::ul_sched_grant_t& ul_grant,
srsran_ul_cfg_t& ul_cfg,
srsran_pusch_res_t& pusch_res);
void decode_pusch(stack_interface_phy_lte::ul_sched_grant_t* grants, uint32_t nof_pusch);

View File

@ -99,12 +99,6 @@ private:
uint32_t nof_prach_workers = 0;
double srate_hz = 0.0; ///< Current sampling rate in Hz
// Current configuration
std::mutex common_cfg_mutex;
srsran_carrier_nr_t carrier = {};
srsran_pdcch_cfg_nr_t pdcch_cfg = {};
srsran_ssb_cfg_t ssb_cfg = {};
public:
struct args_t {
double srate_hz = 0.0;

View File

@ -236,7 +236,7 @@ public:
void set_mch_period_stop(uint32_t stop);
// Getters and setters for ul grants which need to be shared between workers
const stack_interface_phy_lte::ul_sched_list_t& get_ul_grants(uint32_t tti);
const stack_interface_phy_lte::ul_sched_list_t get_ul_grants(uint32_t tti);
void set_ul_grants(uint32_t tti, const stack_interface_phy_lte::ul_sched_list_t& ul_grants);
void clear_grants(uint16_t rnti);

View File

@ -23,6 +23,7 @@
#define SRSRAN_UE_BUFFER_MANAGER_H
#include "sched_config.h"
#include "srsran/adt/span.h"
#include "srsran/common/common_lte.h"
#include "srsran/common/common_nr.h"
#include "srsran/srslog/srslog.h"
@ -37,7 +38,7 @@ template <bool isNR>
class ue_buffer_manager
{
protected:
const static uint32_t MAX_LC_ID = isNR ? srsran::MAX_NR_NOF_BEARERS : srsran::MAX_LTE_LCID;
const static uint32_t MAX_LC_ID = isNR ? (srsran::MAX_NR_NOF_BEARERS - 1) : srsran::MAX_LTE_LCID;
const static uint32_t MAX_LCG_ID = isNR ? 7 : 3; // Should import from sched_interface and sched_nr_interface
const static uint32_t MAX_SRB_LC_ID = isNR ? srsran::MAX_NR_SRB_ID : srsran::MAX_LTE_SRB_ID;
const static uint32_t MAX_NOF_LCIDS = MAX_LC_ID + 1;
@ -45,16 +46,18 @@ protected:
constexpr static uint32_t pbr_infinity = -1;
public:
explicit ue_buffer_manager(srslog::basic_logger& logger_);
explicit ue_buffer_manager(uint16_t rnti, srslog::basic_logger& logger_);
// Bearer configuration
void config_lcids(srsran::const_span<mac_lc_ch_cfg_t> bearer_cfg_list);
void config_lcid(uint32_t lcid, const mac_lc_ch_cfg_t& bearer_cfg);
// Buffer Status update
void ul_bsr(uint32_t lcg_id, uint32_t val);
void dl_buffer_state(uint8_t lcid, uint32_t tx_queue, uint32_t retx_queue);
void dl_buffer_state(uint8_t lcid, uint32_t tx_queue, uint32_t prio_tx_queue);
// Configuration getters
uint16_t get_rnti() const { return rnti; }
bool is_bearer_active(uint32_t lcid) const { return get_cfg(lcid).is_active(); }
bool is_bearer_ul(uint32_t lcid) const { return get_cfg(lcid).is_ul(); }
bool is_bearer_dl(uint32_t lcid) const { return get_cfg(lcid).is_dl(); }
@ -67,13 +70,13 @@ public:
/// DL newtx buffer status for given LCID (no RLC overhead included)
int get_dl_tx(uint32_t lcid) const { return is_bearer_dl(lcid) ? channels[lcid].buf_tx : 0; }
/// DL retx buffer status for given LCID (no RLC overhead included)
int get_dl_retx(uint32_t lcid) const { return is_bearer_dl(lcid) ? channels[lcid].buf_retx : 0; }
/// DL high prio tx buffer status for given LCID (no RLC overhead included)
int get_dl_prio_tx(uint32_t lcid) const { return is_bearer_dl(lcid) ? channels[lcid].buf_prio_tx : 0; }
/// Sum of DL RLC newtx and retx buffer status for given LCID (no RLC overhead included)
int get_dl_tx_total(uint32_t lcid) const { return get_dl_tx(lcid) + get_dl_retx(lcid); }
/// Sum of DL RLC newtx and high prio tx buffer status for given LCID (no RLC overhead included)
int get_dl_tx_total(uint32_t lcid) const { return get_dl_tx(lcid) + get_dl_prio_tx(lcid); }
/// Sum of DL RLC newtx and retx buffer status for all LCIDS
/// Sum of DL RLC newtx and high prio buffer status for all LCIDS
int get_dl_tx_total() const;
// UL BSR methods
@ -86,12 +89,15 @@ public:
static bool is_lcg_valid(uint32_t lcg) { return lcg <= MAX_LCG_ID; }
protected:
bool config_lcid_internal(uint32_t lcid, const mac_lc_ch_cfg_t& bearer_cfg);
srslog::basic_logger& logger;
uint16_t rnti;
struct logical_channel {
mac_lc_ch_cfg_t cfg;
int buf_tx = 0;
int buf_retx = 0;
int buf_prio_tx = 0;
int Bj = 0;
int bucket_size = 0;
};

View File

@ -59,8 +59,8 @@ public:
void ul_bsr(uint16_t rnti, uint32_t lcg_id, uint32_t bsr) override;
void dl_buffer_state(uint16_t rnti, uint32_t lcid, uint32_t newtx, uint32_t retx);
int run_slot(slot_point pdsch_tti, uint32_t cc, dl_sched_res_t& result) override;
int get_ul_sched(slot_point pusch_tti, uint32_t cc, ul_sched_t& result) override;
int run_slot(slot_point pdsch_tti, uint32_t cc, dl_res_t& result) override;
int get_ul_sched(slot_point pusch_tti, uint32_t cc, ul_res_t& result) override;
void get_metrics(mac_metrics_t& metrics);

View File

@ -57,8 +57,8 @@ using ue_cc_cfg_t = sched_nr_interface::ue_cc_cfg_t;
using pdcch_cce_pos_list = srsran::bounded_vector<uint32_t, SRSRAN_SEARCH_SPACE_MAX_NOF_CANDIDATES_NR>;
using bwp_cce_pos_list = std::array<std::array<pdcch_cce_pos_list, MAX_NOF_AGGR_LEVELS>, SRSRAN_NOF_SF_X_FRAME>;
using dl_sched_t = sched_nr_interface::dl_sched_t;
using ul_sched_t = sched_nr_interface::ul_sched_t;
using dl_sched_res_t = sched_nr_interface::dl_sched_res_t;
using ul_sched_t = sched_nr_interface::ul_res_t;
using dl_sched_res_t = sched_nr_interface::dl_res_t;
/// Generate list of CCE locations for UE based on coreset and search space configurations
void get_dci_locs(const srsran_coreset_t& coreset,

View File

@ -55,7 +55,7 @@ public:
int ack_info(uint32_t tb_idx, bool ack);
void new_slot(slot_point slot_rx);
bool clear_if_maxretx(slot_point slot_rx);
void reset();
bool new_tx(slot_point slot_tx, slot_point slot_ack, const prb_grant& grant, uint32_t mcs, uint32_t max_retx);
bool new_retx(slot_point slot_tx, slot_point slot_ack, const prb_grant& grant);
@ -121,7 +121,7 @@ private:
class harq_entity
{
public:
explicit harq_entity(uint32_t nprb, uint32_t nof_harq_procs = SCHED_NR_MAX_HARQ);
explicit harq_entity(uint16_t rnti, uint32_t nprb, uint32_t nof_harq_procs, srslog::basic_logger& logger);
void new_slot(slot_point slot_rx_);
int dl_ack_info(uint32_t pid, uint32_t tb_idx, bool ack) { return dl_harqs[pid].ack_info(tb_idx, ack); }
@ -163,6 +163,9 @@ private:
return (it == ul_harqs.end()) ? nullptr : &(*it);
}
uint16_t rnti;
srslog::basic_logger& logger;
slot_point slot_rx;
std::vector<dl_harq_proc> dl_harqs;
std::vector<ul_harq_proc> ul_harqs;

View File

@ -26,6 +26,7 @@
#include "srsran/adt/bounded_vector.h"
#include "srsran/adt/optional.h"
#include "srsran/adt/span.h"
#include "srsran/common/common_nr.h"
#include "srsran/common/phy_cfg_nr.h"
#include "srsran/common/slot_point.h"
#include "srsran/interfaces/gnb_interfaces.h"
@ -39,7 +40,7 @@ const static size_t SCHED_NR_MAX_NOF_RBGS = 18;
const static size_t SCHED_NR_MAX_TB = 1;
const static size_t SCHED_NR_MAX_HARQ = SRSRAN_DEFAULT_HARQ_PROC_DL_NR;
const static size_t SCHED_NR_MAX_BWP_PER_CELL = 2;
const static size_t SCHED_NR_MAX_LCID = 32;
const static size_t SCHED_NR_MAX_LCID = srsran::MAX_NR_NOF_BEARERS;
const static size_t SCHED_NR_MAX_LC_GROUP = 7;
struct sched_nr_ue_cc_cfg_t {
@ -122,12 +123,13 @@ public:
///// Sched Result /////
using dl_sched_t = mac_interface_phy_nr::dl_sched_t;
using ul_sched_t = mac_interface_phy_nr::ul_sched_t;
using ul_res_t = mac_interface_phy_nr::ul_sched_t;
using sched_rar_list_t = srsran::bounded_vector<rar_t, MAX_GRANTS>;
struct dl_sched_res_t {
sched_rar_list_t rar;
dl_sched_t dl_sched;
struct dl_res_t {
sched_rar_list_t& rar;
dl_sched_t& dl_sched;
dl_res_t(sched_rar_list_t& rar_, dl_sched_t& dl_sched_) : rar(rar_), dl_sched(dl_sched_) {}
};
virtual ~sched_nr_interface() = default;
@ -135,8 +137,8 @@ public:
virtual void ue_cfg(uint16_t rnti, const ue_cfg_t& ue_cfg) = 0;
virtual void ue_rem(uint16_t rnti) = 0;
virtual bool ue_exists(uint16_t rnti) = 0;
virtual int run_slot(slot_point slot_rx, uint32_t cc, dl_sched_res_t& result) = 0;
virtual int get_ul_sched(slot_point slot_rx, uint32_t cc, ul_sched_t& result) = 0;
virtual int run_slot(slot_point slot_rx, uint32_t cc, dl_res_t& result) = 0;
virtual int get_ul_sched(slot_point slot_rx, uint32_t cc, ul_res_t& result) = 0;
virtual void dl_ack_info(uint16_t rnti, uint32_t cc, uint32_t pid, uint32_t tb_idx, bool ack) = 0;
virtual void ul_crc_info(uint16_t rnti, uint32_t cc, uint32_t pid, bool crc) = 0;
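dl_res_t no longer owns the RAR and DL scheduling containers; it wraps references supplied by the caller, which is why a constructor was added. A minimal sketch of the new calling convention; the scheduler instance, slot and carrier index are illustrative:

// caller-owned containers that outlive the call
sched_nr_interface::sched_rar_list_t rar;
sched_nr_interface::dl_sched_t       dl_sched = {};

sched_nr_interface::dl_res_t dl_res(rar, dl_sched); // dl_res_t now only holds references
sched.run_slot(pdsch_slot, cc, dl_res);             // scheduler writes into rar/dl_sched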

View File

@ -81,9 +81,9 @@ private:
pdcch_grant_type_t alloc_type;
slot_ue* ue;
};
srsran::bounded_vector<alloc_record, MAX_GRANTS> dci_list;
pdcch_dl_list_t& pdcch_dl_list;
pdcch_ul_list_t& pdcch_ul_list;
srsran::bounded_vector<alloc_record, 2 * MAX_GRANTS> dci_list;
pdcch_dl_list_t& pdcch_dl_list;
pdcch_ul_list_t& pdcch_ul_list;
// DFS decision tree of PDCCH grants
struct tree_node {
@ -94,7 +94,7 @@ private:
/// Accumulation of all PDCCH masks for the current solution (DFS path)
coreset_bitmap total_mask, current_mask;
};
using alloc_tree_dfs_t = srsran::bounded_vector<tree_node, MAX_GRANTS>;
using alloc_tree_dfs_t = std::vector<tree_node>;
alloc_tree_dfs_t dfs_tree, saved_dfs_tree;
srsran::span<const uint32_t> get_cce_loc_table(const alloc_record& record) const;

View File

@ -72,7 +72,11 @@ class ue_carrier
{
public:
ue_carrier(uint16_t rnti, const ue_cfg_t& cfg, const cell_params_t& cell_params_);
void set_cfg(const ue_cfg_t& ue_cfg);
void set_cfg(const ue_cfg_t& ue_cfg);
/// Called after CC Feedback has been processed
void new_slot(slot_point slot_tx);
slot_ue try_reserve(slot_point pdcch_slot, uint32_t dl_harq_bytes, uint32_t ul_harq_bytes);
const uint16_t rnti;

View File

@ -62,7 +62,7 @@ public:
uint32_t get_ul_buffer(uint16_t rnti) final;
uint32_t get_dl_buffer(uint16_t rnti) final;
int dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t retx_queue) final;
int dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t prio_tx_queue) final;
int dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code, uint32_t nof_cmds = 1) final;
int dl_ack_info(uint32_t tti, uint16_t rnti, uint32_t enb_cc_idx, uint32_t tb_idx, bool ack) final;

View File

@ -55,30 +55,30 @@ public:
} cell_cfg_sib_t;
struct sched_args_t {
std::string sched_policy = "time_pf";
std::string sched_policy_args = "2";
int pdsch_mcs = -1;
int pdsch_max_mcs = 28;
int pusch_mcs = -1;
int pusch_max_mcs = 28;
uint32_t min_nof_ctrl_symbols = 1;
uint32_t max_nof_ctrl_symbols = 3;
int min_aggr_level = 0;
int max_aggr_level = 3;
bool adaptive_aggr_level = false;
bool pucch_mux_enabled = false;
int pucch_harq_max_rb = 0;
float target_bler = 0.05;
float max_delta_dl_cqi = 5;
float max_delta_ul_snr = 5;
std::string sched_policy = "time_pf";
std::string sched_policy_args = "2";
int pdsch_mcs = -1;
int pdsch_max_mcs = 28;
int pusch_mcs = -1;
int pusch_max_mcs = 28;
uint32_t min_nof_ctrl_symbols = 1;
uint32_t max_nof_ctrl_symbols = 3;
int min_aggr_level = 0;
int max_aggr_level = 3;
bool adaptive_aggr_level = false;
bool pucch_mux_enabled = false;
int pucch_harq_max_rb = 0;
float target_bler = 0.05;
float max_delta_dl_cqi = 5;
float max_delta_ul_snr = 5;
float adaptive_dl_mcs_step_size = 0.001;
float adaptive_ul_mcs_step_size = 0.001;
uint32_t min_tpc_tti_interval = 1;
float ul_snr_avg_alpha = 0.05;
int init_ul_snr_value = 5;
int init_dl_cqi = 5;
float max_sib_coderate = 0.8;
int pdcch_cqi_offset = 0;
uint32_t min_tpc_tti_interval = 1;
float ul_snr_avg_alpha = 0.05;
int init_ul_snr_value = 5;
int init_dl_cqi = 5;
float max_sib_coderate = 0.8;
int pdcch_cqi_offset = 0;
};
struct cell_cfg_t {
@ -276,10 +276,10 @@ public:
* @param rnti user rnti
* @param lc_id logical channel id for which the buffer update is concerned
* @param tx_queue number of pending bytes for new DL RLC transmissions
* @param retx_queue number of pending bytes concerning RLC retransmissions
* @param prio_tx_queue number of pending bytes concerning RLC retransmissions and status PDUs
* @return error code
*/
virtual int dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t retx_queue) = 0;
virtual int dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t prio_tx_queue) = 0;
/**
* Enqueue MAC CEs for DL transmission

View File

@ -35,7 +35,7 @@ class lch_ue_manager : private ue_buffer_manager<false>
using base_type = ue_buffer_manager<false>;
public:
lch_ue_manager() : ue_buffer_manager(srslog::fetch_basic_logger("MAC")) {}
explicit lch_ue_manager(uint16_t rnti) : ue_buffer_manager(rnti, srslog::fetch_basic_logger("MAC")) {}
void set_cfg(const sched_interface::ue_cfg_t& cfg_);
void new_tti();
@ -44,7 +44,7 @@ public:
using base_type::dl_buffer_state;
using base_type::get_bsr;
using base_type::get_bsr_state;
using base_type::get_dl_retx;
using base_type::get_dl_prio_tx;
using base_type::get_dl_tx;
using base_type::get_dl_tx_total;
using base_type::is_bearer_active;
@ -60,7 +60,7 @@ public:
bool has_pending_dl_txs() const;
int get_dl_tx_total_with_overhead(uint32_t lcid) const;
int get_dl_tx_with_overhead(uint32_t lcid) const;
int get_dl_retx_with_overhead(uint32_t lcid) const;
int get_dl_prio_tx_with_overhead(uint32_t lcid) const;
int get_bsr_with_overhead(uint32_t lcid) const;
int get_max_prio_lcid() const;
@ -70,7 +70,7 @@ public:
srsran::deque<ce_cmd> pending_ces;
private:
int alloc_retx_bytes(uint8_t lcid, int rem_bytes);
int alloc_prio_tx_bytes(uint8_t lcid, int rem_bytes);
int alloc_tx_bytes(uint8_t lcid, int rem_bytes);
size_t prio_idx = 0;

View File

@ -113,7 +113,15 @@ public:
class ue
{
public:
ue(rrc_nr* parent_, uint16_t rnti_, const sched_nr_ue_cfg_t& uecfg);
enum activity_timeout_type_t {
MSG3_RX_TIMEOUT = 0, ///< Msg3 has its own timeout to quickly remove fake UEs from random PRACHs
UE_INACTIVITY_TIMEOUT, ///< (currently unused) UE inactivity timeout (usually bigger than reestablishment timeout)
MSG5_RX_TIMEOUT, ///< (currently unused) for receiving RRCConnectionSetupComplete/RRCReestablishmentComplete
nulltype
};
/// @param [in] start_msg3_timer: indicates whether the UE is created as part of a RACH process
ue(rrc_nr* parent_, uint16_t rnti_, const sched_nr_ue_cfg_t& uecfg, bool start_msg3_timer = true);
void send_connection_setup();
void send_dl_ccch(asn1::rrc_nr::dl_ccch_msg_s* dl_dcch_msg);
@ -128,14 +136,23 @@ public:
bool is_endc() { return endc; }
uint16_t get_eutra_rnti() { return eutra_rnti; }
void get_metrics(rrc_ue_metrics_t& ue_metrics) { ue_metrics = {}; /*TODO fill RRC metrics*/ };
// setters
int pack_rrc_reconfiguration();
void deactivate_bearers();
/// methods to handle activity timer
std::string to_string(const activity_timeout_type_t& type);
void set_activity_timeout(activity_timeout_type_t type);
void set_activity(bool enabled = true);
void activity_timer_expired(const activity_timeout_type_t type);
private:
rrc_nr* parent = nullptr;
uint16_t rnti = SRSRAN_INVALID_RNTI;
/// for basic DL/UL activity timeout
srsran::unique_timer activity_timer;
int pack_rrc_reconfiguration(asn1::dyn_octstring& packed_rrc_reconfig);
int pack_secondary_cell_group_cfg(asn1::dyn_octstring& packed_secondary_cell_config);
@ -231,8 +248,10 @@ private:
uint32_t nof_si_messages = 0;
// Private Methods
/// Private Methods
void handle_pdu(uint16_t rnti, uint32_t lcid, srsran::unique_byte_buffer_t pdu);
/// This gets called by rrc_nr::sgnb_addition_request and WILL NOT TRIGGER the RX MSG3 activity timer
int add_user(uint16_t rnti, const sched_nr_ue_cfg_t& uecfg, bool start_msg3_timer);
// logging
typedef enum { Rx = 0, Tx } direction_t;

View File

@ -20,6 +20,7 @@
*/
#include <map>
#include <unordered_map>
#include <string.h>
#include "srsenb/hdr/common/common_enb.h"
@ -131,7 +132,7 @@ private:
pdcp_interface_gtpu* pdcp = nullptr;
srslog::basic_logger& logger;
rnti_map_t<ue_bearer_tunnel_list> ue_teidin_db;
std::unordered_map<uint16_t, ue_bearer_tunnel_list> ue_teidin_db;
tunnel_list_t tunnels;
};

View File

@ -86,6 +86,7 @@ private:
void discard_sdu(uint32_t lcid, uint32_t discard_sn);
bool rb_is_um(uint32_t lcid);
bool sdu_queue_is_full(uint32_t lcid);
bool is_suspended(uint32_t lcid);
};
class user_interface_gtpu : public srsue::gw_interface_pdcp

View File

@ -62,6 +62,7 @@ public:
bool has_bearer(uint16_t rnti, uint32_t lcid);
bool suspend_bearer(uint16_t rnti, uint32_t lcid);
bool resume_bearer(uint16_t rnti, uint32_t lcid);
bool is_suspended(uint16_t rnti, uint32_t lcid);
void reestablish(uint16_t rnti) final;
// rlc_interface_pdcp

View File

@ -84,6 +84,7 @@ cell_list =
pci = 2;
//direct_forward_path_available = false;
//allowed_meas_bw = 6;
//cell_individual_offset = 0;
}
);

View File

@ -776,13 +776,14 @@ static int parse_meas_cell_list(rrc_meas_cfg_t* meas_cfg, Setting& root)
{
meas_cfg->meas_cells.resize(root.getLength());
for (uint32_t i = 0; i < meas_cfg->meas_cells.size(); ++i) {
auto& cell = meas_cfg->meas_cells[i];
cell.earfcn = root[i]["dl_earfcn"];
cell.pci = (unsigned int)root[i]["pci"] % SRSRAN_NUM_PCI;
cell.eci = (unsigned int)root[i]["eci"];
cell.q_offset = 0; // LIBLTE_RRC_Q_OFFSET_RANGE_DB_0; // TODO
auto& cell = meas_cfg->meas_cells[i];
cell.earfcn = root[i]["dl_earfcn"];
cell.pci = (unsigned int)root[i]["pci"] % SRSRAN_NUM_PCI;
cell.eci = (unsigned int)root[i]["eci"];
parse_default_field(cell.direct_forward_path_available, root[i], "direct_forward_path_available", false);
parse_default_field(cell.allowed_meas_bw, root[i], "allowed_meas_bw", 6u);
asn1_parsers::default_number_to_enum(
cell.cell_individual_offset, root[i], "cell_individual_offset", asn1::rrc::q_offset_range_opts::db0);
srsran_assert(srsran::is_lte_cell_nof_prb(cell.allowed_meas_bw), "Invalid measurement Bandwidth");
}
return 0;

View File

@ -504,6 +504,13 @@ static void execute_cmd(metrics_stdout* metrics, srsenb::enb_command_interface*
// Set cell gain
control->cmd_cell_gain(cell_id, gain_db);
} else if (cmd[0] == "flush") {
if (cmd.size() != 1) {
cout << "Usage: " << cmd[0] << endl;
return;
}
srslog::flush();
cout << "Flushed log file buffers" << endl;
} else {
cout << "Available commands: " << endl;
cout << " t: starts console trace" << endl;
@ -511,6 +518,7 @@ static void execute_cmd(metrics_stdout* metrics, srsenb::enb_command_interface*
cout << " cell_gain: set relative cell gain" << endl;
cout << " sleep: pauses the commmand line operation for a given time in seconds" << endl;
cout << " p: starts MAC padding" << endl;
cout << " flush: flushes the buffers for the log file" << endl;
cout << endl;
}
}

View File

@ -262,7 +262,7 @@ void cc_worker::work_dl(const srsran_dl_sf_cfg_t& dl_sf_cfg,
}
}
void cc_worker::decode_pusch_rnti(stack_interface_phy_lte::ul_sched_grant_t& ul_grant,
bool cc_worker::decode_pusch_rnti(stack_interface_phy_lte::ul_sched_grant_t& ul_grant,
srsran_ul_cfg_t& ul_cfg,
srsran_pusch_res_t& pusch_res)
{
@ -270,19 +270,19 @@ void cc_worker::decode_pusch_rnti(stack_interface_phy_lte::ul_sched_grant_t& ul_
// Invalid RNTI
if (rnti == SRSRAN_INVALID_RNTI) {
return;
return false;
}
// RNTI does not exist
if (ue_db.count(rnti) == 0) {
return;
return false;
}
// Get UE configuration
if (phy->ue_db.get_ul_config(rnti, cc_idx, ul_cfg) < SRSRAN_SUCCESS) {
// It could happen that the UL configuration is missing due to intra-enb HO which is not an error
Info("Failed retrieving UL configuration for cc=%d rnti=0x%x", cc_idx, rnti);
return;
return false;
}
// Fill UCI configuration
@ -293,7 +293,7 @@ void cc_worker::decode_pusch_rnti(stack_interface_phy_lte::ul_sched_grant_t& ul_
srsran_pusch_grant_t& grant = ul_cfg.pusch.grant;
if (srsran_ra_ul_dci_to_grant(&enb_ul.cell, &ul_sf, &ul_cfg.hopping, &ul_grant.dci, &grant)) {
Error("Computing PUSCH dci for RNTI %x", rnti);
return;
return false;
}
// Handle Format0 adaptive retx
@ -302,7 +302,7 @@ void cc_worker::decode_pusch_rnti(stack_interface_phy_lte::ul_sched_grant_t& ul_
int rv_idx = grant.tb.rv;
if (phy->ue_db.get_last_ul_tb(rnti, cc_idx, ul_grant.pid, grant.tb) < SRSRAN_SUCCESS) {
Error("Error retrieving last UL TB for RNTI %x, CC %d, PID %d", rnti, cc_idx, ul_grant.pid);
return;
return false;
}
grant.tb.rv = rv_idx;
Info("Adaptive retx: rnti=0x%x, pid=%d, rv_idx=%d, mcs=%d, old_tbs=%d",
@ -323,7 +323,7 @@ void cc_worker::decode_pusch_rnti(stack_interface_phy_lte::ul_sched_grant_t& ul_
if (pusch_res.data) {
if (srsran_enb_ul_get_pusch(&enb_ul, &ul_sf, &ul_cfg.pusch, &pusch_res)) {
Error("Decoding PUSCH for RNTI %x", rnti);
return;
return false;
}
}
// Save PHICH scheduling for this user. Each user can have just 1 PUSCH dci per TTI
@ -353,6 +353,7 @@ void cc_worker::decode_pusch_rnti(stack_interface_phy_lte::ul_sched_grant_t& ul_
// Save metrics stats
ue_db[rnti]->metrics_ul(ul_grant.dci.tb.mcs_idx, 0, enb_ul.chest_res.snr_db, pusch_res.avg_iterations_block);
}
return true;
}
void cc_worker::decode_pusch(stack_interface_phy_lte::ul_sched_grant_t* grants, uint32_t nof_pusch)
@ -367,7 +368,9 @@ void cc_worker::decode_pusch(stack_interface_phy_lte::ul_sched_grant_t* grants,
srsran_ul_cfg_t ul_cfg = {};
// Decodes PUSCH for the given grant
decode_pusch_rnti(ul_grant, ul_cfg, pusch_res);
if (!decode_pusch_rnti(ul_grant, ul_cfg, pusch_res)) {
return;
}
// Notify MAC new received data and HARQ Indication value
if (ul_grant.data != nullptr) {

View File

@ -157,6 +157,11 @@ bool slot_worker::work_ul()
return false;
}
if (ul_sched.pucch.empty() && ul_sched.pusch.empty()) {
// early exit if nothing has been scheduled
return true;
}
// Demodulate
if (srsran_gnb_ul_fft(&gnb_ul) < SRSRAN_SUCCESS) {
logger.error("Error in demodulation");

View File

@ -98,27 +98,6 @@ slot_worker* worker_pool::wait_worker(uint32_t tti)
{
slot_worker* w = (slot_worker*)pool.wait_worker(tti);
// Only if a worker was available
if (w != nullptr) {
srsran_carrier_nr_t carrier_;
srsran_pdcch_cfg_nr_t pdcch_cfg_;
srsran_ssb_cfg_t ssb_cfg_;
// Copy configuration
{
std::unique_lock<std::mutex> lock(common_cfg_mutex);
carrier_ = carrier;
pdcch_cfg_ = pdcch_cfg;
ssb_cfg_ = ssb_cfg;
}
// Set worker configuration
if (not w->set_common_cfg(carrier_, pdcch_cfg_, ssb_cfg_)) {
logger.error("Error setting common config");
return nullptr;
}
}
// Save current TTI
current_tti = tti;
@ -162,15 +141,28 @@ int worker_pool::set_common_cfg(const phy_interface_rrc_nr::common_cfg_t& common
prach.init(0, cell, prach_cfg, &prach_stack_adaptor, logger, 0, nof_prach_workers);
prach.set_max_prach_offset_us(1000);
// Save current configuration
{
std::unique_lock<std::mutex> lock(common_cfg_mutex);
carrier = common_cfg.carrier;
pdcch_cfg = common_cfg.pdcch;
ssb_cfg = common_cfg.ssb;
ssb_cfg.srate_hz = srate_hz;
ssb_cfg.scaling =
srsran_convert_dB_to_amplitude(srsran_gnb_dl_get_maximum_signal_power_dBfs(common_cfg.carrier.nof_prb));
// Setup SSB sampling rate and scaling
srsran_ssb_cfg_t ssb_cfg = common_cfg.ssb;
ssb_cfg.srate_hz = srate_hz;
ssb_cfg.scaling =
srsran_convert_dB_to_amplitude(srsran_gnb_dl_get_maximum_signal_power_dBfs(common_cfg.carrier.nof_prb));
// For each worker set configuration
for (uint32_t i = 0; i < pool.get_nof_workers(); i++) {
// Reserve worker from pool
slot_worker* w = (slot_worker*)pool.wait_worker_id(i);
if (w == nullptr) {
// Skip worker if invalid pointer
continue;
}
// Setup worker common configuration
if (not w->set_common_cfg(common_cfg.carrier, common_cfg.pdcch, ssb_cfg)) {
return SRSRAN_ERROR;
}
// Release worker
w->release();
}
return SRSRAN_SUCCESS;

View File

@ -94,7 +94,7 @@ void phy_common::clear_grants(uint16_t rnti)
}
}
const stack_interface_phy_lte::ul_sched_list_t& phy_common::get_ul_grants(uint32_t tti)
const stack_interface_phy_lte::ul_sched_list_t phy_common::get_ul_grants(uint32_t tti)
{
std::lock_guard<std::mutex> lock(grant_mutex);
return ul_grants[tti];

View File

@ -793,6 +793,7 @@ int phy_ue_db::set_ul_grant_available(uint32_t tti, const stack_interface_phy_lt
// Check that eNb Cell/Carrier is active for the given RNTI
if (_assert_active_enb_cc(rnti, enb_cc_idx) != SRSRAN_SUCCESS) {
ret = SRSRAN_ERROR;
srslog::fetch_basic_logger("PHY").error("Error setting grant for rnti=0x%x, cc=%d\n", rnti, enb_cc_idx);
continue;
}
// Rise Grant available flag

View File

@ -20,6 +20,7 @@
*/
#include "srsenb/hdr/stack/mac/common/ue_buffer_manager.h"
#include "srsran/adt/bounded_vector.h"
#include "srsran/common/string_helpers.h"
#include "srsran/srslog/bundled/fmt/format.h"
#include "srsran/srslog/bundled/fmt/ranges.h"
@ -27,21 +28,71 @@
namespace srsenb {
template <bool isNR>
ue_buffer_manager<isNR>::ue_buffer_manager(srslog::basic_logger& logger_) : logger(logger_)
ue_buffer_manager<isNR>::ue_buffer_manager(uint16_t rnti_, srslog::basic_logger& logger_) : logger(logger_), rnti(rnti_)
{
std::fill(lcg_bsr.begin(), lcg_bsr.end(), 0);
}
template <bool isNR>
void ue_buffer_manager<isNR>::config_lcids(srsran::const_span<mac_lc_ch_cfg_t> bearer_cfg_list)
{
bool log_enabled = logger.info.enabled();
srsran::bounded_vector<uint32_t, MAX_NOF_LCIDS> changed_list;
for (uint32_t lcid = 0; is_lcid_valid(lcid); ++lcid) {
if (config_lcid_internal(lcid, bearer_cfg_list[lcid]) and log_enabled) {
// add to the changed_list the lcids that have been updated with new parameters
changed_list.push_back(lcid);
}
}
// Log configurations of the LCIDs for which there were param updates
if (not changed_list.empty()) {
fmt::memory_buffer fmtbuf;
for (uint32_t i = 0; i < changed_list.size(); ++i) {
uint32_t lcid = changed_list[i];
fmt::format_to(fmtbuf,
"{}{{lcid={}, mode={}, prio={}, lcg={}}}",
i > 0 ? ", " : "",
lcid,
to_string(channels[lcid].cfg.direction),
channels[lcid].cfg.priority,
channels[lcid].cfg.group);
}
logger.info("SCHED: rnti=0x%x, new lcid configuration: [%s]", rnti, srsran::to_c_str(fmtbuf));
}
}
template <bool isNR>
void ue_buffer_manager<isNR>::config_lcid(uint32_t lcid, const mac_lc_ch_cfg_t& bearer_cfg)
{
bool cfg_changed = config_lcid_internal(lcid, bearer_cfg);
if (cfg_changed) {
logger.info("SCHED: rnti=0x%x, lcid=%d configured: mode=%s, prio=%d, lcg=%d",
rnti,
lcid,
to_string(channels[lcid].cfg.direction),
channels[lcid].cfg.priority,
channels[lcid].cfg.group);
}
}
/**
* @brief configure MAC logical channel. The function checks if the configuration is valid
* and whether there was any change compared to previous value
 * @return true if the lcid was updated with new parameters. False in case of error or no update.
*/
template <bool isNR>
bool ue_buffer_manager<isNR>::config_lcid_internal(uint32_t lcid, const mac_lc_ch_cfg_t& bearer_cfg)
{
if (not is_lcid_valid(lcid)) {
logger.warning("Configuring bearer with invalid logical channel id=%d", lcid);
return;
logger.warning("SCHED: Configuring rnti=0x%x bearer with invalid lcid=%d", rnti, lcid);
return false;
}
if (not is_lcg_valid(bearer_cfg.group)) {
logger.warning("Configuring bearer with invalid logical channel group id=%d", bearer_cfg.group);
return;
logger.warning(
"SCHED: Configuring rnti=0x%x bearer with invalid logical channel group id=%d", rnti, bearer_cfg.group);
return false;
}
// update bearer config
@ -54,12 +105,9 @@ void ue_buffer_manager<isNR>::config_lcid(uint32_t lcid, const mac_lc_ch_cfg_t&
channels[lcid].bucket_size = channels[lcid].cfg.bsd * channels[lcid].cfg.pbr;
channels[lcid].Bj = 0;
}
logger.info("SCHED: bearer configured: lcid=%d, mode=%s, prio=%d, lcg=%d",
lcid,
to_string(channels[lcid].cfg.direction),
channels[lcid].cfg.priority,
channels[lcid].cfg.group);
return true;
}
return false;
}
template <bool isNR>
@ -108,7 +156,7 @@ template <bool isNR>
void ue_buffer_manager<isNR>::ul_bsr(uint32_t lcg_id, uint32_t val)
{
if (not is_lcg_valid(lcg_id)) {
logger.warning("The provided logical channel group id=%d is not valid", lcg_id);
logger.warning("SCHED: The provided lcg_id=%d for rnti=0x%x is not valid", lcg_id, rnti);
return;
}
lcg_bsr[lcg_id] = val;
@ -116,25 +164,26 @@ void ue_buffer_manager<isNR>::ul_bsr(uint32_t lcg_id, uint32_t val)
if (logger.debug.enabled()) {
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer, "{}", lcg_bsr);
logger.debug("SCHED: lcg_id=%d, bsr=%d. Current state=%s", lcg_id, val, srsran::to_c_str(str_buffer));
logger.debug(
"SCHED: rnti=0x%x, lcg_id=%d, bsr=%d. Current state=%s", rnti, lcg_id, val, srsran::to_c_str(str_buffer));
}
}
template <bool isNR>
void ue_buffer_manager<isNR>::dl_buffer_state(uint8_t lcid, uint32_t tx_queue, uint32_t retx_queue)
void ue_buffer_manager<isNR>::dl_buffer_state(uint8_t lcid, uint32_t tx_queue, uint32_t prio_tx_queue)
{
if (not is_lcid_valid(lcid)) {
logger.warning("The provided lcid=%d is not valid", lcid);
return;
}
if (lcid <= MAX_SRB_LC_ID and
(channels[lcid].buf_tx != (int)tx_queue or channels[lcid].buf_retx != (int)retx_queue)) {
logger.info("SCHED: DL lcid=%d buffer_state=%d,%d", lcid, tx_queue, retx_queue);
(channels[lcid].buf_tx != (int)tx_queue or channels[lcid].buf_prio_tx != (int)prio_tx_queue)) {
logger.info("SCHED: rnti=0x%x DL lcid=%d buffer_state=%d,%d", rnti, lcid, tx_queue, prio_tx_queue);
} else {
logger.debug("SCHED: DL lcid=%d buffer_state=%d,%d", lcid, tx_queue, retx_queue);
logger.debug("SCHED: rnti=0x%x DL lcid=%d buffer_state=%d,%d", rnti, lcid, tx_queue, prio_tx_queue);
}
channels[lcid].buf_retx = retx_queue;
channels[lcid].buf_tx = tx_queue;
channels[lcid].buf_prio_tx = prio_tx_queue;
channels[lcid].buf_tx = tx_queue;
}
// Explicit instantiation

View File

@ -225,10 +225,7 @@ int mac::ue_rem(uint16_t rnti)
int mac::ue_set_crnti(uint16_t temp_crnti, uint16_t crnti, const sched_interface::ue_cfg_t& cfg)
{
srsran::rwlock_read_guard lock(rwlock);
if (temp_crnti != crnti) {
// if C-RNTI is changed, it corresponds to older user. Handover scenario.
ue_db[crnti]->reset();
} else {
if (temp_crnti == crnti) {
// Schedule ConRes Msg4
scheduler.dl_mac_buffer_state(crnti, (uint32_t)srsran::dl_sch_lcid::CON_RES_ID);
}

View File

@ -176,9 +176,9 @@ void mac_nr::rach_detected(const rach_info_t& rach_info)
uecfg.carriers[0].cc = 0;
uecfg.ue_bearers[0].direction = mac_lc_ch_cfg_t::BOTH;
srsran::phy_cfg_nr_default_t::reference_cfg_t ref_args{};
ref_args.duplex = cell_config[0].duplex.mode == SRSRAN_DUPLEX_MODE_TDD
? srsran::phy_cfg_nr_default_t::reference_cfg_t::R_DUPLEX_TDD_CUSTOM_6_4
: srsran::phy_cfg_nr_default_t::reference_cfg_t::R_DUPLEX_FDD;
ref_args.duplex = cell_config[0].duplex.mode == SRSRAN_DUPLEX_MODE_TDD
? srsran::phy_cfg_nr_default_t::reference_cfg_t::R_DUPLEX_TDD_CUSTOM_6_4
: srsran::phy_cfg_nr_default_t::reference_cfg_t::R_DUPLEX_FDD;
uecfg.phy_cfg = srsran::phy_cfg_nr_default_t{ref_args};
uecfg.phy_cfg.csi = {}; // disable CSI until RA is complete
@ -304,16 +304,18 @@ int mac_nr::get_dl_sched(const srsran_slot_cfg_t& slot_cfg, dl_sched_t& dl_sched
{
logger.set_context(slot_cfg.idx - TX_ENB_DELAY);
slot_point pdsch_slot = srsran::slot_point{NUMEROLOGY_IDX, slot_cfg.idx};
sched_nr_interface::dl_sched_res_t dl_res;
slot_point pdsch_slot = srsran::slot_point{NUMEROLOGY_IDX, slot_cfg.idx};
// Run Scheduler
sched_nr_interface::sched_rar_list_t rar_list;
sched_nr_interface::dl_res_t dl_res(rar_list, dl_sched);
int ret = sched.run_slot(pdsch_slot, 0, dl_res);
if (ret != SRSRAN_SUCCESS) {
return ret;
}
dl_sched = dl_res.dl_sched;
// Generate MAC DL PDUs
uint32_t rar_count = 0;
srsran::rwlock_read_guard rw_lock(rwmutex);
for (pdsch_t& pdsch : dl_sched.pdsch) {

View File

@ -140,10 +140,10 @@ void sched_nr::ue_cfg_impl(uint16_t rnti, const ue_cfg_t& uecfg)
}
/// Generate {pdcch_slot,cc} scheduling decision
int sched_nr::run_slot(slot_point slot_dl, uint32_t cc, dl_sched_res_t& result)
int sched_nr::run_slot(slot_point slot_dl, uint32_t cc, dl_res_t& result)
{
// Copy UL results to intermediate buffer
ul_sched_t& ul_res = pending_results->add_ul_result(slot_dl, cc);
ul_res_t& ul_res = pending_results->add_ul_result(slot_dl, cc);
// Generate {slot_idx,cc} result
sched_workers->run_slot(slot_dl, cc, result, ul_res);
@ -152,7 +152,7 @@ int sched_nr::run_slot(slot_point slot_dl, uint32_t cc, dl_sched_res_t& result)
}
/// Fetch {ul_slot,cc} UL scheduling decision
int sched_nr::get_ul_sched(slot_point slot_ul, uint32_t cc, ul_sched_t& result)
int sched_nr::get_ul_sched(slot_point slot_ul, uint32_t cc, ul_res_t& result)
{
if (not pending_results->has_ul_result(slot_ul, cc)) {
// sched result hasn't been generated

View File

@ -142,7 +142,7 @@ void ra_sched::run_slot(bwp_slot_allocator& slot_alloc)
if (pdcch_slot >= rar.rar_win.stop()) {
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer,
"SCHED: Could not transmit RAR within the window Window={}, PRACH={}, RAR={}",
"SCHED: Could not transmit RAR within the window={}, PRACH={}, RAR={}",
rar.rar_win,
rar.prach_slot,
pdcch_slot);

View File

@ -119,6 +119,13 @@ alloc_result bwp_slot_allocator::alloc_rar_and_msg3(uint16_t
if (ret != alloc_result::success) {
return ret;
}
if (bwp_pdcch_slot.rar.full()) {
return alloc_result::no_grant_space;
}
if (pending_rars.size() > MAX_GRANTS) {
logger.error("SCHED: Trying to allocate too many Msg3 grants in a single slot (%zd)", pending_rars.size());
return alloc_result::invalid_grant_params;
}
// Check DL RB collision
if (bwp_pdcch_slot.dl_prbs.collides(interv)) {
@ -218,6 +225,10 @@ alloc_result bwp_slot_allocator::alloc_pdsch(slot_ue& ue, const prb_grant& dl_gr
if (result != alloc_result::success) {
return result;
}
if (bwp_uci_slot.pending_acks.full()) {
logger.warning("SCHED: PDSCH allocation for rnti=0x%x failed due to lack of space for respective ACK", ue.rnti);
return alloc_result::no_grant_space;
}
if (bwp_pdsch_slot.dl_prbs.collides(dl_grant)) {
return alloc_result::sch_collision;
}

View File

@ -37,11 +37,13 @@ int harq_proc::ack_info(uint32_t tb_idx, bool ack)
return ack ? tb[tb_idx].tbs : 0;
}
void harq_proc::new_slot(slot_point slot_rx)
bool harq_proc::clear_if_maxretx(slot_point slot_rx)
{
if (has_pending_retx(slot_rx) and nof_retx() + 1 >= max_nof_retx()) {
if (has_pending_retx(slot_rx) and nof_retx() + 1 > max_nof_retx()) {
tb[0].active = false;
return true;
}
return false;
}
void harq_proc::reset()
@ -129,14 +131,14 @@ bool dl_harq_proc::new_tx(slot_point slot_tx,
uint32_t max_retx)
{
if (harq_proc::new_tx(slot_tx, slot_ack, grant, mcs, max_retx)) {
softbuffer->reset();
pdu->clear();
return true;
}
return false;
}
harq_entity::harq_entity(uint32_t nprb, uint32_t nof_harq_procs)
harq_entity::harq_entity(uint16_t rnti_, uint32_t nprb, uint32_t nof_harq_procs, srslog::basic_logger& logger_) :
rnti(rnti_), logger(logger_)
{
// Create HARQs
dl_harqs.reserve(nof_harq_procs);
@ -151,10 +153,20 @@ void harq_entity::new_slot(slot_point slot_rx_)
{
slot_rx = slot_rx_;
for (harq_proc& dl_h : dl_harqs) {
dl_h.new_slot(slot_rx);
if (dl_h.clear_if_maxretx(slot_rx)) {
logger.info("SCHED: discarding rnti=0x%x, DL TB pid=%d. Cause: Maximum number of retx exceeded (%d)",
rnti,
dl_h.pid,
dl_h.max_nof_retx());
}
}
for (harq_proc& ul_h : ul_harqs) {
ul_h.new_slot(slot_rx);
if (ul_h.clear_if_maxretx(slot_rx)) {
logger.info("SCHED: discarding rnti=0x%x, UL TB pid=%d. Cause: Maximum number of retx exceeded (%d)",
rnti,
ul_h.pid,
ul_h.max_nof_retx());
}
}
}
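clear_if_maxretx() now discards a transport block only when nof_retx() + 1 strictly exceeds max_nof_retx(), so a HARQ process configured with four retransmissions is still allowed its fourth one. A small self-contained check of that boundary, with plain counters standing in for the harq_proc state:

#include <cassert>
#include <cstdint>

// Hypothetical retransmission counters mirroring the condition in clear_if_maxretx().
static bool should_discard(uint32_t nof_retx, uint32_t max_nof_retx, bool pending_retx)
{
  // The previous condition used ">=", which discarded the TB one retransmission too early.
  return pending_retx && (nof_retx + 1 > max_nof_retx);
}

int main()
{
  assert(!should_discard(3, 4, true)); // 4th retx still allowed
  assert(should_discard(4, 4, true));  // a 5th retx would exceed the limit -> discard
  return 0;
}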

View File

@ -65,6 +65,7 @@ bool coreset_region::alloc_dci(pdcch_grant_type_t alloc_type,
srsran_assert((user == nullptr) xor
(alloc_type == pdcch_grant_type_t::dl_data or alloc_type == pdcch_grant_type_t::ul_data),
"UE should be only provided for DL or UL data allocations");
srsran_assert(not dci_list.full(), "SCHED: Unable to allocate DCI");
saved_dfs_tree.clear();
alloc_record record;
@ -89,13 +90,13 @@ bool coreset_region::alloc_dci(pdcch_grant_type_t alloc_type,
dci_list.push_back(record);
return true;
}
if (dfs_tree.empty()) {
if (saved_dfs_tree.empty()) {
saved_dfs_tree = dfs_tree;
}
} while (get_next_dfs());
// Revert steps to initial state, before dci record allocation was attempted
dfs_tree = saved_dfs_tree;
dfs_tree.swap(saved_dfs_tree);
if (record.alloc_type == pdcch_grant_type_t::ul_data) {
pdcch_ul_list.pop_back();
} else {

View File

@ -43,6 +43,10 @@ void sched_nzp_csi_rs(srsran::const_span<srsran_csi_rs_nzp_set_t> nzp_csi_rs_set
// Check if the resource is scheduled for this slot
if (srsran_csi_rs_send(&nzp_csi_resource.periodicity, &slot_cfg)) {
if (csi_rs_list.full()) {
srslog::fetch_basic_logger("MAC-NR").error("SCHED: Failed to allocate NZP-CSI RS");
return;
}
csi_rs_list.push_back(nzp_csi_resource);
}
}
@ -51,6 +55,10 @@ void sched_nzp_csi_rs(srsran::const_span<srsran_csi_rs_nzp_set_t> nzp_csi_rs_set
void sched_ssb_basic(const slot_point& sl_point, uint32_t ssb_periodicity, ssb_list& ssb_list)
{
if (ssb_list.full()) {
srslog::fetch_basic_logger("MAC-NR").error("SCHED: Failed to allocate SSB");
return;
}
// If the periodicity is 0, it means that the parameter was not passed by the upper layers.
// In that case, we use default value of 5ms (see Clause 4.1, TS 38.213)
if (ssb_periodicity == 0) {

View File

@ -35,7 +35,7 @@ ue_carrier::ue_carrier(uint16_t rnti_, const ue_cfg_t& uecfg_, const cell_params
cc(cell_params_.cc),
bwp_cfg(rnti_, cell_params_.bwps[0], uecfg_),
cell_params(cell_params_),
harq_ent(cell_params_.nof_prb())
harq_ent(rnti_, cell_params_.nof_prb(), SCHED_NR_MAX_HARQ, cell_params_.bwps[0].logger)
{}
void ue_carrier::set_cfg(const ue_cfg_t& ue_cfg)
@ -43,6 +43,11 @@ void ue_carrier::set_cfg(const ue_cfg_t& ue_cfg)
bwp_cfg = bwp_ue_cfg(rnti, cell_params.bwps[0], ue_cfg);
}
void ue_carrier::new_slot(slot_point slot_tx)
{
harq_ent.new_slot(slot_tx - TX_ENB_DELAY);
}
slot_ue ue_carrier::try_reserve(slot_point pdcch_slot, uint32_t dl_pending_bytes, uint32_t ul_pending_bytes)
{
slot_point slot_rx = pdcch_slot - TX_ENB_DELAY;
@ -87,7 +92,7 @@ slot_ue ue_carrier::try_reserve(slot_point pdcch_slot, uint32_t dl_pending_bytes
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ue::ue(uint16_t rnti_, const ue_cfg_t& cfg, const sched_params& sched_cfg_) :
rnti(rnti_), sched_cfg(sched_cfg_), buffers(srslog::fetch_basic_logger(sched_cfg_.sched_cfg.logger_name))
rnti(rnti_), sched_cfg(sched_cfg_), buffers(rnti_, srslog::fetch_basic_logger(sched_cfg_.sched_cfg.logger_name))
{
set_cfg(cfg);
}
@ -105,23 +110,13 @@ void ue::set_cfg(const ue_cfg_t& cfg)
}
}
for (uint32_t lcid = 0; lcid < cfg.ue_bearers.size(); ++lcid) {
buffers.config_lcid(lcid, cfg.ue_bearers[lcid]);
}
buffers.config_lcids(cfg.ue_bearers);
}
void ue::new_slot(slot_point pdcch_slot)
{
last_pdcch_slot = pdcch_slot;
for (auto& ue_cc_cfg : ue_cfg.carriers) {
auto& cc = carriers[ue_cc_cfg.cc];
if (cc != nullptr) {
// Update CC HARQ state
cc->harq_ent.new_slot(pdcch_slot - TX_ENB_DELAY);
}
}
// Compute pending DL/UL bytes for {rnti, pdcch_slot}
if (sched_cfg.sched_cfg.auto_refill_buffer) {
dl_pending_bytes = 1000000;
@ -153,10 +148,7 @@ void ue::new_slot(slot_point pdcch_slot)
slot_ue ue::try_reserve(slot_point pdcch_slot, uint32_t cc)
{
if (carriers[cc] == nullptr) {
return slot_ue();
}
srsran_assert(carriers[cc] != nullptr, "try_reserve() called for inexistent rnti=0x%x,cc=%d", rnti, cc);
return carriers[cc]->try_reserve(pdcch_slot, dl_pending_bytes, ul_pending_bytes);
}

View File

@ -89,6 +89,9 @@ void slot_cc_worker::run(slot_point pdcch_slot, ue_map_t& ue_db)
continue;
}
// Update UE CC state
u.carriers[cfg.cc]->new_slot(pdcch_slot);
// info for a given UE on a slot to be process
slot_ues.insert(rnti, u.try_reserve(pdcch_slot, cfg.cc));
if (slot_ues[rnti].empty()) {
@ -189,6 +192,10 @@ void slot_cc_worker::postprocess_decisions()
}
if (not has_pusch) {
// If any UCI information is triggered, schedule PUCCH
if (bwp_slot.pucch.full()) {
logger.warning("SCHED: Cannot fit pending UCI into PUCCH");
continue;
}
bwp_slot.pucch.emplace_back();
mac_interface_phy_nr::pucch_t& pucch = bwp_slot.pucch.back();
@ -383,7 +390,7 @@ void sched_worker_manager::get_metrics_nolocking(mac_metrics_t& metrics)
{
for (mac_ue_metrics_t& ue_metric : metrics.ues) {
if (ue_db.contains(ue_metric.rnti) and ue_db[ue_metric.rnti]->carriers[0] != nullptr) {
auto& ue_cc = *ue_db[ue_metric.rnti]->carriers[0];
auto& ue_cc = *ue_db[ue_metric.rnti]->carriers[0];
std::lock_guard<std::mutex> lock(ue_cc.metrics_mutex);
ue_metric.tx_brate = ue_cc.metrics.tx_brate;
ue_metric.tx_errors = ue_cc.metrics.tx_errors;

View File

@ -174,9 +174,9 @@ uint32_t sched::get_ul_buffer(uint16_t rnti)
return ret;
}
int sched::dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t retx_queue)
int sched::dl_rlc_buffer_state(uint16_t rnti, uint32_t lc_id, uint32_t tx_queue, uint32_t prio_tx_queue)
{
return ue_db_access_locked(rnti, [&](sched_ue& ue) { ue.dl_buffer_state(lc_id, tx_queue, retx_queue); });
return ue_db_access_locked(rnti, [&](sched_ue& ue) { ue.dl_buffer_state(lc_id, tx_queue, prio_tx_queue); });
}
int sched::dl_mac_buffer_state(uint16_t rnti, uint32_t ce_code, uint32_t nof_cmds)

View File

@ -42,7 +42,7 @@ namespace srsenb {
*******************************************************/
sched_ue::sched_ue(uint16_t rnti_, const std::vector<sched_cell_params_t>& cell_list_params_, const ue_cfg_t& cfg_) :
logger(srslog::fetch_basic_logger("MAC")), rnti(rnti_)
logger(srslog::fetch_basic_logger("MAC")), rnti(rnti_), lch_handler(rnti_)
{
cells.reserve(cell_list_params_.size());
for (auto& c : cell_list_params_) {

View File

@ -66,9 +66,7 @@ uint32_t get_ul_mac_sdu_size_with_overhead(uint32_t rlc_pdu_bytes)
void lch_ue_manager::set_cfg(const sched_interface::ue_cfg_t& cfg)
{
for (uint32_t lcid = 0; is_lcid_valid(lcid); lcid++) {
config_lcid(lcid, cfg.ue_bearers[lcid]);
}
config_lcids(cfg.ue_bearers);
}
void lch_ue_manager::new_tti()
@ -87,15 +85,18 @@ void lch_ue_manager::new_tti()
void lch_ue_manager::ul_buffer_add(uint8_t lcid, uint32_t bytes)
{
if (lcid >= sched_interface::MAX_LC) {
logger.warning("The provided lcid=%d is not valid", lcid);
logger.warning("SCHED: The provided lcid=%d for rnti=0x%x is not valid", lcid, rnti);
return;
}
lcg_bsr[channels[lcid].cfg.group] += bytes;
if (logger.debug.enabled()) {
fmt::memory_buffer str_buffer;
fmt::format_to(str_buffer, "{}", get_bsr_state());
logger.debug(
"SCHED: UL buffer update=%d, lcg_id=%d, bsr=%s", bytes, channels[lcid].cfg.group, srsran::to_c_str(str_buffer));
logger.debug("SCHED: rnti=0x%x UL buffer update=%d, lcg_id=%d, bsr=%s",
rnti,
bytes,
channels[lcid].cfg.group,
srsran::to_c_str(str_buffer));
}
}
@ -103,9 +104,9 @@ int lch_ue_manager::get_max_prio_lcid() const
{
int min_prio_val = std::numeric_limits<int>::max(), prio_lcid = -1;
// Prioritize retxs
// Prioritized Txs first (e.g. Retxs, status PDUs)
for (uint32_t lcid = 0; is_lcid_valid(lcid); ++lcid) {
if (get_dl_retx(lcid) > 0 and channels[lcid].cfg.priority < min_prio_val) {
if (get_dl_prio_tx(lcid) > 0 and channels[lcid].cfg.priority < min_prio_val) {
min_prio_val = channels[lcid].cfg.priority;
prio_lcid = lcid;
}
@ -156,10 +157,10 @@ int lch_ue_manager::alloc_rlc_pdu(sched_interface::dl_sched_pdu_t* rlc_pdu, int
return alloc_bytes;
}
// try first to allocate retxs
alloc_bytes = alloc_retx_bytes(lcid, rem_bytes);
// try first to allocate high priority txs (e.g. retxs, status pdus)
alloc_bytes = alloc_prio_tx_bytes(lcid, rem_bytes);
// if no retx alloc, try newtx
// if no prio tx alloc, try newtx
if (alloc_bytes == 0) {
alloc_bytes = alloc_tx_bytes(lcid, rem_bytes);
}
@ -177,15 +178,15 @@ int lch_ue_manager::alloc_rlc_pdu(sched_interface::dl_sched_pdu_t* rlc_pdu, int
return alloc_bytes;
}
int lch_ue_manager::alloc_retx_bytes(uint8_t lcid, int rem_bytes)
int lch_ue_manager::alloc_prio_tx_bytes(uint8_t lcid, int rem_bytes)
{
const int rlc_overhead = (lcid == 0) ? 0 : RLC_MAX_HEADER_SIZE_NO_LI;
if (rem_bytes <= rlc_overhead) {
return 0;
}
int rem_bytes_no_header = rem_bytes - rlc_overhead;
int alloc = std::min(rem_bytes_no_header, get_dl_retx(lcid));
channels[lcid].buf_retx -= alloc;
int alloc = std::min(rem_bytes_no_header, get_dl_prio_tx(lcid));
channels[lcid].buf_prio_tx -= alloc;
return alloc + (alloc > 0 ? rlc_overhead : 0);
}
@ -220,7 +221,7 @@ bool lch_ue_manager::has_pending_dl_txs() const
int lch_ue_manager::get_dl_tx_total_with_overhead(uint32_t lcid) const
{
return get_dl_retx_with_overhead(lcid) + get_dl_tx_with_overhead(lcid);
return get_dl_prio_tx_with_overhead(lcid) + get_dl_tx_with_overhead(lcid);
}
int lch_ue_manager::get_dl_tx_with_overhead(uint32_t lcid) const
@ -228,9 +229,9 @@ int lch_ue_manager::get_dl_tx_with_overhead(uint32_t lcid) const
return get_dl_mac_sdu_size_with_overhead(lcid, get_dl_tx(lcid));
}
int lch_ue_manager::get_dl_retx_with_overhead(uint32_t lcid) const
int lch_ue_manager::get_dl_prio_tx_with_overhead(uint32_t lcid) const
{
return get_dl_mac_sdu_size_with_overhead(lcid, get_dl_retx(lcid));
return get_dl_mac_sdu_size_with_overhead(lcid, get_dl_prio_tx(lcid));
}
int lch_ue_manager::get_bsr_with_overhead(uint32_t lcg) const

View File

@ -620,7 +620,7 @@ bool ngap::connect_amf()
logger.info("Connecting to AMF %s:%d", args.amf_addr.c_str(), int(AMF_PORT));
// Init SCTP socket and bind it
if (not sctp_init_client(&amf_socket, socket_type::seqpacket, args.ngc_bind_addr.c_str(), 0)) {
if (not sctp_init_socket(&amf_socket, socket_type::seqpacket, args.ngc_bind_addr.c_str(), 0)) {
return false;
}
logger.info("SCTP socket opened. fd=%d", amf_socket.fd());

View File

@ -166,11 +166,15 @@ rrc_nr_cfg_t rrc_nr::update_default_cfg(const rrc_nr_cfg_t& current)
return cfg_default;
}
// This function is called from PRACH worker (can wait)
int rrc_nr::add_user(uint16_t rnti, const sched_nr_ue_cfg_t& uecfg)
/* @brief PRIVATE function, gets called by sgnb_addition_request
*
* This function WILL NOT TRIGGER the RX MSG3 activity timer
*/
int rrc_nr::add_user(uint16_t rnti, const sched_nr_ue_cfg_t& uecfg, bool start_msg3_timer)
{
if (users.count(rnti) == 0) {
users.insert(std::make_pair(rnti, std::unique_ptr<ue>(new ue(this, rnti, uecfg))));
// If "start_msg3_timer" is set to true in the ue ctor, the MSG3 RX TIMEOUT is started at ue creation
users.insert(std::make_pair(rnti, std::unique_ptr<ue>(new ue(this, rnti, uecfg, start_msg3_timer))));
rlc->add_user(rnti);
pdcp->add_user(rnti);
logger.info("Added new user rnti=0x%x", rnti);
@ -181,6 +185,16 @@ int rrc_nr::add_user(uint16_t rnti, const sched_nr_ue_cfg_t& uecfg)
}
}
/* @brief PUBLIC function, gets called by mac_nr::rach_detected
*
* This function is called from PRACH worker (can wait) and WILL TRIGGER the RX MSG3 activity timer
*/
int rrc_nr::add_user(uint16_t rnti, const sched_nr_ue_cfg_t& uecfg)
{
// Set "triggered_by_rach" to true to start the MSG3 RX TIMEOUT
return add_user(rnti, uecfg, true);
}
void rrc_nr::rem_user(uint16_t rnti)
{
auto user_it = users.find(rnti);
@ -503,9 +517,9 @@ void rrc_nr::sgnb_addition_request(uint16_t eutra_rnti, const sgnb_addition_req_
uecfg.carriers[0].cc = 0;
uecfg.ue_bearers[0].direction = mac_lc_ch_cfg_t::BOTH;
srsran::phy_cfg_nr_default_t::reference_cfg_t ref_args{};
ref_args.duplex = cfg.cell_list[0].duplex_mode == SRSRAN_DUPLEX_MODE_TDD
? srsran::phy_cfg_nr_default_t::reference_cfg_t::R_DUPLEX_TDD_CUSTOM_6_4
: srsran::phy_cfg_nr_default_t::reference_cfg_t::R_DUPLEX_FDD;
ref_args.duplex = cfg.cell_list[0].duplex_mode == SRSRAN_DUPLEX_MODE_TDD
? srsran::phy_cfg_nr_default_t::reference_cfg_t::R_DUPLEX_TDD_CUSTOM_6_4
: srsran::phy_cfg_nr_default_t::reference_cfg_t::R_DUPLEX_FDD;
uecfg.phy_cfg = srsran::phy_cfg_nr_default_t{ref_args};
uecfg.phy_cfg.csi = {}; // disable CSI until RA is complete
@ -516,7 +530,7 @@ void rrc_nr::sgnb_addition_request(uint16_t eutra_rnti, const sgnb_addition_req_
return;
}
if (add_user(nr_rnti, uecfg) != SRSRAN_SUCCESS) {
if (add_user(nr_rnti, uecfg, false) != SRSRAN_SUCCESS) {
logger.error("Failed to allocate RNTI at RRC");
rrc_eutra->sgnb_addition_reject(eutra_rnti);
return;
@ -554,11 +568,82 @@ void rrc_nr::sgnb_release_request(uint16_t nr_rnti)
Every function in UE class is called from a mutex environment thus does not
need extra protection.
*******************************************************************************/
rrc_nr::ue::ue(rrc_nr* parent_, uint16_t rnti_, const sched_nr_ue_cfg_t& uecfg_) :
rrc_nr::ue::ue(rrc_nr* parent_, uint16_t rnti_, const sched_nr_ue_cfg_t& uecfg_, bool start_msg3_timer) :
parent(parent_), rnti(rnti_), uecfg(uecfg_)
{
// Derive UE cfg from rrc_cfg_nr_t
uecfg.phy_cfg.pdcch = parent->cfg.cell_list[0].phy_cell.pdcch;
// Set timer for MSG3_RX_TIMEOUT or UE_INACTIVITY_TIMEOUT
activity_timer = parent->task_sched.get_unique_timer();
start_msg3_timer ? set_activity_timeout(MSG3_RX_TIMEOUT) : set_activity_timeout(UE_INACTIVITY_TIMEOUT);
}
void rrc_nr::ue::set_activity_timeout(activity_timeout_type_t type)
{
uint32_t deadline_ms = 0;
switch (type) {
case MSG3_RX_TIMEOUT:
// TODO: Retrieve the parameters from somewhere (RRC?) - Currently hardcoded to 100ms
deadline_ms = 100;
break;
case UE_INACTIVITY_TIMEOUT:
// TODO: Add a value for the inactivity timeout - currently no activity sets this case
return;
default:
parent->logger.error("Unknown timeout type %d", type);
return;
}
// Currently we only set the timer for the MSG3_RX_TIMEOUT case
activity_timer.set(deadline_ms, [this, type](uint32_t tid) { activity_timer_expired(type); });
parent->logger.debug("Setting timer for %s for rnti=0x%x to %dms", to_string(type).c_str(), rnti, deadline_ms);
set_activity();
}
void rrc_nr::ue::set_activity(bool enabled)
{
if (not enabled) {
if (activity_timer.is_running()) {
parent->logger.debug("Inactivity timer interrupted for rnti=0x%x", rnti);
}
activity_timer.stop();
return;
}
// re-start activity timer with current timeout value
activity_timer.run();
parent->logger.debug("Activity registered for rnti=0x%x (timeout_value=%dms)", rnti, activity_timer.duration());
}
void rrc_nr::ue::activity_timer_expired(const activity_timeout_type_t type)
{
parent->logger.info("Activity timer for rnti=0x%x expired after %d ms", rnti, activity_timer.time_elapsed());
state = rrc_nr_state_t::RRC_IDLE;
switch (type) {
case UE_INACTIVITY_TIMEOUT:
// TODO: Add action to be executed
break;
case MSG3_RX_TIMEOUT:
// MSG3 timeout, no need to notify NGAP or LTE stack. Just remove UE
parent->rem_user(rnti);
break;
default:
// Unhandled activity timeout, just remove UE and log an error
parent->rem_user(rnti);
parent->logger.error(
"Unhandled reason for activity timer expiration. rnti=0x%x, cause %d", rnti, static_cast<unsigned>(type));
}
}
std::string rrc_nr::ue::to_string(const activity_timeout_type_t& type)
{
constexpr static const char* options[] = {"Msg3 reception", "UE inactivity", "UE reestablishment"};
return srsran::enum_to_text(options, (uint32_t)activity_timeout_type_t::nulltype, (uint32_t)type);
}
void rrc_nr::ue::send_connection_setup()
@ -1312,6 +1397,10 @@ void rrc_nr::ue::crnti_ce_received()
// send SgNB addition complete for ENDC users
parent->rrc_eutra->sgnb_addition_complete(eutra_rnti, rnti);
// stop RX MSG3 activity timer on MAC CE RNTI reception
activity_timer.stop();
parent->logger.debug("Received MAC CE-RNTI for 0x%x - stopping MSG3 timer", rnti);
// Add DRB1 to MAC
for (auto& drb : cell_group_cfg.rlc_bearer_to_add_mod_list) {
uecfg.ue_bearers[drb.lc_ch_id].direction = mac_lc_ch_cfg_t::BOTH;
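Taken together, the new code arms a one-shot Msg3 activity timer at UE creation (hard-coded to 100 ms above), removes the UE when it expires, and stops it in crnti_ce_received(). A sketch of that arm/stop/expire lifecycle with a hypothetical one-shot timer class standing in for srsran::unique_timer and the task scheduler:

#include <cstdio>
#include <functional>
#include <utility>

// Hypothetical one-shot timer standing in for srsran::unique_timer.
class one_shot_timer {
public:
  void set(unsigned duration_ms, std::function<void()> cb) { duration = duration_ms; callback = std::move(cb); }
  void run()  { running = true; }
  void stop() { running = false; }
  // In the real stack the task scheduler invokes the callback once the duration elapses.
  void fire_if_running() { if (running) { running = false; callback(); } }

private:
  unsigned              duration = 0;
  bool                  running  = false;
  std::function<void()> callback;
};

int main()
{
  one_shot_timer msg3_timer;
  // Arm the Msg3 timeout at UE creation, as the new ue ctor does.
  msg3_timer.set(100, [] { std::printf("Msg3 timeout: removing UE\n"); });
  msg3_timer.run();

  bool crnti_ce_received = true; // pretend the UE completed the RACH procedure in time
  if (crnti_ce_received) {
    msg3_timer.stop(); // mirrors crnti_ce_received() stopping the timer
  }
  msg3_timer.fire_if_running(); // no output: the timer was stopped before it could expire
  return 0;
}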

View File

@ -93,8 +93,8 @@ std::tuple<bool, meas_obj_t*, cells_to_add_mod_s*> add_cell_enb_cfg(meas_obj_lis
bool inserted_flag = true;
cells_to_add_mod_s new_cell;
asn1::number_to_enum(new_cell.cell_individual_offset, (uint8_t)cellcfg.q_offset);
new_cell.pci = cellcfg.pci;
new_cell.cell_individual_offset = cellcfg.cell_individual_offset;
new_cell.pci = cellcfg.pci;
std::pair<meas_obj_t*, meas_cell_t*> ret = find_cell(meas_obj_list, cellcfg.earfcn, cellcfg.pci);

View File

@ -488,7 +488,8 @@ bool s1ap::connect_mme()
logger.info("Connecting to MME %s:%d", args.mme_addr.c_str(), int(MME_PORT));
// Init SCTP socket and bind it
if (not sctp_init_client(&mme_socket, socket_type::seqpacket, args.s1c_bind_addr.c_str(), args.s1c_bind_port)) {
if (not srsran::net_utils::sctp_init_socket(
&mme_socket, socket_type::seqpacket, args.s1c_bind_addr.c_str(), args.s1c_bind_port)) {
return false;
}
logger.info("SCTP socket opened. fd=%d", mme_socket.fd());

View File

@ -59,10 +59,8 @@ const gtpu_tunnel_manager::tunnel* gtpu_tunnel_manager::find_tunnel(uint32_t tei
gtpu_tunnel_manager::ue_bearer_tunnel_list* gtpu_tunnel_manager::find_rnti_tunnels(uint16_t rnti)
{
if (not ue_teidin_db.contains(rnti)) {
return nullptr;
}
return &ue_teidin_db[rnti];
auto it = ue_teidin_db.find(rnti);
return it != ue_teidin_db.end() ? &ue_teidin_db[rnti] : nullptr;
}
srsran::span<gtpu_tunnel_manager::bearer_teid_pair>
@ -101,9 +99,9 @@ gtpu_tunnel_manager::add_tunnel(uint16_t rnti, uint32_t eps_bearer_id, uint32_t
tun->teid_out = teidout;
tun->spgw_addr = spgw_addr;
if (not ue_teidin_db.contains(rnti)) {
auto ret = ue_teidin_db.insert(rnti, ue_bearer_tunnel_list());
if (ret.is_error()) {
if (ue_teidin_db.find(rnti) == ue_teidin_db.end()) {
auto ret = ue_teidin_db.emplace(rnti, ue_bearer_tunnel_list());
if (!ret.second) {
logger.error("Failed to allocate rnti=0x%x", rnti);
return nullptr;
}
@ -142,7 +140,7 @@ bool gtpu_tunnel_manager::update_rnti(uint16_t old_rnti, uint16_t new_rnti)
logger.info("Modifying bearer rnti. Old rnti: 0x%x, new rnti: 0x%x", old_rnti, new_rnti);
// create new RNTI and update TEIDs of old rnti to reflect new rnti
if (new_rnti_ptr == nullptr and not ue_teidin_db.insert(new_rnti, ue_bearer_tunnel_list())) {
if (new_rnti_ptr == nullptr and not ue_teidin_db.insert({new_rnti, ue_bearer_tunnel_list()}).second) {
logger.error("Failure to create new rnti=0x%x", new_rnti);
return false;
}
@ -195,7 +193,8 @@ bool gtpu_tunnel_manager::remove_tunnel(uint32_t teidin)
bool gtpu_tunnel_manager::remove_rnti(uint16_t rnti)
{
if (not ue_teidin_db.contains(rnti)) {
auto it = ue_teidin_db.find(rnti);
if (it == ue_teidin_db.end()) {
logger.warning("Removing rnti. rnti=0x%x not found.", rnti);
return false;
}
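With ue_teidin_db now a plain std::unordered_map keyed by RNTI, lookups use find() and insertions use emplace(), whose .second flag reports whether the key was newly created. A small sketch of that usage with a placeholder value type instead of ue_bearer_tunnel_list:

#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <vector>

using bearer_list = std::vector<uint32_t>; // placeholder for ue_bearer_tunnel_list

static bearer_list* find_rnti_bearers(std::unordered_map<uint16_t, bearer_list>& db, uint16_t rnti)
{
  auto it = db.find(rnti);
  return it != db.end() ? &it->second : nullptr; // nullptr when the RNTI is unknown
}

static bool add_rnti(std::unordered_map<uint16_t, bearer_list>& db, uint16_t rnti)
{
  auto ret = db.emplace(rnti, bearer_list{});
  return ret.second; // false if the RNTI already existed, mirroring the error path in the diff
}

int main()
{
  std::unordered_map<uint16_t, bearer_list> db;
  add_rnti(db, 0x46);
  bearer_list* bearers = find_rnti_bearers(db, 0x46);
  std::printf("found=%d duplicate=%d\n", bearers != nullptr, add_rnti(db, 0x46));
  return 0;
}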

View File

@ -226,6 +226,11 @@ bool pdcp::user_interface_rlc::rb_is_um(uint32_t lcid)
return rlc->rb_is_um(rnti, lcid);
}
bool pdcp::user_interface_rlc::is_suspended(uint32_t lcid)
{
return rlc->is_suspended(rnti, lcid);
}
bool pdcp::user_interface_rlc::sdu_queue_is_full(uint32_t lcid)
{
return rlc->sdu_queue_is_full(rnti, lcid);

View File

@ -159,6 +159,17 @@ bool rlc::suspend_bearer(uint16_t rnti, uint32_t lcid)
return result;
}
bool rlc::is_suspended(uint16_t rnti, uint32_t lcid)
{
pthread_rwlock_rdlock(&rwlock);
bool result = false;
if (users.count(rnti)) {
result = users[rnti].rlc->is_suspended(lcid);
}
pthread_rwlock_unlock(&rwlock);
return result;
}
bool rlc::resume_bearer(uint16_t rnti, uint32_t lcid)
{
pthread_rwlock_rdlock(&rwlock);
@ -182,10 +193,10 @@ void rlc::reestablish(uint16_t rnti)
// In the eNodeB, there is no polling for buffer state from the scheduler.
// This function is called by UE RLC instance every time the tx/retx buffers are updated
void rlc::update_bsr(uint32_t rnti, uint32_t lcid, uint32_t tx_queue, uint32_t retx_queue)
void rlc::update_bsr(uint32_t rnti, uint32_t lcid, uint32_t tx_queue, uint32_t prio_tx_queue)
{
logger.debug("Buffer state: rnti=0x%x, lcid=%d, tx_queue=%d", rnti, lcid, tx_queue);
mac->rlc_buffer_state(rnti, lcid, tx_queue, retx_queue);
logger.debug("Buffer state: rnti=0x%x, lcid=%d, tx_queue=%d, prio_tx_queue=%d", rnti, lcid, tx_queue, prio_tx_queue);
mac->rlc_buffer_state(rnti, lcid, tx_queue, prio_tx_queue);
}
int rlc::read_pdu(uint16_t rnti, uint32_t lcid, uint8_t* payload, uint32_t nof_bytes)

View File

@ -39,6 +39,7 @@ public:
void write_sdu(uint16_t rnti, uint32_t lcid, srsran::unique_byte_buffer_t sdu) override { last_sdu = std::move(sdu); }
bool has_bearer(uint16_t rnti, uint32_t lcid) override { return false; }
bool suspend_bearer(uint16_t rnti, uint32_t lcid) override { return true; }
bool is_suspended(uint16_t rnti, uint32_t lcid) override { return false; }
bool resume_bearer(uint16_t rnti, uint32_t lcid) override { return true; }
void reestablish(uint16_t rnti) override {}

View File

@ -66,6 +66,7 @@ void test_single_prach()
auto run_slot = [&alloc, &rasched, &pdcch_slot, &slot_ues, &u]() -> const bwp_slot_grid* {
mac_logger.set_context(pdcch_slot.to_uint());
u.new_slot(pdcch_slot);
u.carriers[0]->new_slot(pdcch_slot);
slot_ues.clear();
slot_ue sfu = u.try_reserve(pdcch_slot, 0);
if (not sfu.empty()) {

View File

@ -51,8 +51,8 @@ int sched_nr_ue_sim::update(const sched_nr_cc_result_view& cc_out)
{
update_dl_harqs(cc_out);
for (uint32_t i = 0; i < cc_out.dl_cc_result->dl_sched.pdcch_dl.size(); ++i) {
const auto& data = cc_out.dl_cc_result->dl_sched.pdcch_dl[i];
for (uint32_t i = 0; i < cc_out.dl_cc_result.dl_sched.pdcch_dl.size(); ++i) {
const auto& data = cc_out.dl_cc_result.dl_sched.pdcch_dl[i];
if (data.dci.ctx.rnti != ctxt.rnti) {
continue;
}
@ -73,8 +73,8 @@ int sched_nr_ue_sim::update(const sched_nr_cc_result_view& cc_out)
void sched_nr_ue_sim::update_dl_harqs(const sched_nr_cc_result_view& cc_out)
{
uint32_t cc = cc_out.cc;
for (uint32_t i = 0; i < cc_out.dl_cc_result->dl_sched.pdcch_dl.size(); ++i) {
const auto& data = cc_out.dl_cc_result->dl_sched.pdcch_dl[i];
for (uint32_t i = 0; i < cc_out.dl_cc_result.dl_sched.pdcch_dl.size(); ++i) {
const auto& data = cc_out.dl_cc_result.dl_sched.pdcch_dl[i];
if (data.dci.ctx.rnti != ctxt.rnti) {
continue;
}
@ -216,7 +216,9 @@ void sched_nr_base_tester::run_slot(slot_point slot_tx)
void sched_nr_base_tester::generate_cc_result(uint32_t cc)
{
// Run scheduler
sched_ptr->run_slot(current_slot_tx, cc, cc_results[cc].dl_res);
sched_nr_interface::dl_res_t dl_sched(cc_results[cc].rar, cc_results[cc].dl_res);
sched_ptr->run_slot(current_slot_tx, cc, dl_sched);
cc_results[cc].rar = dl_sched.rar;
sched_ptr->get_ul_sched(current_slot_tx, cc, cc_results[cc].ul_res);
auto tp2 = std::chrono::steady_clock::now();
cc_results[cc].cc_latency_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(tp2 - slot_start_tp);
@ -238,17 +240,14 @@ void sched_nr_base_tester::process_results()
// Derived class-defined tests
process_slot_result(slot_ctxt, cc_results);
sched_nr_cc_result_view cc_out;
cc_out.slot = current_slot_tx;
for (uint32_t cc = 0; cc < cell_params.size(); ++cc) {
cc_out.cc = cc;
cc_out.dl_cc_result = &cc_results[cc].dl_res;
cc_out.ul_cc_result = &cc_results[cc].ul_res;
sched_nr_cc_result_view cc_out{
current_slot_tx, cc, cc_results[cc].rar, cc_results[cc].dl_res, cc_results[cc].ul_res};
// Run common tests
test_dl_pdcch_consistency(cc_out.dl_cc_result->dl_sched.pdcch_dl);
test_pdsch_consistency(cc_out.dl_cc_result->dl_sched.pdsch);
test_ssb_scheduled_grant(cc_out.slot, cell_params[cc_out.cc].cfg, cc_out.dl_cc_result->dl_sched.ssb);
test_dl_pdcch_consistency(cc_out.dl_cc_result.dl_sched.pdcch_dl);
test_pdsch_consistency(cc_out.dl_cc_result.dl_sched.pdsch);
test_ssb_scheduled_grant(cc_out.slot, cell_params[cc_out.cc].cfg, cc_out.dl_cc_result.dl_sched.ssb);
// Run UE-dedicated tests
test_dl_sched_result(slot_ctxt, cc_out);

View File

@ -48,10 +48,18 @@ struct ue_nr_harq_ctxt_t {
slot_point last_slot_tx, first_slot_tx, last_slot_ack;
};
struct sched_nr_cc_result_view {
slot_point slot;
uint32_t cc;
const sched_nr_interface::dl_sched_res_t* dl_cc_result;
const sched_nr_interface::ul_sched_t* ul_cc_result;
slot_point slot;
uint32_t cc;
const sched_nr_interface::dl_res_t dl_cc_result;
const sched_nr_interface::ul_res_t* ul_cc_result;
sched_nr_cc_result_view(slot_point slot_,
uint32_t cc_,
sched_nr_interface::sched_rar_list_t& rar,
sched_nr_interface::dl_sched_t& dl_res,
sched_nr_interface::ul_res_t& ul_res) :
slot(slot_), cc(cc_), dl_cc_result(rar, dl_res), ul_cc_result(&ul_res)
{}
};
struct ue_nr_cc_ctxt_t {
@ -117,11 +125,12 @@ class sched_nr_base_tester
{
public:
struct cc_result_t {
slot_point slot_tx;
uint32_t cc;
sched_nr_interface::dl_sched_res_t dl_res;
sched_nr_interface::ul_sched_t ul_res;
std::chrono::nanoseconds cc_latency_ns;
slot_point slot_tx;
uint32_t cc;
sched_nr_interface::dl_sched_t dl_res;
sched_nr_interface::sched_rar_list_t rar;
sched_nr_interface::ul_res_t ul_res;
std::chrono::nanoseconds cc_latency_ns;
};
sched_nr_base_tester(const sched_nr_interface::sched_args_t& sched_args,

View File

@ -45,12 +45,17 @@ public:
})->cc_latency_ns.count();
for (auto& cc_out : cc_list) {
pdsch_count += cc_out.dl_res.dl_sched.pdcch_dl.size();
pdsch_count += cc_out.dl_res.pdcch_dl.size();
cc_res_count++;
TESTASSERT(cc_out.dl_res.dl_sched.pdcch_dl.size() <= 1);
if (srsran_duplex_nr_is_dl(&cell_params[cc_out.cc].cfg.duplex, 0, current_slot_tx.slot_idx())) {
TESTASSERT(cc_out.dl_res.dl_sched.pdcch_dl.size() == 1 or not cc_out.dl_res.dl_sched.ssb.empty());
bool is_dl_slot = srsran_duplex_nr_is_dl(&cell_params[cc_out.cc].cfg.duplex, 0, current_slot_tx.slot_idx());
if (is_dl_slot) {
if (cc_out.dl_res.ssb.empty()) {
TESTASSERT(slot_ctxt.ue_db.empty() or cc_out.dl_res.pdcch_dl.size() == 1);
} else {
TESTASSERT(cc_out.dl_res.pdcch_dl.size() == 0);
}
}
}
}
@ -85,12 +90,13 @@ void run_sched_nr_test(uint32_t nof_workers)
}
sched_nr_tester tester(cfg, cells_cfg, test_name, nof_workers);
sched_nr_interface::ue_cfg_t uecfg = get_default_ue_cfg(nof_sectors);
tester.add_user(rnti, uecfg, slot_point{0, 0}, 0);
for (uint32_t nof_slots = 0; nof_slots < max_nof_ttis; ++nof_slots) {
slot_point slot_rx(0, nof_slots % 10240);
slot_point slot_tx = slot_rx + TX_ENB_DELAY;
if (slot_rx.to_uint() == 9) {
sched_nr_interface::ue_cfg_t uecfg = get_default_ue_cfg(nof_sectors);
tester.add_user(rnti, uecfg, slot_rx, 0);
}
tester.run_slot(slot_tx);
}

View File

@ -30,7 +30,7 @@ using namespace srsenb::sched_nr_impl;
void test_dl_sched_result(const sim_nr_enb_ctxt_t& enb_ctxt, const sched_nr_cc_result_view& cc_out)
{
slot_point pdcch_slot = cc_out.slot;
const pdcch_dl_list_t& pdcchs = cc_out.dl_cc_result->dl_sched.pdcch_dl;
const pdcch_dl_list_t& pdcchs = cc_out.dl_cc_result.dl_sched.pdcch_dl;
// Iterate over UE PDCCH allocations
for (const pdcch_dl_t& pdcch : pdcchs) {

View File

@ -53,7 +53,7 @@ int test_pdu_alloc_successful(srsenb::lch_ue_manager& lch_handler,
int test_retx_until_empty(srsenb::lch_ue_manager& lch_handler, int lcid, uint32_t rlc_payload_size)
{
int start_rlc_bytes = lch_handler.get_dl_retx(lcid);
int start_rlc_bytes = lch_handler.get_dl_prio_tx(lcid);
int nof_pdus = ceil(static_cast<float>(start_rlc_bytes) / static_cast<float>(rlc_payload_size));
int rem_rlc_bytes = start_rlc_bytes;
@ -62,7 +62,7 @@ int test_retx_until_empty(srsenb::lch_ue_manager& lch_handler, int lcid, uint32_
uint32_t expected_payload_size = std::min(rlc_payload_size, (uint32_t)rem_rlc_bytes);
TESTASSERT(test_pdu_alloc_successful(lch_handler, pdu, lcid, expected_payload_size) == SRSRAN_SUCCESS);
rem_rlc_bytes -= expected_payload_size;
TESTASSERT(lch_handler.get_dl_retx(lcid) == rem_rlc_bytes);
TESTASSERT(lch_handler.get_dl_prio_tx(lcid) == rem_rlc_bytes);
}
return start_rlc_bytes;
}
@ -85,7 +85,7 @@ int test_newtx_until_empty(srsenb::lch_ue_manager& lch_handler, int lcid, uint32
int test_lc_ch_pbr_infinity()
{
srsenb::lch_ue_manager lch_handler;
srsenb::lch_ue_manager lch_handler{0x46};
srsenb::sched_interface::ue_cfg_t ue_cfg = generate_default_ue_cfg();
ue_cfg = generate_setup_ue_cfg(ue_cfg);
@ -106,15 +106,15 @@ int test_lc_ch_pbr_infinity()
lch_handler.dl_buffer_state(drb_to_lcid(lte_drb::drb2), 5000, 10000);
// TEST1 - retx of SRB1 is prioritized. Do not transmit other bearers until there are no SRB1 retxs
int nof_pending_bytes = lch_handler.get_dl_retx(srb_to_lcid(lte_srb::srb1));
int nof_pending_bytes = lch_handler.get_dl_prio_tx(srb_to_lcid(lte_srb::srb1));
TESTASSERT(test_retx_until_empty(lch_handler, srb_to_lcid(lte_srb::srb1), 500) == nof_pending_bytes);
// TEST2 - the DRB2 has lower prio level than SRB1, but has retxs
nof_pending_bytes = lch_handler.get_dl_retx(drb_to_lcid(lte_drb::drb2));
nof_pending_bytes = lch_handler.get_dl_prio_tx(drb_to_lcid(lte_drb::drb2));
TESTASSERT(test_retx_until_empty(lch_handler, drb_to_lcid(lte_drb::drb2), 500) == nof_pending_bytes);
// TEST3 - the DRB1 has lower prio level, but has retxs
nof_pending_bytes = lch_handler.get_dl_retx(drb_to_lcid(lte_drb::drb1));
nof_pending_bytes = lch_handler.get_dl_prio_tx(drb_to_lcid(lte_drb::drb1));
TESTASSERT(test_retx_until_empty(lch_handler, drb_to_lcid(lte_drb::drb1), 500) == nof_pending_bytes);
// TEST4 - The SRB1 newtx buffer is emptied before other bearers newtxs
@ -134,7 +134,7 @@ int test_lc_ch_pbr_infinity()
int test_lc_ch_pbr_finite()
{
srsenb::lch_ue_manager lch_handler;
srsenb::lch_ue_manager lch_handler{0x46};
sched_interface::dl_sched_pdu_t pdu;
srsenb::sched_interface::ue_cfg_t ue_cfg = generate_default_ue_cfg();
@ -163,11 +163,11 @@ int test_lc_ch_pbr_finite()
lch_handler.dl_buffer_state(drb_to_lcid(lte_drb::drb2), 50000, 0);
// TEST1 - SRB1 retxs are emptied first
int nof_pending_bytes = lch_handler.get_dl_retx(srb_to_lcid(lte_srb::srb1));
int nof_pending_bytes = lch_handler.get_dl_prio_tx(srb_to_lcid(lte_srb::srb1));
TESTASSERT(test_retx_until_empty(lch_handler, srb_to_lcid(lte_srb::srb1), 500) == nof_pending_bytes);
// TEST2 - DRB1 retxs are emptied
nof_pending_bytes = lch_handler.get_dl_retx(drb_to_lcid(lte_drb::drb1));
nof_pending_bytes = lch_handler.get_dl_prio_tx(drb_to_lcid(lte_drb::drb1));
TESTASSERT(test_retx_until_empty(lch_handler, drb_to_lcid(lte_drb::drb1), 500) == nof_pending_bytes);
// TEST3 - SRB1 newtxs are emptied (PBR==infinity)

View File

@ -40,14 +40,14 @@ namespace srsenb {
*/
int test_correct_meascfg_insertion()
{
meas_cell_cfg_t cell1 = generate_cell1(), cell2{}, cell3{}, cell4{};
cell2 = cell1;
cell2.pci = 2;
cell2.eci = 0x19C02;
cell3 = cell1;
cell3.earfcn = 2850;
cell4 = cell1;
cell4.q_offset = 1;
meas_cell_cfg_t cell1 = generate_cell1(), cell2{}, cell3{}, cell4{};
cell2 = cell1;
cell2.pci = 2;
cell2.eci = 0x19C02;
cell3 = cell1;
cell3.earfcn = 2850;
cell4 = cell1;
cell4.cell_individual_offset = asn1::rrc::q_offset_range_opts::db1;
report_cfg_eutra_s rep1 = generate_rep1();
@ -117,13 +117,13 @@ int test_correct_meascfg_calculation()
meas_cfg_s src_var{}, target_var{};
meas_cell_cfg_t cell1{}, cell2{};
cell1.earfcn = 3400;
cell1.pci = 1;
cell1.q_offset = 0;
cell1.eci = 0x19C01;
cell2 = cell1;
cell2.pci = 2;
cell2.eci = 0x19C02;
cell1.earfcn = 3400;
cell1.pci = 1;
cell1.cell_individual_offset = asn1::rrc::q_offset_range_opts::db0;
cell1.eci = 0x19C01;
cell2 = cell1;
cell2.pci = 2;
cell2.eci = 0x19C02;
report_cfg_eutra_s rep1 = generate_rep1(), rep2{}, rep3{};
rep2 = rep1;
@ -178,8 +178,8 @@ int test_correct_meascfg_calculation()
TESTASSERT(result_meascfg.report_cfg_to_add_mod_list.size() == 0);
// TEST 3: Cell is added to cellsToAddModList if just a field was updated
cell1.q_offset = 5;
src_var = target_var;
cell1.cell_individual_offset = asn1::rrc::q_offset_range_opts::db5;
src_var = target_var;
add_cell_enb_cfg(target_var.meas_obj_to_add_mod_list, cell1);
TESTASSERT(compute_diff_meascfg(src_var, target_var, result_meascfg));
TESTASSERT(result_meascfg.meas_obj_to_add_mod_list_present);

View File

@ -182,10 +182,10 @@ namespace srsenb {
meas_cell_cfg_t generate_cell1()
{
meas_cell_cfg_t cell1{};
cell1.earfcn = 3400;
cell1.pci = 1;
cell1.q_offset = 0;
cell1.eci = 0x19C01;
cell1.earfcn = 3400;
cell1.pci = 1;
cell1.cell_individual_offset = asn1::rrc::q_offset_range_opts::db0;
cell1.eci = 0x19C01;
return cell1;
}
@ -207,7 +207,7 @@ report_cfg_eutra_s generate_rep1()
bool is_cell_cfg_equal(const meas_cell_cfg_t& cfg, const cells_to_add_mod_s& cell)
{
return cfg.pci == cell.pci and cell.cell_individual_offset.to_number() == (int8_t)round(cfg.q_offset);
return cfg.pci == cell.pci and cfg.cell_individual_offset == cell.cell_individual_offset;
}
} // namespace srsenb

View File

@ -87,8 +87,12 @@ int s1ap::init(const s1ap_args_t& s1ap_args)
// Get pointer to GTP-C class
m_mme_gtpc = mme_gtpc::get_instance();
// Initialize S1-MME
m_s1mme = enb_listen();
if (m_s1mme == SRSRAN_ERROR) {
return SRSRAN_ERROR;
}
// Init PCAP
m_pcap_enable = s1ap_args.pcap_enable;

View File

@ -82,8 +82,9 @@ private:
bool measure_rat(measure_context_t context, std::vector<cf_t>& buffer, float rx_gain_offset) override;
srslog::basic_logger& logger;
srsran_cell_t serving_cell = {}; ///< Current serving cell in the EARFCN, to avoid reporting it
uint32_t current_earfcn = 0; ///< Current EARFCN
srsran_cell_t serving_cell = {}; ///< Current serving cell in the EARFCN, to avoid reporting it
std::atomic<uint32_t> current_earfcn = {0}; ///< Current EARFCN
std::mutex mutex;
/// LTE-based measuring objects
scell_recv scell_rx; ///< Secondary cell searcher

View File

@ -126,12 +126,12 @@ private:
};
lockable_grant cur_grant;
uint32_t pid;
uint32_t current_tx_nb;
uint32_t current_irv;
bool harq_feedback;
bool is_grant_configured;
bool is_initiated;
uint32_t pid;
std::atomic<uint32_t> current_tx_nb = {0};
uint32_t current_irv;
bool harq_feedback;
bool is_grant_configured;
bool is_initiated;
srslog::basic_logger& logger;
ul_harq_entity* harq_entity;

View File

@ -97,6 +97,8 @@ private:
std::unique_ptr<byte_buffer_t> harq_buffer = nullptr;
void save_grant(const mac_interface_phy_nr::mac_nr_grant_ul_t& grant);
void generate_tx(mac_interface_phy_nr::tb_action_ul_t* action);
void generate_new_tx(const mac_interface_phy_nr::mac_nr_grant_ul_t& grant,
mac_interface_phy_nr::tb_action_ul_t* action);
@ -113,6 +115,8 @@ private:
srsran::ul_harq_cfg_t harq_cfg = {};
ul_harq_metrics_t metrics = {};
std::mutex metrics_mutex;
const static uint8_t NDI_NOT_SET = 100;
};
typedef std::unique_ptr<ul_harq_entity_nr> ul_harq_entity_nr_ptr;

View File

@ -654,6 +654,9 @@ static void* input_loop(void*)
} else if (key == "rlf") {
simulate_rlf.store(true, std::memory_order_relaxed);
cout << "Sending Radio Link Failure" << endl;
} else if (key == "flush") {
srslog::flush();
cout << "Flushed log file buffers" << endl;
} else if (key == "q") {
// let the signal handler do the job
raise(SIGTERM);

View File

@ -315,12 +315,14 @@ bool phy_common::is_any_ul_pending_ack()
#define tti_pusch_hi(sf) \
(sf->tti + \
(cell.frame_type == SRSRAN_FDD ? FDD_HARQ_DELAY_UL_MS \
: I_phich ? 7 : k_pusch[sf->tdd_config.sf_config][sf->tti % 10]) + \
: I_phich ? 7 \
: k_pusch[sf->tdd_config.sf_config][sf->tti % 10]) + \
(FDD_HARQ_DELAY_DL_MS - FDD_HARQ_DELAY_UL_MS))
#define tti_pusch_gr(sf) \
(sf->tti + \
(cell.frame_type == SRSRAN_FDD ? FDD_HARQ_DELAY_UL_MS \
: dci->ul_idx == 1 ? 7 : k_pusch[sf->tdd_config.sf_config][sf->tti % 10]) + \
: dci->ul_idx == 1 ? 7 \
: k_pusch[sf->tdd_config.sf_config][sf->tti % 10]) + \
(FDD_HARQ_DELAY_DL_MS - FDD_HARQ_DELAY_UL_MS))
// SF->TTI is at which Format0 dci is received
@ -897,9 +899,12 @@ void phy_common::reset()
reset_radio();
sr.reset();
cur_pathloss = 0;
cur_pusch_power = 0;
last_ri = 0;
{
std::unique_lock<std::mutex> lock(meas_mutex);
cur_pathloss = 0;
cur_pusch_power = 0;
}
last_ri = 0;
// Reset all measurements
reset_measurements(SRSRAN_MAX_CARRIERS);

View File

@ -51,8 +51,11 @@ void intra_measure_lte::init(uint32_t cc_idx, const args_t& args)
void intra_measure_lte::set_primary_cell(uint32_t earfcn, srsran_cell_t cell)
{
{
std::lock_guard<std::mutex> lock(mutex);
serving_cell = cell;
}
current_earfcn = earfcn;
serving_cell = cell;
set_current_sf_len((uint32_t)SRSRAN_SF_LEN_PRB(cell.nof_prb));
}
@ -60,8 +63,14 @@ bool intra_measure_lte::measure_rat(measure_context_t context, std::vector<cf_t>
{
std::set<uint32_t> cells_to_measure = context.active_pci;
srsran_cell_t serving_cell_copy{};
{
std::lock_guard<std::mutex> lock(mutex);
serving_cell_copy = serving_cell;
}
// Detect new cells using PSS/SSS
scell_rx.find_cells(buffer.data(), serving_cell, context.meas_len_ms, cells_to_measure);
scell_rx.find_cells(buffer.data(), serving_cell_copy, context.meas_len_ms, cells_to_measure);
// Initialise empty neighbour cell list
std::vector<phy_meas_t> neighbour_cells = {};
@ -71,10 +80,10 @@ bool intra_measure_lte::measure_rat(measure_context_t context, std::vector<cf_t>
// Use Cell Reference signal to measure cells in the time domain for all known active PCI
for (const uint32_t& id : cells_to_measure) {
// Do not measure serving cell here since it's measured by workers
if (id == serving_cell.id) {
if (id == serving_cell_copy.id) {
continue;
}
srsran_cell_t cell = serving_cell;
srsran_cell_t cell = serving_cell_copy;
cell.id = id;
if (srsran_refsignal_dl_sync_set_cell(&refsignal_dl_sync, cell) < SRSRAN_SUCCESS) {
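The measurement path now snapshots the serving cell under a mutex and measures on the copy, so set_primary_cell() can update it from another thread without the lock being held for the whole measurement. A generic sketch of that copy-under-lock pattern with a simplified cell struct (not the real srsran_cell_t):

#include <cstdint>
#include <mutex>

struct cell_t {
  uint32_t id      = 0;
  uint32_t nof_prb = 0;
};

class measurer {
public:
  // Writer: called from the configuration thread.
  void set_primary_cell(const cell_t& cell)
  {
    std::lock_guard<std::mutex> lock(mutex);
    serving_cell = cell;
  }

  // Reader: take a consistent snapshot, then measure without holding the lock.
  uint32_t measure()
  {
    cell_t serving_cell_copy;
    {
      std::lock_guard<std::mutex> lock(mutex);
      serving_cell_copy = serving_cell;
    }
    return serving_cell_copy.id; // placeholder for the actual measurement work
  }

private:
  std::mutex mutex;
  cell_t     serving_cell;
};

int main()
{
  measurer m;
  m.set_primary_cell({1, 50});
  return static_cast<int>(m.measure()); // returns the snapshotted cell id
}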

Some files were not shown because too many files have changed in this diff.