2021-01-08 06:59:32 -08:00
|
|
|
/**
|
|
|
|
*
|
|
|
|
* \section COPYRIGHT
|
|
|
|
*
|
|
|
|
* Copyright 2013-2020 Software Radio Systems Limited
|
|
|
|
*
|
|
|
|
* By using this file, you agree to the terms and conditions set
|
|
|
|
* forth in the LICENSE file which can be found at the top level of
|
|
|
|
* the distribution.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef SRSLTE_MEM_POOL_H
|
|
|
|
#define SRSLTE_MEM_POOL_H
|
|
|
|
|
2021-01-10 08:40:56 -08:00
|
|
|
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>
#include <new>
#include <type_traits>
|
|
|
|
|
2021-01-08 06:59:32 -08:00
|
|
|
namespace srslte {
|
|
|
|
|
|
|
|
/// Stores provided mem blocks in a stack in an non-owning manner. Not thread-safe
|
|
|
|
class memblock_stack
{
  /// Intrusive singly-linked node, placement-constructed inside each stored memory block.
  struct node {
    node* prev;
    explicit node(node* prev_) : prev(prev_) {}
  };

public:
  /// Minimum number of bytes a pushed block must have to fit the intrusive node header.
  constexpr static size_t min_memblock_size() { return sizeof(node); }

  memblock_stack() = default;

  memblock_stack(const memblock_stack&) = delete;

  // Move must transfer BOTH head and count, otherwise size() desyncs from the actual list.
  memblock_stack(memblock_stack&& other) noexcept : head(other.head), count(other.count)
  {
    other.head  = nullptr;
    other.count = 0;
  }

  memblock_stack& operator=(const memblock_stack&) = delete;

  memblock_stack& operator=(memblock_stack&& other) noexcept
  {
    if (this != &other) {
      head        = other.head;
      count       = other.count;
      other.head  = nullptr;
      other.count = 0;
    }
    return *this;
  }

  /// Pushes a memory block (>= min_memblock_size() bytes, suitably aligned) onto the stack.
  /// The stack does not take ownership of the block's memory.
  void push(uint8_t* block) noexcept
  {
    node* next = ::new (block) node(head);
    head       = next;
    count++;
  }

  /// Pops the most recently pushed block, or returns nullptr if the stack is empty.
  uint8_t* try_pop() noexcept
  {
    if (is_empty()) {
      return nullptr;
    }
    node* last_head = head;
    head            = head->prev;
    count--;
    return (uint8_t*)last_head;
  }

  bool is_empty() const { return head == nullptr; }

  size_t size() const { return count; }

  /// Drops all stored blocks without freeing them (the stack is non-owning).
  void clear()
  {
    head  = nullptr;
    count = 0; // keep counter consistent with the emptied list
  }

private:
  node*  head  = nullptr;
  size_t count = 0;
};
|
|
|
|
|
|
|
|
/// memblock stack that mutexes pushing/popping
|
|
|
|
class mutexed_memblock_stack
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
mutexed_memblock_stack() = default;
|
|
|
|
|
|
|
|
mutexed_memblock_stack(const mutexed_memblock_stack&) = delete;
|
|
|
|
|
|
|
|
mutexed_memblock_stack(mutexed_memblock_stack&& other) noexcept
|
|
|
|
{
|
|
|
|
std::unique_lock<std::mutex> lk1(other.mutex, std::defer_lock);
|
|
|
|
std::unique_lock<std::mutex> lk2(mutex, std::defer_lock);
|
|
|
|
std::lock(lk1, lk2);
|
|
|
|
stack = std::move(other.stack);
|
|
|
|
}
|
|
|
|
|
|
|
|
mutexed_memblock_stack& operator=(const mutexed_memblock_stack&) = delete;
|
|
|
|
|
|
|
|
mutexed_memblock_stack& operator=(mutexed_memblock_stack&& other) noexcept
|
|
|
|
{
|
|
|
|
std::unique_lock<std::mutex> lk1(other.mutex, std::defer_lock);
|
|
|
|
std::unique_lock<std::mutex> lk2(mutex, std::defer_lock);
|
|
|
|
std::lock(lk1, lk2);
|
|
|
|
stack = std::move(other.stack);
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
|
|
|
void push(uint8_t* block) noexcept
|
|
|
|
{
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
stack.push(block);
|
|
|
|
}
|
|
|
|
|
|
|
|
uint8_t* try_pop() noexcept
|
|
|
|
{
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
uint8_t* block = stack.try_pop();
|
|
|
|
return block;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool is_empty() const noexcept { return stack.is_empty(); }
|
|
|
|
|
2021-01-08 10:25:03 -08:00
|
|
|
size_t size() const noexcept
|
|
|
|
{
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
return stack.size();
|
|
|
|
}
|
|
|
|
|
2021-01-08 06:59:32 -08:00
|
|
|
void clear()
|
|
|
|
{
|
|
|
|
std::lock_guard<std::mutex> lock(mutex);
|
|
|
|
stack.clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
2021-01-08 10:25:03 -08:00
|
|
|
memblock_stack stack;
|
|
|
|
mutable std::mutex mutex;
|
2021-01-08 06:59:32 -08:00
|
|
|
};
|
|
|
|
|
2021-01-08 10:25:03 -08:00
|
|
|
/**
|
2021-01-10 10:31:13 -08:00
|
|
|
 * Pool specialized for big objects. Created objects are of the same type, and are not contiguous in memory.
|
|
|
|
* Memory management of created objects is automatically handled. Relevant methods:
|
2021-01-08 10:25:03 -08:00
|
|
|
* - ::make(...) - create an object whose memory is automatically managed by the pool. The object dtor returns the
|
|
|
|
* allocated memory back to the pool
|
|
|
|
* - ::reserve(N) - prereserve memory slots for faster object creation
|
|
|
|
* @tparam T object type
|
2021-01-10 10:31:13 -08:00
|
|
|
* @tparam ThreadSafe if object pool is thread-safe or not
|
2021-01-08 10:25:03 -08:00
|
|
|
*/
|
2021-01-10 08:40:56 -08:00
|
|
|
template <typename T, bool ThreadSafe = false>
|
2021-01-08 10:25:03 -08:00
|
|
|
class obj_pool
|
2021-01-08 06:59:32 -08:00
|
|
|
{
|
|
|
|
/// single-thread obj pool deleter
|
|
|
|
struct obj_deleter {
|
2021-01-08 10:25:03 -08:00
|
|
|
explicit obj_deleter(obj_pool<T, ThreadSafe>* pool_) : pool(pool_) {}
|
2021-01-10 08:40:56 -08:00
|
|
|
void operator()(void* block)
|
|
|
|
{
|
|
|
|
static_cast<T*>(block)->~T();
|
|
|
|
pool->stack.push(static_cast<uint8_t*>(block));
|
|
|
|
}
|
2021-01-08 10:25:03 -08:00
|
|
|
obj_pool<T, ThreadSafe>* pool;
|
2021-01-08 06:59:32 -08:00
|
|
|
};
|
2021-01-08 10:25:03 -08:00
|
|
|
|
|
|
|
// memory stack type derivation (thread safe or not)
|
|
|
|
using stack_type = typename std::conditional<ThreadSafe, mutexed_memblock_stack, memblock_stack>::type;
|
|
|
|
|
|
|
|
// memory stack to cache allocate memory chunks
|
|
|
|
stack_type stack;
|
|
|
|
|
|
|
|
public:
|
2021-01-08 06:59:32 -08:00
|
|
|
using obj_ptr = std::unique_ptr<T, obj_deleter>;
|
|
|
|
|
2021-01-08 10:25:03 -08:00
|
|
|
~obj_pool()
|
2021-01-08 07:21:55 -08:00
|
|
|
{
|
|
|
|
uint8_t* block = stack.try_pop();
|
|
|
|
while (block != nullptr) {
|
|
|
|
delete[] block;
|
|
|
|
block = stack.try_pop();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-08 10:25:03 -08:00
|
|
|
/// create new object with given arguments. If no memory is pre-reserved in the pool, malloc is called.
|
2021-01-08 06:59:32 -08:00
|
|
|
template <typename... Args>
|
|
|
|
obj_ptr make(Args&&... args)
|
|
|
|
{
|
2021-01-10 10:31:13 -08:00
|
|
|
uint8_t* block = allocate_node();
|
2021-01-08 06:59:32 -08:00
|
|
|
new (block) T(std::forward<Args>(args)...);
|
|
|
|
return obj_ptr(reinterpret_cast<T*>(block), obj_deleter(this));
|
|
|
|
}
|
|
|
|
|
2021-01-08 10:25:03 -08:00
|
|
|
/// Pre-reserve N memory chunks for future object allocations
|
2021-01-08 06:59:32 -08:00
|
|
|
void reserve(size_t N)
|
|
|
|
{
|
2021-01-10 10:31:13 -08:00
|
|
|
static const size_t blocksize = std::max(sizeof(T), memblock_stack::min_memblock_size());
|
2021-01-08 06:59:32 -08:00
|
|
|
for (size_t i = 0; i < N; ++i) {
|
2021-01-10 10:31:13 -08:00
|
|
|
stack.push(new uint8_t[blocksize]);
|
2021-01-08 06:59:32 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-08 07:21:55 -08:00
|
|
|
size_t capacity() const { return stack.size(); }
|
2021-01-10 10:31:13 -08:00
|
|
|
|
|
|
|
private:
|
|
|
|
uint8_t* allocate_node()
|
|
|
|
{
|
|
|
|
static const size_t blocksize = std::max(sizeof(T), memblock_stack::min_memblock_size());
|
|
|
|
uint8_t* block = stack.try_pop();
|
|
|
|
if (block == nullptr) {
|
|
|
|
block = new uint8_t[blocksize];
|
|
|
|
}
|
|
|
|
return block;
|
|
|
|
}
|
2021-01-08 06:59:32 -08:00
|
|
|
};
|
2021-01-10 08:40:56 -08:00
|
|
|
/// Thread-safe object pool. NOTE(review): despite the "_obj" suffix, this
/// aliases the pool type itself, not a pooled object pointer.
template <typename T>
using mutexed_pool_obj = obj_pool<T, true>;

/// Smart pointer to an object created by a non-thread-safe obj_pool; its
/// deleter returns the memory chunk to the originating pool.
template <typename T>
using unique_pool_obj = typename obj_pool<T, false>::obj_ptr;

/// Smart pointer to an object created by a thread-safe obj_pool.
template <typename T>
using unique_mutexed_pool_obj = typename obj_pool<T, true>::obj_ptr;
|
2021-01-08 06:59:32 -08:00
|
|
|
|
|
|
|
} // namespace srslte
|
|
|
|
|
|
|
|
#endif // SRSLTE_MEM_POOL_H
|