Implementation of a custom allocator for memory caching. This allocator may be used for unbounded queues.

This commit is contained in:
Francisco 2021-05-05 20:52:05 +01:00 committed by Francisco Paisana
parent f0d2a22cb6
commit 28847badcf
3 changed files with 216 additions and 0 deletions

View File

@ -0,0 +1,109 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#ifndef SRSRAN_CACHED_ALLOC_H
#define SRSRAN_CACHED_ALLOC_H
#include "../intrusive_list.h"
#include "memblock_cache.h"
#include <deque>
#include <queue>
namespace srsran {
/// Allocator that caches deallocated memory blocks in a free list and reuses
/// them for subsequent allocations of the same size, avoiding repeated trips
/// to the heap (e.g. for the steady-state push/pop pattern of a queue).
/// Cached blocks are owned per-instance and are never shared between copies.
template <typename T>
class cached_alloc : public std::allocator<T>
{
  /// Header constructed in-place at the start of each cached block so the
  /// block can be linked into the free list and remember its size.
  struct memblock_t : public intrusive_double_linked_list_element<> {
    explicit memblock_t(size_t sz) : block_size(sz) {}
    size_t block_size; // number of T slots this block spans
  };
  /// Minimum allocation size (in T objects) so that a freed block is always
  /// large enough to hold a memblock_t header.
  const size_t min_n = (sizeof(memblock_t) + sizeof(T) - 1) / sizeof(T);

public:
  using value_type = T;

  /// Destroys the cache, returning every cached block to std::allocator.
  ~cached_alloc()
  {
    while (not free_list.empty()) {
      memblock_t& b = free_list.front();
      free_list.pop_front();
      size_t n = b.block_size;
      b.~memblock_t();
      std::allocator<T>::deallocate(reinterpret_cast<T*>(&b), n);
    }
  }
  cached_alloc() = default;
  cached_alloc(cached_alloc<T>&& other) noexcept = default;
  // Copies start with an empty cache - cached blocks cannot be shared.
  cached_alloc(const cached_alloc<T>& other) noexcept : cached_alloc() {}
  template <typename U>
  explicit cached_alloc(const cached_alloc<U>& other) noexcept : cached_alloc()
  {
    // start empty, as cached blocks cannot be copied
  }
  cached_alloc& operator=(const cached_alloc<T>& other) noexcept { return *this; }
  cached_alloc& operator=(cached_alloc&& other) noexcept = default;

  /// Allocates storage for n objects of T, reusing a cached block of the
  /// exact same (rounded-up) size when one is available.
  /// \param n   number of T objects requested.
  /// \param ptr allocation hint; kept for source compatibility but unused.
  ///            The hinted std::allocator::allocate overload is deprecated in
  ///            C++17 and removed in C++20, so it is no longer forwarded.
  T* allocate(size_t n, const void* ptr = nullptr)
  {
    (void)ptr;
    size_t req_n = std::max(n, min_n); // ensure room for the memblock_t header
    for (memblock_t& b : free_list) {
      if (b.block_size == req_n) {
        // cache hit - unlink, destroy the header and hand back the raw block
        free_list.pop(&b);
        b.~memblock_t();
        return reinterpret_cast<T*>(&b);
      }
    }
    return std::allocator<T>::allocate(req_n);
  }

  /// Returns a block to the cache instead of freeing it, so that a later
  /// allocate() of the same size can reuse it.
  void deallocate(T* p, size_t n) noexcept
  {
    size_t req_n = std::max(n, min_n); // must mirror the rounding in allocate()
    auto*  block = reinterpret_cast<memblock_t*>(p);
    new (block) memblock_t(req_n); // construct the header in-place
    free_list.push_front(block);
  }

  template <typename U>
  struct rebind {
    using other = cached_alloc<U>;
  };

private:
  intrusive_double_linked_list<memblock_t> free_list;
};
} // namespace srsran
/// Two cached allocators compare equal only when they are the very same
/// object: cached blocks are owned per-instance, so distinct instances
/// cannot deallocate each other's memory.
template <typename T1, typename T2>
bool operator==(const srsran::cached_alloc<T1>& lhs, const srsran::cached_alloc<T2>& rhs) noexcept
{
  const void* l = &lhs;
  const void* r = &rhs;
  return l == r;
}
/// Inequality is simply the negation of the identity-based equality above.
template <typename T1, typename T2>
bool operator!=(const srsran::cached_alloc<T1>& lhs, const srsran::cached_alloc<T2>& rhs) noexcept
{
  return !(lhs == rhs);
}
namespace srsran {
// std::deque drop-in whose memory blocks are recycled via cached_alloc.
template <typename T>
using deque = std::deque<T, cached_alloc<T> >;
// std::queue adapter backed by the caching deque above.
template <typename T>
using queue = std::queue<T, srsran::deque<T> >;
} // namespace srsran
#endif // SRSRAN_CACHED_ALLOC_H

View File

@ -57,3 +57,7 @@ add_test(fsm_test fsm_test)
add_executable(optional_test optional_test.cc)
target_link_libraries(optional_test srsran_common)
add_test(optional_test optional_test)
# Unit test for the block-caching allocator (cached_alloc.h).
add_executable(cached_alloc_test cached_alloc_test.cc)
target_link_libraries(cached_alloc_test srsran_common)
add_test(cached_alloc_test cached_alloc_test)

View File

@ -0,0 +1,103 @@
/**
*
* \section COPYRIGHT
*
* Copyright 2013-2021 Software Radio Systems Limited
*
* By using this file, you agree to the terms and conditions set
* forth in the LICENSE file which can be found at the top level of
* the distribution.
*
*/
#include "srsran/adt/pool/cached_alloc.h"
#include "srsran/common/test_common.h"
#include <chrono>
void test_cached_deque_basic_operations()
{
srsran::deque<int> my_deque;
TESTASSERT(my_deque.empty() and my_deque.size() == 0);
my_deque.push_front(0);
my_deque.push_back(1);
TESTASSERT(my_deque.front() == 0 and my_deque.back() == 1);
TESTASSERT(my_deque.size() == 2);
srsran::deque<int> my_deque2(my_deque);
TESTASSERT(my_deque == my_deque2);
my_deque.clear();
TESTASSERT(my_deque != my_deque2);
TESTASSERT(my_deque2.size() == 2 and my_deque2.back() == 1);
TESTASSERT(my_deque.empty());
my_deque = my_deque2;
TESTASSERT(my_deque == my_deque2);
my_deque2.clear();
TESTASSERT(my_deque2.empty());
my_deque2 = std::move(my_deque);
TESTASSERT(my_deque.empty() and my_deque2.size() == 2);
}
/// Move-only type used to verify that srsran::queue works without copies.
struct C {
  C()             = default;
  C(C&&) noexcept = default;
  C(const C&)     = delete;
  C& operator=(C&&) noexcept = default;
  C& operator=(const C&)     = delete;

  // const-qualified so comparisons work through const objects/references;
  // the original non-const overload rejected `const C` operands.
  bool operator==(const C& other) const { return true; }
};
void test_cached_queue_basic_operations()
{
srsran::queue<C> my_queue;
TESTASSERT(my_queue.empty());
my_queue.push(C{});
TESTASSERT(my_queue.size() == 1);
srsran::queue<C> my_queue2(std::move(my_queue));
TESTASSERT(my_queue2.size() == 1);
}
/// Compares steady-state push_back/pop_front throughput of srsran::deque
/// (cached allocator) against std::deque (default allocator) and prints the
/// elapsed times.
void cached_deque_benchmark()
{
  using std::chrono::high_resolution_clock;
  using std::chrono::microseconds;

  srsran::deque<int>                my_deque;
  std::deque<int>                   std_deque;
  high_resolution_clock::time_point tp;
  size_t                            N = 10000000, n_elems = 10;

  // Warm-up: pre-fill both containers so the cached allocator has blocks to
  // recycle during the timed loops. Explicit casts avoid size_t->int narrowing.
  for (size_t i = 0; i < n_elems; ++i) {
    my_deque.push_back(static_cast<int>(i));
    std_deque.push_back(static_cast<int>(i));
  }

  // NOTE: this benchmark doesn't account for when memory is fragmented
  tp = high_resolution_clock::now();
  for (size_t i = n_elems; i < N; ++i) {
    std_deque.push_back(static_cast<int>(i));
    std_deque.pop_front();
  }
  microseconds t_std = std::chrono::duration_cast<microseconds>(high_resolution_clock::now() - tp);

  tp = high_resolution_clock::now();
  for (size_t i = n_elems; i < N; ++i) {
    my_deque.push_back(static_cast<int>(i));
    my_deque.pop_front();
  }
  microseconds t_cached = std::chrono::duration_cast<microseconds>(high_resolution_clock::now() - tp);

  // trailing '\n' added so the report does not run into the next output line
  fmt::print("Time elapsed: cached alloc={} usec, std alloc={} usec\n", t_cached.count(), t_std.count());
}
int main()
{
  // Functional tests first, then the timing comparison.
  test_cached_deque_basic_operations();
  test_cached_queue_basic_operations();
  cached_deque_benchmark();
  return 0;
}