Function chCoreGetStatusX() changed to return a memory region object instead of a simple size. Started implementation of a memory heap checker.

git-svn-id: svn://svn.code.sf.net/p/chibios/svn/trunk@15090 27425a3e-05d8-49a3-a47f-9c15f0e5edd8
Giovanni Di Sirio 2021-11-16 13:34:05 +00:00
parent 533d1a4599
commit a5f516e19e
7 changed files with 130 additions and 87 deletions

@@ -50,14 +50,15 @@
typedef struct {
/**
* @brief Memory region base.
* @note Zero if not used.
* @note Value -1 is reserved as end-of-array marker.
*/
uint8_t *base;
/**
* @brief Memory region end (inclusive).
* @note Zero if not used.
* @brief Memory region size.
* @note Value 0 represents the whole address space and is only valid
* when @p base is also zero.
*/
uint8_t *end;
size_t size;
} memory_region_t;
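For illustration, a 32kB region starting at a hypothetical address 0x20000000 is now expressed with a base/size pair instead of the former inclusive base/end pair:

/* New base/size form (address and size are hypothetical). */
memory_region_t sram_region = {(uint8_t *)0x20000000, 0x8000U};

/* The same region in the former base/end (inclusive) form would have
   been {(uint8_t *)0x20000000, (uint8_t *)0x20007FFF}.              */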
/*===========================================================================*/
@@ -96,7 +97,7 @@ extern "C" {
*
* @param[in] mrp pointer to an array of valid regions terminated with
* a zero element
* @param[in] base pointer to the area to be checked
* @param[in] p pointer to the area to be checked
* @param[in] size size of the area to be checked
* @return The test result.
* @retval true if the area is entirely contained within one of the
@@ -106,12 +107,12 @@ extern "C" {
* @xclass
*/
static inline bool chMemIsAreaWithinX(const memory_region_t *mrp,
const void *base,
const void *p,
size_t size) {
uint8_t *start = (uint8_t *)base;
uint8_t *base = (uint8_t *)p;
return (bool)((start >= mrp->base) && (start <= mrp->end) &&
(size <= (size_t)(mrp->end - start + 1U)));
return (bool)((base >= mrp->base) &&
(size <= (size_t)(mrp->base + mrp->size - base)));
}
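A usage sketch of the rewritten check (the region values are hypothetical, ch.h assumed included); the function returns true only when the whole area fits inside the region:

/* Hypothetical SRAM region. */
memory_region_t region = {(uint8_t *)0x20000000, 0x8000U};

bool buffer_is_in_sram(const void *buf, size_t n) {

  /* True only if buf .. buf+n-1 lies entirely inside "region". */
  return chMemIsAreaWithinX(&region, buf, n);
}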
#if CH_CFG_USE_MEMCHECKS == FALSE

@@ -125,7 +125,7 @@ extern "C" {
void *chCoreAllocFromTopI(size_t size, unsigned align, size_t offset);
void *chCoreAllocFromBase(size_t size, unsigned align, size_t offset);
void *chCoreAllocFromTop(size_t size, unsigned align, size_t offset);
size_t chCoreGetStatusX(void);
void chCoreGetStatusX(memory_region_t *mrp);
#ifdef __cplusplus
}
#endif

@@ -80,13 +80,31 @@ typedef union heap_header heap_header_t;
* @brief Memory heap block header.
*/
union heap_header {
/**
* @brief Header for free blocks.
*/
struct {
heap_header_t *next; /**< @brief Next block in free list. */
size_t pages; /**< @brief Size of the area in pages. */
/**
* @brief Next block in free list.
*/
heap_header_t *next;
/**
* @brief Size of the area in pages.
*/
size_t pages;
} free;
/**
* @brief Header for used blocks.
*/
struct {
memory_heap_t *heap; /**< @brief Block owner heap. */
size_t size; /**< @brief Size of the area in bytes. */
/**
* @brief Block owner heap.
*/
memory_heap_t *heap;
/**
* @brief Size of the area in bytes.
*/
size_t size;
} used;
};
@@ -94,13 +112,28 @@ union heap_header {
* @brief Structure describing a memory heap.
*/
struct memory_heap {
memgetfunc2_t provider; /**< @brief Memory blocks provider for
this heap. */
heap_header_t header; /**< @brief Free blocks list header. */
/**
* @brief Memory blocks provider for this heap.
*/
memgetfunc2_t provider;
/**
* @brief Memory region for this heap.
*/
memory_region_t region;
/**
* @brief Free blocks list header.
*/
heap_header_t header;
#if (CH_CFG_USE_MUTEXES == TRUE) || defined(__DOXYGEN__)
mutex_t mtx; /**< @brief Heap access mutex. */
/**
* @brief Heap access mutex.
*/
mutex_t mtx;
#else
semaphore_t sem; /**< @brief Heap access semaphore. */
/**
* @brief Heap access fallback semaphore.
*/
semaphore_t sem;
#endif
};

@@ -40,20 +40,22 @@
* @brief Default writable memory regions.
* @details By default all memory is writable; the user must provide a
* writable regions array for the device in use.
* @note The array is terminated by an end marker (base=-1).
*/
CC_WEAK memory_region_t __ch_mem_writable_regions[] = {
{(uint8_t *)0, (uint8_t *)-1},
{(uint8_t *)0, (uint8_t *)0},
{(uint8_t *)0, 0U}, /* Whole space is writable. */
{(uint8_t *)-1, 0U},
};
/**
* @brief Default readable memory regions.
* @details By default all memory is readable; the user must provide a
* readable regions array for the device in use.
* @note The array is terminated by an end marker (base=-1).
*/
CC_WEAK memory_region_t __ch_mem_readable_regions[] = {
{(uint8_t *)0, (uint8_t *)-1},
{(uint8_t *)0, (uint8_t *)0},
{(uint8_t *)0, 0U}, /* Whole space is readable. */
{(uint8_t *)-1, 0U},
};
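Since the weak defaults above mark the whole address space as accessible, a port or application can override them with device-specific arrays; a possible override could look like the sketch below (addresses and sizes are hypothetical and must match the actual device memory map):

/* Strong definitions override the weak defaults. */
memory_region_t __ch_mem_writable_regions[] = {
  {(uint8_t *)0x20000000, 0x00020000U},       /* 128kB SRAM.          */
  {(uint8_t *)-1, 0U},                        /* End-of-array marker. */
};

memory_region_t __ch_mem_readable_regions[] = {
  {(uint8_t *)0x08000000, 0x00080000U},       /* 512kB flash.         */
  {(uint8_t *)0x20000000, 0x00020000U},       /* 128kB SRAM.          */
  {(uint8_t *)-1, 0U},                        /* End-of-array marker. */
};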
/*===========================================================================*/
@@ -94,7 +96,7 @@ bool chMemIsAreaContainedX(const memory_region_t regions[],
chDbgCheck(base != NULL);
/* Scanning the array of the valid regions for a mismatch.*/
while (mrp->base != mrp->end) {
while (mrp->base != (uint8_t *)-1) {
if (chMemIsAreaWithinX(mrp, base, size)) {
return true;
}
@@ -111,8 +113,8 @@ bool chMemIsAreaContainedX(const memory_region_t regions[],
* @note This function is only effective if @p CH_CFG_SYS_WRITABLE_REGIONS
* is defined, if it is not defined then just the alignment of
* the pointer is checked.
* @note @p CH_CFG_SYS_WRITABLE_REGIONS must be the name of a global
* @p memory_region_t array terminated with a zero element.
* @note @p __ch_mem_writable_regions must be the name of a global
* @p memory_region_t array terminated with an end marker (-1, 0).
*
* @param[in] p pointer to be checked
* @param[in] align required pointer alignment to be checked, must be
@@ -144,8 +146,8 @@ bool chMemIsAreaWritableX(const void *p,
* @note This function is only effective if @p CH_CFG_SYS_READABLE_REGIONS
* is defined, if it is not defined then just the alignment of
* the pointer is checked.
* @note @p CH_CFG_SYS_READABLE_REGIONS must be the name of a global
* @p memory_region_t array terminated with a zero element.
* @note @p __ch_mem_readable_regions must be the name of a global
* @p memory_region_t array terminated with an end marker (-1, 0).
*
* @param[in] p pointer to be checked
* @param[in] align required pointer alignment to be checked, must be

@@ -212,14 +212,15 @@ void *chCoreAllocFromTop(size_t size, unsigned align, size_t offset) {
/**
* @brief Core memory status.
*
* @return The size, in bytes, of the free core memory.
* @param[out] mrp pointer to a memory region object to be filled with the available core space.
*
* @xclass
*/
size_t chCoreGetStatusX(void) {
void chCoreGetStatusX(memory_region_t *mrp) {
mrp->base = ch_memcore.basemem;
/*lint -save -e9033 [10.8] The cast is safe.*/
return (size_t)(ch_memcore.topmem - ch_memcore.basemem);
mrp->size = (size_t)(ch_memcore.topmem - ch_memcore.basemem);
/*lint -restore*/
}
#endif /* CH_CFG_USE_MEMCORE == TRUE */
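Call sites that previously stored the returned size now pass a region object to be filled; a minimal sketch of the adapted usage:

size_t core_free_bytes(void) {
  memory_region_t core_free;

  /* Previously this was simply "return chCoreGetStatusX();". */
  chCoreGetStatusX(&core_free);

  /* base is the first unallocated byte, size the bytes still free. */
  return core_free.size;
}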

@@ -56,15 +56,17 @@
#define H_BLOCK(hp) ((hp) + 1U)
#define H_LIMIT(hp) (H_BLOCK(hp) + H_PAGES(hp))
#define H_FREE_PAGES(hp) ((hp)->free.pages)
#define H_NEXT(hp) ((hp)->free.next)
#define H_FREE_NEXT(hp) ((hp)->free.next)
#define H_PAGES(hp) ((hp)->free.pages)
#define H_FREE_FULLSIZE(hp) (size_t)(((hp)->free.pages + 1U) * sizeof (heap_header_t))
#define H_HEAP(hp) ((hp)->used.heap)
#define H_FREE_LIMIT(hp) (H_BLOCK(hp) + H_FREE_PAGES(hp))
#define H_SIZE(hp) ((hp)->used.size)
#define H_USED_HEAP(hp) ((hp)->used.heap)
#define H_USED_SIZE(hp) ((hp)->used.size)
/*
* Number of pages between two pointers in a MISRA-compatible way.
@@ -107,8 +109,9 @@ static memory_heap_t default_heap;
void __heap_init(void) {
default_heap.provider = chCoreAllocAlignedWithOffset;
H_NEXT(&default_heap.header) = NULL;
H_PAGES(&default_heap.header) = 0;
chCoreGetStatusX(&default_heap.region);
H_FREE_NEXT(&default_heap.header) = NULL;
H_FREE_PAGES(&default_heap.header) = 0;
#if (CH_CFG_USE_MUTEXES == TRUE) || defined(__DOXYGEN__)
chMtxObjectInit(&default_heap.mtx);
#else
@@ -141,10 +144,12 @@ void chHeapObjectInit(memory_heap_t *heapp, void *buf, size_t size) {
/* Initializing the heap header.*/
heapp->provider = NULL;
H_NEXT(&heapp->header) = hp;
H_PAGES(&heapp->header) = 0;
H_NEXT(hp) = NULL;
H_PAGES(hp) = (size - sizeof (heap_header_t)) / CH_HEAP_ALIGNMENT;
H_FREE_NEXT(&heapp->header) = hp;
H_FREE_PAGES(&heapp->header) = 0;
H_FREE_NEXT(hp) = NULL;
H_FREE_PAGES(hp) = (size - sizeof (heap_header_t)) / CH_HEAP_ALIGNMENT;
heapp->region.base = (uint8_t *)(void *)hp;
heapp->region.size = H_FREE_FULLSIZE(hp);
#if (CH_CFG_USE_MUTEXES == TRUE) || defined(__DOXYGEN__)
chMtxObjectInit(&heapp->mtx);
#else
@@ -193,15 +198,15 @@ void *chHeapAllocAligned(memory_heap_t *heapp, size_t size, unsigned align) {
/* Start of the free blocks list.*/
qp = &heapp->header;
while (H_NEXT(qp) != NULL) {
while (H_FREE_NEXT(qp) != NULL) {
/* Next free block.*/
hp = H_NEXT(qp);
hp = H_FREE_NEXT(qp);
/* Pointer aligned to the requested alignment.*/
ahp = (heap_header_t *)MEM_ALIGN_NEXT(H_BLOCK(hp), align) - 1U;
if ((ahp < H_LIMIT(hp)) && (pages <= NPAGES(H_LIMIT(hp), ahp + 1U))) {
if ((ahp < H_FREE_LIMIT(hp)) && (pages <= NPAGES(H_FREE_LIMIT(hp), ahp + 1U))) {
/* The block is large enough to contain a correctly aligned area
of sufficient size.*/
@@ -209,19 +214,19 @@ void *chHeapAllocAligned(memory_heap_t *heapp, size_t size, unsigned align) {
/* The block is not properly aligned, must split it.*/
size_t bpages;
bpages = NPAGES(H_LIMIT(hp), H_BLOCK(ahp));
H_PAGES(hp) = NPAGES(ahp, H_BLOCK(hp));
bpages = NPAGES(H_FREE_LIMIT(hp), H_BLOCK(ahp));
H_FREE_PAGES(hp) = NPAGES(ahp, H_BLOCK(hp));
if (bpages > pages) {
/* The block is bigger than required, must split the excess.*/
heap_header_t *fp;
/* Creating the excess block.*/
fp = H_BLOCK(ahp) + pages;
H_PAGES(fp) = (bpages - pages) - 1U;
H_FREE_PAGES(fp) = (bpages - pages) - 1U;
/* Linking the excess block.*/
H_NEXT(fp) = H_NEXT(hp);
H_NEXT(hp) = fp;
H_FREE_NEXT(fp) = H_FREE_NEXT(hp);
H_FREE_NEXT(hp) = fp;
}
hp = ahp;
@@ -229,24 +234,24 @@ void *chHeapAllocAligned(memory_heap_t *heapp, size_t size, unsigned align) {
else {
/* The block is already properly aligned.*/
if (H_PAGES(hp) == pages) {
if (H_FREE_PAGES(hp) == pages) {
/* Exact size, getting the whole block.*/
H_NEXT(qp) = H_NEXT(hp);
H_FREE_NEXT(qp) = H_FREE_NEXT(hp);
}
else {
/* The block is bigger than required, must split the excess.*/
heap_header_t *fp;
fp = H_BLOCK(hp) + pages;
H_NEXT(fp) = H_NEXT(hp);
H_PAGES(fp) = NPAGES(H_LIMIT(hp), H_BLOCK(fp));
H_NEXT(qp) = fp;
H_FREE_NEXT(fp) = H_FREE_NEXT(hp);
H_FREE_PAGES(fp) = NPAGES(H_FREE_LIMIT(hp), H_BLOCK(fp));
H_FREE_NEXT(qp) = fp;
}
}
/* Setting in the block owner heap and size.*/
H_SIZE(hp) = size;
H_HEAP(hp) = heapp;
H_USED_SIZE(hp) = size;
H_USED_HEAP(hp) = heapp;
/* Releasing heap mutex.*/
H_UNLOCK(heapp);
@@ -271,8 +276,8 @@ void *chHeapAllocAligned(memory_heap_t *heapp, size_t size, unsigned align) {
sizeof (heap_header_t));
if (ahp != NULL) {
hp = ahp - 1U;
H_HEAP(hp) = heapp;
H_SIZE(hp) = size;
H_USED_HEAP(hp) = heapp;
H_USED_SIZE(hp) = size;
/*lint -save -e9087 [11.3] Safe cast.*/
return (void *)ahp;
@@ -299,38 +304,38 @@ void chHeapFree(void *p) {
/*lint -save -e9087 [11.3] Safe cast.*/
hp = (heap_header_t *)p - 1U;
/*lint -restore*/
heapp = H_HEAP(hp);
heapp = H_USED_HEAP(hp);
qp = &heapp->header;
/* Size is converted in number of elementary allocation units.*/
H_PAGES(hp) = MEM_ALIGN_NEXT(H_SIZE(hp),
H_FREE_PAGES(hp) = MEM_ALIGN_NEXT(H_USED_SIZE(hp),
CH_HEAP_ALIGNMENT) / CH_HEAP_ALIGNMENT;
/* Taking heap mutex.*/
H_LOCK(heapp);
while (true) {
chDbgAssert((hp < qp) || (hp >= H_LIMIT(qp)), "within free block");
chDbgAssert((hp < qp) || (hp >= H_FREE_LIMIT(qp)), "within free block");
if (((qp == &heapp->header) || (hp > qp)) &&
((H_NEXT(qp) == NULL) || (hp < H_NEXT(qp)))) {
((H_FREE_NEXT(qp) == NULL) || (hp < H_FREE_NEXT(qp)))) {
/* Insertion after qp.*/
H_NEXT(hp) = H_NEXT(qp);
H_NEXT(qp) = hp;
H_FREE_NEXT(hp) = H_FREE_NEXT(qp);
H_FREE_NEXT(qp) = hp;
/* Verifies if the newly inserted block should be merged.*/
if (H_LIMIT(hp) == H_NEXT(hp)) {
if (H_FREE_LIMIT(hp) == H_FREE_NEXT(hp)) {
/* Merge with the next block.*/
H_PAGES(hp) += H_PAGES(H_NEXT(hp)) + 1U;
H_NEXT(hp) = H_NEXT(H_NEXT(hp));
H_FREE_PAGES(hp) += H_FREE_PAGES(H_FREE_NEXT(hp)) + 1U;
H_FREE_NEXT(hp) = H_FREE_NEXT(H_FREE_NEXT(hp));
}
if ((H_LIMIT(qp) == hp)) {
if ((H_FREE_LIMIT(qp) == hp)) {
/* Merge with the previous block.*/
H_PAGES(qp) += H_PAGES(hp) + 1U;
H_NEXT(qp) = H_NEXT(hp);
H_FREE_PAGES(qp) += H_FREE_PAGES(hp) + 1U;
H_FREE_NEXT(qp) = H_FREE_NEXT(hp);
}
break;
}
qp = H_NEXT(qp);
qp = H_FREE_NEXT(qp);
}
/* Releasing heap mutex.*/
@@ -367,8 +372,8 @@ size_t chHeapStatus(memory_heap_t *heapp, size_t *totalp, size_t *largestp) {
lpages = 0U;
n = 0U;
qp = &heapp->header;
while (H_NEXT(qp) != NULL) {
size_t pages = H_PAGES(H_NEXT(qp));
while (H_FREE_NEXT(qp) != NULL) {
size_t pages = H_FREE_PAGES(H_FREE_NEXT(qp));
/* Updating counters.*/
n++;
@@ -377,7 +382,7 @@ size_t chHeapStatus(memory_heap_t *heapp, size_t *totalp, size_t *largestp) {
lpages = pages;
}
qp = H_NEXT(qp);
qp = H_FREE_NEXT(qp);
}
/* Writing out fragmented free memory.*/
@@ -394,8 +399,6 @@ size_t chHeapStatus(memory_heap_t *heapp, size_t *totalp, size_t *largestp) {
return n;
}
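A usage sketch, assuming a heap object my_heap was initialized earlier with chHeapObjectInit(); the return value is the number of free fragments, while the output parameters report the total free memory and the largest single free block:

extern memory_heap_t my_heap;   /* Assumed initialized elsewhere.     */

void log_heap_state(void) {
  size_t total, largest, frags;

  frags = chHeapStatus(&my_heap, &total, &largest);

  /* frags   : number of free fragments in the heap,
     total   : sum of all free fragment sizes in bytes,
     largest : size of the largest single free fragment in bytes.      */
  (void)frags; (void)total; (void)largest;
}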
#define isvalidramexcl(p, a) true
#define isvalidramincl(p, a) true
#define isvalidfunction(p) true
/**
@@ -428,11 +431,12 @@ bool chHeapIntegrityCheck(memory_heap_t *heapp) {
H_LOCK(heapp);
hp = &heapp->header;
while ((hp = H_NEXT(hp)) != NULL) {
while ((hp = H_FREE_NEXT(hp)) != NULL) {
/* Validating the area pointers.*/
if (!isvalidramexcl(hp, CH_HEAP_ALIGNMENT) ||
!isvalidramincl(H_LIMIT(hp), CH_HEAP_ALIGNMENT)) {
/* Validating the found free block.*/
if (!chMemIsAreaWithinX(&heapp->region,
(void *)hp,
H_FREE_FULLSIZE(hp))) {
result = true;
break;
}
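The check now validates every free-list entry against the heap's own region through chMemIsAreaWithinX(); a sketch of a periodic sanity check, reusing the my_heap object from the previous sketch:

void check_my_heap(void) {

  /* chHeapIntegrityCheck() returns true when an anomaly is detected
     in the free blocks list.                                          */
  if (chHeapIntegrityCheck(&my_heap)) {
    chSysHalt("heap corrupted");
  }
}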

@@ -74,6 +74,8 @@
*****************************************************************************
*** Next ***
- NEW: Function chCoreGetStatusX() changed to return a memory region object
instead of a simple size.
- NEW: RT and NIL upgraded to support the enhanced OSLIB.
- NEW: Memory areas/pointers checker functions added to OSLIB.
- NEW: STM32G0B1 USBv2 driver.