Fix atomic in unittests (#4240)
* Add unittest support for ATOMIC_BLOCK
* BASEPRI emulation on UNIT_TEST target
* Documentation cleanup / fixed misspellings
* ATOMIC_BARRIER implemented for CLang, using blocks
* ATOMIC_BARRIER is using macros to separate barriers
* gcc-specific unittest for ATOMIC_BARRIER
* ATOMIC_BARRIER CLang unittest
* Quick hack to enable CLang's -fblocks + ATOMIC_BARRIER tests. Needs cleanup
* Enable test
* Add libBlocksRuntime dependency to Travis
This commit is contained in:
parent 9664ba13ff
commit f564cdf952
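As background for reviewing the diff below: a minimal usage sketch of ATOMIC_BLOCK as flight code consumes it (the function, counter and priority constant here are hypothetical, not part of this commit). On a real target the macro raises BASEPRI via BASEPRI_MAX and restores it on every exit path; with this change the same code also builds on the UNIT_TEST target, where the current lock level is simply recorded in the new atomic_BASEPRI variable so tests can assert on it.

    #include <stdint.h>
    #include "build/atomic.h"

    #define SOME_IRQ_PRIORITY 0x20           // hypothetical priority value for this sketch

    static volatile uint32_t sharedCounter;  // hypothetical data shared with an interrupt handler

    void incrementShared(void)
    {
        // On hardware, exceptions with priority value >= SOME_IRQ_PRIORITY are masked
        // inside the block; under UNIT_TEST, atomic_BASEPRI is set to 0x20 instead.
        ATOMIC_BLOCK(SOME_IRQ_PRIORITY) {
            sharedCounter++;
        }
        // BASEPRI (or atomic_BASEPRI) is restored here, whichever way the block was left.
    }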
.travis.yml

@@ -32,6 +32,7 @@ addons:
 - git
 - libc6-i386
 - time
+- libblocksruntime-dev

 # We use cpp for unit tests, and c for the main project.
 language: cpp
src/main/build/atomic.c (new file)

@@ -0,0 +1,7 @@
+#include <stdint.h>
+
+#include "atomic.h"
+
+#if defined(UNIT_TEST)
+uint8_t atomic_BASEPRI;
+#endif
src/main/build/atomic.h

@@ -17,20 +17,26 @@

 #pragma once

-// only set_BASEPRI is implemented in device library. It does always create memory barrirer
+#include <stdint.h>
+
+#if !defined(UNIT_TEST)
+// BASEPRI manipulation functions
+// only set_BASEPRI is implemented in device library. It does always create memory barrier
 // missing versions are implemented here

-// set BASEPRI and BASEPRI_MAX register, but do not create memory barrier
+// set BASEPRI register, do not create memory barrier
 __attribute__( ( always_inline ) ) static inline void __set_BASEPRI_nb(uint32_t basePri)
 {
     __ASM volatile ("\tMSR basepri, %0\n" : : "r" (basePri) );
 }

+// set BASEPRI_MAX register, do not create memory barrier
 __attribute__( ( always_inline ) ) static inline void __set_BASEPRI_MAX_nb(uint32_t basePri)
 {
     __ASM volatile ("\tMSR basepri_max, %0\n" : : "r" (basePri) );
 }

+// set BASEPRI_MAX register, with memory barrier
 # if !defined(STM32F3) && !defined(STM32F4) && !defined(STM32F7) /* already defined in /lib/main/CMSIS/CM4/CoreSupport/core_cmFunc.h for F4 targets */
 __attribute__( ( always_inline ) ) static inline void __set_BASEPRI_MAX(uint32_t basePri)
 {
@@ -38,51 +44,102 @@ __attribute__( ( always_inline ) ) static inline void __set_BASEPRI_MAX(uint32_t
 }
 # endif

-// cleanup BASEPRI restore function, with global memory barrier
+#endif
+
+#if defined(UNIT_TEST)
+// atomic related functions for unittest.
+
+extern uint8_t atomic_BASEPRI;
+
+static inline uint8_t __get_BASEPRI(void)
+{
+    return atomic_BASEPRI;
+}
+
+// restore BASEPRI (called as cleanup function), with global memory barrier
+static inline void __basepriRestoreMem(uint8_t *val)
+{
+    atomic_BASEPRI = *val;
+    asm volatile ("": : :"memory"); // compiler memory barrier
+}
+
+// increase BASEPRI, with global memory barrier, returns true
+static inline uint8_t __basepriSetMemRetVal(uint8_t prio)
+{
+    if(prio && (atomic_BASEPRI == 0 || atomic_BASEPRI > prio)) {
+        atomic_BASEPRI = prio;
+    }
+    asm volatile ("": : :"memory"); // compiler memory barrier
+    return 1;
+}
+
+// restore BASEPRI (called as cleanup function), no memory barrier
+static inline void __basepriRestore(uint8_t *val)
+{
+    atomic_BASEPRI = *val;
+}
+
+// increase BASEPRI, no memory barrier, returns true
+static inline uint8_t __basepriSetRetVal(uint8_t prio)
+{
+    if(prio && (atomic_BASEPRI == 0 || atomic_BASEPRI > prio)) {
+        atomic_BASEPRI = prio;
+    }
+    return 1;
+}
+
+#else
+// ARM BASEPRI manipulation
+
+// restore BASEPRI (called as cleanup function), with global memory barrier
 static inline void __basepriRestoreMem(uint8_t *val)
 {
     __set_BASEPRI(*val);
 }

-// set BASEPRI_MAX function, with global memory barrier, returns true
+// set BASEPRI_MAX, with global memory barrier, returns true
 static inline uint8_t __basepriSetMemRetVal(uint8_t prio)
 {
     __set_BASEPRI_MAX(prio);
     return 1;
 }

-// cleanup BASEPRI restore function, no memory barrier
+// restore BASEPRI (called as cleanup function), no memory barrier
 static inline void __basepriRestore(uint8_t *val)
 {
     __set_BASEPRI_nb(*val);
 }

-// set BASEPRI_MAX function, no memory barrier, returns true
+// set BASEPRI_MAX, no memory barrier, returns true
 static inline uint8_t __basepriSetRetVal(uint8_t prio)
 {
     __set_BASEPRI_MAX_nb(prio);
     return 1;
 }

-// Run block with elevated BASEPRI (using BASEPRI_MAX), restoring BASEPRI on exit. All exit paths are handled
-// Full memory barrier is placed at start and exit of block
-#define ATOMIC_BLOCK(prio) for ( uint8_t __basepri_save __attribute__((__cleanup__(__basepriRestoreMem))) = __get_BASEPRI(), \
+#endif
+// Run block with elevated BASEPRI (using BASEPRI_MAX), restoring BASEPRI on exit.
+// All exit paths are handled. Implemented as for loop, does intercept break and continue
+// Full memory barrier is placed at start and at exit of block
+// __unused__ attribute is used to supress CLang warning
+#define ATOMIC_BLOCK(prio) for ( uint8_t __basepri_save __attribute__ ((__cleanup__ (__basepriRestoreMem), __unused__)) = __get_BASEPRI(), \
     __ToDo = __basepriSetMemRetVal(prio); __ToDo ; __ToDo = 0 )

 // Run block with elevated BASEPRI (using BASEPRI_MAX), but do not create memory barrier.
-// Be careful when using this, you must use some method to prevent optimizer form breaking things
+// Be careful when using this, you must use some method to prevent optimizer from breaking things
 // - lto is used for Cleanflight compilation, so function call is not memory barrier
 // - use ATOMIC_BARRIER or volatile to protect used variables
 // - gcc 4.8.4 does write all values in registers to memory before 'asm volatile', so this optimization does not help much
 // - gcc 5 and later works as intended, generating quite optimal code
-#define ATOMIC_BLOCK_NB(prio) for ( uint8_t __basepri_save __attribute__((__cleanup__(__basepriRestore))) = __get_BASEPRI(), \
+#define ATOMIC_BLOCK_NB(prio) for ( uint8_t __basepri_save __attribute__ ((__cleanup__ (__basepriRestore), __unused__)) = __get_BASEPRI(), \
     __ToDo = __basepriSetRetVal(prio); __ToDo ; __ToDo = 0 ) \

 // ATOMIC_BARRIER
 // Create memory barrier
 // - at the beginning of containing block (value of parameter must be reread from memory)
 // - at exit of block (all exit paths) (parameter value if written into memory, but may be cached in register for subsequent use)
-// On gcc 5 and higher, this protects only memory passed as parameter (any type should work)
+// On gcc 5 and higher, this protects only memory passed as parameter (any type can be used)
 // this macro can be used only ONCE PER LINE, but multiple uses per block are fine

 #if (__GNUC__ > 6)
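Editorial note on the pattern used above: ATOMIC_BLOCK and ATOMIC_BLOCK_NB are a for loop that runs exactly once, plus a local variable carrying the gcc/CLang cleanup attribute, which is what makes the restore run on every exit path (fall-through, break, continue, return). A standalone sketch of just that mechanism, with placeholder lock/unlock functions that only print (nothing below is part of the commit):

    #include <stdio.h>

    static void unlockCleanup(int *saved) { printf("restore BASEPRI to %d\n", *saved); }
    static int lockRetVal(int prio) { printf("raise BASEPRI to %d\n", prio); return 1; }

    // Same shape as ATOMIC_BLOCK: declare the saved value with a cleanup handler,
    // acquire in the second initializer, run the body once, clean up at scope exit.
    #define SKETCH_BLOCK(prio) \
        for (int __saved __attribute__ ((__cleanup__ (unlockCleanup), __unused__)) = 0, \
            __ToDo = lockRetVal(prio); __ToDo; __ToDo = 0)

    int main(void)
    {
        SKETCH_BLOCK(32) {
            puts("inside critical section");
            // a break or return here would still trigger unlockCleanup()
        }
        return 0;
    }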
@@ -98,15 +155,37 @@ static inline uint8_t __basepriSetRetVal(uint8_t prio)
 # define __UNIQL(x) __UNIQL_CONCAT(x,__LINE__)
 #endif

-// this macro uses local function for cleanup. CLang block can be substituded
+#define ATOMIC_BARRIER_ENTER(dataPtr, refStr) \
+    __asm__ volatile ("\t# barrier (" refStr ") enter\n" : "+m" (*(dataPtr)))
+
+#define ATOMIC_BARRIER_LEAVE(dataPtr, refStr) \
+    __asm__ volatile ("\t# barrier (" refStr ") leave\n" : "m" (*(dataPtr)))
+
+#if defined(__clang__)
+// CLang version, using Objective C-style block
+// based on https://stackoverflow.com/questions/24959440/rewrite-gcc-cleanup-macro-with-nested-function-for-clang
+typedef void (^__cleanup_block)(void);
+static inline void __do_cleanup(__cleanup_block * b) { (*b)(); }
+
+#define ATOMIC_BARRIER(data) \
+    typeof(data) *__UNIQL(__barrier) = &data; \
+    ATOMIC_BARRIER_ENTER(__UNIQL(__barrier), #data); \
+    __cleanup_block __attribute__((cleanup(__do_cleanup) __unused__)) __UNIQL(__cleanup) = \
+        ^{ ATOMIC_BARRIER_LEAVE(__UNIQL(__barrier), #data); }; \
+    do {} while(0) \
+    /**/
+#else
+// gcc version, uses local function for cleanup.
 #define ATOMIC_BARRIER(data) \
     __extension__ void __UNIQL(__barrierEnd)(typeof(data) **__d) { \
-        __asm__ volatile ("\t# barrier(" #data ") end\n" : : "m" (**__d)); \
+        ATOMIC_BARRIER_LEAVE(*__d, #data); \
     } \
     typeof(data) __attribute__((__cleanup__(__UNIQL(__barrierEnd)))) *__UNIQL(__barrier) = &data; \
-    __asm__ volatile ("\t# barrier (" #data ") start\n" : "+m" (*__UNIQL(__barrier)))
+    ATOMIC_BARRIER_ENTER(__UNIQL(__barrier), #data); \
+    do {} while(0) \
+    /**/
+#endif

-// define these wrappers for atomic operations, use gcc buildins
+// define these wrappers for atomic operations, using gcc builtins
 #define ATOMIC_OR(ptr, val) __sync_fetch_and_or(ptr, val)
 #define ATOMIC_AND(ptr, val) __sync_fetch_and_and(ptr, val)
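A hypothetical usage sketch tying the two halves of this header together (function, variable and priority below are illustrative only): ATOMIC_BLOCK_NB raises BASEPRI without a full memory barrier, and ATOMIC_BARRIER then forces just the named variable to be re-read at the start of the enclosing block and written back on all of its exits, which is the pairing the comments above describe.

    #include <stdint.h>
    #include "build/atomic.h"

    static uint16_t capturedValue;  // assumed to be shared with an interrupt handler;
                                    // not volatile, so the barrier is what matters here

    uint16_t consumeCaptured(void)
    {
        uint16_t copy;
        ATOMIC_BLOCK_NB(0x20) {             // 0x20 is a placeholder priority
            ATOMIC_BARRIER(capturedValue);  // reread on entry, written back at block exit
            copy = capturedValue;
            capturedValue = 0;
        }
        return copy;
    }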
src/test/unit/Makefile

@@ -35,6 +35,9 @@ arming_prevention_unittest_SRC := \
     $(USER_DIR)/fc/runtime_config.c \
     $(USER_DIR)/common/bitarray.c

+atomic_unittest_SRC = \
+    $(USER_DIR)/build/atomic.c \
+    $(TEST_DIR)/atomic_unittest_c.c

 baro_bmp085_unittest_SRC := \
     $(USER_DIR)/drivers/barometer/barometer_bmp085.c \
@@ -299,6 +302,8 @@ UNAME := $(shell uname -s)
 # Use clang/clang++ by default
 CC := clang
 CXX := clang++
+#CC := gcc
+#CXX := g++

 COMMON_FLAGS = \
     -g \
@@ -308,6 +313,7 @@ COMMON_FLAGS = \
     -ggdb3 \
     -O0 \
     -DUNIT_TEST \
+    -fblocks \
     -isystem $(GTEST_DIR)/inc \
     -MMD -MP
@@ -432,7 +438,7 @@ TEST_CFLAGS = $(addprefix -I,$(TEST_INCLUDE_DIRS))
 # param $1 = testname
 define test-specific-stuff

-$$1_OBJS = $$(patsubst $$(USER_DIR)%,$$(OBJECT_DIR)/$1%,$$($1_SRC:=.o))
+$$1_OBJS = $$(patsubst $$(TEST_DIR)%,$$(OBJECT_DIR)/$1%, $$(patsubst $$(USER_DIR)%,$$(OBJECT_DIR)/$1%,$$($1_SRC:=.o)))

 # $$(info $1 -v-v-------)
 # $$(info $1_SRC: $($1_SRC))
@@ -452,6 +458,12 @@ $(OBJECT_DIR)/$1/%.c.o: $(USER_DIR)/%.c
     $(foreach def,$($1_DEFINES),-D $(def)) \
     -c $$< -o $$@

+$(OBJECT_DIR)/$1/%.c.o: $(TEST_DIR)/%.c
+    @echo "compiling test c file: $$<" "$(STDOUT)"
+    $(V1) mkdir -p $$(dir $$@)
+    $(V1) $(CC) $(C_FLAGS) $(TEST_CFLAGS) \
+    $(foreach def,$($1_DEFINES),-D $(def)) \
+    -c $$< -o $$@

 $(OBJECT_DIR)/$1/$1.o: $(TEST_DIR)/$1.cc
     @echo "compiling $$<" "$(STDOUT)"
@@ -467,7 +479,7 @@ $(OBJECT_DIR)/$1/$1 : $$($$1_OBJS) \

     @echo "linking $$@" "$(STDOUT)"
     $(V1) mkdir -p $(dir $$@)
-    $(V1) $(CXX) $(CXX_FLAGS) $(PG_FLAGS) $$^ -o $$@
+    $(V1) $(CXX) $(CXX_FLAGS) $(PG_FLAGS) -lBlocksRuntime $$^ -o $$@

 test_$1: $(OBJECT_DIR)/$1/$1
     $(V1) $$< $$(EXEC_OPTS) "$(STDOUT)" && echo "running $$@: PASS"
src/test/unit/atomic_unittest.cc (new file)

@@ -0,0 +1,116 @@
+/*
+ * This file is part of Cleanflight.
+ *
+ * Cleanflight is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Cleanflight is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Cleanflight. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "unittest_macros.h"
+#include "gtest/gtest.h"
+
+extern "C" {
+#include "build/atomic.h"
+}
+
+TEST(AtomicUnittest, TestAtomicBlock)
+{
+    atomic_BASEPRI = 0; // reset BASEPRI
+
+    ATOMIC_BLOCK(10) {
+        EXPECT_EQ(atomic_BASEPRI, 10); // locked
+        ATOMIC_BLOCK(5) {
+            EXPECT_EQ(atomic_BASEPRI, 5); // priority increase
+        }
+        EXPECT_EQ(atomic_BASEPRI, 10); // restore priority
+        ATOMIC_BLOCK(20) {
+            EXPECT_EQ(atomic_BASEPRI, 10); // lower priority, no change to value
+        }
+        EXPECT_EQ(atomic_BASEPRI, 10); // restore priority
+    }
+    EXPECT_EQ(atomic_BASEPRI, 0); // restore priority to unlocked
+}
+
+TEST(AtomicUnittest, TestAtomicBlockNB)
+{
+    atomic_BASEPRI = 0; // reset BASEPRI
+
+    ATOMIC_BLOCK_NB(10) {
+        EXPECT_EQ(atomic_BASEPRI, 10); // locked
+        ATOMIC_BLOCK_NB(5) {
+            EXPECT_EQ(atomic_BASEPRI, 5); // priority increase
+        }
+        EXPECT_EQ(atomic_BASEPRI, 10); // restore priority
+        ATOMIC_BLOCK_NB(20) {
+            EXPECT_EQ(atomic_BASEPRI, 10); // lower priority, no change to value
+        }
+        EXPECT_EQ(atomic_BASEPRI, 10); // restore priority
+    }
+    EXPECT_EQ(atomic_BASEPRI, 0); // restore priority to unlocked
+}
+
+#if 1 // not working now ... (CLang needs -fblock + libraries / some unittests don't support gcc)
+
+struct barrierTrace {
+    int enter, leave;
+};
+
+extern "C" {
+    int testAtomicBarrier_C(struct barrierTrace *b0, struct barrierTrace *b1, struct barrierTrace sample[][2]);
+}
+
+TEST(AtomicUnittest, TestAtomicBarrier)
+{
+    barrierTrace b0,b1,sample[10][2];
+
+    int sampled = testAtomicBarrier_C(&b0,&b1,sample);
+    int sIdx = 0;
+    EXPECT_EQ(sample[sIdx][0].enter, 0);
+    EXPECT_EQ(sample[sIdx][0].leave, 0);
+    EXPECT_EQ(sample[sIdx][1].enter, 0);
+    EXPECT_EQ(sample[sIdx][1].leave, 0);
+    sIdx++;
+    // do {
+    //     ATOMIC_BARRIER(*b0);
+    //     ATOMIC_BARRIER(*b1);
+    EXPECT_EQ(sample[sIdx][0].enter, 1);
+    EXPECT_EQ(sample[sIdx][0].leave, 0);
+    EXPECT_EQ(sample[sIdx][1].enter, 1);
+    EXPECT_EQ(sample[sIdx][1].leave, 0);
+    sIdx++;
+    //     do {
+    //         ATOMIC_BARRIER(*b0);
+    EXPECT_EQ(sample[sIdx][0].enter, 2);
+    EXPECT_EQ(sample[sIdx][0].leave, 0);
+    EXPECT_EQ(sample[sIdx][1].enter, 1);
+    EXPECT_EQ(sample[sIdx][1].leave, 0);
+    sIdx++;
+    //     } while(0);
+    EXPECT_EQ(sample[sIdx][0].enter, 2);
+    EXPECT_EQ(sample[sIdx][0].leave, 1);
+    EXPECT_EQ(sample[sIdx][1].enter, 1);
+    EXPECT_EQ(sample[sIdx][1].leave, 0);
+    sIdx++;
+    // } while(0);
+    EXPECT_EQ(sample[sIdx][0].enter, 2);
+    EXPECT_EQ(sample[sIdx][0].leave, 2);
+    EXPECT_EQ(sample[sIdx][1].enter, 1);
+    EXPECT_EQ(sample[sIdx][1].leave, 1);
+    sIdx++;
+    //return sIdx;
+    EXPECT_EQ(sIdx, sampled);
+}
+
+#endif
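The "#if 1" note above refers to the CLang build: the block-based ATOMIC_BARRIER needs -fblocks and the BlocksRuntime library, which is why this commit adds -fblocks to COMMON_FLAGS, -lBlocksRuntime to the link step and libblocksruntime-dev to Travis. A minimal standalone sketch of the cleanup-block idiom itself, with illustrative names (build with clang -fblocks and link against BlocksRuntime):

    #include <stdio.h>

    typedef void (^cleanup_block)(void);
    static void run_cleanup(cleanup_block *b) { (*b)(); }

    int main(void)
    {
        {
            // The block literal is stored in a variable carrying the cleanup
            // attribute, so it runs when the enclosing scope is left.
            cleanup_block __attribute__ ((cleanup(run_cleanup), unused)) onExit =
                ^{ puts("barrier leave"); };
            puts("barrier enter / inside scope");
        }
        puts("after scope");
        return 0;
    }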
src/test/unit/atomic_unittest_c.c (new file)

@@ -0,0 +1,37 @@
+#include "build/atomic.h"
+
+struct barrierTrace {
+    int enter, leave;
+};
+
+int testAtomicBarrier_C(struct barrierTrace *b0, struct barrierTrace *b1, struct barrierTrace sample[][2])
+{
+    int sIdx = 0;
+
+// replace barrier macros to track barrier invocation
+// pass known struct as barrier variable, keep track inside it
+#undef ATOMIC_BARRIER_ENTER
+#undef ATOMIC_BARRIER_LEAVE
+#define ATOMIC_BARRIER_ENTER(ptr, refStr) do {(ptr)->enter++; } while(0)
+#define ATOMIC_BARRIER_LEAVE(ptr, refStr) do {(ptr)->leave++; } while(0)
+
+    b0->enter = 0; b0->leave = 0;
+    b1->enter = 0; b1->leave = 0;
+    sample[sIdx][0]=*b0; sample[sIdx][1]=*b1; sIdx++;
+    do {
+        ATOMIC_BARRIER(*b0);
+        ATOMIC_BARRIER(*b1);
+        sample[sIdx][0]=*b0; sample[sIdx][1]=*b1; sIdx++;
+        do {
+            ATOMIC_BARRIER(*b0);
+            sample[sIdx][0]=*b0; sample[sIdx][1]=*b1; sIdx++;
+        } while(0);
+        sample[sIdx][0]=*b0; sample[sIdx][1]=*b1; sIdx++;
+    } while(0);
+    sample[sIdx][0]=*b0; sample[sIdx][1]=*b1; sIdx++;
+    return sIdx;
+
+// ATOMIC_BARRIER is broken in rest of this file
+#undef ATOMIC_BARRIER_ENTER
+#undef ATOMIC_BARRIER_LEAVE
+}
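Why the helper above can hijack the barriers: ATOMIC_BARRIER only expands ATOMIC_BARRIER_ENTER and ATOMIC_BARRIER_LEAVE at its point of use, so redefining those two macros before the do/while blocks turns every subsequent barrier in this translation unit into a pair of counters, which is also why the closing comment warns that ATOMIC_BARRIER stays broken for the rest of the file. A tiny sketch of that redefinition behaviour with an illustrative macro name:

    #include <stdio.h>

    #define TRACE() printf("real barrier\n")

    int main(void)
    {
        TRACE();                    // expands to the original definition

    #undef TRACE
    #define TRACE() printf("counted barrier\n")

        TRACE();                    // later uses pick up the redefinition
        return 0;
    }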