/**
 * @file perf_trace.cpp
 *
 * https://github.com/rusefi/rusefi/wiki/Developer-Performance-Tracing
 *
 * See JsonOutput.java in rusEfi console
 */

#include "pch.h"

#ifndef ENABLE_PERF_TRACE
#error ENABLE_PERF_TRACE must be defined!
#endif

#ifndef TRACE_BUFFER_LENGTH
#define TRACE_BUFFER_LENGTH 2048
#endif /* TRACE_BUFFER_LENGTH */

// If tracing is disabled entirely, shrink the buffer to a single (unused) entry
#if !ENABLE_PERF_TRACE
#undef TRACE_BUFFER_LENGTH
#define TRACE_BUFFER_LENGTH 1
#endif
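
// The phase of a trace event: Start/End bracket a duration, Instant* mark a single point in time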
enum class EPhase : char
{
	Start,
	End,
	InstantThread,
	InstantGlobal,
};
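
// A single 8-byte trace record - the console decodes this binary layout (see JsonOutput.java)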
struct TraceEntry
{
	PE Event;
	EPhase Phase;
	int8_t IsrId;
	uint8_t ThreadId;
	uint32_t Timestamp;
};

// Ensure that the struct is the size we think it is - the binary layout is important
static_assert(sizeof(TraceEntry) == 8);

// This buffer stores a trace - we write the full buffer once, then disable tracing
static TraceEntry s_traceBuffer[TRACE_BUFFER_LENGTH];
static size_t s_nextIdx = 0;
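
// Set by perfTraceEnable(); cleared once the buffer fills up or the buffer is read out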
static bool s_isTracing = false;
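
// Record a single event into the trace buffer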
static void perfEventImpl(PE event, EPhase phase)
{
	// Bail if perf tracing is disabled at compile time
	if constexpr (!ENABLE_PERF_TRACE) {
		return;
	}

	// Bail if we aren't currently tracing
	if (!s_isTracing) {
		return;
	}

	// todo: why doesn't getTimeNowLowerNt() work here?
	// It returns 0 as if we were in a unit test
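	// port_rt_get_counter_value() reads the ChibiOS port's realtime counter
	// (on Cortex-M ports this is typically the CPU cycle counter)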
	uint32_t timestamp = port_rt_get_counter_value();

	size_t idx;

	// Critical section: disable interrupts to reserve an index.
	// We could use a lock, but this gets called a LOT - so locking could
	// significantly alter the results of the measurement.
	// In addition, if we want to trace lock/unlock events, the tracer
	// itself can't be taking locks.
	{
		uint32_t prim = __get_PRIMASK();
		__disable_irq();

		idx = s_nextIdx++;
		if (s_nextIdx >= TRACE_BUFFER_LENGTH) {
			s_nextIdx = 0;
			s_isTracing = false;
		}

		// Restore the previous interrupt state - only re-enable interrupts if they were enabled on entry
		if (!prim) {
			__enable_irq();
		}
	}

	// We can safely write the entry outside the critical section - our slot is already reserved
	volatile TraceEntry& entry = s_traceBuffer[idx];

	entry.Event = event;
	entry.Phase = phase;

	// Get the currently active interrupt - this is the "process ID"
	auto isr = static_cast<int8_t>(SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk);
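	// VECTACTIVE counts the 16 Cortex-M system exceptions before external interrupts,
	// so subtracting 16 yields the device IRQ number (thread mode reads as 0, giving -16)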
	entry.IsrId = isr - 16;

	// If we're not in an interrupt, use the current thread as the thread ID
	if (isr == 0) {
		entry.ThreadId = chThdGetSelfX()->threadId;
	} else {
		// Interrupts have no thread - they all report as thread 0
		entry.ThreadId = 0;
	}

	entry.Timestamp = timestamp;
}
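
// Public wrappers around perfEventImpl()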
void perfEventBegin(PE event) {
	perfEventImpl(event, EPhase::Start);
}

void perfEventEnd(PE event) {
	perfEventImpl(event, EPhase::End);
}

void perfEventInstantGlobal(PE event) {
	perfEventImpl(event, EPhase::InstantGlobal);
}
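
// Begin capturing a trace; capturing stops automatically once the buffer is full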
void perfTraceEnable() {
	s_isTracing = true;
}
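
// Hand out the raw trace buffer so the console can download and decode it (see JsonOutput.java)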
const TraceBufferResult perfTraceGetBuffer() {
	// Stop tracing in case the buffer is requested before it has filled up
	s_isTracing = false;

	return {reinterpret_cast<const uint8_t*>(s_traceBuffer), sizeof(s_traceBuffer)};
}