parent 82734d9691
commit 78345f2a78
@@ -455,6 +455,11 @@ static void scheduleSparkEvent(bool limitedSpark, IgnitionEvent *event,
 	efiPrintf("scheduling overdwell sparkDown revolution=%d [%s] for %d", getRevolutionCounter(), event->getOutputForLoggins()->getName(), fireTime);
 #endif /* SPARK_EXTREME_LOGGING */
 
+		/**
+		 * todo one: explicit unit test for this mechanism see https://github.com/rusefi/rusefi/issues/6373
+		 * todo two: can we please comprehend/document how this even works? we seem to be reusing 'sparkEvent.scheduling' instance
+		 * and it looks like current (smart?) re-queuing is effectively cancelling out the overdwell? is that the way this was intended to work?
+		 */
 		engine->executor.scheduleByTimestampNt("overdwell", &event->sparkEvent.scheduling, fireTime, { overFireSparkAndPrepareNextSchedule, event });
 	} else {
 		engine->engineState.overDwellNotScheduledCounter++;
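The todo block added above asks whether re-queuing on the shared event->sparkEvent.scheduling instance ends up cancelling the overdwell timer. The sketch below is not the rusEFI executor; it is a minimal hypothetical model (ToyExecutor, SchedulingSlot and executeUpTo are invented for illustration) that assumes a scheduler keeps at most one pending action per scheduling object and replaces the old action when the same object is queued again. Under that assumed policy, arming the overdwell and then scheduling the normal spark-down into the same slot silently drops the overdwell callback, which is the behaviour the comment is questioning.

#include <cstdint>
#include <cstdio>
#include <functional>
#include <map>

// Hypothetical stand-in for one scheduling slot; NOT the rusEFI scheduling_s type.
struct SchedulingSlot {};

// Toy executor, invented for this sketch: it keeps at most one pending action
// per slot, keyed by the slot's address.  Assumption for illustration only:
// re-scheduling the same slot replaces the previously queued action.
class ToyExecutor {
public:
	void scheduleByTimestampNt(const char *name, SchedulingSlot *slot,
	                           uint64_t timeNt, std::function<void()> action) {
		if (m_pending.count(slot) != 0) {
			std::printf("slot re-queued: '%s' replaces the earlier action\n", name);
		}
		m_pending[slot] = Pending{ timeNt, std::move(action) };
	}

	// Run every action due at or before 'nowNt' (grossly simplified timing).
	void executeUpTo(uint64_t nowNt) {
		for (auto it = m_pending.begin(); it != m_pending.end(); ) {
			if (it->second.timeNt <= nowNt) {
				it->second.action();
				it = m_pending.erase(it);
			} else {
				++it;
			}
		}
	}

private:
	struct Pending {
		uint64_t timeNt;
		std::function<void()> action;
	};
	std::map<SchedulingSlot *, Pending> m_pending;
};

int main() {
	ToyExecutor executor;
	SchedulingSlot sparkDownSlot; // shared slot, standing in for event->sparkEvent.scheduling

	// 1) The overdwell protection is armed on the shared slot.
	executor.scheduleByTimestampNt("overdwell", &sparkDownSlot, 5000,
		[] { std::printf("overdwell fired\n"); });

	// 2) Later the regular spark-down is queued on the SAME slot; under the
	//    assumed replace-on-requeue policy the overdwell action is dropped.
	executor.scheduleByTimestampNt("sparkDown", &sparkDownSlot, 3000,
		[] { std::printf("normal sparkDown fired\n"); });

	// Only the sparkDown action runs; the overdwell callback never fires.
	executor.executeUpTo(10000);
	return 0;
}

Whether such a replacement is actually intended in rusEFI (the overdwell being only a safety net that a real spark-down supersedes) is exactly what the unit test requested in https://github.com/rusefi/rusefi/issues/6373 would pin down.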