node/near: improve logging per reviewer feedback

Author: tbjump
Date:   2023-05-24 14:29:42 +00:00
Committer: tbjump
parent bf5c540d3e
commit 821d66eae6
2 changed files with 14 additions and 9 deletions

View File

@@ -34,7 +34,6 @@ func (e *Watcher) recursivelyReadFinalizedBlocks(logger *zap.Logger, ctx context
 	// SECURITY: Sanity checks for the block header
 	if startBlock.Header.Hash == "" || startBlock.Header.Height == 0 || startBlock.Header.PrevBlockHash == "" {
-		logger.Debug("recursivelyReadFinalizedBlocks: json parse error")
 		return errors.New("json parse error")
 	}
@@ -51,9 +50,8 @@ func (e *Watcher) recursivelyReadFinalizedBlocks(logger *zap.Logger, ctx context
 	// we want to avoid going too far back because that would increase the likelihood of error somewhere in the recursion stack.
 	// If we go back too far, we just report the error and terminate early.
 	if recursionDepth > maxFallBehindBlocks {
-		logger.Debug("recursivelyReadFinalizedBlocks: maxFallBehindBlocks")
 		e.eventChan <- EVENT_NEAR_WATCHER_TOO_FAR_BEHIND
-		return nil
+		return errors.New("recursivelyReadFinalizedBlocks: maxFallBehindBlocks")
 	}

 	// recursion + stop condition
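
The guard above now surfaces the fall-behind condition to its caller instead of returning nil. A minimal, self-contained sketch of the depth-guard pattern (readBlocks and its flattened signature are hypothetical stand-ins for recursivelyReadFinalizedBlocks, not the watcher's actual code):

package main

import (
	"errors"
	"fmt"
)

const maxFallBehindBlocks = 100 // stand-in for the watcher's limit

// readBlocks is a hypothetical stand-in for recursivelyReadFinalizedBlocks.
func readBlocks(height uint64, stopHeight uint64, recursionDepth uint) error {
	if recursionDepth > maxFallBehindBlocks {
		// Surface the condition instead of returning nil, so the caller
		// knows the block walk was cut short.
		return errors.New("too far behind")
	}
	if height <= stopHeight {
		return nil // stop condition: reached a block we already processed
	}
	return readBlocks(height-1, stopHeight, recursionDepth+1)
}

func main() {
	fmt.Println(readBlocks(500, 100, 0)) // depth exceeds the limit: "too far behind"
}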
@@ -61,10 +59,13 @@ func (e *Watcher) recursivelyReadFinalizedBlocks(logger *zap.Logger, ctx context
 		prevBlock, err := e.nearAPI.GetBlock(ctx, startBlock.Header.PrevBlockHash)
 		if err != nil {
-			logger.Debug("recursivelyReadFinalizedBlocks: nearAPI.GetBlock error", zap.Error(err))
 			return err
 		}
-		_ = e.recursivelyReadFinalizedBlocks(logger, ctx, prevBlock, stopHeight, chunkSink, recursionDepth+1) // ignore error
+		err = e.recursivelyReadFinalizedBlocks(logger, ctx, prevBlock, stopHeight, chunkSink, recursionDepth+1)
+		if err != nil {
+			// only log error because we still want to process the blocks up until the one that made the error
+			logger.Debug("recursivelyReadFinalizedBlocks error", zap.Error(err))
+		}
 	}

 	chunks := startBlock.ChunkHashes()
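
This hunk switches the recursion from discarding the child call's error to logging it at Debug while continuing, so blocks above the failure point are still processed. A sketch of that log-and-continue shape, assuming only a zap logger (walk and its failure mode are hypothetical stand-ins):

package main

import (
	"errors"

	"go.uber.org/zap"
)

// walk is a hypothetical stand-in: an error deeper in the recursion is
// logged but not propagated, so the current block still gets processed.
func walk(logger *zap.Logger, height int) error {
	if height == 0 {
		return errors.New("fetch failed") // simulated nearAPI.GetBlock failure at the oldest block
	}
	if err := walk(logger, height-1); err != nil {
		// only log: we still want to process the blocks gathered so far
		logger.Debug("walk error", zap.Error(err))
	}
	logger.Info("processing block", zap.Int("height", height))
	return nil
}

func main() {
	logger, _ := zap.NewDevelopment()
	defer func() { _ = logger.Sync() }()
	_ = walk(logger, 3) // logs the failure once, then processes blocks 1..3
}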
@@ -99,6 +100,7 @@ func (e *Watcher) ReadFinalChunksSince(logger *zap.Logger, ctx context.Context,
 	err = e.recursivelyReadFinalizedBlocks(logger, ctx, finalBlock, startHeight, chunkSink, 0)
 	if err != nil {
+		logger.Debug("recursivelyReadFinalizedBlocks error", zap.Error(err))
 		return startHeight, err
 	}
 }

View File

@@ -176,7 +176,8 @@ func (e *Watcher) runChunkFetcher(ctx context.Context) error {
 			for _, job := range newJobs {
 				err := e.schedule(ctx, job, job.delay)
 				if err != nil {
-					logger.Info("error scheduling transaction processing job", zap.Error(err))
+					// Debug-level logging here because it could be very noisy (one log entry for *any* transaction on the NEAR blockchain)
+					logger.Debug("error scheduling transaction processing job", zap.Error(err))
 				}
 			}
 		}
@@ -206,7 +207,8 @@ func (e *Watcher) runObsvReqProcessor(ctx context.Context) error {
 			job := newTransactionProcessingJob(txHash, e.wormholeAccount)
 			err := e.schedule(ctx, job, time.Nanosecond)
 			if err != nil {
-				logger.Info("error scheduling transaction processing job", zap.Error(err))
+				// Error-level logging here because this is after a re-observation request already, which should be infrequent
+				logger.Error("error scheduling transaction processing job", zap.Error(err))
 			}
 		}
 	}
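
The two comments above encode a frequency-based rule for choosing zap levels: per-transaction scheduling failures can fire for any NEAR transaction and go to Debug, while failures after an explicit re-observation request should be rare and actionable, so they go to Error. A sketch of how a default production zap config gates the two (the config is an assumption, not the watcher's actual logger setup):

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	// zap.NewProduction() logs at Info level and above by default,
	// so Debug entries are dropped unless debugging is enabled.
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer func() { _ = logger.Sync() }()

	schedErr := errors.New("schedule failed")

	// Per-transaction path: potentially one entry per NEAR transaction,
	// so it is filtered out under the production config.
	logger.Debug("error scheduling transaction processing job", zap.Error(schedErr))

	// Post-re-observation path: infrequent and actionable, always emitted.
	logger.Error("error scheduling transaction processing job", zap.Error(schedErr))
}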
@@ -226,7 +228,7 @@ func (e *Watcher) runTxProcessor(ctx context.Context) error {
 			// transaction processing unsuccessful. Retry if retry_counter not exceeded.
 			if job.retryCounter < txProcRetry {
 				// Log and retry with exponential backoff
-				logger.Info(
+				logger.Debug(
 					"near.processTx",
 					zap.String("log_msg_type", "tx_processing_retry"),
 					zap.String("tx_hash", job.txHash),
@@ -236,7 +238,8 @@ func (e *Watcher) runTxProcessor(ctx context.Context) error {
 				job.delay *= 2
 				err := e.schedule(ctx, job, job.delay)
 				if err != nil {
-					logger.Info("error scheduling transaction processing job", zap.Error(err))
+					// Debug-level logging here because it could be very noisy (one log entry for *any* transaction on the NEAR blockchain)
+					logger.Debug("error scheduling transaction processing job", zap.Error(err))
 				}
 			} else {
 				// Warn and do not retry
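
The retry path doubles job.delay before rescheduling and logs each attempt at Debug until the retry counter is exhausted. A self-contained sketch of that exponential backoff loop (process and the constants are hypothetical stand-ins for the watcher's job machinery):

package main

import (
	"errors"
	"fmt"
	"time"
)

const txProcRetry = 4 // stand-in for the watcher's retry limit

// process is a hypothetical stand-in that always fails.
func process() error { return errors.New("tx processing unsuccessful") }

func main() {
	delay := 100 * time.Millisecond
	var lastErr error
	for retryCounter := 0; retryCounter < txProcRetry; retryCounter++ {
		if lastErr = process(); lastErr == nil {
			fmt.Println("processed")
			return
		}
		// Retry with exponential backoff: the delay doubles each
		// attempt, mirroring job.delay *= 2 in the diff above.
		delay *= 2
		fmt.Printf("retry %d in %v: %v\n", retryCounter+1, delay, lastErr)
		time.Sleep(delay)
	}
	// Do not retry once the counter is exhausted.
	fmt.Println("giving up:", lastErr)
}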