From e979d20ccdba7881408ad867fa9661b7250f285b Mon Sep 17 00:00:00 2001
From: tbjump
Date: Tue, 11 Apr 2023 23:54:08 +0000
Subject: [PATCH] node/watchers/near: linter fixes

---
 node/pkg/watchers/near/finalizer.go                | 12 ++++++------
 node/pkg/watchers/near/nearapi/mock/mock_server.go |  2 +-
 node/pkg/watchers/near/nearapi/nearapi.go          |  2 +-
 node/pkg/watchers/near/nearapi/types.go            |  4 ++--
 node/pkg/watchers/near/poll.go                     | 13 ++++++-------
 node/pkg/watchers/near/tx_processing.go            |  2 +-
 6 files changed, 17 insertions(+), 18 deletions(-)

diff --git a/node/pkg/watchers/near/finalizer.go b/node/pkg/watchers/near/finalizer.go
index c44f91dd3..54aa77cb2 100644
--- a/node/pkg/watchers/near/finalizer.go
+++ b/node/pkg/watchers/near/finalizer.go
@@ -30,7 +30,7 @@ func newFinalizer(eventChan chan eventType, nearAPI nearapi.NearApi, mainnet boo
 	}
 }
 
-func (f Finalizer) isFinalizedCached(logger *zap.Logger, ctx context.Context, blockHash string) (nearapi.BlockHeader, bool) {
+func (f Finalizer) isFinalizedCached(logger *zap.Logger, blockHash string) (nearapi.BlockHeader, bool) {
 	if err := nearapi.IsWellFormedHash(blockHash); err != nil {
 		// SECURITY defense-in-depth: check if block hash is well-formed
 		logger.Error("blockHash invalid", zap.String("error_type", "invalid_hash"), zap.String("blockHash", blockHash), zap.Error(err))
@@ -38,7 +38,7 @@ func (f Finalizer) isFinalizedCached(logger *zap.Logger, ctx context.Context, bl
 	}
 
 	if b, ok := f.finalizedBlocksCache.Get(blockHash); ok {
-		blockHeader := b.(nearapi.BlockHeader)
+		blockHeader := b.(nearapi.BlockHeader) //nolint:forcetypeassert
 		// SECURITY In blocks < 74473147 message timestamps were computed differently and we don't want to re-observe these messages
 		if !f.mainnet || blockHeader.Height > 74473147 {
 			return blockHeader, true
@@ -58,7 +58,7 @@ func (f Finalizer) isFinalized(logger *zap.Logger, ctx context.Context, queriedB
 	logger.Debug("checking block finalization", zap.String("method", "isFinalized"), zap.String("parameters", queriedBlockHash))
 
 	// check cache first
-	if block, ok := f.isFinalizedCached(logger, ctx, queriedBlockHash); ok {
+	if block, ok := f.isFinalizedCached(logger, queriedBlockHash); ok {
 		return block, true
 	}
@@ -92,17 +92,17 @@ func (f Finalizer) isFinalized(logger *zap.Logger, ctx context.Context, queriedB
 		}
 
 		if queriedBlockHash == someFinalBlockHash {
-			f.setFinalized(logger, ctx, queriedBlock.Header)
+			f.setFinalized(queriedBlock.Header)
 			// block was marked as finalized in the cache, so this should succeed now.
 			// We don't return directly because setFinalized() contains some sanity checks.
-			return f.isFinalizedCached(logger, ctx, queriedBlockHash)
+			return f.isFinalizedCached(logger, queriedBlockHash)
 		}
 	}
 
 	// it seems like the block has not been finalized yet
 	return nearapi.BlockHeader{}, false
 }
 
-func (f Finalizer) setFinalized(logger *zap.Logger, ctx context.Context, blockHeader nearapi.BlockHeader) {
+func (f Finalizer) setFinalized(blockHeader nearapi.BlockHeader) {
 	// SECURITY defense-in-depth: don't cache obviously corrupted data.
 	if nearapi.IsWellFormedHash(blockHeader.Hash) != nil || blockHeader.Timestamp == 0 || blockHeader.Height == 0 {
diff --git a/node/pkg/watchers/near/nearapi/mock/mock_server.go b/node/pkg/watchers/near/nearapi/mock/mock_server.go
index 8ce0a9703..7269329d3 100644
--- a/node/pkg/watchers/near/nearapi/mock/mock_server.go
+++ b/node/pkg/watchers/near/nearapi/mock/mock_server.go
@@ -78,7 +78,7 @@ func (s *ForwardingCachingServer) ProxyReq(logger *zap.Logger, req *http.Request
 	req.Body = io.NopCloser(bytes.NewReader(reqBody))
 
 	url := fmt.Sprintf("%s%s", s.upstreamHost, req.RequestURI)
-	proxyReq, _ := http.NewRequest(req.Method, url, bytes.NewReader(reqBody))
+	proxyReq, _ := http.NewRequestWithContext(req.Context(), req.Method, url, bytes.NewReader(reqBody))
 
 	s.logger.Debug("proxy_req",
 		zap.String("url", url),
diff --git a/node/pkg/watchers/near/nearapi/nearapi.go b/node/pkg/watchers/near/nearapi/nearapi.go
index eadacef4c..9ae780d68 100644
--- a/node/pkg/watchers/near/nearapi/nearapi.go
+++ b/node/pkg/watchers/near/nearapi/nearapi.go
@@ -50,7 +50,7 @@ type (
 
 func NewHttpNearRpc(nearRPC string) HttpNearRpc {
 	// Customize the Transport to have larger connection pool (default is only 2 per host)
-	t := http.DefaultTransport.(*http.Transport).Clone()
+	t := http.DefaultTransport.(*http.Transport).Clone() //nolint:forcetypeassert
 	t.MaxConnsPerHost = nearRPCConcurrentConnections
 	t.MaxIdleConnsPerHost = nearRPCConcurrentConnections
 	var httpClient = &http.Client{
diff --git a/node/pkg/watchers/near/nearapi/types.go b/node/pkg/watchers/near/nearapi/types.go
index 8d5af1bf0..1802b697c 100644
--- a/node/pkg/watchers/near/nearapi/types.go
+++ b/node/pkg/watchers/near/nearapi/types.go
@@ -65,7 +65,7 @@ func NewBlockFromBytes(bytes []byte) (Block, error) {
 	json := gjson.ParseBytes(bytes)
 
 	ts_nanosec := jsonGetUint(json, "result.header.timestamp")
-	ts := uint64(ts_nanosec) / 1_000_000_000
+	ts := ts_nanosec / 1_000_000_000
 
 	header := BlockHeader{
 		jsonGetString(json, "result.header.hash"),
@@ -84,7 +84,7 @@ func NewBlockFromBytes(bytes []byte) (Block, error) {
 
 func (b Block) Timestamp() uint64 {
 	ts_nanosec := jsonGetUint(b.json, "result.header.timestamp")
-	return uint64(ts_nanosec) / 1000000000
+	return ts_nanosec / 1000000000
 }
 
 func (b Block) ChunkHashes() []ChunkHeader {
diff --git a/node/pkg/watchers/near/poll.go b/node/pkg/watchers/near/poll.go
index a2bafc1b3..68b346470 100644
--- a/node/pkg/watchers/near/poll.go
+++ b/node/pkg/watchers/near/poll.go
@@ -12,8 +12,6 @@ import (
 func (e *Watcher) fetchAndParseChunk(logger *zap.Logger, ctx context.Context, chunkHeader nearapi.ChunkHeader) ([]*transactionProcessingJob, error) {
 	logger.Debug("near.fetchAndParseChunk", zap.String("chunk_hash", chunkHeader.Hash))
 
-	var result []*transactionProcessingJob
-
 	chunk, err := e.nearAPI.GetChunk(ctx, chunkHeader)
 	if err != nil {
 		return nil, err
@@ -21,15 +19,16 @@ func (e *Watcher) fetchAndParseChunk(logger *zap.Logger, ctx context.Context, ch
 
 	txns := chunk.Transactions()
 
-	for _, tx := range txns {
-		result = append(result, newTransactionProcessingJob(tx.Hash, tx.SignerId))
+	result := make([]*transactionProcessingJob, len(txns))
+	for i, tx := range txns {
+		result[i] = newTransactionProcessingJob(tx.Hash, tx.SignerId)
 	}
 	return result, nil
 }
 
 // recursivelyReadFinalizedBlocks walks back the blockchain from the startBlock (inclusive)
 // until it reaches a block of height stopHeight or less (exclusive). Chunks from all these blocks are put
-// into e.chunkProcessingqueue with the chunks from the oldest block first
+// into chunkSink with the chunks from the oldest block first
 // if there is an error while walking back the chain, no chunks will be returned
 func (e *Watcher) recursivelyReadFinalizedBlocks(logger *zap.Logger, ctx context.Context, startBlock nearapi.Block, stopHeight uint64, chunkSink chan<- nearapi.ChunkHeader, recursionDepth uint) error {
@@ -39,7 +38,7 @@ func (e *Watcher) recursivelyReadFinalizedBlocks(logger *zap.Logger, ctx context
 	}
 
 	// SECURITY: We know that this block is finalized because it is a parent of a finalized block.
-	e.finalizer.setFinalized(logger, ctx, startBlock.Header)
+	e.finalizer.setFinalized(startBlock.Header)
 
 	// we want to avoid going too far back because that would increase the likelihood of error somewhere in the recursion stack.
 	// If we go back too far, we just report the error and terminate early.
@@ -70,7 +69,7 @@ func (e *Watcher) recursivelyReadFinalizedBlocks(logger *zap.Logger, ctx context
 	chunks := startBlock.ChunkHashes()
 	// process chunks after recursion such that youngest chunks get processed first
 	for i := 0; i < len(chunks); i++ {
-		e.chunkProcessingQueue <- chunks[i]
+		chunkSink <- chunks[i]
 	}
 	return nil
 }
diff --git a/node/pkg/watchers/near/tx_processing.go b/node/pkg/watchers/near/tx_processing.go
index 59ff424f9..55d37a714 100644
--- a/node/pkg/watchers/near/tx_processing.go
+++ b/node/pkg/watchers/near/tx_processing.go
@@ -115,7 +115,7 @@ func (e *Watcher) processOutcome(logger *zap.Logger, ctx context.Context, job *t
 	return nil // SUCCESS
 }
 
-func (e *Watcher) processWormholeLog(logger *zap.Logger, ctx context.Context, job *transactionProcessingJob, outcomeBlockHeader nearapi.BlockHeader, successValue string, log gjson.Result) error {
+func (e *Watcher) processWormholeLog(logger *zap.Logger, _ context.Context, job *transactionProcessingJob, outcomeBlockHeader nearapi.BlockHeader, successValue string, log gjson.Result) error {
 	event := log.String()
 
 	// SECURITY CRITICAL: Ensure that we're reading a correct log message.
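
Note on the noctx fix in mock_server.go: replacing http.NewRequest with http.NewRequestWithContext ties the outbound proxy request to the inbound request's context, so client cancellation and deadlines propagate to the upstream call. A minimal standalone sketch of that pattern follows; proxyHandler and the upstream URL are hypothetical and not code from this repository.

package main

import (
	"fmt"
	"io"
	"net/http"
)

// proxyHandler forwards each inbound request to the upstream host, reusing the
// caller's context so a client disconnect or deadline cancels the upstream call.
func proxyHandler(upstreamHost string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		url := fmt.Sprintf("%s%s", upstreamHost, r.RequestURI)

		// NewRequestWithContext instead of NewRequest: the outbound request
		// inherits r.Context() rather than an implicit context.Background().
		proxyReq, err := http.NewRequestWithContext(r.Context(), r.Method, url, r.Body)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		resp, err := http.DefaultClient.Do(proxyReq)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadGateway)
			return
		}
		defer resp.Body.Close()

		w.WriteHeader(resp.StatusCode)
		_, _ = io.Copy(w, resp.Body)
	}
}

func main() {
	http.Handle("/", proxyHandler("https://rpc.example.org"))
	_ = http.ListenAndServe(":8080", nil)
}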