core, eth: minor txpool event cleanups

Péter Szilágyi 2018-05-18 11:45:52 +03:00
parent a2e43d28d0
commit 49719e21bc
19 changed files with 89 additions and 92 deletions

View File

@ -454,7 +454,7 @@ func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*ty
return logs, nil
}
func (fb *filterBackend) SubscribeTxPreEvent(ch chan<- core.TxsPreEvent) event.Subscription {
func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return event.NewSubscription(func(quit <-chan struct{}) error {
<-quit
return nil
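
For context, the simulated backend's stub works by handing out a subscription whose producer simply blocks until it is unsubscribed. A minimal sketch of that pattern, assuming the go-ethereum core and event packages (the nullBackend name is made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/event"
)

type nullBackend struct{}

// SubscribeNewTxsEvent returns a subscription that never delivers anything:
// event.NewSubscription runs the producer until Unsubscribe closes its quit channel.
func (nullBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
	return event.NewSubscription(func(quit <-chan struct{}) error {
		<-quit // never produce anything, just wait for Unsubscribe
		return nil
	})
}

func main() {
	ch := make(chan core.NewTxsEvent)
	sub := nullBackend{}.SubscribeNewTxsEvent(ch)
	sub.Unsubscribe() // unblocks the producer above and tears the subscription down
	fmt.Println("stub subscription closed cleanly")
}
```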

View File

@ -21,8 +21,8 @@ import (
"github.com/ethereum/go-ethereum/core/types"
)
// TxsPreEvent is posted when a batch of transactions enter the transaction pool.
type TxsPreEvent struct{ Txs types.Transactions }
// NewTxsEvent is posted when a batch of transactions enter the transaction pool.
type NewTxsEvent struct{ Txs []*types.Transaction }
// PendingLogsEvent is posted pre mining and notifies of pending logs.
type PendingLogsEvent struct {
@ -35,9 +35,6 @@ type PendingStateEvent struct{}
// NewMinedBlockEvent is posted when a block has been imported.
type NewMinedBlockEvent struct{ Block *types.Block }
// RemovedTransactionEvent is posted when a reorg happens
type RemovedTransactionEvent struct{ Txs types.Transactions }
// RemovedLogsEvent is posted when a reorg happens
type RemovedLogsEvent struct{ Logs []*types.Log }
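
The field type changes from the named types.Transactions to a plain []*types.Transaction. Since types.Transactions is declared over that same underlying slice type, existing callers holding a types.Transactions value can still populate the event directly; a small illustrative sketch:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	var batch types.Transactions // defined as []*types.Transaction
	batch = append(batch, new(types.Transaction))

	// The plain-slice field accepts a types.Transactions value directly,
	// because only the field's declared type changed, not the element type.
	ev := core.NewTxsEvent{Txs: batch}
	fmt.Println("transactions in event:", len(ev.Txs))
}
```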

View File

@ -76,22 +76,21 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
stream := rlp.NewStream(input, 0)
total, dropped := 0, 0
// flush imports a batch of transactions and bumps the appropriate progress counters
flush := func(txs types.Transactions) {
errs := add(txs)
for _, err := range errs {
// Create a method to load a limited batch of transactions and bump the
// appropriate progress counters. Then use this method to load all the
// journalled transactions in small-ish batches.
loadBatch := func(txs types.Transactions) {
for _, err := range add(txs) {
if err != nil {
log.Debug("Failed to add journaled transaction", "err", err)
dropped++
}
}
}
var (
failure error
txs types.Transactions
batch types.Transactions
)
for {
// Parse the next transaction and terminate on error
tx := new(types.Transaction)
@ -99,19 +98,19 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
if err != io.EOF {
failure = err
}
if batch.Len() > 0 {
loadBatch(batch)
}
break
}
txs = append(txs, tx)
// New transaction parsed, queue up for later, import if threshold is reached
total++
if txs.Len() > 1024 {
flush(txs)
txs = types.Transactions{}
if batch = append(batch, tx); batch.Len() > 1024 {
loadBatch(batch)
batch = batch[:0]
}
}
if txs.Len() > 0 {
flush(txs)
txs = types.Transactions{}
}
log.Info("Loaded local transaction journal", "transactions", total, "dropped", dropped)
return failure
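
The reworked loader keeps one reusable batch slice, flushes it whenever it grows past 1024 transactions, and flushes the remainder when decoding hits io.EOF, so the trailing cleanup block is no longer needed. A condensed, self-contained sketch of that batching shape (plain Go, not the journal code itself):

```go
package main

import "fmt"

func main() {
	const threshold = 3 // the journal uses 1024

	flushed := 0
	load := func(batch []int) {
		flushed += len(batch)
		fmt.Println("flushing", len(batch), "items")
	}

	var batch []int
	for item := 0; item < 10; item++ {
		// Accumulate, and flush as soon as the threshold is crossed.
		if batch = append(batch, item); len(batch) > threshold {
			load(batch)
			batch = batch[:0] // keep the backing array, drop the contents
		}
	}
	if len(batch) > 0 { // final partial batch, like the io.EOF branch above
		load(batch)
	}
	fmt.Println("total flushed:", flushed)
}
```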

View File

@ -444,9 +444,9 @@ func (pool *TxPool) Stop() {
log.Info("Transaction pool stopped")
}
// SubscribeTxPreEvent registers a subscription of TxsPreEvent and
// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and
// starts sending event to the given channel.
func (pool *TxPool) SubscribeTxPreEvent(ch chan<- TxsPreEvent) event.Subscription {
func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription {
return pool.scope.Track(pool.txFeed.Subscribe(ch))
}
@ -653,7 +653,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
// We've directly injected a replacement transaction, notify subsystems
go pool.txFeed.Send(TxsPreEvent{types.Transactions{tx}})
go pool.txFeed.Send(NewTxsEvent{types.Transactions{tx}})
return old != nil, nil
}
@ -712,7 +712,8 @@ func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
}
}
// promoteTx adds a transaction to the pending (processable) list of transactions.
// promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older was better.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
@ -746,6 +747,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
// Set the potentially new pending nonce and notify any subsystems of the new tx
pool.beats[addr] = time.Now()
pool.pendingState.SetNonce(addr, tx.Nonce()+1)
return true
}
@ -906,7 +908,9 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(accounts []common.Address) {
var promotedTxs types.Transactions
// Track the promoted transactions to broadcast them at once
var promoted []*types.Transaction
// Gather all the accounts potentially needing updates
if accounts == nil {
accounts = make([]common.Address, 0, len(pool.queue))
@ -937,16 +941,13 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
queuedNofundsCounter.Inc(1)
}
// Gather all executable transactions and promote them
txs := list.Ready(pool.pendingState.GetNonce(addr))
for _, tx := range txs {
for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
hash := tx.Hash()
inserted := pool.promoteTx(addr, hash, tx)
if inserted {
if pool.promoteTx(addr, hash, tx) {
log.Trace("Promoting queued transaction", "hash", hash)
promotedTxs = append(promotedTxs, tx)
promoted = append(promoted, tx)
}
}
// Drop all transactions over the allowed limit
if !pool.locals.contains(addr) {
for _, tx := range list.Cap(int(pool.config.AccountQueue)) {
@ -963,10 +964,9 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
}
}
// Notify subsystem for new promoted transactions.
if promotedTxs.Len() > 0 {
pool.txFeed.Send(TxsPreEvent{promotedTxs})
if len(promoted) > 0 {
pool.txFeed.Send(NewTxsEvent{promoted})
}
// If the pending limit is overflown, start equalizing allowances
pending := uint64(0)
for _, list := range pool.pending {
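
The net effect in promoteExecutables is that promoted transactions are collected into one slice and announced with a single NewTxsEvent instead of per-transaction events. A minimal sketch of that collect-then-notify pattern, assuming event.Feed semantics (Send returns the number of subscribers that received the value):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
)

func main() {
	var feed event.Feed

	ch := make(chan core.NewTxsEvent, 1)
	sub := feed.Subscribe(ch)
	defer sub.Unsubscribe()

	// Pretend these were just promoted from the future queue.
	promoted := []*types.Transaction{new(types.Transaction), new(types.Transaction)}

	if len(promoted) > 0 {
		// One event for the whole batch, not one per transaction.
		nsent := feed.Send(core.NewTxsEvent{Txs: promoted})
		fmt.Println("delivered to", nsent, "subscriber(s)")
	}
	fmt.Println("batch size:", len((<-ch).Txs))
}
```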

View File

@ -118,19 +118,20 @@ func validateTxPoolInternals(pool *TxPool) error {
// validateEvents checks that the correct number of transaction addition events
// were fired on the pool's event feed.
func validateEvents(events chan TxsPreEvent, count int) error {
received := 0
for {
if received == count {
break
}
func validateEvents(events chan NewTxsEvent, count int) error {
var received []*types.Transaction
for len(received) < count {
select {
case ev := <-events:
received += ev.Txs.Len()
received = append(received, ev.Txs...)
case <-time.After(time.Second):
return fmt.Errorf("event #%d not fired", received)
}
}
if len(received) > count {
return fmt.Errorf("more than %d events fired: %v", count, received[count:])
}
select {
case ev := <-events:
return fmt.Errorf("more than %d events fired: %v", count, ev.Txs)
@ -674,7 +675,7 @@ func TestTransactionGapFilling(t *testing.T) {
pool.currentState.AddBalance(account, big.NewInt(1000000))
// Keep track of transaction events to ensure all executables get announced
events := make(chan TxsPreEvent, testTxPoolConfig.AccountQueue+5)
events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5)
sub := pool.txFeed.Subscribe(events)
defer sub.Unsubscribe()
@ -925,7 +926,7 @@ func TestTransactionPendingLimiting(t *testing.T) {
pool.currentState.AddBalance(account, big.NewInt(1000000))
// Keep track of transaction events to ensure all executables get announced
events := make(chan TxsPreEvent, testTxPoolConfig.AccountQueue+5)
events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5)
sub := pool.txFeed.Subscribe(events)
defer sub.Unsubscribe()
@ -1145,7 +1146,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
defer pool.Stop()
// Keep track of transaction events to ensure all executables get announced
events := make(chan TxsPreEvent, 32)
events := make(chan NewTxsEvent, 32)
sub := pool.txFeed.Subscribe(events)
defer sub.Unsubscribe()
@ -1332,7 +1333,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
defer pool.Stop()
// Keep track of transaction events to ensure all executables get announced
events := make(chan TxsPreEvent, 32)
events := make(chan NewTxsEvent, 32)
sub := pool.txFeed.Subscribe(events)
defer sub.Unsubscribe()
@ -1438,7 +1439,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) {
defer pool.Stop()
// Keep track of transaction events to ensure all executables get announced
events := make(chan TxsPreEvent, 32)
events := make(chan NewTxsEvent, 32)
sub := pool.txFeed.Subscribe(events)
defer sub.Unsubscribe()
@ -1500,7 +1501,7 @@ func TestTransactionReplacement(t *testing.T) {
defer pool.Stop()
// Keep track of transaction events to ensure all executables get announced
events := make(chan TxsPreEvent, 32)
events := make(chan NewTxsEvent, 32)
sub := pool.txFeed.Subscribe(events)
defer sub.Unsubscribe()
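
Because events now carry batches, the test helper counts individual transactions rather than events. A standalone sketch of that counting-with-timeout pattern (the expectTxs helper below is hypothetical, not the test code):

```go
package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
)

// expectTxs tallies individual transactions across arbitrarily batched events,
// with a timeout guarding against announcements that never fire.
func expectTxs(events chan core.NewTxsEvent, count int) error {
	var received []*types.Transaction
	for len(received) < count {
		select {
		case ev := <-events:
			received = append(received, ev.Txs...) // an event may carry several txs
		case <-time.After(time.Second):
			return fmt.Errorf("only %d of %d transactions announced", len(received), count)
		}
	}
	if len(received) > count {
		return fmt.Errorf("too many transactions announced: %d > %d", len(received), count)
	}
	return nil
}

func main() {
	events := make(chan core.NewTxsEvent, 2)
	events <- core.NewTxsEvent{Txs: []*types.Transaction{new(types.Transaction)}}
	events <- core.NewTxsEvent{Txs: []*types.Transaction{new(types.Transaction), new(types.Transaction)}}

	fmt.Println("err:", expectTxs(events, 3))
}
```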

View File

@ -188,8 +188,8 @@ func (b *EthAPIBackend) TxPoolContent() (map[common.Address]types.Transactions,
return b.eth.TxPool().Content()
}
func (b *EthAPIBackend) SubscribeTxPreEvent(ch chan<- core.TxsPreEvent) event.Subscription {
return b.eth.TxPool().SubscribeTxPreEvent(ch)
func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return b.eth.TxPool().SubscribeNewTxsEvent(ch)
}
func (b *EthAPIBackend) Downloader() *downloader.Downloader {

View File

@ -105,7 +105,7 @@ func (api *PublicFilterAPI) timeoutLoop() {
func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID {
var (
pendingTxs = make(chan []common.Hash)
pendingTxSub = api.events.SubscribePendingTxEvents(pendingTxs)
pendingTxSub = api.events.SubscribePendingTxs(pendingTxs)
)
api.filtersMu.Lock()
@ -145,7 +145,7 @@ func (api *PublicFilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Su
go func() {
txHashes := make(chan []common.Hash, 128)
pendingTxSub := api.events.SubscribePendingTxEvents(txHashes)
pendingTxSub := api.events.SubscribePendingTxs(txHashes)
for {
select {

View File

@ -36,7 +36,7 @@ type Backend interface {
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error)
SubscribeTxPreEvent(chan<- core.TxsPreEvent) event.Subscription
SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription

View File

@ -59,7 +59,7 @@ const (
const (
// txChanSize is the size of channel listening to TxsPreEvent.
// txChanSize is the size of channel listening to NewTxsEvent.
// The number is referenced from the size of tx pool.
txChanSize = 4096
// rmLogsChanSize is the size of channel listening to RemovedLogsEvent.
@ -104,7 +104,7 @@ type EventSystem struct {
// Channels
install chan *subscription // install filter for event notification
uninstall chan *subscription // remove filter for event notification
txsCh chan core.TxsPreEvent // Channel to receive new transactions event
txsCh chan core.NewTxsEvent // Channel to receive new transactions event
logsCh chan []*types.Log // Channel to receive new log event
rmLogsCh chan core.RemovedLogsEvent // Channel to receive removed log event
chainCh chan core.ChainEvent // Channel to receive new chain event
@ -123,14 +123,14 @@ func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventS
lightMode: lightMode,
install: make(chan *subscription),
uninstall: make(chan *subscription),
txsCh: make(chan core.TxsPreEvent, txChanSize),
txsCh: make(chan core.NewTxsEvent, txChanSize),
logsCh: make(chan []*types.Log, logsChanSize),
rmLogsCh: make(chan core.RemovedLogsEvent, rmLogsChanSize),
chainCh: make(chan core.ChainEvent, chainEvChanSize),
}
// Subscribe events
m.txsSub = m.backend.SubscribeTxPreEvent(m.txsCh)
m.txsSub = m.backend.SubscribeNewTxsEvent(m.txsCh)
m.logsSub = m.backend.SubscribeLogsEvent(m.logsCh)
m.rmLogsSub = m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh)
m.chainSub = m.backend.SubscribeChainEvent(m.chainCh)
@ -298,9 +298,9 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti
return es.subscribe(sub)
}
// SubscribePendingTxEvents creates a subscription that writes transaction hashes for
// SubscribePendingTxs creates a subscription that writes transaction hashes for
// transactions that enter the transaction pool.
func (es *EventSystem) SubscribePendingTxEvents(hashes chan []common.Hash) *Subscription {
func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscription {
sub := &subscription{
id: rpc.NewID(),
typ: PendingTransactionsSubscription,
@ -348,8 +348,8 @@ func (es *EventSystem) broadcast(filters filterIndex, ev interface{}) {
}
}
}
case core.TxsPreEvent:
hashes := make([]common.Hash, 0, e.Txs.Len())
case core.NewTxsEvent:
hashes := make([]common.Hash, 0, len(e.Txs))
for _, tx := range e.Txs {
hashes = append(hashes, tx.Hash())
}
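
With Txs now a plain slice, the broadcast path sizes the hash slice with len(e.Txs) and flattens each batch into transaction hashes for pending-transaction subscribers. A small illustrative sketch of that conversion (the hashesOf helper is made up):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
)

// hashesOf flattens a batched NewTxsEvent into the hash list that
// pending-transaction filters deliver to their clients.
func hashesOf(ev core.NewTxsEvent) []common.Hash {
	hashes := make([]common.Hash, 0, len(ev.Txs)) // len() now, since Txs is a plain slice
	for _, tx := range ev.Txs {
		hashes = append(hashes, tx.Hash())
	}
	return hashes
}

func main() {
	ev := core.NewTxsEvent{Txs: []*types.Transaction{new(types.Transaction)}}
	fmt.Println("hashes:", hashesOf(ev))
}
```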

View File

@ -96,7 +96,7 @@ func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types
return logs, nil
}
func (b *testBackend) SubscribeTxPreEvent(ch chan<- core.TxsPreEvent) event.Subscription {
func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return b.txFeed.Subscribe(ch)
}
@ -232,7 +232,7 @@ func TestPendingTxFilter(t *testing.T) {
fid0 := api.NewPendingTransactionFilter()
time.Sleep(1 * time.Second)
txFeed.Send(core.TxsPreEvent{transactions})
txFeed.Send(core.NewTxsEvent{Txs: transactions})
timeout := time.Now().Add(1 * time.Second)
for {

View File

@ -46,7 +46,7 @@ const (
softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
estHeaderRlpSize = 500 // Approximate size of an RLP encoded block header
// txChanSize is the size of channel listening to TxsPreEvent.
// txChanSize is the size of channel listening to NewTxsEvent.
// The number is referenced from the size of tx pool.
txChanSize = 4096
)
@ -81,7 +81,7 @@ type ProtocolManager struct {
SubProtocols []p2p.Protocol
eventMux *event.TypeMux
txsCh chan core.TxsPreEvent
txsCh chan core.NewTxsEvent
txsSub event.Subscription
minedBlockSub *event.TypeMuxSubscription
@ -204,8 +204,8 @@ func (pm *ProtocolManager) Start(maxPeers int) {
pm.maxPeers = maxPeers
// broadcast transactions
pm.txsCh = make(chan core.TxsPreEvent, txChanSize)
pm.txsSub = pm.txpool.SubscribeTxPreEvent(pm.txsCh)
pm.txsCh = make(chan core.NewTxsEvent, txChanSize)
pm.txsSub = pm.txpool.SubscribeNewTxsEvent(pm.txsCh)
go pm.txBroadcastLoop()
// broadcast mined blocks
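
txBroadcastLoop then drains the buffered txsCh and exits when the subscription is torn down. A trimmed sketch of that loop shape, with hypothetical function and variable names and an unbuffered channel for determinism (the real code buffers txChanSize events):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/event"
)

// broadcastLoop forwards transaction batches until the subscription dies.
func broadcastLoop(txsCh chan core.NewTxsEvent, txsSub event.Subscription) {
	for {
		select {
		case ev := <-txsCh:
			fmt.Println("would broadcast", len(ev.Txs), "transaction(s) to peers")
		case <-txsSub.Err(): // closed on Unsubscribe, so the loop terminates
			return
		}
	}
}

func main() {
	var feed event.Feed
	txsCh := make(chan core.NewTxsEvent) // unbuffered here; the real code buffers txChanSize (4096)
	txsSub := feed.Subscribe(txsCh)

	done := make(chan struct{})
	go func() { broadcastLoop(txsCh, txsSub); close(done) }()

	feed.Send(core.NewTxsEvent{Txs: nil}) // blocks until the loop has taken the event
	txsSub.Unsubscribe()                  // unblocks the Err case and stops the loop
	<-done
}
```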

View File

@ -124,7 +124,7 @@ func (p *testTxPool) Pending() (map[common.Address]types.Transactions, error) {
return batches, nil
}
func (p *testTxPool) SubscribeTxPreEvent(ch chan<- core.TxsPreEvent) event.Subscription {
func (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return p.txFeed.Subscribe(ch)
}

View File

@ -103,9 +103,9 @@ type txPool interface {
// The slice should be modifiable by the caller.
Pending() (map[common.Address]types.Transactions, error)
// SubscribeTxPreEvent should return an event subscription of
// TxsPreEvent and send events to the given channel.
SubscribeTxPreEvent(chan<- core.TxsPreEvent) event.Subscription
// SubscribeNewTxsEvent should return an event subscription of
// NewTxsEvent and send events to the given channel.
SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
}
// statusData is the network packet for the status message.

View File

@ -116,7 +116,7 @@ func testRecvTransactions(t *testing.T, protocol int) {
t.Errorf("added wrong tx hash: got %v, want %v", added[0].Hash(), tx.Hash())
}
case <-time.After(2 * time.Second):
t.Errorf("no TxsPreEvent received within 2 seconds")
t.Errorf("no NewTxsEvent received within 2 seconds")
}
}

View File

@ -49,7 +49,7 @@ const (
// history request.
historyUpdateRange = 50
// txChanSize is the size of channel listening to TxsPreEvent.
// txChanSize is the size of channel listening to NewTxsEvent.
// The number is referenced from the size of tx pool.
txChanSize = 4096
// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
@ -57,9 +57,9 @@ const (
)
type txPool interface {
// SubscribeTxPreEvent should return an event subscription of
// TxsPreEvent and send events to the given channel.
SubscribeTxPreEvent(chan<- core.TxsPreEvent) event.Subscription
// SubscribeNewTxsEvent should return an event subscription of
// NewTxsEvent and send events to the given channel.
SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
}
type blockChain interface {
@ -150,8 +150,8 @@ func (s *Service) loop() {
headSub := blockchain.SubscribeChainHeadEvent(chainHeadCh)
defer headSub.Unsubscribe()
txEventCh := make(chan core.TxsPreEvent, txChanSize)
txSub := txpool.SubscribeTxPreEvent(txEventCh)
txEventCh := make(chan core.NewTxsEvent, txChanSize)
txSub := txpool.SubscribeNewTxsEvent(txEventCh)
defer txSub.Unsubscribe()
// Start a goroutine that exhausts the subscriptions to avoid events piling up
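
The draining goroutine referenced in that comment exists so the feed's Send never blocks on the stats reporter. A compact sketch of the exhaust-and-coalesce pattern, with hypothetical channel names:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/event"
)

func main() {
	var feed event.Feed
	txEventCh := make(chan core.NewTxsEvent, 4096)
	txSub := feed.Subscribe(txEventCh)
	defer txSub.Unsubscribe()

	notify := make(chan struct{}, 1) // coalesced "something happened" signal

	go func() { // keep exhausting the subscription so events never pile up
		for range txEventCh {
			select {
			case notify <- struct{}{}:
			default: // the reporter is busy, drop the extra signal
			}
		}
	}()

	for i := 0; i < 100; i++ {
		feed.Send(core.NewTxsEvent{Txs: nil})
	}
	time.Sleep(50 * time.Millisecond)
	fmt.Println("pending notifications:", len(notify)) // at most 1, however many events fired
}
```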

View File

@ -65,7 +65,7 @@ type Backend interface {
GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error)
Stats() (pending int, queued int)
TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions)
SubscribeTxPreEvent(chan<- core.TxsPreEvent) event.Subscription
SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
ChainConfig() *params.ChainConfig
CurrentBlock() *types.Block

View File

@ -136,8 +136,8 @@ func (b *LesApiBackend) TxPoolContent() (map[common.Address]types.Transactions,
return b.eth.txPool.Content()
}
func (b *LesApiBackend) SubscribeTxPreEvent(ch chan<- core.TxsPreEvent) event.Subscription {
return b.eth.txPool.SubscribeTxPreEvent(ch)
func (b *LesApiBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return b.eth.txPool.SubscribeNewTxsEvent(ch)
}
func (b *LesApiBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {

View File

@ -321,9 +321,9 @@ func (pool *TxPool) Stop() {
log.Info("Transaction pool stopped")
}
// SubscribeTxPreEvent registers a subscription of core.TxsPreEvent and
// SubscribeNewTxsEvent registers a subscription of core.NewTxsEvent and
// starts sending event to the given channel.
func (pool *TxPool) SubscribeTxPreEvent(ch chan<- core.TxsPreEvent) event.Subscription {
func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return pool.scope.Track(pool.txFeed.Subscribe(ch))
}
@ -412,7 +412,7 @@ func (self *TxPool) add(ctx context.Context, tx *types.Transaction) error {
// Notify the subscribers. This event is posted in a goroutine
// because it's possible that somewhere during the post "Remove transaction"
// gets called which will then wait for the global tx pool lock and deadlock.
go self.txFeed.Send(core.TxsPreEvent{types.Transactions{tx}})
go self.txFeed.Send(core.NewTxsEvent{Txs: types.Transactions{tx}})
}
// Print a log message if low enough level is set
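
The goroutine around Send matters because event.Feed.Send blocks until every subscriber has accepted the value; posting while holding the pool lock could deadlock if a subscriber reacts by calling back into the pool. An illustrative sketch (the miniPool type is made up):

```go
package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/event"
)

type miniPool struct {
	mu   sync.Mutex
	feed event.Feed
}

func (p *miniPool) add() {
	p.mu.Lock()
	defer p.mu.Unlock()
	// ... mutate pool state under the lock ...
	go p.feed.Send(core.NewTxsEvent{Txs: nil}) // Send runs outside the locked section
}

func (p *miniPool) remove() { // a subscriber calling back into the pool
	p.mu.Lock()
	defer p.mu.Unlock()
}

func main() {
	pool := new(miniPool)
	ch := make(chan core.NewTxsEvent) // unbuffered: Send blocks until received
	sub := pool.feed.Subscribe(ch)
	defer sub.Unsubscribe()

	go func() {
		<-ch
		pool.remove() // safe: add() is not holding the lock while the event is delivered
	}()

	pool.add()
	time.Sleep(50 * time.Millisecond)
	fmt.Println("no deadlock")
}
```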

View File

@ -42,7 +42,7 @@ const (
resultQueueSize = 10
miningLogAtDepth = 5
// txChanSize is the size of channel listening to TxsPreEvent.
// txChanSize is the size of channel listening to NewTxsEvent.
// The number is referenced from the size of tx pool.
txChanSize = 4096
// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
@ -71,7 +71,7 @@ type Work struct {
family *set.Set // family set (used for checking uncle invalidity)
uncles *set.Set // uncle set
tcount int // tx count in cycle
gasPool *core.GasPool // available gas used to pack transaction.
gasPool *core.GasPool // available gas used to pack transactions
Block *types.Block // the new block
@ -96,7 +96,7 @@ type worker struct {
// update loop
mux *event.TypeMux
txsCh chan core.TxsPreEvent
txsCh chan core.NewTxsEvent
txsSub event.Subscription
chainHeadCh chan core.ChainHeadEvent
chainHeadSub event.Subscription
@ -138,7 +138,7 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, coinbase com
engine: engine,
eth: eth,
mux: mux,
txsCh: make(chan core.TxsPreEvent, txChanSize),
txsCh: make(chan core.NewTxsEvent, txChanSize),
chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize),
chainDb: eth.ChainDb(),
@ -150,8 +150,8 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, coinbase com
agents: make(map[Agent]struct{}),
unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
}
// Subscribe TxsPreEvent for tx pool
worker.txsSub = eth.TxPool().SubscribeTxPreEvent(worker.txsCh)
// Subscribe NewTxsEvent for tx pool
worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh)
// Subscribe events for blockchain
worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh)
worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh)
@ -259,7 +259,7 @@ func (self *worker) update() {
self.possibleUncles[ev.Block.Hash()] = ev.Block
self.uncleMu.Unlock()
// Handle TxsPreEvent
// Handle NewTxsEvent
case ev := <-self.txsCh:
// Apply transactions to the pending state if we're not mining.
//
@ -538,7 +538,7 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
for {
// If we don't have enough gas for any further transactions then we're done
if env.gasPool.Gas() < params.TxGas {
log.Trace("Not enough gas for further transactions", "gp", env.gasPool)
log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
break
}
// Retrieve the next transaction and abort if all done
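
The new log line reports both the remaining gas and the minimum needed. A minimal sketch of that budget check, assuming core.GasPool and params.TxGas (21000, the intrinsic cost of a plain transfer):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	gasPool := new(core.GasPool).AddGas(50000) // gas left in the block being packed

	for i := 0; ; i++ {
		// Stop as soon as the pool cannot cover even the cheapest transaction.
		if gasPool.Gas() < params.TxGas {
			fmt.Println("not enough gas for further transactions",
				"have", gasPool.Gas(), "want", params.TxGas)
			break
		}
		// Pretend a transaction consumed exactly the intrinsic gas.
		if err := gasPool.SubGas(params.TxGas); err != nil {
			break
		}
		fmt.Println("packed tx", i, "remaining gas", gasPool.Gas())
	}
}
```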