-rw-r--r--   cmd/geth/main.go     |   7
-rw-r--r--   cmd/geth/usage.go    |  12
-rw-r--r--   cmd/utils/flags.go   | 120
-rw-r--r--   core/tx_list.go      |   4
-rw-r--r--   core/tx_list_test.go |   2
-rw-r--r--   core/tx_pool.go      | 101
-rw-r--r--   core/tx_pool_test.go |  88
-rw-r--r--   eth/backend.go       |   2
-rw-r--r--   eth/config.go        |   4
-rw-r--r--   eth/gen_config.go    |   6
10 files changed, 241 insertions, 105 deletions
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 56652f3bd..cc481796f 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -66,6 +66,13 @@ var (
 		utils.EthashDatasetDirFlag,
 		utils.EthashDatasetsInMemoryFlag,
 		utils.EthashDatasetsOnDiskFlag,
+		utils.TxPoolPriceLimitFlag,
+		utils.TxPoolPriceBumpFlag,
+		utils.TxPoolAccountSlotsFlag,
+		utils.TxPoolGlobalSlotsFlag,
+		utils.TxPoolAccountQueueFlag,
+		utils.TxPoolGlobalQueueFlag,
+		utils.TxPoolLifetimeFlag,
 		utils.FastSyncFlag,
 		utils.LightModeFlag,
 		utils.SyncModeFlag,
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index 403e93a9d..2ba504fdd 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -93,6 +93,18 @@ var AppHelpFlagGroups = []flagGroup{
 		},
 	},
 	{
+		Name: "TRANSACTION POOL",
+		Flags: []cli.Flag{
+			utils.TxPoolPriceLimitFlag,
+			utils.TxPoolPriceBumpFlag,
+			utils.TxPoolAccountSlotsFlag,
+			utils.TxPoolGlobalSlotsFlag,
+			utils.TxPoolAccountQueueFlag,
+			utils.TxPoolGlobalQueueFlag,
+			utils.TxPoolLifetimeFlag,
+		},
+	},
+	{
 		Name: "PERFORMANCE TUNING",
 		Flags: []cli.Flag{
 			utils.CacheFlag,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 863761e8c..3c97cd3bb 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -123,35 +123,6 @@ var (
 		Name:  "nousb",
 		Usage: "Disables monitoring for and managine USB hardware wallets",
 	}
-	EthashCacheDirFlag = DirectoryFlag{
-		Name:  "ethash.cachedir",
-		Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
-	}
-	EthashCachesInMemoryFlag = cli.IntFlag{
-		Name:  "ethash.cachesinmem",
-		Usage: "Number of recent ethash caches to keep in memory (16MB each)",
-		Value: eth.DefaultConfig.EthashCachesInMem,
-	}
-	EthashCachesOnDiskFlag = cli.IntFlag{
-		Name:  "ethash.cachesondisk",
-		Usage: "Number of recent ethash caches to keep on disk (16MB each)",
-		Value: eth.DefaultConfig.EthashCachesOnDisk,
-	}
-	EthashDatasetDirFlag = DirectoryFlag{
-		Name:  "ethash.dagdir",
-		Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
-		Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
-	}
-	EthashDatasetsInMemoryFlag = cli.IntFlag{
-		Name:  "ethash.dagsinmem",
-		Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
-		Value: eth.DefaultConfig.EthashDatasetsInMem,
-	}
-	EthashDatasetsOnDiskFlag = cli.IntFlag{
-		Name:  "ethash.dagsondisk",
-		Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
-		Value: eth.DefaultConfig.EthashDatasetsOnDisk,
-	}
 	NetworkIdFlag = cli.Uint64Flag{
 		Name:  "networkid",
 		Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten, 4=Rinkeby)",
@@ -207,6 +178,72 @@ var (
 		Name:  "lightkdf",
 		Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
 	}
+	// Ethash settings
+	EthashCacheDirFlag = DirectoryFlag{
+		Name:  "ethash.cachedir",
+		Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
+	}
+	EthashCachesInMemoryFlag = cli.IntFlag{
+		Name:  "ethash.cachesinmem",
+		Usage: "Number of recent ethash caches to keep in memory (16MB each)",
+		Value: eth.DefaultConfig.EthashCachesInMem,
+	}
+	EthashCachesOnDiskFlag = cli.IntFlag{
+		Name:  "ethash.cachesondisk",
+		Usage: "Number of recent ethash caches to keep on disk (16MB each)",
+		Value: eth.DefaultConfig.EthashCachesOnDisk,
+	}
+	EthashDatasetDirFlag = DirectoryFlag{
+		Name:  "ethash.dagdir",
+		Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
+		Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
+	}
+	EthashDatasetsInMemoryFlag = cli.IntFlag{
+		Name:  "ethash.dagsinmem",
+		Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
+		Value: eth.DefaultConfig.EthashDatasetsInMem,
+	}
+	EthashDatasetsOnDiskFlag = cli.IntFlag{
+		Name:  "ethash.dagsondisk",
+		Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
+		Value: eth.DefaultConfig.EthashDatasetsOnDisk,
+	}
+	// Transaction pool settings
+	TxPoolPriceLimitFlag = cli.Uint64Flag{
+		Name:  "txpool.pricelimit",
+		Usage: "Minimum gas price limit to enforce for acceptance into the pool",
+		Value: eth.DefaultConfig.TxPool.PriceLimit,
+	}
+	TxPoolPriceBumpFlag = cli.Uint64Flag{
+		Name:  "txpool.pricebump",
+		Usage: "Price bump percentage to replace an already existing transaction",
+		Value: eth.DefaultConfig.TxPool.PriceBump,
+	}
+	TxPoolAccountSlotsFlag = cli.Uint64Flag{
+		Name:  "txpool.accountslots",
+		Usage: "Minimum number of executable transaction slots guaranteed per account",
+		Value: eth.DefaultConfig.TxPool.AccountSlots,
+	}
+	TxPoolGlobalSlotsFlag = cli.Uint64Flag{
+		Name:  "txpool.globalslots",
+		Usage: "Maximum number of executable transaction slots for all accounts",
+		Value: eth.DefaultConfig.TxPool.GlobalSlots,
+	}
+	TxPoolAccountQueueFlag = cli.Uint64Flag{
+		Name:  "txpool.accountqueue",
+		Usage: "Maximum number of non-executable transaction slots permitted per account",
+		Value: eth.DefaultConfig.TxPool.AccountQueue,
+	}
+	TxPoolGlobalQueueFlag = cli.Uint64Flag{
+		Name:  "txpool.globalqueue",
+		Usage: "Maximum number of non-executable transaction slots for all accounts",
+		Value: eth.DefaultConfig.TxPool.GlobalQueue,
+	}
+	TxPoolLifetimeFlag = cli.DurationFlag{
+		Name:  "txpool.lifetime",
+		Usage: "Maximum amount of time non-executable transaction are queued",
+		Value: eth.DefaultConfig.TxPool.Lifetime,
+	}
 	// Performance tuning settings
 	CacheFlag = cli.IntFlag{
 		Name:  "cache",
@@ -784,6 +821,30 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
 	}
 }
 
+func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
+	if ctx.GlobalIsSet(TxPoolPriceLimitFlag.Name) {
+		cfg.PriceLimit = ctx.GlobalUint64(TxPoolPriceLimitFlag.Name)
+	}
+	if ctx.GlobalIsSet(TxPoolPriceBumpFlag.Name) {
+		cfg.PriceBump = ctx.GlobalUint64(TxPoolPriceBumpFlag.Name)
+	}
+	if ctx.GlobalIsSet(TxPoolAccountSlotsFlag.Name) {
+		cfg.AccountSlots = ctx.GlobalUint64(TxPoolAccountSlotsFlag.Name)
+	}
+	if ctx.GlobalIsSet(TxPoolGlobalSlotsFlag.Name) {
+		cfg.GlobalSlots = ctx.GlobalUint64(TxPoolGlobalSlotsFlag.Name)
+	}
+	if ctx.GlobalIsSet(TxPoolAccountQueueFlag.Name) {
+		cfg.AccountQueue = ctx.GlobalUint64(TxPoolAccountQueueFlag.Name)
+	}
+	if ctx.GlobalIsSet(TxPoolGlobalQueueFlag.Name) {
+		cfg.GlobalQueue = ctx.GlobalUint64(TxPoolGlobalQueueFlag.Name)
+	}
+	if ctx.GlobalIsSet(TxPoolLifetimeFlag.Name) {
+		cfg.Lifetime = ctx.GlobalDuration(TxPoolLifetimeFlag.Name)
+	}
+}
+
 func setEthash(ctx *cli.Context, cfg *eth.Config) {
 	if ctx.GlobalIsSet(EthashCacheDirFlag.Name) {
 		cfg.EthashCacheDir = ctx.GlobalString(EthashCacheDirFlag.Name)
@@ -826,6 +887,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
 	ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
 	setEtherbase(ctx, ks, cfg)
 	setGPO(ctx, &cfg.GPO)
+	setTxPool(ctx, &cfg.TxPool)
 	setEthash(ctx, cfg)
 
 	switch {
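The three cmd changes above are pure plumbing: the new --txpool.* flags are registered in main.go, grouped under TRANSACTION POOL in the help output, and copied into the pool configuration by setTxPool before SetEthConfig hands the result to the eth service. A minimal sketch of the same wiring done programmatically (not part of the commit; the override values are invented and the state/gas-limit callbacks are borrowed from setupTxPool in core/tx_pool_test.go further down):

    package main

    import (
        "math/big"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/state"
        "github.com/ethereum/go-ethereum/ethdb"
        "github.com/ethereum/go-ethereum/event"
        "github.com/ethereum/go-ethereum/params"
    )

    func main() {
        // In-memory state backend, mirroring the test helper in this commit.
        db, _ := ethdb.NewMemDatabase()
        statedb, _ := state.New(common.Hash{}, db)

        // Start from the new defaults and override what the flags would override.
        cfg := core.DefaultTxPoolConfig
        cfg.PriceLimit = 20    // what --txpool.pricelimit 20 would set (value is hypothetical)
        cfg.GlobalSlots = 8192 // what --txpool.globalslots 8192 would set (value is hypothetical)

        pool := core.NewTxPool(cfg, params.TestChainConfig, new(event.TypeMux),
            func() (*state.StateDB, error) { return statedb, nil },
            func() *big.Int { return big.NewInt(1000000) })
        _ = pool
    }

On the command line the equivalent is simply geth --txpool.pricelimit 20 --txpool.globalslots 8192, since setTxPool only copies flags that were explicitly set.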
diff --git a/core/tx_list.go b/core/tx_list.go
index eb380da0b..08d7d80e8 100644
--- a/core/tx_list.go
+++ b/core/tx_list.go
@@ -246,11 +246,11 @@ func (l *txList) Overlaps(tx *types.Transaction) bool {
 //
 // If the new transaction is accepted into the list, the lists' cost threshold
 // is also potentially updated.
-func (l *txList) Add(tx *types.Transaction) (bool, *types.Transaction) {
+func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Transaction) {
 	// If there's an older better transaction, abort
 	old := l.txs.Get(tx.Nonce())
 	if old != nil {
-		threshold := new(big.Int).Div(new(big.Int).Mul(old.GasPrice(), big.NewInt(100+minPriceBumpPercent)), big.NewInt(100))
+		threshold := new(big.Int).Div(new(big.Int).Mul(old.GasPrice(), big.NewInt(100+int64(priceBump))), big.NewInt(100))
 		if threshold.Cmp(tx.GasPrice()) >= 0 {
 			return false, nil
 		}
diff --git a/core/tx_list_test.go b/core/tx_list_test.go
index 92b211937..b4f0b5228 100644
--- a/core/tx_list_test.go
+++ b/core/tx_list_test.go
@@ -38,7 +38,7 @@ func TestStrictTxListAdd(t *testing.T) {
 	// Insert the transactions in a random order
 	list := newTxList(true)
 	for _, v := range rand.Perm(len(txs)) {
-		list.Add(txs[v])
+		list.Add(txs[v], DefaultTxPoolConfig.PriceBump)
 	}
 	// Verify internal state
 	if len(list.txs.items) != len(txs) {
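The txList.Add change above is where the configurable PriceBump takes effect: a transaction can only replace a pending one with the same nonce if its gas price clears old * (100 + PriceBump) / 100. A small self-contained illustration (not from the commit) with the default bump of 10 percent:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/core"
    )

    func main() {
        old := big.NewInt(100)                     // gas price already sitting at this nonce
        bump := core.DefaultTxPoolConfig.PriceBump // 10 (percent)

        // Same arithmetic as txList.Add above.
        threshold := new(big.Int).Div(new(big.Int).Mul(old, big.NewInt(100+int64(bump))), big.NewInt(100))
        fmt.Println(threshold) // 110: a replacement at 110 or below is rejected, 111 and up is accepted
    }

TestTransactionReplacement near the end of core/tx_pool_test.go exercises the same formula through DefaultTxPoolConfig.PriceBump.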
diff --git a/core/tx_pool.go b/core/tx_pool.go
index 6f67aaa0a..7aea02101 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -48,14 +48,8 @@ var (
 )
 
 var (
-	minPendingPerAccount = uint64(16)      // Min number of guaranteed transaction slots per address
-	maxPendingTotal      = uint64(4096)    // Max limit of pending transactions from all accounts (soft)
-	maxQueuedPerAccount  = uint64(64)      // Max limit of queued transactions per address
-	maxQueuedTotal       = uint64(1024)    // Max limit of queued transactions from all accounts
-	maxQueuedLifetime    = 3 * time.Hour   // Max amount of time transactions from idle accounts are queued
-	minPriceBumpPercent  = int64(10)       // Minimum price bump needed to replace an old transaction
-	evictionInterval     = time.Minute     // Time interval to check for evictable transactions
-	statsReportInterval  = 8 * time.Second // Time interval to report transaction pool stats
+	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
+	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
 )
 
 var (
@@ -78,6 +72,48 @@ var (
 
 type stateFn func() (*state.StateDB, error)
 
+// TxPoolConfig are the configuration parameters of the transaction pool.
+type TxPoolConfig struct {
+	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
+	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
+
+	AccountSlots uint64 // Minimum number of executable transaction slots guaranteed per account
+	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
+	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
+	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts
+
+	Lifetime time.Duration // Maximum amount of time non-executable transaction are queued
+}
+
+// DefaultTxPoolConfig contains the default configurations for the transaction
+// pool.
+var DefaultTxPoolConfig = TxPoolConfig{
+	PriceLimit: 1,
+	PriceBump:  10,
+
+	AccountSlots: 16,
+	GlobalSlots:  4096,
+	AccountQueue: 64,
+	GlobalQueue:  1024,
+
+	Lifetime: 3 * time.Hour,
+}
+
+// sanitize checks the provided user configurations and changes anything that's
+// unreasonable or unworkable.
+func (config *TxPoolConfig) sanitize() TxPoolConfig {
+	conf := *config
+	if conf.PriceLimit < 1 {
+		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
+		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
+	}
+	if conf.PriceBump < 1 {
+		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
+		conf.PriceBump = DefaultTxPoolConfig.PriceBump
+	}
+	return conf
+}
+
 // TxPool contains all currently known transactions. Transactions
 // enter the pool when they are received from the network or submitted
 // locally. They exit the pool when they are included in the blockchain.
@@ -86,7 +122,8 @@ type stateFn func() (*state.StateDB, error)
 // current state) and future transactions. Transactions move between those
 // two states over time as they are received and processed.
 type TxPool struct {
-	config *params.ChainConfig
+	config      TxPoolConfig
+	chainconfig *params.ChainConfig
 	currentState stateFn // The state function which will allow us to do some pre checks
 	pendingState *state.ManagedState
 	gasLimit     func() *big.Int // The current gas limit function callback
@@ -109,10 +146,17 @@ type TxPool struct {
 	homestead bool
 }
 
-func NewTxPool(config *params.ChainConfig, eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
+// NewTxPool creates a new transaction pool to gather, sort and filter inbound
+// trnsactions from the network.
+func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
+	// Sanitize the input to ensure no vulnerable gas prices are set
+	config = (&config).sanitize()
+
+	// Create the transaction pool with its initial settings
 	pool := &TxPool{
 		config:       config,
-		signer:       types.NewEIP155Signer(config.ChainId),
+		chainconfig:  chainconfig,
+		signer:       types.NewEIP155Signer(chainconfig.ChainId),
 		pending:      make(map[common.Address]*txList),
 		queue:        make(map[common.Address]*txList),
 		beats:        make(map[common.Address]time.Time),
@@ -120,7 +164,7 @@ func NewTxPool(config *params.ChainConfig, eventMux *event.TypeMux, currentState
 		eventMux:     eventMux,
 		currentState: currentStateFn,
 		gasLimit:     gasLimitFn,
-		gasPrice:     big.NewInt(1),
+		gasPrice:     new(big.Int).SetUint64(config.PriceLimit),
 		pendingState: nil,
 		locals:       newTxSet(),
 		events:       eventMux.Subscribe(ChainHeadEvent{}, RemovedTransactionEvent{}),
@@ -129,6 +173,7 @@ func NewTxPool(config *params.ChainConfig, eventMux *event.TypeMux, currentState
 	pool.priced = newTxPricedList(&pool.all)
 	pool.resetState()
 
+	// Start the various events loops and return
 	pool.wg.Add(2)
 	go pool.eventLoop()
 	go pool.expirationLoop()
@@ -159,7 +204,7 @@ func (pool *TxPool) eventLoop() {
 		case ChainHeadEvent:
 			pool.mu.Lock()
 			if ev.Block != nil {
-				if pool.config.IsHomestead(ev.Block.Number()) {
+				if pool.chainconfig.IsHomestead(ev.Block.Number()) {
 					pool.homestead = true
 				}
 			}
@@ -388,7 +433,7 @@ func (pool *TxPool) add(tx *types.Transaction) (bool, error) {
 		return false, err
 	}
 	// If the transaction pool is full, discard underpriced transactions
-	if uint64(len(pool.all)) >= maxPendingTotal+maxQueuedTotal {
+	if uint64(len(pool.all)) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
 		// If the new transaction is underpriced, don't accept it
 		if pool.priced.Underpriced(tx, pool.locals) {
 			log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
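With the hard-coded maxPendingTotal and maxQueuedTotal gone, the pool's overall capacity in add() is GlobalSlots + GlobalQueue. A throwaway check of the default ceiling (not from the commit):

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/core"
    )

    func main() {
        cfg := core.DefaultTxPoolConfig
        // add() starts rejecting or evicting once len(pool.all) reaches this figure.
        fmt.Println(cfg.GlobalSlots + cfg.GlobalQueue) // 4096 + 1024 = 5120
    }

Note that NewTxPool runs sanitize on whatever configuration it receives, so a PriceLimit or PriceBump of 0 is reset to the defaults (with a warning logged) rather than disabling these checks.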
@@ -396,7 +441,7 @@ func (pool *TxPool) add(tx *types.Transaction) (bool, error) {
 			return false, ErrUnderpriced
 		}
 		// New transaction is better than our worse ones, make room for it
-		drop := pool.priced.Discard(len(pool.all)-int(maxPendingTotal+maxQueuedTotal-1), pool.locals)
+		drop := pool.priced.Discard(len(pool.all)-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals)
 		for _, tx := range drop {
 			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
 			underpricedTxCounter.Inc(1)
@@ -407,7 +452,7 @@ func (pool *TxPool) add(tx *types.Transaction) (bool, error) {
 	from, _ := types.Sender(pool.signer, tx) // already validated
 	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
 		// Nonce already pending, check if required price bump is met
-		inserted, old := list.Add(tx)
+		inserted, old := list.Add(tx, pool.config.PriceBump)
 		if !inserted {
 			pendingDiscardCounter.Inc(1)
 			return false, ErrReplaceUnderpriced
@@ -442,7 +487,7 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er
 	if pool.queue[from] == nil {
 		pool.queue[from] = newTxList(false)
 	}
-	inserted, old := pool.queue[from].Add(tx)
+	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
 	if !inserted {
 		// An older transaction was better, discard this
 		queuedDiscardCounter.Inc(1)
@@ -469,7 +514,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
 	}
 	list := pool.pending[addr]
 
-	inserted, old := list.Add(tx)
+	inserted, old := list.Add(tx, pool.config.PriceBump)
 	if !inserted {
 		// An older transaction was better, discard this
 		delete(pool.all, hash)
@@ -644,7 +689,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
 			pool.promoteTx(addr, hash, tx)
 		}
 		// Drop all transactions over the allowed limit
-		for _, tx := range list.Cap(int(maxQueuedPerAccount)) {
+		for _, tx := range list.Cap(int(pool.config.AccountQueue)) {
 			hash := tx.Hash()
 			log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
 			delete(pool.all, hash)
@@ -663,13 +708,13 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
 	for _, list := range pool.pending {
 		pending += uint64(list.Len())
 	}
-	if pending > maxPendingTotal {
+	if pending > pool.config.GlobalSlots {
 		pendingBeforeCap := pending
 		// Assemble a spam order to penalize large transactors first
 		spammers := prque.New()
 		for addr, list := range pool.pending {
 			// Only evict transactions from high rollers
-			if uint64(list.Len()) > minPendingPerAccount {
+			if uint64(list.Len()) > pool.config.AccountSlots {
 				// Skip local accounts as pools should maintain backlogs for themselves
 				for _, tx := range list.txs.items {
 					if !pool.locals.contains(tx.Hash()) {
@@ -681,7 +726,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
 		}
 		// Gradually drop transactions from offenders
 		offenders := []common.Address{}
-		for pending > maxPendingTotal && !spammers.Empty() {
+		for pending > pool.config.GlobalSlots && !spammers.Empty() {
 			// Retrieve the next offender if not local address
 			offender, _ := spammers.Pop()
 			offenders = append(offenders, offender.(common.Address))
@@ -692,7 +737,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
 				threshold := pool.pending[offender.(common.Address)].Len()
 
 				// Iteratively reduce all offenders until below limit or threshold reached
-				for pending > maxPendingTotal && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
+				for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
 					for i := 0; i < len(offenders)-1; i++ {
 						list := pool.pending[offenders[i]]
 						list.Cap(list.Len() - 1)
@@ -702,8 +747,8 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
 			}
 		}
 		// If still above threshold, reduce to limit or min allowance
-		if pending > maxPendingTotal && len(offenders) > 0 {
-			for pending > maxPendingTotal && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > minPendingPerAccount {
+		if pending > pool.config.GlobalSlots && len(offenders) > 0 {
+			for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
 				for _, addr := range offenders {
 					list := pool.pending[addr]
 					list.Cap(list.Len() - 1)
@@ -714,7 +759,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
 		pendingRLCounter.Inc(int64(pendingBeforeCap - pending))
 	}
 	// If we've queued more transactions than the hard limit, drop oldest ones
-	if queued > maxQueuedTotal {
+	if queued > pool.config.GlobalQueue {
 		// Sort all accounts with queued transactions by heartbeat
 		addresses := make(addresssByHeartbeat, 0, len(pool.queue))
 		for addr := range pool.queue {
@@ -723,7 +768,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
 		sort.Sort(addresses)
 
 		// Drop transactions until the total is below the limit
-		for drop := queued - maxQueuedTotal; drop > 0; {
+		for drop := queued - pool.config.GlobalQueue; drop > 0; {
 			addr := addresses[len(addresses)-1]
 			list := pool.queue[addr.address]
 
@@ -800,7 +845,7 @@ func (pool *TxPool) expirationLoop() {
 		case <-evict.C:
 			pool.mu.Lock()
 			for addr := range pool.queue {
-				if time.Since(pool.beats[addr]) > maxQueuedLifetime {
+				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
 					for _, tx := range pool.queue[addr].Flatten() {
 						pool.removeTx(tx.Hash())
 					}
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index e773daa2c..63e0144bd 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -46,7 +46,7 @@ func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
 	statedb, _ := state.New(common.Hash{}, db)
 	key, _ := crypto.GenerateKey()
 
-	newPool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+	newPool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
 	newPool.resetState()
 
 	return newPool, key
@@ -95,7 +95,7 @@ func TestStateChangeDuringPoolReset(t *testing.T) {
 	gasLimitFunc := func() *big.Int { return big.NewInt(1000000000) }
 
-	txpool := NewTxPool(params.TestChainConfig, mux, stateFunc, gasLimitFunc)
+	txpool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, mux, stateFunc, gasLimitFunc)
 	txpool.resetState()
 
 	nonce := txpool.State().GetNonce(address)
@@ -533,25 +533,25 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
 	pool.resetState()
 
 	// Keep queuing up transactions and make sure all above a limit are dropped
-	for i := uint64(1); i <= maxQueuedPerAccount+5; i++ {
+	for i := uint64(1); i <= DefaultTxPoolConfig.AccountQueue+5; i++ {
 		if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
 			t.Fatalf("tx %d: failed to add transaction: %v", i, err)
 		}
 		if len(pool.pending) != 0 {
 			t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0)
 		}
-		if i <= maxQueuedPerAccount {
+		if i <= DefaultTxPoolConfig.AccountQueue {
 			if pool.queue[account].Len() != int(i) {
 				t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i)
 			}
 		} else {
-			if pool.queue[account].Len() != int(maxQueuedPerAccount) {
-				t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), maxQueuedPerAccount)
+			if pool.queue[account].Len() != int(DefaultTxPoolConfig.AccountQueue) {
+				t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), DefaultTxPoolConfig.AccountQueue)
 			}
 		}
 	}
-	if len(pool.all) != int(maxQueuedPerAccount) {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), maxQueuedPerAccount)
+	if len(pool.all) != int(DefaultTxPoolConfig.AccountQueue) {
+		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), DefaultTxPoolConfig.AccountQueue)
 	}
 }
 
@@ -559,14 +559,14 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
 // some threshold, the higher transactions are dropped to prevent DOS attacks.
 func TestTransactionQueueGlobalLimiting(t *testing.T) {
 	// Reduce the queue limits to shorten test time
-	defer func(old uint64) { maxQueuedTotal = old }(maxQueuedTotal)
-	maxQueuedTotal = maxQueuedPerAccount * 3
+	defer func(old uint64) { DefaultTxPoolConfig.GlobalQueue = old }(DefaultTxPoolConfig.GlobalQueue)
+	DefaultTxPoolConfig.GlobalQueue = DefaultTxPoolConfig.AccountQueue * 3
 
 	// Create the pool to test the limit enforcement with
 	db, _ := ethdb.NewMemDatabase()
 	statedb, _ := state.New(common.Hash{}, db)
 
-	pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
 	pool.resetState()
 
 	// Create a number of test accounts and fund them
@@ -580,7 +580,7 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) {
 
 	// Generate and queue a batch of transactions
 	nonces := make(map[common.Address]uint64)
-	txs := make(types.Transactions, 0, 3*maxQueuedTotal)
+	txs := make(types.Transactions, 0, 3*DefaultTxPoolConfig.GlobalQueue)
 	for len(txs) < cap(txs) {
 		key := keys[rand.Intn(len(keys))]
 		addr := crypto.PubkeyToAddress(key.PublicKey)
@@ -593,13 +593,13 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) {
 
 	queued := 0
 	for addr, list := range pool.queue {
-		if list.Len() > int(maxQueuedPerAccount) {
-			t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), maxQueuedPerAccount)
+		if list.Len() > int(DefaultTxPoolConfig.AccountQueue) {
+			t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), DefaultTxPoolConfig.AccountQueue)
 		}
 		queued += list.Len()
 	}
-	if queued > int(maxQueuedTotal) {
-		t.Fatalf("total transactions overflow allowance: %d > %d", queued, maxQueuedTotal)
+	if queued > int(DefaultTxPoolConfig.GlobalQueue) {
+		t.Fatalf("total transactions overflow allowance: %d > %d", queued, DefaultTxPoolConfig.GlobalQueue)
 	}
 }
 
@@ -608,9 +608,9 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) {
 // on shuffling them around.
 func TestTransactionQueueTimeLimiting(t *testing.T) {
 	// Reduce the queue limits to shorten test time
-	defer func(old time.Duration) { maxQueuedLifetime = old }(maxQueuedLifetime)
+	defer func(old time.Duration) { DefaultTxPoolConfig.Lifetime = old }(DefaultTxPoolConfig.Lifetime)
 	defer func(old time.Duration) { evictionInterval = old }(evictionInterval)
-	maxQueuedLifetime = time.Second
+	DefaultTxPoolConfig.Lifetime = time.Second
 	evictionInterval = time.Second
 
 	// Create a test account and fund it
@@ -621,7 +621,7 @@ func TestTransactionQueueTimeLimiting(t *testing.T) {
 	state.AddBalance(account, big.NewInt(1000000))
 
 	// Queue up a batch of transactions
-	for i := uint64(1); i <= maxQueuedPerAccount; i++ {
+	for i := uint64(1); i <= DefaultTxPoolConfig.AccountQueue; i++ {
 		if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
 			t.Fatalf("tx %d: failed to add transaction: %v", i, err)
 		}
@@ -646,7 +646,7 @@ func TestTransactionPendingLimiting(t *testing.T) {
 	pool.resetState()
 
 	// Keep queuing up transactions and make sure all above a limit are dropped
-	for i := uint64(0); i < maxQueuedPerAccount+5; i++ {
+	for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
 		if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
 			t.Fatalf("tx %d: failed to add transaction: %v", i, err)
 		}
@@ -657,8 +657,8 @@ func TestTransactionPendingLimiting(t *testing.T) {
 			t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0)
 		}
 	}
-	if len(pool.all) != int(maxQueuedPerAccount+5) {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), maxQueuedPerAccount+5)
+	if len(pool.all) != int(DefaultTxPoolConfig.AccountQueue+5) {
+		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), DefaultTxPoolConfig.AccountQueue+5)
 	}
 }
 
@@ -674,7 +674,7 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
 	state1, _ := pool1.currentState()
 	state1.AddBalance(account1, big.NewInt(1000000))
 
-	for i := uint64(0); i < maxQueuedPerAccount+5; i++ {
+	for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
 		if err := pool1.Add(transaction(origin+i, big.NewInt(100000), key1)); err != nil {
 			t.Fatalf("tx %d: failed to add transaction: %v", i, err)
 		}
@@ -686,7 +686,7 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
 	state2.AddBalance(account2, big.NewInt(1000000))
 
 	txns := []*types.Transaction{}
-	for i := uint64(0); i < maxQueuedPerAccount+5; i++ {
+	for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
 		txns = append(txns, transaction(origin+i, big.NewInt(100000), key2))
 	}
 	pool2.AddBatch(txns)
@@ -708,14 +708,14 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
 // attacks.
 func TestTransactionPendingGlobalLimiting(t *testing.T) {
 	// Reduce the queue limits to shorten test time
-	defer func(old uint64) { maxPendingTotal = old }(maxPendingTotal)
-	maxPendingTotal = minPendingPerAccount * 10
+	defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots)
+	DefaultTxPoolConfig.GlobalSlots = DefaultTxPoolConfig.AccountSlots * 10
 
 	// Create the pool to test the limit enforcement with
 	db, _ := ethdb.NewMemDatabase()
 	statedb, _ := state.New(common.Hash{}, db)
 
-	pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
 	pool.resetState()
 
 	// Create a number of test accounts and fund them
@@ -732,7 +732,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
 	txs := types.Transactions{}
 	for _, key := range keys {
 		addr := crypto.PubkeyToAddress(key.PublicKey)
-		for j := 0; j < int(maxPendingTotal)/len(keys)*2; j++ {
+		for j := 0; j < int(DefaultTxPoolConfig.GlobalSlots)/len(keys)*2; j++ {
 			txs = append(txs, transaction(nonces[addr], big.NewInt(100000), key))
 			nonces[addr]++
 		}
@@ -744,8 +744,8 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
 	for _, list := range pool.pending {
 		pending += list.Len()
 	}
-	if pending > int(maxPendingTotal) {
-		t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, maxPendingTotal)
+	if pending > int(DefaultTxPoolConfig.GlobalSlots) {
+		t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, DefaultTxPoolConfig.GlobalSlots)
 	}
 }
 
@@ -754,14 +754,14 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
 // the transactions are still kept.
 func TestTransactionPendingMinimumAllowance(t *testing.T) {
 	// Reduce the queue limits to shorten test time
-	defer func(old uint64) { maxPendingTotal = old }(maxPendingTotal)
-	maxPendingTotal = 0
+	defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots)
+	DefaultTxPoolConfig.GlobalSlots = 0
 
 	// Create the pool to test the limit enforcement with
 	db, _ := ethdb.NewMemDatabase()
 	statedb, _ := state.New(common.Hash{}, db)
 
-	pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
 	pool.resetState()
 
 	// Create a number of test accounts and fund them
@@ -778,7 +778,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
 	txs := types.Transactions{}
 	for _, key := range keys {
 		addr := crypto.PubkeyToAddress(key.PublicKey)
-		for j := 0; j < int(minPendingPerAccount)*2; j++ {
+		for j := 0; j < int(DefaultTxPoolConfig.AccountSlots)*2; j++ {
 			txs = append(txs, transaction(nonces[addr], big.NewInt(100000), key))
 			nonces[addr]++
 		}
@@ -787,8 +787,8 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
 	pool.AddBatch(txs)
 
 	for addr, list := range pool.pending {
-		if list.Len() != int(minPendingPerAccount) {
-			t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), minPendingPerAccount)
+		if list.Len() != int(DefaultTxPoolConfig.AccountSlots) {
+			t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), DefaultTxPoolConfig.AccountSlots)
 		}
 	}
 }
@@ -803,7 +803,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
 	db, _ := ethdb.NewMemDatabase()
 	statedb, _ := state.New(common.Hash{}, db)
 
-	pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
 	pool.resetState()
 
 	// Create a number of test accounts and fund them
@@ -874,17 +874,17 @@ func TestTransactionPoolRepricing(t *testing.T) {
 // Note, local transactions are never allowed to be dropped.
 func TestTransactionPoolUnderpricing(t *testing.T) {
 	// Reduce the queue limits to shorten test time
-	defer func(old uint64) { maxPendingTotal = old }(maxPendingTotal)
-	maxPendingTotal = 2
+	defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots)
+	DefaultTxPoolConfig.GlobalSlots = 2
 
-	defer func(old uint64) { maxQueuedTotal = old }(maxQueuedTotal)
-	maxQueuedTotal = 2
+	defer func(old uint64) { DefaultTxPoolConfig.GlobalQueue = old }(DefaultTxPoolConfig.GlobalQueue)
+	DefaultTxPoolConfig.GlobalQueue = 2
 
 	// Create the pool to test the pricing enforcement with
 	db, _ := ethdb.NewMemDatabase()
 	statedb, _ := state.New(common.Hash{}, db)
 
-	pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
 	pool.resetState()
 
 	// Create a number of test accounts and fund them
@@ -960,7 +960,7 @@ func TestTransactionReplacement(t *testing.T) {
 	db, _ := ethdb.NewMemDatabase()
 	statedb, _ := state.New(common.Hash{}, db)
 
-	pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
 	pool.resetState()
 
 	// Create a a test account to add transactions with
@@ -971,7 +971,7 @@ func TestTransactionReplacement(t *testing.T) {
 
 	// Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too)
 	price := int64(100)
-	threshold := (price * (100 + minPriceBumpPercent)) / 100
+	threshold := (price * (100 + int64(DefaultTxPoolConfig.PriceBump))) / 100
 
 	if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(1), key)); err != nil {
 		t.Fatalf("failed to add original cheap pending transaction: %v", err)
diff --git a/eth/backend.go b/eth/backend.go
index dc813ed0d..639792333 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -150,7 +150,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 		core.WriteChainConfig(chainDb, genesisHash, chainConfig)
 	}
 
-	newPool := core.NewTxPool(eth.chainConfig, eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
+	newPool := core.NewTxPool(config.TxPool, eth.chainConfig, eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
 	eth.txPool = newPool
 
 	maxPeers := config.MaxPeers
diff --git a/eth/config.go b/eth/config.go
index 22c09b170..4109cff8b 100644
--- a/eth/config.go
+++ b/eth/config.go
@@ -44,6 +44,7 @@ var DefaultConfig = Config{
 	DatabaseCache: 128,
 	GasPrice:      big.NewInt(18 * params.Shannon),
 
+	TxPool: core.DefaultTxPoolConfig,
 	GPO: gasprice.Config{
 		Blocks:     10,
 		Percentile: 50,
@@ -99,6 +100,9 @@ type Config struct {
 	EthashDatasetsInMem  int
 	EthashDatasetsOnDisk int
 
+	// Transaction pool options
+	TxPool core.TxPoolConfig
+
 	// Gas Price Oracle options
 	GPO gasprice.Config
 
diff --git a/eth/gen_config.go b/eth/gen_config.go
index 955facf8f..477479419 100644
--- a/eth/gen_config.go
+++ b/eth/gen_config.go
@@ -33,6 +33,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
 		EthashDatasetDir        string
 		EthashDatasetsInMem     int
 		EthashDatasetsOnDisk    int
+		TxPool                  core.TxPoolConfig
 		GPO                     gasprice.Config
 		EnablePreimageRecording bool
 		DocRoot                 string `toml:"-"`
@@ -60,6 +61,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
 	enc.EthashDatasetDir = c.EthashDatasetDir
 	enc.EthashDatasetsInMem = c.EthashDatasetsInMem
 	enc.EthashDatasetsOnDisk = c.EthashDatasetsOnDisk
+	enc.TxPool = c.TxPool
 	enc.GPO = c.GPO
 	enc.EnablePreimageRecording = c.EnablePreimageRecording
 	enc.DocRoot = c.DocRoot
@@ -90,6 +92,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 		EthashDatasetDir        *string
 		EthashDatasetsInMem     *int
 		EthashDatasetsOnDisk    *int
+		TxPool                  *core.TxPoolConfig
 		GPO                     *gasprice.Config
 		EnablePreimageRecording *bool
 		DocRoot                 *string `toml:"-"`
@@ -158,6 +161,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 	if dec.EthashDatasetsOnDisk != nil {
 		c.EthashDatasetsOnDisk = *dec.EthashDatasetsOnDisk
 	}
+	if dec.TxPool != nil {
+		c.TxPool = *dec.TxPool
+	}
 	if dec.GPO != nil {
 		c.GPO = *dec.GPO
 	}
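Because TxPool is now an ordinary field of eth.Config (with TOML marshalling generated above), embedders of the eth package can tune the pool without going through CLI flags at all. A hypothetical fragment (not from the commit; the override values are invented, and the resulting config would be passed to eth.New when registering the service on a node, just as cmd/geth does after SetEthConfig):

    package example

    import (
        "time"

        "github.com/ethereum/go-ethereum/eth"
    )

    // TxPoolTunedConfig returns an eth.Config with a customised transaction pool.
    func TxPoolTunedConfig() eth.Config {
        cfg := eth.DefaultConfig        // already seeded with core.DefaultTxPoolConfig
        cfg.TxPool.AccountQueue = 128   // hypothetical override
        cfg.TxPool.Lifetime = time.Hour // hypothetical override
        return cfg
    }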