From 362d09404074a7c92a56f50cb466922cf985990b Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 25 Dec 2023 12:29:33 +0300 Subject: [PATCH 1/8] rename tx handlers --- plugin/evm/block_builder.go | 4 ++-- plugin/evm/message/codec.go | 2 +- plugin/evm/message/handler.go | 6 +++--- plugin/evm/message/handler_test.go | 12 ++++++------ plugin/evm/message/message.go | 18 +++++++++--------- plugin/evm/message/message_test.go | 6 +++--- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/plugin/evm/block_builder.go b/plugin/evm/block_builder.go index ac78237670..68cdbf8083 100644 --- a/plugin/evm/block_builder.go +++ b/plugin/evm/block_builder.go @@ -157,9 +157,9 @@ func (b *blockBuilder) awaitSubmittedTxs() { b.signalTxsReady() if b.gossiper != nil && len(ethTxsEvent.Txs) > 0 { - // [GossipTxs] will block unless [gossiper.txsToGossipChan] (an + // [GossipEthTxs] will block unless [gossiper.ethTxsToGossipChan] (an // unbuffered channel) is listened on - if err := b.gossiper.GossipTxs(ethTxsEvent.Txs); err != nil { + if err := b.gossiper.GossipEthTxs(ethTxsEvent.Txs); err != nil { log.Warn( "failed to gossip new eth transactions", "err", err, diff --git a/plugin/evm/message/codec.go b/plugin/evm/message/codec.go index 91db9633ab..e49838cd56 100644 --- a/plugin/evm/message/codec.go +++ b/plugin/evm/message/codec.go @@ -27,7 +27,7 @@ func init() { errs := wrappers.Errs{} errs.Add( // Gossip types - c.RegisterType(TxsGossip{}), + c.RegisterType(EthTxsGossip{}), // Types for state sync frontier consensus c.RegisterType(SyncSummary{}), diff --git a/plugin/evm/message/handler.go b/plugin/evm/message/handler.go index b5933f28f3..c0617b0312 100644 --- a/plugin/evm/message/handler.go +++ b/plugin/evm/message/handler.go @@ -19,13 +19,13 @@ var ( // GossipHandler handles incoming gossip messages type GossipHandler interface { - HandleTxs(nodeID ids.NodeID, msg TxsGossip) error + HandleEthTxs(nodeID ids.NodeID, msg EthTxsGossip) error } type NoopMempoolGossipHandler struct{} -func (NoopMempoolGossipHandler) HandleTxs(nodeID ids.NodeID, _ TxsGossip) error { - log.Debug("dropping unexpected Txs message", "peerID", nodeID) +func (NoopMempoolGossipHandler) HandleEthTxs(nodeID ids.NodeID, msg EthTxsGossip) error { + log.Debug("dropping unexpected EthTxsGossip message", "peerID", nodeID) return nil } diff --git a/plugin/evm/message/handler_test.go b/plugin/evm/message/handler_test.go index 37ca82ea4a..dad0ec2b90 100644 --- a/plugin/evm/message/handler_test.go +++ b/plugin/evm/message/handler_test.go @@ -12,11 +12,11 @@ import ( ) type CounterHandler struct { - Txs int + EthTxs int } -func (h *CounterHandler) HandleTxs(ids.NodeID, TxsGossip) error { - h.Txs++ +func (h *CounterHandler) HandleEthTxs(ids.NodeID, EthTxsGossip) error { + h.EthTxs++ return nil } @@ -24,11 +24,11 @@ func TestHandleTxs(t *testing.T) { assert := assert.New(t) handler := CounterHandler{} - msg := TxsGossip{} + msg := EthTxsGossip{} err := msg.Handle(&handler, ids.EmptyNodeID) assert.NoError(err) - assert.Equal(1, handler.Txs) + assert.Equal(1, handler.EthTxs) } func TestNoopHandler(t *testing.T) { @@ -36,6 +36,6 @@ func TestNoopHandler(t *testing.T) { handler := NoopMempoolGossipHandler{} - err := handler.HandleTxs(ids.EmptyNodeID, TxsGossip{}) + err := handler.HandleEthTxs(ids.EmptyNodeID, EthTxsGossip{}) assert.NoError(err) } diff --git a/plugin/evm/message/message.go b/plugin/evm/message/message.go index 89e584179f..35887911c9 100644 --- a/plugin/evm/message/message.go +++ b/plugin/evm/message/message.go @@ -14,15 +14,15 @@ 
import ( ) const ( - // TxMsgSoftCapSize is the ideal size of encoded transaction bytes we send in - // any [Txs] message. We do not limit inbound messages to + // EthMsgSoftCapSize is the ideal size of encoded transaction bytes we send in + // any [EthTxsGossip] or [AtomicTxGossip] message. We do not limit inbound messages to // this size, however. Max inbound message size is enforced by the codec // (512KB). - TxMsgSoftCapSize = 64 * units.KiB + EthMsgSoftCapSize = 64 * units.KiB ) var ( - _ GossipMessage = TxsGossip{} + _ GossipMessage = EthTxsGossip{} errUnexpectedCodecVersion = errors.New("unexpected codec version") ) @@ -35,16 +35,16 @@ type GossipMessage interface { Handle(handler GossipHandler, nodeID ids.NodeID) error } -type TxsGossip struct { +type EthTxsGossip struct { Txs []byte `serialize:"true"` } -func (msg TxsGossip) Handle(handler GossipHandler, nodeID ids.NodeID) error { - return handler.HandleTxs(nodeID, msg) +func (msg EthTxsGossip) Handle(handler GossipHandler, nodeID ids.NodeID) error { + return handler.HandleEthTxs(nodeID, msg) } -func (msg TxsGossip) String() string { - return fmt.Sprintf("TxsGossip(Len=%d)", len(msg.Txs)) +func (msg EthTxsGossip) String() string { + return fmt.Sprintf("EthTxsGossip(Len=%d)", len(msg.Txs)) } func ParseGossipMessage(codec codec.Manager, bytes []byte) (GossipMessage, error) { diff --git a/plugin/evm/message/message_test.go b/plugin/evm/message/message_test.go index 29f47f226d..0a18fde784 100644 --- a/plugin/evm/message/message_test.go +++ b/plugin/evm/message/message_test.go @@ -20,7 +20,7 @@ func TestMarshalTxs(t *testing.T) { base64EthTxGossip := "AAAAAAAAAAAABGJsYWg=" msg := []byte("blah") - builtMsg := TxsGossip{ + builtMsg := EthTxsGossip{ Txs: msg, } builtMsgBytes, err := BuildGossipMessage(Codec, builtMsg) @@ -30,7 +30,7 @@ func TestMarshalTxs(t *testing.T) { parsedMsgIntf, err := ParseGossipMessage(Codec, builtMsgBytes) assert.NoError(err) - parsedMsg, ok := parsedMsgIntf.(TxsGossip) + parsedMsg, ok := parsedMsgIntf.(EthTxsGossip) assert.True(ok) assert.Equal(msg, parsedMsg.Txs) @@ -39,7 +39,7 @@ func TestMarshalTxs(t *testing.T) { func TestTxsTooLarge(t *testing.T) { assert := assert.New(t) - builtMsg := TxsGossip{ + builtMsg := EthTxsGossip{ Txs: utils.RandomBytes(1024 * units.KiB), } _, err := BuildGossipMessage(Codec, builtMsg) From 30598a9cd850a6bc71ab8888aadf9dc2078fdd93 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 25 Dec 2023 12:29:42 +0300 Subject: [PATCH 2/8] add p2p sdk handlers --- go.mod | 30 ++--- go.sum | 61 +++++----- peer/network.go | 89 ++++++++------- peer/network_test.go | 78 +++++++++---- plugin/evm/gossip.go | 177 +++++++++++++++++++++++++++++ plugin/evm/gossip_mempool.go | 117 -------------------- plugin/evm/gossip_test.go | 26 +++++ plugin/evm/gossiper.go | 137 ++++++++++++++--------- plugin/evm/gossipper_test.go | 72 ++++++------ plugin/evm/syncervm_test.go | 2 +- plugin/evm/tx_gossip_test.go | 208 ++++++++++++++++++++++++++++------- plugin/evm/vm.go | 125 ++++++++++++--------- scripts/versions.sh | 2 +- sync/client/client.go | 2 +- x/warp/predicate_test.go | 4 +- 15 files changed, 720 insertions(+), 410 deletions(-) create mode 100644 plugin/evm/gossip.go delete mode 100644 plugin/evm/gossip_mempool.go create mode 100644 plugin/evm/gossip_test.go diff --git a/go.mod b/go.mod index d04b55474e..db5a1f4daf 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,8 @@ go 1.20 require ( github.com/VictoriaMetrics/fastcache v1.10.0 github.com/ava-labs/avalanche-network-runner 
v1.7.4-0.20231127162258-2f3ceed8ae4b - github.com/ava-labs/avalanchego v1.10.17 + github.com/ava-labs/avalanchego v1.10.18-rc.8 + github.com/ava-labs/coreth v0.12.9-rc.9.0.20231222191417-2e3f762373e9 github.com/cespare/cp v0.1.0 github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 github.com/davecgh/go-spew v1.1.1 @@ -29,8 +30,8 @@ require ( github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty v0.0.16 github.com/olekukonko/tablewriter v0.0.5 - github.com/onsi/ginkgo/v2 v2.8.1 - github.com/onsi/gomega v1.26.0 + github.com/onsi/ginkgo/v2 v2.13.1 + github.com/onsi/gomega v1.29.0 github.com/prometheus/client_golang v1.16.0 github.com/prometheus/client_model v0.3.0 github.com/shirou/gopsutil v3.21.11+incompatible @@ -44,10 +45,10 @@ require ( github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa go.uber.org/goleak v1.2.1 go.uber.org/mock v0.2.0 - golang.org/x/crypto v0.14.0 - golang.org/x/sync v0.3.0 - golang.org/x/sys v0.13.0 - golang.org/x/text v0.13.0 + golang.org/x/crypto v0.17.0 + golang.org/x/sync v0.5.0 + golang.org/x/sys v0.15.0 + golang.org/x/text v0.14.0 golang.org/x/time v0.1.0 google.golang.org/protobuf v1.31.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 @@ -57,7 +58,6 @@ require ( github.com/DataDog/zstd v1.5.2 // indirect github.com/Microsoft/go-winio v0.5.2 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect - github.com/ava-labs/coreth v0.12.9-rc.9 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.7.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect @@ -76,18 +76,19 @@ require ( github.com/dlclark/regexp2 v1.7.0 // indirect github.com/ethereum/c-kzg-4844 v0.2.0 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-stack/stack v1.8.1 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.3.0 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.2 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/google/renameio/v2 v2.0.0 // indirect github.com/gorilla/mux v1.8.0 // indirect @@ -144,10 +145,11 @@ require ( go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // indirect - golang.org/x/mod v0.11.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/term v0.13.0 // indirect + golang.org/x/exp v0.0.0-20231127185646-65229373498e // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/tools v0.16.0 // indirect gonum.org/v1/gonum v0.11.0 // indirect google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect diff --git a/go.sum b/go.sum index 5ac902dae5..161e2a63a7 100644 --- a/go.sum +++ b/go.sum @@ -61,10 
+61,10 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/ava-labs/avalanche-network-runner v1.7.4-0.20231127162258-2f3ceed8ae4b h1:iH6q+S7dmBOYCXrZx+nNlS1HBp72L2msiVCLs39Ls5A= github.com/ava-labs/avalanche-network-runner v1.7.4-0.20231127162258-2f3ceed8ae4b/go.mod h1:aeAm8dgJ1xucQKlYoRDMgYjA0UWGwmaICG9wL0WvseU= -github.com/ava-labs/avalanchego v1.10.17 h1:Ri01nU5ukKC38ZCkCh3namaMZtJkSuv1X/vC13uJguc= -github.com/ava-labs/avalanchego v1.10.17/go.mod h1:A6f3877qlq7bePjCU4T0D60bZGecRMCk15pMpJGOb4Q= -github.com/ava-labs/coreth v0.12.9-rc.9 h1:mvYxABdyPByXwwwIxnTBCiNO23dsE1Kfnd5H106lric= -github.com/ava-labs/coreth v0.12.9-rc.9/go.mod h1:yrf2vEah4Fgj6sJ4UpHewo4DLolwdpf2bJuLRT80PGw= +github.com/ava-labs/avalanchego v1.10.18-rc.8 h1:IBGS1psMY+HNm3w9kQHaHF1+SIssv7qdNV8H50mAGY4= +github.com/ava-labs/avalanchego v1.10.18-rc.8/go.mod h1:duoSU6Xb1HoVhQThAXqcCY7ik7LPWwQHb/YgdauXrfc= +github.com/ava-labs/coreth v0.12.9-rc.9.0.20231222191417-2e3f762373e9 h1:DiJBkm2IJ/My4u5DP4gq2wIbdflFRuZJbDm8DbgNDdg= +github.com/ava-labs/coreth v0.12.9-rc.9.0.20231222191417-2e3f762373e9/go.mod h1:Xftjgk8T46k5/pWSQWcmdPanNl68kTcufd9S4kB58bM= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -220,8 +220,8 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= @@ -234,6 +234,8 @@ github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5Nq github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= @@ -300,8 
+302,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -490,16 +492,16 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.8.1 h1:xFTEVwOFa1D/Ty24Ws1npBWkDYEV9BqZrsDxVrVkrrU= -github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc= +github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU= +github.com/onsi/ginkgo/v2 v2.13.1/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= -github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/otiai10/copy v1.11.0 h1:OKBD80J/mLBrwnzXqGtFCzprFSGioo30JcmR4APsNwc= @@ -582,6 +584,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= @@ -678,8 +681,8 @@ 
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -690,8 +693,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME= -golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No= +golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -718,8 +721,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -765,8 +768,8 @@ golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -789,8 +792,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -858,13 +861,13 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -875,8 +878,8 @@ golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -939,6 +942,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/peer/network.go b/peer/network.go index e10ab73654..3011356093 100644 --- a/peer/network.go +++ b/peer/network.go @@ -78,9 +78,10 @@ type Network interface { // (length of response divided by request time), and with 0 if the response is invalid. 
TrackBandwidth(nodeID ids.NodeID, bandwidth float64) - // NewAppProtocol reserves a protocol identifier and returns a corresponding - // client to send messages with - NewAppProtocol(protocol uint64, handler p2p.Handler, options ...p2p.ClientOption) (*p2p.Client, error) + // NewClient returns a client to send messages with for the given protocol + NewClient(protocol uint64, options ...p2p.ClientOption) *p2p.Client + // AddHandler registers a server handler for an application protocol + AddHandler(protocol uint64, handler p2p.Handler) error } // network is an implementation of Network that processes message requests for @@ -92,7 +93,7 @@ type network struct { outstandingRequestHandlers map[uint32]message.ResponseHandler // maps avalanchego requestID => message.ResponseHandler activeAppRequests *semaphore.Weighted // controls maximum number of active outbound requests activeCrossChainRequests *semaphore.Weighted // controls maximum number of active outbound cross chain requests - network *p2p.Network + p2pNetwork *p2p.Network appSender common.AppSender // avalanchego AppSender for sending messages codec codec.Manager // Codec used for parsing messages crossChainCodec codec.Manager // Codec used for parsing cross chain messages @@ -123,7 +124,7 @@ func NewNetwork(p2pNetwork *p2p.Network, appSender common.AppSender, codec codec outstandingRequestHandlers: make(map[uint32]message.ResponseHandler), activeAppRequests: semaphore.NewWeighted(maxActiveAppRequests), activeCrossChainRequests: semaphore.NewWeighted(maxActiveCrossChainRequests), - network: p2pNetwork, + p2pNetwork: p2pNetwork, gossipHandler: message.NoopMempoolGossipHandler{}, appRequestHandler: message.NoopRequestHandler{}, crossChainRequestHandler: message.NoopCrossChainRequestHandler{}, @@ -183,7 +184,7 @@ func (n *network) sendAppRequest(ctx context.Context, nodeID ids.NodeID, request return nil } - log.Trace("sending request to peer", "nodeID", nodeID, "requestLen", len(request)) + log.Debug("sending request to peer", "nodeID", nodeID, "requestLen", len(request)) n.peers.TrackPeer(nodeID) requestID := n.nextRequestID() @@ -200,7 +201,7 @@ func (n *network) sendAppRequest(ctx context.Context, nodeID ids.NodeID, request return err } - log.Trace("sent request message to peer", "nodeID", nodeID, "requestID", requestID) + log.Debug("sent request message to peer", "nodeID", nodeID, "requestID", requestID) return nil } @@ -232,7 +233,7 @@ func (n *network) SendCrossChainRequest(ctx context.Context, chainID ids.ID, req return err } - log.Trace("sent request message to chain", "chainID", chainID, "crossChainRequestID", requestID) + log.Debug("sent request message to chain", "chainID", chainID, "crossChainRequestID", requestID) return nil } @@ -244,21 +245,21 @@ func (n *network) CrossChainAppRequest(ctx context.Context, requestingChainID id return nil } - log.Trace("received CrossChainAppRequest from chain", "requestingChainID", requestingChainID, "requestID", requestID, "requestLen", len(request)) + log.Debug("received CrossChainAppRequest from chain", "requestingChainID", requestingChainID, "requestID", requestID, "requestLen", len(request)) var req message.CrossChainRequest if _, err := n.crossChainCodec.Unmarshal(request, &req); err != nil { - log.Trace("failed to unmarshal CrossChainAppRequest", "requestingChainID", requestingChainID, "requestID", requestID, "requestLen", len(request), "err", err) + log.Debug("failed to unmarshal CrossChainAppRequest", "requestingChainID", requestingChainID, "requestID", requestID, "requestLen", 
len(request), "err", err) return nil } bufferedDeadline, err := calculateTimeUntilDeadline(deadline, n.crossChainStats) if err != nil { - log.Trace("deadline to process CrossChainAppRequest has expired, skipping", "requestingChainID", requestingChainID, "requestID", requestID, "err", err) + log.Debug("deadline to process CrossChainAppRequest has expired, skipping", "requestingChainID", requestingChainID, "requestID", requestID, "err", err) return nil } - log.Trace("processing incoming CrossChainAppRequest", "requestingChainID", requestingChainID, "requestID", requestID, "req", req) + log.Debug("processing incoming CrossChainAppRequest", "requestingChainID", requestingChainID, "requestID", requestID, "req", req) handleCtx, cancel := context.WithDeadline(context.Background(), bufferedDeadline) defer cancel() @@ -280,13 +281,13 @@ func (n *network) CrossChainAppRequest(ctx context.Context, requestingChainID id // - request times out before a response is provided // If [requestID] is not known, this function will emit a log and return a nil error. // If the response handler returns an error it is propagated as a fatal error. -func (n *network) CrossChainAppRequestFailed(ctx context.Context, respondingChainID ids.ID, requestID uint32) error { - log.Trace("received CrossChainAppRequestFailed from chain", "respondingChainID", respondingChainID, "requestID", requestID) +func (n *network) CrossChainAppRequestFailed(ctx context.Context, respondingChainID ids.ID, requestID uint32, _ *common.AppError) error { + log.Debug("received CrossChainAppRequestFailed from chain", "respondingChainID", respondingChainID, "requestID", requestID) handler, exists := n.markRequestFulfilled(requestID) if !exists { // Can happen after the network has been closed. - log.Trace("received CrossChainAppRequestFailed to unknown request", "respondingChainID", respondingChainID, "requestID", requestID) + log.Debug("received CrossChainAppRequestFailed to unknown request", "respondingChainID", respondingChainID, "requestID", requestID) return nil } @@ -301,12 +302,12 @@ func (n *network) CrossChainAppRequestFailed(ctx context.Context, respondingChai // If [requestID] is not known, this function will emit a log and return a nil error. // If the response handler returns an error it is propagated as a fatal error. func (n *network) CrossChainAppResponse(ctx context.Context, respondingChainID ids.ID, requestID uint32, response []byte) error { - log.Trace("received CrossChainAppResponse from responding chain", "respondingChainID", respondingChainID, "requestID", requestID) + log.Debug("received CrossChainAppResponse from responding chain", "respondingChainID", respondingChainID, "requestID", requestID) handler, exists := n.markRequestFulfilled(requestID) if !exists { // Can happen after the network has been closed. 
- log.Trace("received CrossChainAppResponse to unknown request", "respondingChainID", respondingChainID, "requestID", requestID, "responseLen", len(response)) + log.Debug("received CrossChainAppResponse to unknown request", "respondingChainID", respondingChainID, "requestID", requestID, "responseLen", len(response)) return nil } @@ -326,21 +327,21 @@ func (n *network) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID u return nil } - log.Trace("received AppRequest from node", "nodeID", nodeID, "requestID", requestID, "requestLen", len(request)) + log.Debug("received AppRequest from node", "nodeID", nodeID, "requestID", requestID, "requestLen", len(request)) var req message.Request if _, err := n.codec.Unmarshal(request, &req); err != nil { - log.Trace("forwarding AppRequest to SDK network", "nodeID", nodeID, "requestID", requestID, "requestLen", len(request), "err", err) - return n.network.AppRequest(ctx, nodeID, requestID, deadline, request) + log.Debug("forwarding AppRequest to SDK network", "nodeID", nodeID, "requestID", requestID, "requestLen", len(request), "err", err) + return n.p2pNetwork.AppRequest(ctx, nodeID, requestID, deadline, request) } bufferedDeadline, err := calculateTimeUntilDeadline(deadline, n.appStats) if err != nil { - log.Trace("deadline to process AppRequest has expired, skipping", "nodeID", nodeID, "requestID", requestID, "err", err) + log.Debug("deadline to process AppRequest has expired, skipping", "nodeID", nodeID, "requestID", requestID, "err", err) return nil } - log.Trace("processing incoming request", "nodeID", nodeID, "requestID", requestID, "req", req) + log.Debug("processing incoming request", "nodeID", nodeID, "requestID", requestID, "req", req) // We make a new context here because we don't want to cancel the context // passed into n.AppSender.SendAppResponse below handleCtx, cancel := context.WithDeadline(context.Background(), bufferedDeadline) @@ -362,12 +363,12 @@ func (n *network) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID u // If [requestID] is not known, this function will emit a log and return a nil error. // If the response handler returns an error it is propagated as a fatal error. 
func (n *network) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { - log.Trace("received AppResponse from peer", "nodeID", nodeID, "requestID", requestID) + log.Debug("received AppResponse from peer", "nodeID", nodeID, "requestID", requestID) handler, exists := n.markRequestFulfilled(requestID) if !exists { - log.Trace("forwarding AppResponse to SDK network", "nodeID", nodeID, "requestID", requestID, "responseLen", len(response)) - return n.network.AppResponse(ctx, nodeID, requestID, response) + log.Debug("forwarding AppResponse to SDK network", "nodeID", nodeID, "requestID", requestID, "responseLen", len(response)) + return n.p2pNetwork.AppResponse(ctx, nodeID, requestID, response) } // We must release the slot @@ -382,13 +383,13 @@ func (n *network) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID // - request times out before a response is provided // error returned by this function is expected to be treated as fatal by the engine // returns error only when the response handler returns an error -func (n *network) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - log.Trace("received AppRequestFailed from peer", "nodeID", nodeID, "requestID", requestID) +func (n *network) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error { + log.Debug("received AppRequestFailed from peer", "nodeID", nodeID, "requestID", requestID) handler, exists := n.markRequestFulfilled(requestID) if !exists { - log.Trace("forwarding AppRequestFailed to SDK network", "nodeID", nodeID, "requestID", requestID) - return n.network.AppRequestFailed(ctx, nodeID, requestID) + log.Debug("forwarding AppRequestFailed to SDK network", "nodeID", nodeID, "requestID", requestID) + return n.p2pNetwork.AppRequestFailed(ctx, nodeID, requestID, appErr) } // We must release the slot @@ -445,22 +446,24 @@ func (n *network) Gossip(gossip []byte) error { return n.appSender.SendAppGossip(context.TODO(), gossip) } -// AppGossip is called by avalanchego -> VM when there is an incoming AppGossip from a peer -// error returned by this function is expected to be treated as fatal by the engine -// returns error if request could not be parsed as message.Request or when the requestHandler returns an error -func (n *network) AppGossip(_ context.Context, nodeID ids.NodeID, gossipBytes []byte) error { +// AppGossip is called by avalanchego -> VM when there is an incoming AppGossip +// from a peer. An error returned by this function is treated as fatal by the +// engine. 
+func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) error { var gossipMsg message.GossipMessage if _, err := n.codec.Unmarshal(gossipBytes, &gossipMsg); err != nil { - log.Trace("could not parse app gossip", "nodeID", nodeID, "gossipLen", len(gossipBytes), "err", err) - return nil + log.Debug("forwarding AppGossip to SDK network", "nodeID", nodeID, "gossipLen", len(gossipBytes), "err", err) + return n.p2pNetwork.AppGossip(ctx, nodeID, gossipBytes) } - log.Trace("processing AppGossip from node", "nodeID", nodeID, "msg", gossipMsg) + log.Debug("processing AppGossip from node", "nodeID", nodeID, "msg", gossipMsg) return gossipMsg.Handle(n.gossipHandler, nodeID) } // Connected adds the given nodeID to the peer list so that it can receive messages func (n *network) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { + log.Debug("adding new peer", "nodeID", nodeID) + n.lock.Lock() defer n.lock.Unlock() @@ -469,15 +472,17 @@ func (n *network) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion } if nodeID == n.self { + log.Debug("skipping registering self as peer") return nil } n.peers.Connected(nodeID, nodeVersion) - return n.network.Connected(ctx, nodeID, nodeVersion) + return n.p2pNetwork.Connected(ctx, nodeID, nodeVersion) } // Disconnected removes given [nodeID] from the peer list func (n *network) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + log.Debug("disconnecting peer", "nodeID", nodeID) n.lock.Lock() defer n.lock.Unlock() @@ -486,7 +491,7 @@ func (n *network) Disconnected(ctx context.Context, nodeID ids.NodeID) error { } n.peers.Disconnected(nodeID) - return n.network.Disconnected(ctx, nodeID) + return n.p2pNetwork.Disconnected(ctx, nodeID) } // Shutdown disconnects all peers @@ -539,8 +544,12 @@ func (n *network) TrackBandwidth(nodeID ids.NodeID, bandwidth float64) { n.peers.TrackBandwidth(nodeID, bandwidth) } -func (n *network) NewAppProtocol(protocol uint64, handler p2p.Handler, options ...p2p.ClientOption) (*p2p.Client, error) { - return n.network.NewAppProtocol(protocol, handler, options...) +func (n *network) NewClient(protocol uint64, options ...p2p.ClientOption) *p2p.Client { + return n.p2pNetwork.NewClient(protocol, options...) +} + +func (n *network) AddHandler(protocol uint64, handler p2p.Handler) error { + return n.p2pNetwork.AddHandler(protocol, handler) } // invariant: peer/network must use explicitly even request ids. 
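Reviewer note on the interface change above: NewAppProtocol previously reserved a protocol and returned a client in a single call; the SDK now splits this into AddHandler (server side, fallible) and NewClient (client side, infallible). A minimal sketch of the new calling pattern against the peer.Network interface in this patch — registerProtocol, protocolID, and handler are illustrative names, not part of the patch:

import (
	"github.com/ava-labs/avalanchego/network/p2p"

	"github.com/ava-labs/subnet-evm/peer"
)

// registerProtocol shows the two calls that replace the old
// network.NewAppProtocol(protocolID, handler).
func registerProtocol(network peer.Network, protocolID uint64, handler p2p.Handler) (*p2p.Client, error) {
	// Register the server-side handler first; this can fail (for example if
	// a handler is already registered for protocolID), so it keeps the error
	// return that the old combined call had.
	if err := network.AddHandler(protocolID, handler); err != nil {
		return nil, err
	}
	// NewClient only constructs the sender side and cannot fail in the new
	// API, matching its error-free signature above.
	return network.NewClient(protocolID), nil
}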
diff --git a/peer/network_test.go b/peer/network_test.go index 260d54b492..e8d61fff1f 100644 --- a/peer/network_test.go +++ b/peer/network_test.go @@ -57,7 +57,9 @@ var ( func TestNetworkDoesNotConnectToItself(t *testing.T) { selfNodeID := ids.GenerateTestNodeID() - n := NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), nil, nil, nil, selfNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + n := NewNetwork(p2pNetwork, nil, nil, nil, selfNodeID, 1, 1) assert.NoError(t, n.Connected(context.Background(), selfNodeID, defaultPeerVersion)) assert.EqualValues(t, 0, n.Size()) } @@ -93,7 +95,9 @@ func TestRequestAnyRequestsRoutingAndResponse(t *testing.T) { codecManager := buildCodec(t, HelloRequest{}, HelloResponse{}) crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) - net = NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 16, 16) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 16, 16) net.SetRequestHandler(&HelloGreetingRequestHandler{codec: codecManager}) client := NewNetworkClient(net) nodeID := ids.GenerateTestNodeID() @@ -145,7 +149,9 @@ func TestAppRequestOnCtxCancellation(t *testing.T) { }, } - net := NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net := NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) net.SetRequestHandler(&HelloGreetingRequestHandler{codec: codecManager}) requestMessage := HelloRequest{Message: "this is a request"} @@ -197,7 +203,9 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { codecManager := buildCodec(t, HelloRequest{}, HelloResponse{}) crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) - net = NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 16, 16) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 16, 16) net.SetRequestHandler(&HelloGreetingRequestHandler{codec: codecManager}) client := NewNetworkClient(net) @@ -251,7 +259,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { } // ensure empty nodeID is not allowed - _, err := client.SendAppRequest(context.Background(), ids.EmptyNodeID, []byte("hello there")) + _, err = client.SendAppRequest(context.Background(), ids.EmptyNodeID, []byte("hello there")) assert.Error(t, err) assert.Contains(t, err.Error(), "cannot send request to empty nodeID") } @@ -277,7 +285,9 @@ func TestAppRequestOnShutdown(t *testing.T) { codecManager := buildCodec(t, HelloRequest{}, HelloResponse{}) crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) - net = NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := 
p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) client := NewNetworkClient(net) nodeID := ids.GenerateTestNodeID() require.NoError(t, net.Connected(context.Background(), nodeID, defaultPeerVersion)) @@ -326,7 +336,9 @@ func TestAppRequestAnyOnCtxCancellation(t *testing.T) { }, } - net := NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net := NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) net.SetRequestHandler(&HelloGreetingRequestHandler{codec: codecManager}) assert.NoError(t, net.Connected( @@ -379,6 +391,7 @@ func TestRequestMinVersion(t *testing.T) { callNum := uint32(0) nodeID := ids.GenerateTestNodeID() codecManager := buildCodec(t, TestMessage{}) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) var net Network sender := testAppSender{ @@ -402,8 +415,9 @@ func TestRequestMinVersion(t *testing.T) { } // passing nil as codec works because the net.AppRequest is never called - crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) - net = NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 16) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 16) client := NewNetworkClient(net) requestMessage := TestMessage{Message: "this is a request"} requestBytes, err := message.RequestToBytes(codecManager, requestMessage) @@ -413,6 +427,7 @@ func TestRequestMinVersion(t *testing.T) { context.Background(), nodeID, &version.Application{ + Name: version.Client, Major: 1, Minor: 7, Patch: 1, @@ -424,13 +439,14 @@ func TestRequestMinVersion(t *testing.T) { responseBytes, _, err := client.SendAppRequestAny( context.Background(), &version.Application{ + Name: version.Client, Major: 2, Minor: 0, Patch: 0, }, requestBytes, ) - assert.Equal(t, err.Error(), "no peers found matching version avalanche/2.0.0 out of 1 peers") + assert.Equal(t, err.Error(), "no peers found matching version avalanchego/2.0.0 out of 1 peers") assert.Nil(t, responseBytes) // ensure version matches and the request goes through @@ -467,7 +483,9 @@ func TestOnRequestHonoursDeadline(t *testing.T) { processingDuration: 500 * time.Millisecond, } - net = NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) net.SetRequestHandler(requestHandler) nodeID := ids.GenerateTestNodeID() @@ -507,7 +525,9 @@ func TestGossip(t *testing.T) { } gossipHandler := &testGossipHandler{} - clientNetwork = NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, 
prometheus.NewRegistry(), "") + require.NoError(t, err) + clientNetwork = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) clientNetwork.SetGossipHandler(gossipHandler) assert.NoError(t, clientNetwork.Connected(context.Background(), nodeID, defaultPeerVersion)) @@ -534,7 +554,9 @@ func TestHandleInvalidMessages(t *testing.T) { requestID := uint32(1) sender := testAppSender{} - clientNetwork := NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + clientNetwork := NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) clientNetwork.SetGossipHandler(message.NoopMempoolGossipHandler{}) clientNetwork.SetRequestHandler(&testRequestHandler{}) @@ -583,7 +605,9 @@ func TestNetworkPropagatesRequestHandlerError(t *testing.T) { requestID := uint32(1) sender := testAppSender{} - clientNetwork := NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + clientNetwork := NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) clientNetwork.SetGossipHandler(message.NoopMempoolGossipHandler{}) clientNetwork.SetRequestHandler(&testRequestHandler{err: errors.New("fail")}) // Return an error from the request handler @@ -623,7 +647,9 @@ func TestCrossChainAppRequest(t *testing.T) { }, } - net = NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) net.SetCrossChainRequestHandler(&testCrossChainHandler{codec: crossChainCodecManager}) client := NewNetworkClient(net) @@ -658,7 +684,9 @@ func TestCrossChainAppRequestOnCtxCancellation(t *testing.T) { }, } - net := NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net := NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) net.SetCrossChainRequestHandler(&testCrossChainHandler{codec: crossChainCodecManager}) exampleCrossChainRequest := ExampleCrossChainRequest{ @@ -710,7 +738,9 @@ func TestCrossChainRequestRequestsRoutingAndResponse(t *testing.T) { codecManager := buildCodec(t, TestMessage{}) crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) - net = NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) net.SetCrossChainRequestHandler(&testCrossChainHandler{codec: crossChainCodecManager}) client := NewNetworkClient(net) @@ -770,7 +800,9 @@ func 
TestCrossChainRequestOnShutdown(t *testing.T) { } codecManager := buildCodec(t, TestMessage{}) crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) - net = NewNetwork(p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), ""), sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) client := NewNetworkClient(net) exampleCrossChainRequest := ExampleCrossChainRequest{ @@ -823,9 +855,9 @@ func TestNetworkRouting(t *testing.T) { } protocol := 0 handler := &testSDKHandler{} - p2pNetwork := p2p.NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") - _, err := p2pNetwork.NewAppProtocol(uint64(protocol), handler) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") require.NoError(err) + require.NoError(p2pNetwork.AddHandler(uint64(protocol), handler)) networkCodec := codec.NewManager(0) crossChainCodec := codec.NewManager(0) @@ -849,7 +881,7 @@ func TestNetworkRouting(t *testing.T) { err = network.AppResponse(context.Background(), ids.GenerateTestNodeID(), 0, foobar) require.ErrorIs(err, p2p.ErrUnrequestedResponse) - err = network.AppRequestFailed(context.Background(), nodeID, 0) + err = network.AppRequestFailed(context.Background(), nodeID, 0, common.ErrTimeout) require.ErrorIs(err, p2p.ErrUnrequestedResponse) } @@ -978,7 +1010,7 @@ type HelloGossip struct { } func (h HelloGossip) Handle(handler message.GossipHandler, nodeID ids.NodeID) error { - return handler.HandleTxs(nodeID, message.TxsGossip{}) + return handler.HandleEthTxs(nodeID, message.EthTxsGossip{}) } func (h HelloGossip) String() string { @@ -995,7 +1027,7 @@ type testGossipHandler struct { nodeID ids.NodeID } -func (t *testGossipHandler) HandleTxs(nodeID ids.NodeID, msg message.TxsGossip) error { +func (t *testGossipHandler) HandleEthTxs(nodeID ids.NodeID, msg message.EthTxsGossip) error { t.received = true t.nodeID = nodeID return nil diff --git a/plugin/evm/gossip.go b/plugin/evm/gossip.go new file mode 100644 index 0000000000..41cea7df1a --- /dev/null +++ b/plugin/evm/gossip.go @@ -0,0 +1,177 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package evm + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ethereum/go-ethereum/log" + + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" + + "github.com/ava-labs/subnet-evm/core" + "github.com/ava-labs/subnet-evm/core/txpool" + "github.com/ava-labs/subnet-evm/core/types" +) + +var ( + _ p2p.Handler = (*txGossipHandler)(nil) + + _ gossip.Gossipable = (*GossipEthTx)(nil) + _ gossip.Marshaller[*GossipEthTx] = (*GossipEthTxMarshaller)(nil) + _ gossip.Set[*GossipEthTx] = (*GossipEthTxPool)(nil) +) + +func newTxGossipHandler[T gossip.Gossipable]( + log logging.Logger, + marshaller gossip.Marshaller[T], + mempool gossip.Set[T], + metrics gossip.Metrics, + maxMessageSize int, + throttlingPeriod time.Duration, + throttlingLimit int, + validators *p2p.Validators, +) txGossipHandler { + // push gossip messages can be handled from any peer + handler := gossip.NewHandler[T]( + log, + marshaller, + // Don't forward gossip to avoid double-forwarding + gossip.NoOpAccumulator[T]{}, + mempool, + metrics, + maxMessageSize, + ) + + // pull gossip requests are filtered by validators and are throttled + // to prevent spamming + validatorHandler := p2p.NewValidatorHandler( + p2p.NewThrottlerHandler( + handler, + p2p.NewSlidingWindowThrottler(throttlingPeriod, throttlingLimit), + log, + ), + validators, + log, + ) + + return txGossipHandler{ + appGossipHandler: handler, + appRequestHandler: validatorHandler, + } +} + +type txGossipHandler struct { + p2p.NoOpHandler + appGossipHandler p2p.Handler + appRequestHandler p2p.Handler +} + +func (t txGossipHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { + t.appGossipHandler.AppGossip(ctx, nodeID, gossipBytes) +} + +func NewGossipEthTxPool(mempool *txpool.TxPool) (*GossipEthTxPool, error) { + bloom, err := gossip.NewBloomFilter(txGossipBloomMaxItems, txGossipBloomFalsePositiveRate) + if err != nil { + return nil, fmt.Errorf("failed to initialize bloom filter: %w", err) + } + + return &GossipEthTxPool{ + mempool: mempool, + pendingTxs: make(chan core.NewTxsEvent), + bloom: bloom, + }, nil +} + +type GossipEthTxPool struct { + mempool *txpool.TxPool + pendingTxs chan core.NewTxsEvent + + bloom *gossip.BloomFilter + lock sync.RWMutex +} + +func (g *GossipEthTxPool) Subscribe(ctx context.Context) { + g.mempool.SubscribeNewTxsEvent(g.pendingTxs) + + for { + select { + case <-ctx.Done(): + log.Debug("shutting down subscription") + return + case pendingTxs := <-g.pendingTxs: + g.lock.Lock() + for _, pendingTx := range pendingTxs.Txs { + tx := &GossipEthTx{Tx: pendingTx} + g.bloom.Add(tx) + reset, err := gossip.ResetBloomFilterIfNeeded(g.bloom, txGossipMaxFalsePositiveRate) + if err != nil { + log.Error("failed to reset bloom filter", "err", err) + continue + } + + if reset { + log.Debug("resetting bloom filter", "reason", "reached max filled ratio") + + g.mempool.IteratePending(func(tx *types.Transaction) bool { + g.bloom.Add(&GossipEthTx{Tx: pendingTx}) + return true + }) + } + } + g.lock.Unlock() + } + } +} + +// Add enqueues the transaction to the mempool. Subscribe should be called +// to receive an event if tx is actually added to the mempool or not. 
+func (g *GossipEthTxPool) Add(tx *GossipEthTx) error { + return g.mempool.AddRemotes([]*types.Transaction{tx.Tx})[0] +} + +func (g *GossipEthTxPool) Iterate(f func(tx *GossipEthTx) bool) { + g.mempool.IteratePending(func(tx *types.Transaction) bool { + return f(&GossipEthTx{Tx: tx}) + }) +} + +func (g *GossipEthTxPool) GetFilter() ([]byte, []byte, error) { + g.lock.RLock() + defer g.lock.RUnlock() + + bloom, err := g.bloom.Bloom.MarshalBinary() + salt := g.bloom.Salt + + return bloom, salt[:], err +} + +type GossipEthTxMarshaller struct{} + +func (g GossipEthTxMarshaller) MarshalGossip(tx *GossipEthTx) ([]byte, error) { + return tx.Tx.MarshalBinary() +} + +func (g GossipEthTxMarshaller) UnmarshalGossip(bytes []byte) (*GossipEthTx, error) { + tx := &GossipEthTx{ + Tx: &types.Transaction{}, + } + + return tx, tx.Tx.UnmarshalBinary(bytes) +} + +type GossipEthTx struct { + Tx *types.Transaction +} + +func (tx *GossipEthTx) GossipID() ids.ID { + return ids.ID(tx.Tx.Hash()) +} diff --git a/plugin/evm/gossip_mempool.go b/plugin/evm/gossip_mempool.go deleted file mode 100644 index 49c649e2bd..0000000000 --- a/plugin/evm/gossip_mempool.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "context" - "fmt" - "sync" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ethereum/go-ethereum/log" - - "github.com/ava-labs/avalanchego/network/p2p/gossip" - - "github.com/ava-labs/subnet-evm/core" - "github.com/ava-labs/subnet-evm/core/txpool" - "github.com/ava-labs/subnet-evm/core/types" -) - -var ( - _ gossip.Gossipable = (*GossipTx)(nil) - _ gossip.Set[*GossipTx] = (*GossipTxPool)(nil) -) - -func NewGossipTxPool(mempool *txpool.TxPool) (*GossipTxPool, error) { - bloom, err := gossip.NewBloomFilter(txGossipBloomMaxItems, txGossipBloomFalsePositiveRate) - if err != nil { - return nil, fmt.Errorf("failed to initialize bloom filter: %w", err) - } - - return &GossipTxPool{ - mempool: mempool, - pendingTxs: make(chan core.NewTxsEvent), - bloom: bloom, - }, nil -} - -type GossipTxPool struct { - mempool *txpool.TxPool - pendingTxs chan core.NewTxsEvent - - bloom *gossip.BloomFilter - lock sync.RWMutex -} - -func (g *GossipTxPool) Subscribe(ctx context.Context) { - g.mempool.SubscribeNewTxsEvent(g.pendingTxs) - - for { - select { - case <-ctx.Done(): - log.Debug("shutting down subscription") - return - case pendingTxs := <-g.pendingTxs: - g.lock.Lock() - for _, pendingTx := range pendingTxs.Txs { - tx := &GossipTx{Tx: pendingTx} - g.bloom.Add(tx) - reset, err := gossip.ResetBloomFilterIfNeeded(g.bloom, txGossipMaxFalsePositiveRate) - if err != nil { - log.Error("failed to reset bloom filter", "err", err) - continue - } - - if reset { - log.Debug("resetting bloom filter", "reason", "reached max filled ratio") - - g.mempool.IteratePending(func(tx *types.Transaction) bool { - g.bloom.Add(&GossipTx{Tx: pendingTx}) - return true - }) - } - } - g.lock.Unlock() - } - } -} - -// Add enqueues the transaction to the mempool. Subscribe should be called -// to receive an event if tx is actually added to the mempool or not. 
-func (g *GossipTxPool) Add(tx *GossipTx) error { - return g.mempool.AddRemotes([]*types.Transaction{tx.Tx})[0] -} - -func (g *GossipTxPool) Iterate(f func(tx *GossipTx) bool) { - g.mempool.IteratePending(func(tx *types.Transaction) bool { - return f(&GossipTx{Tx: tx}) - }) -} - -func (g *GossipTxPool) GetFilter() ([]byte, []byte, error) { - g.lock.RLock() - defer g.lock.RUnlock() - - bloom, err := g.bloom.Bloom.MarshalBinary() - salt := g.bloom.Salt - - return bloom, salt[:], err -} - -type GossipTx struct { - Tx *types.Transaction -} - -func (tx *GossipTx) GetID() ids.ID { - return ids.ID(tx.Tx.Hash()) -} - -func (tx *GossipTx) Marshal() ([]byte, error) { - return tx.Tx.MarshalBinary() -} - -func (tx *GossipTx) Unmarshal(bytes []byte) error { - tx.Tx = &types.Transaction{} - return tx.Tx.UnmarshalBinary(bytes) -} diff --git a/plugin/evm/gossip_test.go b/plugin/evm/gossip_test.go new file mode 100644 index 0000000000..153507c19b --- /dev/null +++ b/plugin/evm/gossip_test.go @@ -0,0 +1,26 @@ +// (c) 2019-2021, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "testing" + + "github.com/ava-labs/subnet-evm/core/types" + "github.com/stretchr/testify/require" +) + +func TestGossipEthTxMarshaller(t *testing.T) { + require := require.New(t) + + blobTx := &types.BlobTx{} + want := &GossipEthTx{Tx: types.NewTx(blobTx)} + marshaller := GossipEthTxMarshaller{} + + bytes, err := marshaller.MarshalGossip(want) + require.NoError(err) + + got, err := marshaller.UnmarshalGossip(bytes) + require.NoError(err) + require.Equal(want.GossipID(), got.GossipID()) +} diff --git a/plugin/evm/gossiper.go b/plugin/evm/gossiper.go index 8f179d0ce5..578ed1d3ad 100644 --- a/plugin/evm/gossiper.go +++ b/plugin/evm/gossiper.go @@ -4,11 +4,13 @@ package evm import ( + "context" "math/big" "sync" "time" "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/subnet-evm/peer" @@ -32,9 +34,9 @@ const ( // in the cache, not entire transactions. recentCacheSize = 512 - // [txsGossipInterval] is how often we attempt to gossip newly seen + // [ethTxsGossipInterval] is how often we attempt to gossip newly seen // transactions to other nodes. - txsGossipInterval = 500 * time.Millisecond + ethTxsGossipInterval = 500 * time.Millisecond // [minGossipBatchInterval] is the minimum amount of time that must pass // before our last gossip to peers. @@ -43,8 +45,8 @@ const ( // Gossiper handles outgoing gossip of transactions type Gossiper interface { - // GossipTxs sends AppGossip message containing the given [txs] - GossipTxs(txs []*types.Transaction) error + // GossipEthTxs sends AppGossip message containing the given [txs] + GossipEthTxs(txs []*types.Transaction) error } // pushGossiper is used to gossip transactions to the network @@ -52,21 +54,22 @@ type pushGossiper struct { ctx *snow.Context config Config - client peer.NetworkClient - blockchain *core.BlockChain - txPool *txpool.TxPool + client peer.NetworkClient + blockchain *core.BlockChain + txPool *txpool.TxPool + ethTxGossiper gossip.Accumulator[*GossipEthTx] // We attempt to batch transactions we need to gossip to avoid runaway // amplification of mempol chatter. 
- txsToGossipChan chan []*types.Transaction - txsToGossip map[common.Hash]*types.Transaction - lastGossiped time.Time - shutdownChan chan struct{} - shutdownWg *sync.WaitGroup + ethTxsToGossipChan chan []*types.Transaction + ethTxsToGossip map[common.Hash]*types.Transaction + lastGossiped time.Time + shutdownChan chan struct{} + shutdownWg *sync.WaitGroup // [recentTxs] prevent us from over-gossiping the // same transaction in a short period of time. - recentTxs *cache.LRU[common.Hash, interface{}] + recentEthTxs *cache.LRU[common.Hash, interface{}] codec codec.Manager signer types.Signer @@ -75,22 +78,25 @@ type pushGossiper struct { // createGossiper constructs and returns a pushGossiper or noopGossiper // based on whether vm.chainConfig.SubnetEVMTimestamp is set -func (vm *VM) createGossiper(stats GossipStats) Gossiper { +func (vm *VM) createGossiper(stats GossipStats, ethTxGossiper gossip.Accumulator[*GossipEthTx], +) Gossiper { net := &pushGossiper{ - ctx: vm.ctx, - config: vm.config, - client: vm.client, - blockchain: vm.blockChain, - txPool: vm.txPool, - txsToGossipChan: make(chan []*types.Transaction), - txsToGossip: make(map[common.Hash]*types.Transaction), - shutdownChan: vm.shutdownChan, - shutdownWg: &vm.shutdownWg, - recentTxs: &cache.LRU[common.Hash, interface{}]{Size: recentCacheSize}, - codec: vm.networkCodec, - signer: types.LatestSigner(vm.blockChain.Config()), - stats: stats, + ctx: vm.ctx, + config: vm.config, + client: vm.client, + blockchain: vm.blockChain, + txPool: vm.txPool, + ethTxsToGossipChan: make(chan []*types.Transaction), + ethTxsToGossip: make(map[common.Hash]*types.Transaction), + shutdownChan: vm.shutdownChan, + shutdownWg: &vm.shutdownWg, + recentEthTxs: &cache.LRU[common.Hash, interface{}]{Size: recentCacheSize}, + codec: vm.networkCodec, + signer: types.LatestSigner(vm.blockChain.Config()), + stats: stats, + ethTxGossiper: ethTxGossiper, } + net.awaitEthTxGossip() return net } @@ -231,12 +237,12 @@ func (n *pushGossiper) queuePriorityRegossipTxs() types.Transactions { } // awaitEthTxGossip periodically gossips transactions that have been queued for -// gossip at least once every [txsGossipInterval]. +// gossip at least once every [ethTxsGossipInterval]. 
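awaitEthTxGossip below now flushes two paths on each tick: the legacy codec-based batches via gossipEthTxs, and the SDK accumulator via ethTxGossiper.Gossip. A condensed sketch of that shape, with a cancellable context in place of the context.TODO() used in the diff; the helper is hypothetical:

    // flushBothPaths sends whatever is queued on both gossip paths.
    func flushBothPaths(ctx context.Context, n *pushGossiper) {
        // Legacy path: size-capped EthTxsGossip messages over the app codec.
        if attempted, err := n.gossipEthTxs(false); err != nil {
            log.Warn("failed to send eth transactions", "len(txs)", attempted, "err", err)
        }
        // SDK path: push transactions accumulated for p2p gossip.
        if err := n.ethTxGossiper.Gossip(ctx); err != nil {
            log.Warn("failed to push eth transactions", "err", err)
        }
    }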
func (n *pushGossiper) awaitEthTxGossip() { n.shutdownWg.Add(1) go n.ctx.Log.RecoverAndPanic(func() { var ( - gossipTicker = time.NewTicker(txsGossipInterval) + gossipTicker = time.NewTicker(ethTxsGossipInterval) regossipTicker = time.NewTicker(n.config.RegossipFrequency.Duration) priorityRegossipTicker = time.NewTicker(n.config.PriorityRegossipFrequency.Duration) ) @@ -250,18 +256,24 @@ func (n *pushGossiper) awaitEthTxGossip() { for { select { case <-gossipTicker.C: - if attempted, err := n.gossipTxs(false); err != nil { + if attempted, err := n.gossipEthTxs(false); err != nil { log.Warn( "failed to send eth transactions", "len(txs)", attempted, "err", err, ) } + if err := n.ethTxGossiper.Gossip(context.TODO()); err != nil { + log.Warn( + "failed to send eth transactions", + "err", err, + ) + } case <-regossipTicker.C: for _, tx := range n.queueRegossipTxs() { - n.txsToGossip[tx.Hash()] = tx + n.ethTxsToGossip[tx.Hash()] = tx } - if attempted, err := n.gossipTxs(true); err != nil { + if attempted, err := n.gossipEthTxs(true); err != nil { log.Warn( "failed to regossip eth transactions", "len(txs)", attempted, @@ -270,26 +282,41 @@ func (n *pushGossiper) awaitEthTxGossip() { } case <-priorityRegossipTicker.C: for _, tx := range n.queuePriorityRegossipTxs() { - n.txsToGossip[tx.Hash()] = tx + n.ethTxsToGossip[tx.Hash()] = tx } - if attempted, err := n.gossipTxs(true); err != nil { + if attempted, err := n.gossipEthTxs(true); err != nil { log.Warn( "failed to regossip priority eth transactions", "len(txs)", attempted, "err", err, ) } - case txs := <-n.txsToGossipChan: + case txs := <-n.ethTxsToGossipChan: for _, tx := range txs { - n.txsToGossip[tx.Hash()] = tx + n.ethTxsToGossip[tx.Hash()] = tx } - if attempted, err := n.gossipTxs(false); err != nil { + if attempted, err := n.gossipEthTxs(false); err != nil { log.Warn( "failed to send eth transactions", "len(txs)", attempted, "err", err, ) } + + gossipTxs := make([]*GossipEthTx, 0, len(txs)) + for _, tx := range txs { + gossipTxs = append(gossipTxs, &GossipEthTx{Tx: tx}) + } + + n.ethTxGossiper.Add(gossipTxs...) 
+ if err := n.ethTxGossiper.Gossip(context.TODO()); err != nil { + log.Warn( + "failed to send eth transactions", + "len(txs)", len(txs), + "err", err, + ) + } + case <-n.shutdownChan: return } @@ -297,7 +324,7 @@ func (n *pushGossiper) awaitEthTxGossip() { }) } -func (n *pushGossiper) sendTxs(txs []*types.Transaction) error { +func (n *pushGossiper) sendEthTxs(txs []*types.Transaction) error { if len(txs) == 0 { return nil } @@ -306,7 +333,7 @@ func (n *pushGossiper) sendTxs(txs []*types.Transaction) error { if err != nil { return err } - msg := message.TxsGossip{ + msg := message.EthTxsGossip{ Txs: txBytes, } msgBytes, err := message.BuildGossipMessage(n.codec, msg) @@ -323,15 +350,15 @@ func (n *pushGossiper) sendTxs(txs []*types.Transaction) error { return n.client.Gossip(msgBytes) } -func (n *pushGossiper) gossipTxs(force bool) (int, error) { - if (!force && time.Since(n.lastGossiped) < minGossipBatchInterval) || len(n.txsToGossip) == 0 { +func (n *pushGossiper) gossipEthTxs(force bool) (int, error) { + if (!force && time.Since(n.lastGossiped) < minGossipBatchInterval) || len(n.ethTxsToGossip) == 0 { return 0, nil } n.lastGossiped = time.Now() - txs := make([]*types.Transaction, 0, len(n.txsToGossip)) - for txHash, tx := range n.txsToGossip { + txs := make([]*types.Transaction, 0, len(n.ethTxsToGossip)) + for _, tx := range n.ethTxsToGossip { txs = append(txs, tx) - delete(n.txsToGossip, txHash) + delete(n.ethTxsToGossip, tx.Hash()) } selectedTxs := make([]*types.Transaction, 0) @@ -349,11 +376,11 @@ func (n *pushGossiper) gossipTxs(force bool) (int, error) { // We check [force] outside of the if statement to avoid an unnecessary // cache lookup. if !force { - if _, has := n.recentTxs.Get(txHash); has { + if _, has := n.recentEthTxs.Get(txHash); has { continue } } - n.recentTxs.Put(txHash, nil) + n.recentEthTxs.Put(txHash, nil) selectedTxs = append(selectedTxs, tx) } @@ -367,8 +394,8 @@ func (n *pushGossiper) gossipTxs(force bool) (int, error) { msgTxsSize := uint64(0) for _, tx := range selectedTxs { size := tx.Size() - if msgTxsSize+size > message.TxMsgSoftCapSize { - if err := n.sendTxs(msgTxs); err != nil { + if msgTxsSize+size > message.EthMsgSoftCapSize { + if err := n.sendEthTxs(msgTxs); err != nil { return len(selectedTxs), err } msgTxs = msgTxs[:0] @@ -379,18 +406,18 @@ func (n *pushGossiper) gossipTxs(force bool) (int, error) { } // Send any remaining [msgTxs] - return len(selectedTxs), n.sendTxs(msgTxs) + return len(selectedTxs), n.sendEthTxs(msgTxs) } -// GossipTxs enqueues the provided [txs] for gossiping. At some point, the +// GossipEthTxs enqueues the provided [txs] for gossiping. At some point, the // [pushGossiper] will attempt to gossip the provided txs to other nodes // (usually right away if not under load). // // NOTE: We never return a non-nil error from this function but retain the // option to do so in case it becomes useful. 
-func (n *pushGossiper) GossipTxs(txs []*types.Transaction) error { +func (n *pushGossiper) GossipEthTxs(txs []*types.Transaction) error { select { - case n.txsToGossipChan <- txs: + case n.ethTxsToGossipChan <- txs: case <-n.shutdownChan: } return nil @@ -411,16 +438,16 @@ func NewGossipHandler(vm *VM, stats GossipReceivedStats) *GossipHandler { } } -func (h *GossipHandler) HandleTxs(nodeID ids.NodeID, msg message.TxsGossip) error { +func (h *GossipHandler) HandleEthTxs(nodeID ids.NodeID, msg message.EthTxsGossip) error { log.Trace( - "AppGossip called with TxsGossip", + "AppGossip called with EthTxsGossip", "peerID", nodeID, "size(txs)", len(msg.Txs), ) if len(msg.Txs) == 0 { log.Trace( - "AppGossip received empty TxsGossip Message", + "AppGossip received empty EthTxsGossip Message", "peerID", nodeID, ) return nil diff --git a/plugin/evm/gossipper_test.go b/plugin/evm/gossipper_test.go index c2978db73f..d18163f998 100644 --- a/plugin/evm/gossipper_test.go +++ b/plugin/evm/gossipper_test.go @@ -49,7 +49,7 @@ func fundAddressByGenesis(addrs []common.Address) (string, error) { return string(bytes), err } -func getValidTxs(key *ecdsa.PrivateKey, count int, gasPrice *big.Int) []*types.Transaction { +func getValidEthTxs(key *ecdsa.PrivateKey, count int, gasPrice *big.Int) []*types.Transaction { res := make([]*types.Transaction, count) to := common.Address{} @@ -73,10 +73,10 @@ func getValidTxs(key *ecdsa.PrivateKey, count int, gasPrice *big.Int) []*types.T } // show that locally issued eth txs are gossiped -// Note: channel through which subnet-evm mempool push txs to vm is injected here +// Note: channel through which coreth mempool push txs to vm is injected here // to ease up UT, which target only VM behaviors in response to subnet-evm mempool // signals -func TestMempoolTxsAddedTxsGossipedAfterActivation(t *testing.T) { +func TestMempoolEthTxsAddedTxsGossipedAfterActivation(t *testing.T) { if os.Getenv("RUN_FLAKY_TESTS") != "true" { t.Skip("FLAKY") } @@ -86,10 +86,11 @@ func TestMempoolTxsAddedTxsGossipedAfterActivation(t *testing.T) { assert.NoError(err) addr := crypto.PubkeyToAddress(key.PublicKey) - cfgJson, err := fundAddressByGenesis([]common.Address{addr}) + + genesisJSON, err := fundAddressByGenesis([]common.Address{addr}) assert.NoError(err) - _, vm, _, sender := GenesisVM(t, true, cfgJson, "", "") + _, vm, _, sender := GenesisVM(t, true, genesisJSON, "", "") defer func() { err := vm.Shutdown(context.Background()) assert.NoError(err) @@ -98,20 +99,19 @@ func TestMempoolTxsAddedTxsGossipedAfterActivation(t *testing.T) { vm.txPool.SetMinFee(common.Big0) // create eth txes - ethTxs := getValidTxs(key, 3, common.Big1) + ethTxs := getValidEthTxs(key, 3, common.Big1) var wg sync.WaitGroup - var wg2 sync.WaitGroup wg.Add(2) - wg2.Add(1) sender.CantSendAppGossip = false + signal1 := make(chan struct{}) seen := 0 sender.SendAppGossipF = func(_ context.Context, gossipedBytes []byte) error { if seen == 0 { notifyMsgIntf, err := message.ParseGossipMessage(vm.networkCodec, gossipedBytes) assert.NoError(err) - requestMsg, ok := notifyMsgIntf.(message.TxsGossip) + requestMsg, ok := notifyMsgIntf.(message.EthTxsGossip) assert.True(ok) assert.NotEmpty(requestMsg.Txs) @@ -123,12 +123,12 @@ func TestMempoolTxsAddedTxsGossipedAfterActivation(t *testing.T) { []common.Hash{txs[0].Hash(), txs[1].Hash()}, ) seen++ - wg2.Done() + close(signal1) } else if seen == 1 { notifyMsgIntf, err := message.ParseGossipMessage(vm.networkCodec, gossipedBytes) assert.NoError(err) - requestMsg, ok := 
notifyMsgIntf.(message.TxsGossip) + requestMsg, ok := notifyMsgIntf.(message.EthTxsGossip) assert.True(ok) assert.NotEmpty(requestMsg.Txs) @@ -152,19 +152,19 @@ func TestMempoolTxsAddedTxsGossipedAfterActivation(t *testing.T) { } // Gossip txs again (shouldn't gossip hashes) - attemptAwait(t, &wg2, 5*time.Second) // wait until reorg processed - assert.NoError(vm.gossiper.GossipTxs(ethTxs[:2])) + <-signal1 // wait until reorg processed + assert.NoError(vm.gossiper.GossipEthTxs(ethTxs[:2])) errs = vm.txPool.AddRemotesSync(ethTxs) assert.Contains(errs[0].Error(), "already known") assert.Contains(errs[1].Error(), "already known") - assert.NoError(errs[2], "failed adding subnet-evm tx to mempool") + assert.NoError(errs[2], "failed adding coreth tx to mempool") attemptAwait(t, &wg, 5*time.Second) } // show that locally issued eth txs are chunked correctly -func TestMempoolTxsAddedTxsGossipedAfterActivationChunking(t *testing.T) { +func TestMempoolEthTxsAddedTxsGossipedAfterActivationChunking(t *testing.T) { if os.Getenv("RUN_FLAKY_TESTS") != "true" { t.Skip("FLAKY") } @@ -175,10 +175,10 @@ func TestMempoolTxsAddedTxsGossipedAfterActivationChunking(t *testing.T) { addr := crypto.PubkeyToAddress(key.PublicKey) - cfgJson, err := fundAddressByGenesis([]common.Address{addr}) + genesisJSON, err := fundAddressByGenesis([]common.Address{addr}) assert.NoError(err) - _, vm, _, sender := GenesisVM(t, true, cfgJson, "", "") + _, vm, _, sender := GenesisVM(t, true, genesisJSON, "", "") defer func() { err := vm.Shutdown(context.Background()) assert.NoError(err) @@ -187,7 +187,7 @@ func TestMempoolTxsAddedTxsGossipedAfterActivationChunking(t *testing.T) { vm.txPool.SetMinFee(common.Big0) // create eth txes - txs := getValidTxs(key, 100, common.Big1) + ethTxs := getValidEthTxs(key, 100, common.Big1) var wg sync.WaitGroup wg.Add(2) @@ -197,7 +197,7 @@ func TestMempoolTxsAddedTxsGossipedAfterActivationChunking(t *testing.T) { notifyMsgIntf, err := message.ParseGossipMessage(vm.networkCodec, gossipedBytes) assert.NoError(err) - requestMsg, ok := notifyMsgIntf.(message.TxsGossip) + requestMsg, ok := notifyMsgIntf.(message.EthTxsGossip) assert.True(ok) assert.NotEmpty(requestMsg.Txs) @@ -211,14 +211,14 @@ func TestMempoolTxsAddedTxsGossipedAfterActivationChunking(t *testing.T) { } // Notify VM about eth txs - errs := vm.txPool.AddRemotesSync(txs) + errs := vm.txPool.AddRemotesSync(ethTxs) for _, err := range errs { assert.NoError(err, "failed adding subnet-evm tx to mempool") } attemptAwait(t, &wg, 5*time.Second) - for _, tx := range txs { + for _, tx := range ethTxs { _, ok := seen[tx.Hash()] assert.True(ok, "missing hash: %v", tx.Hash()) } @@ -226,7 +226,7 @@ func TestMempoolTxsAddedTxsGossipedAfterActivationChunking(t *testing.T) { // show that a geth tx discovered from gossip is requested to the same node that // gossiped it -func TestMempoolTxsAppGossipHandling(t *testing.T) { +func TestMempoolEthTxsAppGossipHandling(t *testing.T) { if os.Getenv("RUN_FLAKY_TESTS") != "true" { t.Skip("FLAKY") } @@ -237,10 +237,10 @@ func TestMempoolTxsAppGossipHandling(t *testing.T) { addr := crypto.PubkeyToAddress(key.PublicKey) - cfgJson, err := fundAddressByGenesis([]common.Address{addr}) + genesisJSON, err := fundAddressByGenesis([]common.Address{addr}) assert.NoError(err) - _, vm, _, sender := GenesisVM(t, true, cfgJson, "", "") + _, vm, _, sender := GenesisVM(t, true, genesisJSON, "", "") defer func() { err := vm.Shutdown(context.Background()) assert.NoError(err) @@ -264,12 +264,12 @@ func TestMempoolTxsAppGossipHandling(t 
*testing.T) { } // prepare a tx - tx := getValidTxs(key, 1, common.Big1)[0] + tx := getValidEthTxs(key, 1, common.Big1)[0] // show that unknown subnet-evm hashes is requested txBytes, err := rlp.EncodeToBytes([]*types.Transaction{tx}) assert.NoError(err) - msg := message.TxsGossip{ + msg := message.EthTxsGossip{ Txs: txBytes, } msgBytes, err := message.BuildGossipMessage(vm.networkCodec, msg) @@ -284,7 +284,7 @@ func TestMempoolTxsAppGossipHandling(t *testing.T) { attemptAwait(t, &wg, 5*time.Second) } -func TestMempoolTxsRegossipSingleAccount(t *testing.T) { +func TestMempoolEthTxsRegossipSingleAccount(t *testing.T) { assert := assert.New(t) key, err := crypto.GenerateKey() @@ -304,10 +304,10 @@ func TestMempoolTxsRegossipSingleAccount(t *testing.T) { vm.txPool.SetMinFee(common.Big0) // create eth txes - txs := getValidTxs(key, 10, big.NewInt(226*params.GWei)) + ethTxs := getValidEthTxs(key, 10, big.NewInt(226*params.GWei)) // Notify VM about eth txs - errs := vm.txPool.AddRemotesSync(txs) + errs := vm.txPool.AddRemotesSync(ethTxs) for _, err := range errs { assert.NoError(err, "failed adding subnet-evm tx to remote mempool") } @@ -317,10 +317,10 @@ func TestMempoolTxsRegossipSingleAccount(t *testing.T) { pushNetwork := vm.gossiper.(*pushGossiper) queued := pushNetwork.queueRegossipTxs() assert.Len(queued, 1, "unexpected length of queued txs") - assert.Equal(txs[0].Hash(), queued[0].Hash()) + assert.Equal(ethTxs[0].Hash(), queued[0].Hash()) } -func TestMempoolTxsRegossip(t *testing.T) { +func TestMempoolEthTxsRegossip(t *testing.T) { assert := assert.New(t) keys := make([]*ecdsa.PrivateKey, 20) @@ -332,10 +332,10 @@ func TestMempoolTxsRegossip(t *testing.T) { addrs[i] = crypto.PubkeyToAddress(key.PublicKey) } - cfgJson, err := fundAddressByGenesis(addrs) + genesisJSON, err := fundAddressByGenesis(addrs) assert.NoError(err) - _, vm, _, _ := GenesisVM(t, true, cfgJson, `{"local-txs-enabled":true}`, "") + _, vm, _, _ := GenesisVM(t, true, genesisJSON, `{"local-txs-enabled":true}`, "") defer func() { err := vm.Shutdown(context.Background()) assert.NoError(err) @@ -347,7 +347,7 @@ func TestMempoolTxsRegossip(t *testing.T) { ethTxs := make([]*types.Transaction, 20) ethTxHashes := make([]common.Hash, 20) for i := 0; i < 20; i++ { - txs := getValidTxs(keys[i], 1, big.NewInt(226*params.GWei)) + txs := getValidEthTxs(keys[i], 1, big.NewInt(226*params.GWei)) tx := txs[0] ethTxs[i] = tx ethTxHashes[i] = tx.Hash() @@ -407,8 +407,8 @@ func TestMempoolTxsPriorityRegossip(t *testing.T) { vm.txPool.SetMinFee(common.Big0) // create eth txes - txs := getValidTxs(key, 10, big.NewInt(226*params.GWei)) - txs2 := getValidTxs(key2, 10, big.NewInt(226*params.GWei)) + txs := getValidEthTxs(key, 10, big.NewInt(226*params.GWei)) + txs2 := getValidEthTxs(key2, 10, big.NewInt(226*params.GWei)) // Notify VM about eth txs for _, err := range vm.txPool.AddRemotesSync(txs) { diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index a51b3e22e1..09c8155c6b 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -87,7 +87,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { reqCount++ // Fail all requests after number 50 to interrupt the sync if reqCount > 50 { - if err := syncerVM.AppRequestFailed(context.Background(), nodeID, requestID); err != nil { + if err := syncerVM.AppRequestFailed(context.Background(), nodeID, requestID, commonEng.ErrTimeout); err != nil { panic(err) } cancel := syncerVM.StateSyncClient.(*stateSyncerClient).cancel diff --git 
a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index b4815359a3..cf0df24779 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -5,20 +5,24 @@ package evm import ( "context" + "encoding/binary" "math/big" "sync" "testing" "time" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/coreth/params" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" @@ -27,23 +31,57 @@ import ( "github.com/ava-labs/subnet-evm/core/types" ) -func TestTxGossip(t *testing.T) { +func TestEthTxGossip(t *testing.T) { require := require.New(t) + ctx := context.Background() + snowCtx := snow.DefaultContextTest() + validatorState := &validators.TestState{} + snowCtx.ValidatorState = validatorState + + responseSender := &common.FakeSender{ + SentAppResponse: make(chan []byte, 1), + } + vm := &VM{ + p2pSender: responseSender, + } + + require.NoError(vm.Initialize( + ctx, + snowCtx, + memdb.New(), + []byte(genesisJSONLatest), + nil, + nil, + make(chan common.Message), + nil, + &common.SenderTest{}, + )) + require.NoError(vm.SetState(ctx, snow.NormalOp)) - // set up prefunded address - _, vm, _, sender := GenesisVM(t, true, genesisJSONLatest, "", "") defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(ctx)) }() // sender for the peer requesting gossip from [vm] - peerSender := &common.SenderTest{} - router := p2p.NewNetwork(logging.NoLog{}, peerSender, prometheus.NewRegistry(), "") + peerSender := &common.FakeSender{ + SentAppRequest: make(chan []byte, 1), + } - // we're only making client requests, so we don't need a server handler - client, err := router.NewAppProtocol(txGossipProtocol, nil) + network, err := p2p.NewNetwork(logging.NoLog{}, peerSender, prometheus.NewRegistry(), "") require.NoError(err) + client := network.NewClient(ethTxGossipProtocol) + // we only accept gossip requests from validators + requestingNodeID := ids.GenerateTestNodeID() + require.NoError(vm.Network.Connected(ctx, requestingNodeID, nil)) + validatorState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return 0, nil + } + validatorState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{requestingNodeID: nil}, nil + } + + // Ask the VM for any new transactions. We should get nothing at first. 
emptyBloomFilter, err := gossip.NewBloomFilter(txGossipBloomMaxItems, txGossipBloomFalsePositiveRate) require.NoError(err) emptyBloomFilterBytes, err := emptyBloomFilter.Bloom.MarshalBinary() @@ -57,34 +95,6 @@ func TestTxGossip(t *testing.T) { require.NoError(err) wg := &sync.WaitGroup{} - - requestingNodeID := ids.GenerateTestNodeID() - peerSender.SendAppRequestF = func(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, appRequestBytes []byte) error { - go func() { - require.NoError(vm.AppRequest(ctx, requestingNodeID, requestID, time.Time{}, appRequestBytes)) - }() - return nil - } - - sender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error { - go func() { - require.NoError(router.AppResponse(ctx, nodeID, requestID, appResponseBytes)) - }() - return nil - } - - // we only accept gossip requests from validators - require.NoError(vm.Network.Connected(context.Background(), requestingNodeID, nil)) - mockValidatorSet, ok := vm.ctx.ValidatorState.(*validators.TestState) - require.True(ok) - mockValidatorSet.GetCurrentHeightF = func(context.Context) (uint64, error) { - return 0, nil - } - mockValidatorSet.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return map[ids.NodeID]*validators.GetValidatorOutput{requestingNodeID: nil}, nil - } - - // Ask the VM for any new transactions. We should get nothing at first. wg.Add(1) onResponse := func(_ context.Context, nodeID ids.NodeID, responseBytes []byte, err error) { require.NoError(err) @@ -94,7 +104,9 @@ func TestTxGossip(t *testing.T) { require.Empty(response.Gossip) wg.Done() } - require.NoError(client.AppRequest(context.Background(), set.Set[ids.NodeID]{vm.ctx.NodeID: struct{}{}}, requestBytes, onResponse)) + require.NoError(client.AppRequest(ctx, set.Of(vm.ctx.NodeID), requestBytes, onResponse)) + require.NoError(vm.AppRequest(ctx, requestingNodeID, 1, time.Time{}, <-peerSender.SentAppRequest)) + require.NoError(network.AppResponse(ctx, snowCtx.NodeID, 1, <-responseSender.SentAppResponse)) wg.Wait() // Issue a tx to the VM @@ -111,6 +123,7 @@ func TestTxGossip(t *testing.T) { // wait so we aren't throttled by the vm time.Sleep(5 * time.Second) + marshaller := GossipEthTxMarshaller{} // Ask the VM for new transactions. We should get the newly issued tx. 
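The 5-second sleep above appears sized against the pull-gossip throttle: requests are admitted through a per-node sliding window (2 requests per 10 seconds under the constants in vm.go), so a second poll issued 5 seconds after the first still fits in the window. A sketch of that wrapping, with a hypothetical inner handler and logger:

    // wrapWithThrottle gates a handler at 2 requests per rolling 10s per node.
    func wrapWithThrottle(inner p2p.Handler, logger logging.Logger) p2p.Handler {
        return p2p.NewThrottlerHandler(
            inner,
            p2p.NewSlidingWindowThrottler(10*time.Second, 2),
            logger,
        )
    }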
wg.Add(1) onResponse = func(_ context.Context, nodeID ids.NodeID, responseBytes []byte, err error) { @@ -120,12 +133,127 @@ func TestTxGossip(t *testing.T) { require.NoError(proto.Unmarshal(responseBytes, response)) require.Len(response.Gossip, 1) - gotTx := &GossipTx{} - require.NoError(gotTx.Unmarshal(response.Gossip[0])) + gotTx, err := marshaller.UnmarshalGossip(response.Gossip[0]) + require.NoError(err) require.Equal(signedTx.Hash(), gotTx.Tx.Hash()) wg.Done() } - require.NoError(client.AppRequest(context.Background(), set.Set[ids.NodeID]{vm.ctx.NodeID: struct{}{}}, requestBytes, onResponse)) + require.NoError(client.AppRequest(ctx, set.Of(vm.ctx.NodeID), requestBytes, onResponse)) + require.NoError(vm.AppRequest(ctx, requestingNodeID, 3, time.Time{}, <-peerSender.SentAppRequest)) + require.NoError(network.AppResponse(ctx, snowCtx.NodeID, 3, <-responseSender.SentAppResponse)) wg.Wait() } + +func TestEthTxPushGossipOutbound(t *testing.T) { + require := require.New(t) + ctx := context.Background() + snowCtx := snow.DefaultContextTest() + sender := &common.FakeSender{ + SentAppGossip: make(chan []byte, 1), + } + + vm := &VM{ + p2pSender: sender, + ethTxPullGossiper: gossip.NoOpGossiper{}, + } + + require.NoError(vm.Initialize( + ctx, + snowCtx, + memdb.New(), + []byte(genesisJSONLatest), + nil, + nil, + make(chan common.Message), + nil, + &common.FakeSender{}, + )) + require.NoError(vm.SetState(ctx, snow.NormalOp)) + + address := testEthAddrs[0] + key := testKeys[0] + tx := types.NewTransaction(0, address, big.NewInt(10), 100_000, big.NewInt(params.LaunchMinGasPrice), nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), key) + require.NoError(err) + + // issue a tx + require.NoError(vm.txPool.AddLocal(signedTx)) + + sent := <-sender.SentAppGossip + got := &sdk.PushGossip{} + + // we should get a message that has the protocol prefix and the gossip + // message + require.Equal(byte(ethTxGossipProtocol), sent[0]) + require.NoError(proto.Unmarshal(sent[1:], got)) + + marshaller := GossipEthTxMarshaller{} + require.Len(got.Gossip, 1) + gossipedTx, err := marshaller.UnmarshalGossip(got.Gossip[0]) + require.NoError(err) + require.Equal(ids.ID(signedTx.Hash()), gossipedTx.GossipID()) +} + +// Tests that a gossiped tx is added to the mempool and forwarded +func TestEthTxPushGossipInbound(t *testing.T) { + require := require.New(t) + ctx := context.Background() + snowCtx := snow.DefaultContextTest() + + sender := &common.FakeSender{ + SentAppGossip: make(chan []byte, 1), + } + vm := &VM{ + p2pSender: sender, + ethTxPullGossiper: gossip.NoOpGossiper{}, + } + + require.NoError(vm.Initialize( + ctx, + snowCtx, + memdb.New(), + []byte(genesisJSONLatest), + nil, + nil, + make(chan common.Message), + nil, + &common.FakeSender{}, + )) + require.NoError(vm.SetState(ctx, snow.NormalOp)) + + address := testEthAddrs[0] + key := testKeys[0] + tx := types.NewTransaction(0, address, big.NewInt(10), 100_000, big.NewInt(params.LaunchMinGasPrice), nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), key) + require.NoError(err) + + marshaller := GossipEthTxMarshaller{} + gossipedTx := &GossipEthTx{ + Tx: signedTx, + } + gossipedTxBytes, err := marshaller.MarshalGossip(gossipedTx) + require.NoError(err) + + inboundGossip := &sdk.PushGossip{ + Gossip: [][]byte{gossipedTxBytes}, + } + + inboundGossipBytes, err := proto.Marshal(inboundGossip) + require.NoError(err) + + inboundGossipMsg := append(binary.AppendUvarint(nil, ethTxGossipProtocol), 
inboundGossipBytes...) + require.NoError(vm.AppGossip(ctx, ids.EmptyNodeID, inboundGossipMsg)) + + forwardedMsg := &sdk.PushGossip{} + outboundGossipBytes := <-sender.SentAppGossip + + require.Equal(byte(ethTxGossipProtocol), outboundGossipBytes[0]) + require.NoError(proto.Unmarshal(outboundGossipBytes[1:], forwardedMsg)) + require.Len(forwardedMsg.Gossip, 1) + + forwardedTx, err := marshaller.UnmarshalGossip(forwardedMsg.Gossip[0]) + require.NoError(err) + require.Equal(gossipedTx.GossipID(), forwardedTx.GossipID()) + require.True(vm.txPool.Has(signedTx.Hash())) +} diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index cc8ba2cfb3..4abd15a008 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -103,35 +103,26 @@ const ( chainStateMetricsPrefix = "chain_state" // p2p app protocols - txGossipProtocol = 0x0 + ethTxGossipProtocol = 0x0 // gossip constants txGossipBloomMaxItems = 8 * 1024 txGossipBloomFalsePositiveRate = 0.01 txGossipMaxFalsePositiveRate = 0.05 - txGossipTargetResponseSize = 20 * units.KiB + txGossipTargetMessageSize = 20 * units.KiB maxValidatorSetStaleness = time.Minute - throttlingPeriod = 10 * time.Second - throttlingLimit = 2 + txGossipThrottlingPeriod = 10 * time.Second + txGossipThrottlingLimit = 2 gossipFrequency = 10 * time.Second -) - -var ( - txGossipConfig = gossip.Config{ - Namespace: "eth_tx_gossip", - PollSize: 10, - } - txGossipHandlerConfig = gossip.HandlerConfig{ - Namespace: "eth_tx_gossip", - TargetResponseSize: txGossipTargetResponseSize, - } + txGossipPollSize = 10 ) // Define the API endpoints for the VM const ( - adminEndpoint = "/admin" - ethRPCEndpoint = "/rpc" - ethWSEndpoint = "/ws" + adminEndpoint = "/admin" + ethRPCEndpoint = "/rpc" + ethWSEndpoint = "/ws" + ethTxGossipNamespace = "eth_tx_gossip" ) var ( @@ -248,6 +239,11 @@ type VM struct { // Avalanche Warp Messaging backend // Used to serve BLS signatures of warp messages over RPC warpBackend warp.Backend + // Initialize only sets these if nil so they can be overridden in tests + p2pSender commonEng.AppSender + ethTxGossipHandler p2p.Handler + ethTxPullGossiper gossip.Gossiper + ethTxPushGossiper gossip.Accumulator[*GossipEthTx] } // Initialize implements the snowman.ChainVM interface @@ -459,7 +455,14 @@ func (vm *VM) Initialize( } // initialize peer network - p2pNetwork := p2p.NewNetwork(vm.ctx.Log, appSender, vm.sdkMetrics, "p2p") + if vm.p2pSender == nil { + vm.p2pSender = appSender + } + + p2pNetwork, err := p2p.NewNetwork(vm.ctx.Log, vm.p2pSender, vm.sdkMetrics, "p2p") + if err != nil { + return fmt.Errorf("failed to initialize p2p network: %w", err) + } vm.validators = p2p.NewValidators(p2pNetwork.Peers, vm.ctx.Log, vm.ctx.SubnetID, vm.ctx.ValidatorState, maxValidatorSetStaleness) vm.networkCodec = message.Codec vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.networkCodec, message.CrossChainCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests, vm.config.MaxOutboundActiveCrossChainRequests) @@ -656,60 +659,78 @@ func (vm *VM) initBlockBuilding() error { ctx, cancel := context.WithCancel(context.TODO()) vm.cancel = cancel + ethTxGossipMarshaller := GossipEthTxMarshaller{} + + ethTxGossipClient := vm.Network.NewClient(ethTxGossipProtocol, p2p.WithValidatorSampling(vm.validators)) + + ethTxGossipMetrics, err := gossip.NewMetrics(vm.sdkMetrics, ethTxGossipNamespace) + if err != nil { + return fmt.Errorf("failed to initialize eth tx gossip metrics: %w", err) + } + + if vm.ethTxPushGossiper == nil { + vm.ethTxPushGossiper = gossip.NewPushGossiper[*GossipEthTx]( + 
ethTxGossipMarshaller, + ethTxGossipClient, + ethTxGossipMetrics, + txGossipTargetMessageSize, + ) + } + // NOTE: gossip network must be initialized first otherwise ETH tx gossip will not work. gossipStats := NewGossipStats() - vm.gossiper = vm.createGossiper(gossipStats) + vm.gossiper = vm.createGossiper(gossipStats, vm.ethTxPushGossiper) vm.builder = vm.NewBlockBuilder(vm.toEngine) vm.builder.awaitSubmittedTxs() vm.Network.SetGossipHandler(NewGossipHandler(vm, gossipStats)) - txPool, err := NewGossipTxPool(vm.txPool) + ethTxPool, err := NewGossipEthTxPool(vm.txPool) if err != nil { return err } vm.shutdownWg.Add(1) go func() { - txPool.Subscribe(ctx) + ethTxPool.Subscribe(ctx) vm.shutdownWg.Done() }() - var txGossipHandler p2p.Handler - - txGossipHandler, err = gossip.NewHandler[*GossipTx](txPool, txGossipHandlerConfig, vm.sdkMetrics) - if err != nil { - return err - } - txGossipHandler = &p2p.ValidatorHandler{ - ValidatorSet: vm.validators, - Handler: &p2p.ThrottlerHandler{ - Throttler: p2p.NewSlidingWindowThrottler(throttlingPeriod, throttlingLimit), - Handler: txGossipHandler, - }, - } - txGossipClient, err := vm.Network.NewAppProtocol(txGossipProtocol, txGossipHandler, p2p.WithValidatorSampling(vm.validators)) - if err != nil { - return err + if vm.ethTxGossipHandler == nil { + vm.ethTxGossipHandler = newTxGossipHandler[*GossipEthTx]( + vm.ctx.Log, + ethTxGossipMarshaller, + ethTxPool, + ethTxGossipMetrics, + txGossipTargetMessageSize, + txGossipThrottlingPeriod, + txGossipThrottlingLimit, + vm.validators, + ) } - var ethTxGossiper gossip.Gossiper - ethTxGossiper, err = gossip.NewPullGossiper[GossipTx, *GossipTx]( - txGossipConfig, - vm.ctx.Log, - txPool, - txGossipClient, - vm.sdkMetrics, - ) - if err != nil { + + if err := vm.Network.AddHandler(ethTxGossipProtocol, vm.ethTxGossipHandler); err != nil { return err } - txGossiper := gossip.ValidatorGossiper{ - Gossiper: ethTxGossiper, - NodeID: vm.ctx.NodeID, - Validators: vm.validators, + + if vm.ethTxPullGossiper == nil { + ethTxPullGossiper := gossip.NewPullGossiper[*GossipEthTx]( + vm.ctx.Log, + ethTxGossipMarshaller, + ethTxPool, + ethTxGossipClient, + ethTxGossipMetrics, + txGossipPollSize, + ) + + vm.ethTxPullGossiper = gossip.ValidatorGossiper{ + Gossiper: ethTxPullGossiper, + NodeID: vm.ctx.NodeID, + Validators: vm.validators, + } } vm.shutdownWg.Add(1) go func() { - gossip.Every(ctx, vm.ctx.Log, txGossiper, gossipFrequency) + gossip.Every(ctx, vm.ctx.Log, vm.ethTxPullGossiper, gossipFrequency) vm.shutdownWg.Done() }() diff --git a/scripts/versions.sh b/scripts/versions.sh index f9125a2baf..d7d978e229 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'v1.10.17'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'v1.10.18-rc.8'} GINKGO_VERSION=${GINKGO_VERSION:-'v2.2.0'} # This won't be used, but it's here to make code syncs easier diff --git a/sync/client/client.go b/sync/client/client.go index 5168aa8dda..001989f934 100644 --- a/sync/client/client.go +++ b/sync/client/client.go @@ -351,7 +351,7 @@ func (c *client) get(ctx context.Context, request message.Request, parseFn parse responseIntf, numElements, err = parseFn(c.codec, request, response) if err != nil { lastErr = err - log.Info("could not validate response, retrying", "nodeID", nodeID, "attempt", attempt, "request", request, "err", err) + log.Debug("could not validate response, retrying", "nodeID", nodeID, "attempt", attempt, 
"request", request, "err", err) c.networkClient.TrackBandwidth(nodeID, 0) metric.IncFailed() metric.IncInvalidResponse() diff --git a/x/warp/predicate_test.go b/x/warp/predicate_test.go index a760de29af..6cd5e02736 100644 --- a/x/warp/predicate_test.go +++ b/x/warp/predicate_test.go @@ -108,8 +108,8 @@ type testValidator struct { vdr *avalancheWarp.Validator } -func (v *testValidator) Less(o *testValidator) bool { - return v.vdr.Less(o.vdr) +func (v *testValidator) Compare(o *testValidator) int { + return v.vdr.Compare(o.vdr) } func newTestValidator() *testValidator { From 08f8bb0c86d2ac7f15523271523b33f7e6f479a0 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 25 Dec 2023 14:25:34 +0300 Subject: [PATCH 3/8] fix tests --- plugin/evm/gossip.go | 9 +++++---- plugin/evm/tx_gossip_test.go | 13 +++++++------ 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/plugin/evm/gossip.go b/plugin/evm/gossip.go index 41cea7df1a..db8450bc8e 100644 --- a/plugin/evm/gossip.go +++ b/plugin/evm/gossip.go @@ -78,6 +78,10 @@ func (t txGossipHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossi t.appGossipHandler.AppGossip(ctx, nodeID, gossipBytes) } +func (t txGossipHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { + return t.appRequestHandler.AppRequest(ctx, nodeID, deadline, requestBytes) +} + func NewGossipEthTxPool(mempool *txpool.TxPool) (*GossipEthTxPool, error) { bloom, err := gossip.NewBloomFilter(txGossipBloomMaxItems, txGossipBloomFalsePositiveRate) if err != nil { @@ -148,10 +152,7 @@ func (g *GossipEthTxPool) GetFilter() ([]byte, []byte, error) { g.lock.RLock() defer g.lock.RUnlock() - bloom, err := g.bloom.Bloom.MarshalBinary() - salt := g.bloom.Salt - - return bloom, salt[:], err + return g.bloom.Marshal() } type GossipEthTxMarshaller struct{} diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index cf0df24779..3d1e713009 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -19,7 +19,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" + agoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/coreth/params" @@ -29,12 +29,13 @@ import ( "google.golang.org/protobuf/proto" "github.com/ava-labs/subnet-evm/core/types" + "github.com/ava-labs/subnet-evm/utils" ) func TestEthTxGossip(t *testing.T) { require := require.New(t) ctx := context.Background() - snowCtx := snow.DefaultContextTest() + snowCtx := utils.TestSnowContext() validatorState := &validators.TestState{} snowCtx.ValidatorState = validatorState @@ -84,11 +85,11 @@ func TestEthTxGossip(t *testing.T) { // Ask the VM for any new transactions. We should get nothing at first. 
emptyBloomFilter, err := gossip.NewBloomFilter(txGossipBloomMaxItems, txGossipBloomFalsePositiveRate) require.NoError(err) - emptyBloomFilterBytes, err := emptyBloomFilter.Bloom.MarshalBinary() + emptyBloomFilterBytes, _, err := emptyBloomFilter.Marshal() require.NoError(err) request := &sdk.PullGossipRequest{ Filter: emptyBloomFilterBytes, - Salt: utils.RandomBytes(32), + Salt: agoUtils.RandomBytes(32), } requestBytes, err := proto.Marshal(request) @@ -148,7 +149,7 @@ func TestEthTxGossip(t *testing.T) { func TestEthTxPushGossipOutbound(t *testing.T) { require := require.New(t) ctx := context.Background() - snowCtx := snow.DefaultContextTest() + snowCtx := utils.TestSnowContext() sender := &common.FakeSender{ SentAppGossip: make(chan []byte, 1), } @@ -199,7 +200,7 @@ func TestEthTxPushGossipOutbound(t *testing.T) { func TestEthTxPushGossipInbound(t *testing.T) { require := require.New(t) ctx := context.Background() - snowCtx := snow.DefaultContextTest() + snowCtx := utils.TestSnowContext() sender := &common.FakeSender{ SentAppGossip: make(chan []byte, 1), From c49eba4ea9b93fa622598bfc8fb786f8e8185e97 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 25 Dec 2023 14:40:48 +0300 Subject: [PATCH 4/8] fix imports --- go.mod | 2 +- plugin/evm/gossipper_test.go | 4 ++-- plugin/evm/tx_gossip_test.go | 5 ++--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index db5a1f4daf..ea6d8c3ad5 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,6 @@ require ( github.com/VictoriaMetrics/fastcache v1.10.0 github.com/ava-labs/avalanche-network-runner v1.7.4-0.20231127162258-2f3ceed8ae4b github.com/ava-labs/avalanchego v1.10.18-rc.8 - github.com/ava-labs/coreth v0.12.9-rc.9.0.20231222191417-2e3f762373e9 github.com/cespare/cp v0.1.0 github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 github.com/davecgh/go-spew v1.1.1 @@ -58,6 +57,7 @@ require ( github.com/DataDog/zstd v1.5.2 // indirect github.com/Microsoft/go-winio v0.5.2 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect + github.com/ava-labs/coreth v0.12.9-rc.9.0.20231222191417-2e3f762373e9 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.7.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect diff --git a/plugin/evm/gossipper_test.go b/plugin/evm/gossipper_test.go index d18163f998..bf7f91a165 100644 --- a/plugin/evm/gossipper_test.go +++ b/plugin/evm/gossipper_test.go @@ -73,7 +73,7 @@ func getValidEthTxs(key *ecdsa.PrivateKey, count int, gasPrice *big.Int) []*type } // show that locally issued eth txs are gossiped -// Note: channel through which coreth mempool push txs to vm is injected here +// Note: channel through which subnet-evm mempool push txs to vm is injected here // to ease up UT, which target only VM behaviors in response to subnet-evm mempool // signals func TestMempoolEthTxsAddedTxsGossipedAfterActivation(t *testing.T) { @@ -158,7 +158,7 @@ func TestMempoolEthTxsAddedTxsGossipedAfterActivation(t *testing.T) { errs = vm.txPool.AddRemotesSync(ethTxs) assert.Contains(errs[0].Error(), "already known") assert.Contains(errs[1].Error(), "already known") - assert.NoError(errs[2], "failed adding coreth tx to mempool") + assert.NoError(errs[2], "failed adding subnet-evm tx to mempool") attemptAwait(t, &wg, 5*time.Second) } diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index 3d1e713009..9044750ada 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -22,7 +22,6 @@ import ( agoUtils 
"github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/coreth/params" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" @@ -174,7 +173,7 @@ func TestEthTxPushGossipOutbound(t *testing.T) { address := testEthAddrs[0] key := testKeys[0] - tx := types.NewTransaction(0, address, big.NewInt(10), 100_000, big.NewInt(params.LaunchMinGasPrice), nil) + tx := types.NewTransaction(0, address, big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), key) require.NoError(err) @@ -225,7 +224,7 @@ func TestEthTxPushGossipInbound(t *testing.T) { address := testEthAddrs[0] key := testKeys[0] - tx := types.NewTransaction(0, address, big.NewInt(10), 100_000, big.NewInt(params.LaunchMinGasPrice), nil) + tx := types.NewTransaction(0, address, big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), key) require.NoError(err) From e8c2bde5112355330f345bae74db2a295931b835 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 25 Dec 2023 19:07:49 +0300 Subject: [PATCH 5/8] bump min go version --- .github/workflows/auto-generated-code-checker.yml | 2 +- .github/workflows/bench.yml | 2 +- .github/workflows/ci.yml | 8 ++++---- .github/workflows/release.yml | 2 +- Dockerfile | 2 +- README.md | 2 +- scripts/build.sh | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/auto-generated-code-checker.yml b/.github/workflows/auto-generated-code-checker.yml index 39bd2c8434..a83c1f2990 100644 --- a/.github/workflows/auto-generated-code-checker.yml +++ b/.github/workflows/auto-generated-code-checker.yml @@ -16,7 +16,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - shell: bash run: scripts/mock.gen.sh diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 12ce8eec47..500c9aaf7f 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - run: go mod download shell: bash diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1f932cbcb2..c391433f68 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,12 +18,12 @@ jobs: shell: bash - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - name: golangci-lint uses: golangci/golangci-lint-action@v3 with: - version: v1.54 + version: v1.55 working-directory: . 
args: --timeout 10m @@ -34,7 +34,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - run: go mod download shell: bash @@ -66,7 +66,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - name: Use Node.js uses: actions/setup-node@v3 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index febc34fa75..285349835f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -19,7 +19,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - name: Set up arm64 cross compiler run: | diff --git a/Dockerfile b/Dockerfile index 11784758dd..54e3274966 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ ARG AVALANCHE_VERSION # ============= Compilation Stage ================ -FROM golang:1.20.10-bullseye AS builder +FROM golang:1.20.12-bullseye AS builder WORKDIR /build diff --git a/README.md b/README.md index 193e209540..5f2fc45017 100644 --- a/README.md +++ b/README.md @@ -94,7 +94,7 @@ To support these changes, there have been a number of changes to the SubnetEVM b ### Clone Subnet-evm -First install Go 1.20.10 or later. Follow the instructions [here](https://go.dev/doc/install). You can verify by running `go version`. +First install Go 1.20.12 or later. Follow the instructions [here](https://go.dev/doc/install). You can verify by running `go version`. Set `$GOPATH` environment variable properly for Go to look for Go Workspaces. Please read [this](https://go.dev/doc/code) for details. You can verify by running `echo $GOPATH`. diff --git a/scripts/build.sh b/scripts/build.sh index c9b169bee9..a4941b99c4 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -4,7 +4,7 @@ set -o errexit set -o nounset set -o pipefail -go_version_minimum="1.20.10" +go_version_minimum="1.20.12" go_version() { go version | sed -nE -e 's/[^0-9.]+([0-9.]+).+/\1/p' From 0bf3ed332c595a4968851fc30251772f43bf67ef Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 25 Dec 2023 19:30:04 +0300 Subject: [PATCH 6/8] downgrade golangci --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c391433f68..637aaf012e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -23,7 +23,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v3 with: - version: v1.55 + version: v1.54 working-directory: . 
args: --timeout 10m From 966e5b435624622f21dc043200296c5876b847b5 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 28 Dec 2023 19:30:36 +0300 Subject: [PATCH 7/8] sync to 0.12.10-rc.2 --- core/blockchain.go | 49 +- core/blockchain_repair_test.go | 7 +- core/blockchain_test.go | 378 ++++++++++--- core/state/statedb.go | 4 +- core/state/trie_prefetcher.go | 519 +++++++++++++----- core/state/trie_prefetcher_test.go | 8 +- eth/backend.go | 2 + eth/ethconfig/config.go | 45 +- eth/tracers/api_test.go | 9 +- .../internal/tracetest/calltrace_test.go | 102 ++-- eth/tracers/native/call.go | 2 + eth/tracers/native/prestate.go | 7 +- ethclient/ethclient.go | 11 + internal/ethapi/api.go | 39 +- internal/ethapi/api_test.go | 189 ++++++- params/avalanche_params.go | 3 + params/config.go | 3 - peer/network_test.go | 1 + plugin/evm/block.go | 6 +- plugin/evm/block_test.go | 2 +- plugin/evm/config.go | 34 +- plugin/evm/gossiper.go | 2 +- plugin/evm/message/codec.go | 6 +- plugin/evm/message/message_test.go | 4 +- plugin/evm/vm.go | 10 +- plugin/evm/vm_test.go | 40 +- rpc/types.go | 5 +- rpc/types_test.go | 21 + scripts/versions.sh | 2 +- sync/client/client_test.go | 7 +- sync/handlers/block_request.go | 17 +- sync/handlers/block_request_test.go | 200 ++++--- sync/handlers/leafs_request_test.go | 15 +- sync/statesync/sync_test.go | 43 +- sync/statesync/test_sync.go | 15 +- {trie => sync/syncutils}/test_trie.go | 39 +- trie/triedb/hashdb/database.go | 15 +- utils/bounded_workers.go | 81 +++ 38 files changed, 1449 insertions(+), 493 deletions(-) rename {trie => sync/syncutils}/test_trie.go (84%) create mode 100644 utils/bounded_workers.go diff --git a/core/blockchain.go b/core/blockchain.go index ec02b24073..0e8a04642a 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -168,11 +168,12 @@ type CacheConfig struct { TrieCleanRejournal time.Duration // Time interval to dump clean cache to disk periodically TrieDirtyLimit int // Memory limit (MB) at which to block on insert and force a flush of dirty trie nodes to disk TrieDirtyCommitTarget int // Memory limit (MB) to target for the dirties cache before invoking commit + TriePrefetcherParallelism int // Max concurrent disk reads trie prefetcher should perform at once CommitInterval uint64 // Commit the trie every [CommitInterval] blocks. Pruning bool // Whether to disable trie write caching and GC altogether (archive node) AcceptorQueueLimit int // Blocks to queue before blocking during acceptance PopulateMissingTries *uint64 // If non-nil, sets the starting height for re-generating historical tries. - PopulateMissingTriesParallelism int // Is the number of readers to use when trying to populate missing tries. + PopulateMissingTriesParallelism int // Number of readers to use when trying to populate missing tries. 
AllowMissingTries bool // Whether to allow an archive node to run with pruning enabled SnapshotDelayInit bool // Whether to initialize snapshots on startup or wait for external call SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory @@ -180,20 +181,22 @@ type CacheConfig struct { Preimages bool // Whether to store preimage of trie key to the disk AcceptedCacheSize int // Depth of accepted headers cache and accepted logs cache at the accepted tip TxLookupLimit uint64 // Number of recent blocks for which to maintain transaction lookup indices + SkipTxIndexing bool // Whether to skip transaction indexing SnapshotNoBuild bool // Whether the background generation is allowed SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it } var DefaultCacheConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, // 20% overhead in memory counting (this targets 16 MB) - Pruning: true, - CommitInterval: 4096, - AcceptorQueueLimit: 64, // Provides 2 minutes of buffer (2s block target) for a commit delay - SnapshotLimit: 256, - AcceptedCacheSize: 32, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, // 20% overhead in memory counting (this targets 16 MB) + TriePrefetcherParallelism: 16, + Pruning: true, + CommitInterval: 4096, + AcceptorQueueLimit: 64, // Provides 2 minutes of buffer (2s block target) for a commit delay + SnapshotLimit: 256, + AcceptedCacheSize: 32, } // BlockChain represents the canonical chain given a database with a genesis @@ -507,7 +510,9 @@ func (bc *BlockChain) dispatchTxUnindexer() { // - updating the acceptor tip index func (bc *BlockChain) writeBlockAcceptedIndices(b *types.Block) error { batch := bc.db.NewBatch() - rawdb.WriteTxLookupEntriesByBlock(batch, b) + if !bc.cacheConfig.SkipTxIndexing { + rawdb.WriteTxLookupEntriesByBlock(batch, b) + } if err := rawdb.WriteAcceptorTip(batch, b.Hash()); err != nil { return fmt.Errorf("%w: failed to write acceptor tip key", err) } @@ -607,6 +612,12 @@ func (bc *BlockChain) startAcceptor() { logs := bc.collectUnflattenedLogs(next, false) bc.acceptedLogsCache.Put(next.Hash(), logs) + // Update the acceptor tip before sending events to ensure that any client acting based off of + // the events observes the updated acceptorTip on subsequent requests + bc.acceptorTipLock.Lock() + bc.acceptorTip = next + bc.acceptorTipLock.Unlock() + // Update accepted feeds flattenedLogs := types.FlattenLogs(logs) bc.chainAcceptedFeed.Send(ChainEvent{Block: next, Hash: next.Hash(), Logs: flattenedLogs}) @@ -617,9 +628,6 @@ func (bc *BlockChain) startAcceptor() { bc.txAcceptedFeed.Send(NewTxsEvent{next.Transactions()}) } - bc.acceptorTipLock.Lock() - bc.acceptorTip = next - bc.acceptorTipLock.Unlock() bc.acceptorWg.Done() acceptorWorkTimer.Inc(time.Since(start).Milliseconds()) @@ -883,7 +891,8 @@ func (bc *BlockChain) ValidateCanonicalChain() error { // Transactions are only indexed beneath the last accepted block, so we only check // that the transactions have been indexed, if we are checking below the last accepted // block. 
- shouldIndexTxs := bc.cacheConfig.TxLookupLimit == 0 || bc.lastAccepted.NumberU64() < current.Number.Uint64()+bc.cacheConfig.TxLookupLimit + shouldIndexTxs := !bc.cacheConfig.SkipTxIndexing && + (bc.cacheConfig.TxLookupLimit == 0 || bc.lastAccepted.NumberU64() < current.Number.Uint64()+bc.cacheConfig.TxLookupLimit) if current.Number.Uint64() <= bc.lastAccepted.NumberU64() && shouldIndexTxs { // Ensure that all of the transactions have been stored correctly in the canonical // chain @@ -1371,7 +1380,7 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { blockStateInitTimer.Inc(time.Since(substart).Milliseconds()) // Enable prefetching to pull in trie node paths while processing transactions - statedb.StartPrefetcher("chain") + statedb.StartPrefetcher("chain", bc.cacheConfig.TriePrefetcherParallelism) activeState = statedb // If we have a followup block, run that against the current state to pre-cache @@ -1746,7 +1755,7 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block) } // Enable prefetching to pull in trie node paths while processing transactions - statedb.StartPrefetcher("chain") + statedb.StartPrefetcher("chain", bc.cacheConfig.TriePrefetcherParallelism) defer func() { statedb.StopPrefetcher() }() @@ -2153,3 +2162,11 @@ func (bc *BlockChain) ResetToStateSyncedBlock(block *types.Block) error { bc.initSnapshot(head) return nil } + +// CacheConfig returns a reference to [bc.cacheConfig] +// +// This is used by [miner] to set prefetch parallelism +// during block building. +func (bc *BlockChain) CacheConfig() *CacheConfig { + return bc.cacheConfig +} diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index 3d35a69cbb..e33dcda9ec 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -524,9 +524,10 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { } engine = dummy.NewFullFaker() config = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - SnapshotLimit: 0, // Disable snapshot by default + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TriePrefetcherParallelism: 4, + SnapshotLimit: 0, // Disable snapshot by default } ) defer engine.Close() diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 2c2be768a8..015d3074ea 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -28,22 +28,24 @@ import ( var ( archiveConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: false, // Archive mode - SnapshotLimit: 256, - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: false, // Archive mode + SnapshotLimit: 256, + AcceptorQueueLimit: 64, } pruningConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, // Enable pruning - CommitInterval: 4096, - SnapshotLimit: 256, - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, // Enable pruning + CommitInterval: 4096, + SnapshotLimit: 256, + AcceptorQueueLimit: 64, } ) @@ -180,12 +182,13 @@ func TestArchiveBlockChainSnapsDisabled(t *testing.T) { return createBlockChain( db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: false, // Archive mode - SnapshotLimit: 0, // Disable snapshots - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 
256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: false, // Archive mode + SnapshotLimit: 0, // Disable snapshots + AcceptorQueueLimit: 64, }, gspec, lastAcceptedHash, @@ -214,13 +217,14 @@ func TestPruningBlockChainSnapsDisabled(t *testing.T) { return createBlockChain( db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, // Enable pruning - CommitInterval: 4096, - SnapshotLimit: 0, // Disable snapshots - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, // Enable pruning + CommitInterval: 4096, + SnapshotLimit: 0, // Disable snapshots + AcceptorQueueLimit: 64, }, gspec, lastAcceptedHash, @@ -263,13 +267,14 @@ func TestPruningBlockChainUngracefulShutdownSnapsDisabled(t *testing.T) { blockchain, err := createBlockChain( db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, // Enable pruning - CommitInterval: 4096, - SnapshotLimit: 0, // Disable snapshots - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, // Enable pruning + CommitInterval: 4096, + SnapshotLimit: 0, // Disable snapshots + AcceptorQueueLimit: 64, }, gspec, lastAcceptedHash, @@ -298,13 +303,14 @@ func TestEnableSnapshots(t *testing.T) { blockchain, err := createBlockChain( db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, // Enable pruning - CommitInterval: 4096, - SnapshotLimit: snapLimit, - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, // Enable pruning + CommitInterval: 4096, + SnapshotLimit: snapLimit, + AcceptorQueueLimit: 64, }, gspec, lastAcceptedHash, @@ -458,6 +464,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { TrieCleanLimit: 256, TrieDirtyLimit: 256, TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, Pruning: false, // Archive mode SnapshotLimit: 256, PopulateMissingTries: &startHeight, // Starting point for re-populating. @@ -490,14 +497,15 @@ func TestUngracefulAsyncShutdown(t *testing.T) { var ( create = func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { blockchain, err := createBlockChain(db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, - CommitInterval: 4096, - SnapshotLimit: 256, - SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails - AcceptorQueueLimit: 1000, // ensure channel doesn't block + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, + CommitInterval: 4096, + SnapshotLimit: 256, + SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails + AcceptorQueueLimit: 1000, // ensure channel doesn't block }, gspec, lastAcceptedHash) if err != nil { return nil, err @@ -629,9 +637,6 @@ func TestUngracefulAsyncShutdown(t *testing.T) { // TODO: simplify the unindexer logic and this test. 
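// A minimal sketch of the tail arithmetic the test below asserts; the helper
// name is hypothetical and not defined in this patch. A zero TxLookupLimit
// keeps the whole history indexed and writes no tail marker, otherwise only
// the window [head-limit+1, head] keeps its lookup entries.
func expectedTxIndexTail(head, limit uint64) *uint64 {
	if limit == 0 {
		return nil // unlimited history: rawdb.ReadTxIndexTail should report nil
	}
	var tail uint64
	if head > limit {
		tail = head - limit + 1 // first block whose transactions stay indexed
	}
	return &tail
}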
func TestTransactionIndices(t *testing.T) { - if os.Getenv("RUN_FLAKY_TESTS") != "true" { - t.Skip("FLAKY") - } // Configure and generate a sample block chain require := require.New(t) var ( @@ -641,7 +646,7 @@ func TestTransactionIndices(t *testing.T) { addr2 = crypto.PubkeyToAddress(key2.PublicKey) funds = big.NewInt(10000000000000) gspec = &Genesis{ - Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, + Config: &params.ChainConfig{HomesteadBlock: new(big.Int), FeeConfig: params.DefaultFeeConfig}, Alloc: GenesisAlloc{addr1: {Balance: funds}}, } signer = types.LatestSigner(gspec.Config) @@ -653,17 +658,25 @@ func TestTransactionIndices(t *testing.T) { }) require.NoError(err) - blocks2, _, err := GenerateChain(gspec.Config, blocks[len(blocks)-1], dummy.NewFaker(), genDb, 10, 10, nil) + blocks2, _, err := GenerateChain(gspec.Config, blocks[len(blocks)-1], dummy.NewFaker(), genDb, 10, 10, func(i int, block *BlockGen) { + tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) + require.NoError(err) + block.AddTx(tx) + }) require.NoError(err) check := func(tail *uint64, chain *BlockChain) { stored := rawdb.ReadTxIndexTail(chain.db) - require.EqualValues(tail, stored) - + var tailValue uint64 if tail == nil { - return + require.Nil(stored) + tailValue = 0 + } else { + require.EqualValues(*tail, *stored, "expected tail %d, got %d", *tail, *stored) + tailValue = *tail } - for i := *tail; i <= chain.CurrentBlock().Number.Uint64(); i++ { + + for i := tailValue; i <= chain.CurrentBlock().Number.Uint64(); i++ { block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) if block.Transactions().Len() == 0 { continue @@ -674,7 +687,7 @@ func TestTransactionIndices(t *testing.T) { } } - for i := uint64(0); i < *tail; i++ { + for i := uint64(0); i < tailValue; i++ { block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) if block.Transactions().Len() == 0 { continue @@ -687,14 +700,15 @@ func TestTransactionIndices(t *testing.T) { } conf := &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, - CommitInterval: 4096, - SnapshotLimit: 256, - SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, + CommitInterval: 4096, + SnapshotLimit: 256, + SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails + AcceptorQueueLimit: 64, } // Init block chain and check all needed indices has been indexed. @@ -718,29 +732,167 @@ func TestTransactionIndices(t *testing.T) { // Reconstruct a block chain which only reserves limited tx indices // 128 blocks were previously indexed. Now we add a new block at each test step.
- limit := []uint64{130 /* 129 + 1 reserve all */, 64 /* drop stale */, 32 /* shorten history */} - tails := []uint64{0 /* reserve all */, 67 /* 130 - 64 + 1 */, 100 /* 131 - 32 + 1 */} - for i, l := range limit { - conf.TxLookupLimit = l + limits := []uint64{ + 0, /* tip: 129 reserve all (don't run) */ + 131, /* tip: 130 reserve all */ + 140, /* tip: 131 reserve all */ + 64, /* tip: 132, limit:64 */ + 32, /* tip: 133, limit:32 */ + } + for i, l := range limits { + t.Run(fmt.Sprintf("test-%d, limit: %d", i+1, l), func(t *testing.T) { + conf.TxLookupLimit = l + + chain, err := createBlockChain(chainDB, conf, gspec, lastAcceptedHash) + require.NoError(err) + + newBlks := blocks2[i : i+1] + _, err = chain.InsertChain(newBlks) // Feed chain a higher block to trigger indices updater. + require.NoError(err) + + err = chain.Accept(newBlks[0]) // Accept the block to trigger indices updater. + require.NoError(err) + + chain.DrainAcceptorQueue() + time.Sleep(50 * time.Millisecond) // Wait for indices initialisation + + chain.Stop() + var tail *uint64 + if l == 0 { + tail = nil + } else { + var tl uint64 + if chain.CurrentBlock().Number.Uint64() > l { + // tail should be the first block number which is indexed + // i.e. the first block number that's in the lookup range + tl = chain.CurrentBlock().Number.Uint64() - l + 1 + } + tail = &tl + } - chain, err := createBlockChain(chainDB, conf, gspec, lastAcceptedHash) - require.NoError(err) + check(tail, chain) - newBlks := blocks2[i : i+1] - _, err = chain.InsertChain(newBlks) // Feed chain a higher block to trigger indices updater. + lastAcceptedHash = chain.CurrentHeader().Hash() + }) + } +} + +func TestTransactionSkipIndexing(t *testing.T) { + // Configure and generate a sample block chain + require := require.New(t) + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + funds = big.NewInt(10000000000000) + gspec = &Genesis{ + Config: &params.ChainConfig{HomesteadBlock: new(big.Int), FeeConfig: params.DefaultFeeConfig}, + Alloc: GenesisAlloc{addr1: {Balance: funds}}, + } + signer = types.LatestSigner(gspec.Config) + ) + genDb, blocks, _, err := GenerateChainWithGenesis(gspec, dummy.NewFaker(), 5, 10, func(i int, block *BlockGen) { + tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) require.NoError(err) + block.AddTx(tx) + }) + require.NoError(err) - err = chain.Accept(newBlks[0]) // Accept the block to trigger indices updater.
+ blocks2, _, err := GenerateChain(gspec.Config, blocks[len(blocks)-1], dummy.NewFaker(), genDb, 5, 10, func(i int, block *BlockGen) { + tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) require.NoError(err) + block.AddTx(tx) + }) + require.NoError(err) - chain.DrainAcceptorQueue() - time.Sleep(50 * time.Millisecond) // Wait for indices initialisation + checkRemoved := func(tail *uint64, to uint64, chain *BlockChain) { + stored := rawdb.ReadTxIndexTail(chain.db) + var tailValue uint64 + if tail == nil { + require.Nil(stored) + tailValue = 0 + } else { + require.EqualValues(*tail, *stored, "expected tail %d, got %d", *tail, *stored) + tailValue = *tail } - chain.Stop() - check(&tails[i], chain) + for i := tailValue; i < to; i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()) + require.NotNilf(index, "Miss transaction indices, number %d hash %s", i, tx.Hash().Hex()) + } + } - lastAcceptedHash = chain.CurrentHeader().Hash() + for i := uint64(0); i < tailValue; i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()) + require.Nilf(index, "Transaction indices should be deleted, number %d hash %s", i, tx.Hash().Hex()) + } + } + + for i := to; i <= chain.CurrentBlock().Number.Uint64(); i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()) + require.Nilf(index, "Transaction indices should be skipped, number %d hash %s", i, tx.Hash().Hex()) + } + } } + + conf := &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, + CommitInterval: 4096, + SnapshotLimit: 256, + SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails + AcceptorQueueLimit: 64, + SkipTxIndexing: true, + } + + // test1: Init block chain and check all indices have been skipped. + chainDB := rawdb.NewMemoryDatabase() + chain, err := createAndInsertChain(chainDB, conf, gspec, blocks, common.Hash{}) + require.NoError(err) + checkRemoved(nil, 0, chain) // check all indices have been skipped + + // test2: specify TxLookupLimit with tx index skipping enabled. Blocks should not be indexed but tail should be updated. + conf.TxLookupLimit = 2 + chain, err = createAndInsertChain(chainDB, conf, gspec, blocks2[0:1], chain.CurrentHeader().Hash()) + require.NoError(err) + tail := chain.CurrentBlock().Number.Uint64() - conf.TxLookupLimit + 1 + checkRemoved(&tail, 0, chain) + + // test3: tx index skipping and unindexer disabled. Blocks should be indexed and tail should be updated.
+ conf.TxLookupLimit = 0 + conf.SkipTxIndexing = false + chainDB = rawdb.NewMemoryDatabase() + chain, err = createAndInsertChain(chainDB, conf, gspec, blocks, common.Hash{}) + require.NoError(err) + checkRemoved(nil, chain.CurrentBlock().Number.Uint64()+1, chain) // check all indices have been indexed + + // Now change tx index skipping to true and check that the indices are skipped for the last block + // and old indices are removed up to the tail, but [tail, current) indices are still there. + conf.TxLookupLimit = 2 + conf.SkipTxIndexing = true + chain, err = createAndInsertChain(chainDB, conf, gspec, blocks2[0:1], chain.CurrentHeader().Hash()) + require.NoError(err) + tail = chain.CurrentBlock().Number.Uint64() - conf.TxLookupLimit + 1 + checkRemoved(&tail, chain.CurrentBlock().Number.Uint64(), chain) } // TestCanonicalHashMarker tests all the canonical hash markers are updated/deleted @@ -857,15 +1009,40 @@ func TestCanonicalHashMarker(t *testing.T) { func TestTxLookupBlockChain(t *testing.T) { cacheConf := &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, - CommitInterval: 4096, - SnapshotLimit: 256, - SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails - AcceptorQueueLimit: 64, // ensure channel doesn't block - TxLookupLimit: 5, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, + CommitInterval: 4096, + SnapshotLimit: 256, + SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails + AcceptorQueueLimit: 64, // ensure channel doesn't block + TxLookupLimit: 5, + } + createTxLookupBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + return createBlockChain(db, cacheConf, gspec, lastAcceptedHash) + } + for _, tt := range tests { + t.Run(tt.Name, func(t *testing.T) { + tt.testFunc(t, createTxLookupBlockChain) + }) + } +} + +func TestTxLookupSkipIndexingBlockChain(t *testing.T) { + cacheConf := &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, + CommitInterval: 4096, + SnapshotLimit: 256, + SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails + AcceptorQueueLimit: 64, // ensure channel doesn't block + TxLookupLimit: 5, + SkipTxIndexing: true, } createTxLookupBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { return createBlockChain(db, cacheConf, gspec, lastAcceptedHash) } @@ -1172,3 +1349,26 @@ func TestEIP3651(t *testing.T) { t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual) } } + +func createAndInsertChain(db ethdb.Database, cacheConfig *CacheConfig, gspec *Genesis, blocks types.Blocks, lastAcceptedHash common.Hash) (*BlockChain, error) { + chain, err := createBlockChain(db, cacheConfig, gspec, lastAcceptedHash) + if err != nil { + return nil, err + } + _, err = chain.InsertChain(blocks) + if err != nil { + return nil, err + } + for _, block := range blocks { + err := chain.Accept(block) + if err != nil { + return nil, err + } + } + + chain.DrainAcceptorQueue() + time.Sleep(1000 * time.Millisecond) // Wait for indices initialisation + + chain.Stop() + return chain, nil +} diff --git a/core/state/statedb.go b/core/state/statedb.go index 6c44427975..34bdafcedb 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -191,13 +191,13 @@ func
NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*St // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the // commit phase, most of the needed data is already hot. -func (s *StateDB) StartPrefetcher(namespace string) { +func (s *StateDB) StartPrefetcher(namespace string, maxConcurrency int) { if s.prefetcher != nil { s.prefetcher.close() s.prefetcher = nil } if s.snap != nil { - s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace) + s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace, maxConcurrency) } } diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 39f022d220..7c65fd12c4 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -28,16 +28,16 @@ package state import ( "sync" + "time" "github.com/ava-labs/subnet-evm/metrics" + "github.com/ava-labs/subnet-evm/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) -var ( - // triePrefetchMetricsPrefix is the prefix under which to publish the metrics. - triePrefetchMetricsPrefix = "trie/prefetch/" -) +// triePrefetchMetricsPrefix is the prefix under which to publish the metrics. +const triePrefetchMetricsPrefix = "trie/prefetch/" // triePrefetcher is an active prefetcher, which receives accounts or storage // items and does trie-loading of them. The goal is to get as much useful content @@ -50,63 +50,91 @@ type triePrefetcher struct { fetches map[string]Trie // Partially or fully fetcher tries fetchers map[string]*subfetcher // Subfetchers for each trie - deliveryCopyMissMeter metrics.Meter - deliveryRequestMissMeter metrics.Meter - deliveryWaitMissMeter metrics.Meter + maxConcurrency int + workers *utils.BoundedWorkers + + subfetcherWorkersMeter metrics.Meter + subfetcherWaitTimer metrics.Counter + subfetcherCopiesMeter metrics.Meter accountLoadMeter metrics.Meter accountDupMeter metrics.Meter accountSkipMeter metrics.Meter accountWasteMeter metrics.Meter - storageLoadMeter metrics.Meter - storageDupMeter metrics.Meter - storageSkipMeter metrics.Meter - storageWasteMeter metrics.Meter + + storageFetchersMeter metrics.Meter + storageLoadMeter metrics.Meter + storageLargestLoadMeter metrics.Meter + storageDupMeter metrics.Meter + storageSkipMeter metrics.Meter + storageWasteMeter metrics.Meter } -func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePrefetcher { +func newTriePrefetcher(db Database, root common.Hash, namespace string, maxConcurrency int) *triePrefetcher { prefix := triePrefetchMetricsPrefix + namespace - p := &triePrefetcher{ + return &triePrefetcher{ db: db, root: root, fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map - deliveryCopyMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss/copy", nil), - deliveryRequestMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss/request", nil), - deliveryWaitMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss/wait", nil), + maxConcurrency: maxConcurrency, + workers: utils.NewBoundedWorkers(maxConcurrency), // Scale up as needed to [maxConcurrency] + + subfetcherWorkersMeter: metrics.GetOrRegisterMeter(prefix+"/subfetcher/workers", nil), + subfetcherWaitTimer: metrics.GetOrRegisterCounter(prefix+"/subfetcher/wait", nil), + subfetcherCopiesMeter: metrics.GetOrRegisterMeter(prefix+"/subfetcher/copies", nil), accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", 
nil), accountDupMeter: metrics.GetOrRegisterMeter(prefix+"/account/dup", nil), accountSkipMeter: metrics.GetOrRegisterMeter(prefix+"/account/skip", nil), accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil), - storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil), - storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil), - storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil), - storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil), + + storageFetchersMeter: metrics.GetOrRegisterMeter(prefix+"/storage/fetchers", nil), + storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil), + storageLargestLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/lload", nil), + storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil), + storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil), + storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil), } - return p } // close iterates over all the subfetchers, aborts any that were left spinning // and reports the stats to the metrics subsystem. func (p *triePrefetcher) close() { + // If the prefetcher is an inactive one, bail out + if p.fetches != nil { + return + } + + // Collect stats from all fetchers + var ( + storageFetchers int64 + largestLoad int64 + ) for _, fetcher := range p.fetchers { - fetcher.abort() // safe to do multiple times + fetcher.abort() // safe to call multiple times (should be a no-op on happy path) if metrics.Enabled { + p.subfetcherCopiesMeter.Mark(int64(fetcher.copies())) + if fetcher.root == p.root { p.accountLoadMeter.Mark(int64(len(fetcher.seen))) p.accountDupMeter.Mark(int64(fetcher.dups)) - p.accountSkipMeter.Mark(int64(len(fetcher.tasks))) + p.accountSkipMeter.Mark(int64(fetcher.skips())) for _, key := range fetcher.used { delete(fetcher.seen, string(key)) } p.accountWasteMeter.Mark(int64(len(fetcher.seen))) } else { - p.storageLoadMeter.Mark(int64(len(fetcher.seen))) + storageFetchers++ + oseen := int64(len(fetcher.seen)) + if oseen > largestLoad { + largestLoad = oseen + } + p.storageLoadMeter.Mark(oseen) p.storageDupMeter.Mark(int64(fetcher.dups)) - p.storageSkipMeter.Mark(int64(len(fetcher.tasks))) + p.storageSkipMeter.Mark(int64(fetcher.skips())) for _, key := range fetcher.used { delete(fetcher.seen, string(key)) @@ -115,6 +143,20 @@ func (p *triePrefetcher) close() { } } } + if metrics.Enabled { + p.storageFetchersMeter.Mark(storageFetchers) + p.storageLargestLoadMeter.Mark(largestLoad) + } + + // Stop all workers once fetchers are aborted (otherwise + // could stop while waiting) + // + // Record number of workers that were spawned during this run + workersUsed := int64(p.workers.Wait()) + if metrics.Enabled { + p.subfetcherWorkersMeter.Mark(workersUsed) + } + // Clear out all fetchers (will crash on a second call, deliberate) p.fetchers = nil } @@ -127,20 +169,23 @@ func (p *triePrefetcher) copy() *triePrefetcher { copy := &triePrefetcher{ db: p.db, root: p.root, - fetches: make(map[string]Trie), // Active prefetchers use the fetches map + fetches: make(map[string]Trie), // Active prefetchers use the fetchers map - deliveryCopyMissMeter: p.deliveryCopyMissMeter, - deliveryRequestMissMeter: p.deliveryRequestMissMeter, - deliveryWaitMissMeter: p.deliveryWaitMissMeter, + subfetcherWorkersMeter: p.subfetcherWorkersMeter, + subfetcherWaitTimer: p.subfetcherWaitTimer, + subfetcherCopiesMeter: p.subfetcherCopiesMeter, accountLoadMeter: 
p.accountLoadMeter, accountDupMeter: p.accountDupMeter, accountSkipMeter: p.accountSkipMeter, accountWasteMeter: p.accountWasteMeter, - storageLoadMeter: p.storageLoadMeter, - storageDupMeter: p.storageDupMeter, - storageSkipMeter: p.storageSkipMeter, - storageWasteMeter: p.storageWasteMeter, + + storageFetchersMeter: p.storageFetchersMeter, + storageLoadMeter: p.storageLoadMeter, + storageLargestLoadMeter: p.storageLargestLoadMeter, + storageDupMeter: p.storageDupMeter, + storageSkipMeter: p.storageSkipMeter, + storageWasteMeter: p.storageWasteMeter, } // If the prefetcher is already a copy, duplicate the data if p.fetches != nil { @@ -165,11 +210,12 @@ func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr comm if p.fetches != nil { return } + // Active fetcher, schedule the retrievals id := p.trieID(owner, root) fetcher := p.fetchers[id] if fetcher == nil { - fetcher = newSubfetcher(p.db, p.root, owner, root, addr) + fetcher = newSubfetcher(p, owner, root, addr) p.fetchers[id] = fetcher } fetcher.schedule(keys) @@ -183,24 +229,27 @@ func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { if p.fetches != nil { trie := p.fetches[id] if trie == nil { - p.deliveryCopyMissMeter.Mark(1) return nil } return p.db.CopyTrie(trie) } + // Otherwise the prefetcher is active, bail if no trie was prefetched for this root fetcher := p.fetchers[id] if fetcher == nil { - p.deliveryRequestMissMeter.Mark(1) return nil } - // Interrupt the prefetcher if it's by any chance still running and return - // a copy of any pre-loaded trie. - fetcher.abort() // safe to do multiple times + // Wait for the fetcher to finish and shutdown orchestrator, if it exists + start := time.Now() + fetcher.wait() + if metrics.Enabled { + p.subfetcherWaitTimer.Inc(time.Since(start).Milliseconds()) + } + + // Return a copy of one of the prefetched tries trie := fetcher.peek() if trie == nil { - p.deliveryWaitMissMeter.Mark(1) return nil } return trie @@ -224,20 +273,15 @@ func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string { // main prefetcher is paused and either all requested items are processed or if // the trie being worked on is retrieved from the prefetcher. type subfetcher struct { + p *triePrefetcher + db Database // Database to load trie nodes through state common.Hash // Root hash of the state to prefetch owner common.Hash // Owner of the trie, usually account hash root common.Hash // Root hash of the trie to prefetch addr common.Address // Address of the account that the trie belongs to - trie Trie // Trie being populated with nodes - - tasks [][]byte // Items queued up for retrieval - lock sync.Mutex // Lock protecting the task queue - wake chan struct{} // Wake channel if a new task is scheduled - stop chan struct{} // Channel to interrupt processing - term chan struct{} // Channel to signal interruption - copy chan chan Trie // Channel to request a copy of the current trie + to *trieOrchestrator // Orchestrate concurrent fetching of a single trie seen map[string]struct{} // Tracks the entries already loaded dups int // Number of duplicate preload tasks @@ -246,139 +290,346 @@ type subfetcher struct { // newSubfetcher creates a goroutine to prefetch state items belonging to a // particular root hash. 
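// utils.BoundedWorkers is exercised by the orchestrator below only through
// NewBoundedWorkers, Execute and Wait. What follows is a minimal sketch that
// is consistent with those call sites; the real subnet-evm implementation may
// differ in detail.
package utils

import (
	"sync"
	"sync/atomic"
)

// BoundedWorkers runs submitted functions on at most [max] goroutines,
// spawning workers lazily and reporting how many were ever started.
type BoundedWorkers struct {
	work    chan func()
	workers chan struct{} // tokens bounding the number of live workers
	wg      sync.WaitGroup
	spawned int32
}

func NewBoundedWorkers(max int) *BoundedWorkers {
	return &BoundedWorkers{
		work:    make(chan func()),
		workers: make(chan struct{}, max),
	}
}

// Execute hands f to an idle worker, or spawns a new worker while fewer than
// [max] are alive; otherwise it blocks until a worker frees up. Calling
// Execute after Wait panics, matching the caveat noted in processTasks.
func (b *BoundedWorkers) Execute(f func()) {
	select {
	case b.work <- f: // an idle worker picked the task up
	case b.workers <- struct{}{}: // room to spawn another worker
		atomic.AddInt32(&b.spawned, 1)
		b.wg.Add(1)
		go func() {
			defer b.wg.Done()
			f()
			for g := range b.work {
				g()
			}
		}()
	}
}

// Wait stops the pool, waits for in-flight work to drain, and returns the
// number of workers that were spawned.
func (b *BoundedWorkers) Wait() int {
	close(b.work)
	b.wg.Wait()
	return int(atomic.LoadInt32(&b.spawned))
}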
-func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash, addr common.Address) *subfetcher { +func newSubfetcher(p *triePrefetcher, owner common.Hash, root common.Hash, addr common.Address) *subfetcher { sf := &subfetcher{ - db: db, - state: state, + p: p, + db: p.db, + state: p.root, owner: owner, root: root, addr: addr, - wake: make(chan struct{}, 1), - stop: make(chan struct{}), - term: make(chan struct{}), - copy: make(chan chan Trie), seen: make(map[string]struct{}), } - go sf.loop() + sf.to = newTrieOrchestrator(sf) + if sf.to != nil { + go sf.to.processTasks() + } + // We return [sf] here to ensure we don't try to re-create if + // we aren't able to setup a [newTrieOrchestrator] the first time. return sf } // schedule adds a batch of trie keys to the queue to prefetch. +// This should never block, so an array is used instead of a channel. +// +// This is not thread-safe. func (sf *subfetcher) schedule(keys [][]byte) { // Append the tasks to the current queue - sf.lock.Lock() - sf.tasks = append(sf.tasks, keys...) - sf.lock.Unlock() + tasks := make([][]byte, 0, len(keys)) + for _, key := range keys { + // Check if keys already seen + sk := string(key) + if _, ok := sf.seen[sk]; ok { + sf.dups++ + continue + } + sf.seen[sk] = struct{}{} + tasks = append(tasks, key) + } - // Notify the prefetcher, it's fine if it's already terminated - select { - case sf.wake <- struct{}{}: - default: + // After counting keys, exit if they can't be prefetched + if sf.to == nil { + return } + + // Add tasks to queue for prefetching + sf.to.enqueueTasks(tasks) } // peek tries to retrieve a deep copy of the fetcher's trie in whatever form it // is currently. func (sf *subfetcher) peek() Trie { - ch := make(chan Trie) - select { - case sf.copy <- ch: - // Subfetcher still alive, return copy from it - return <-ch + if sf.to == nil { + return nil + } + return sf.to.copyBase() +} - case <-sf.term: - // Subfetcher already terminated, return a copy directly - if sf.trie == nil { - return nil - } - return sf.db.CopyTrie(sf.trie) +// wait must only be called if [triePrefetcher] has not been closed. If this happens, +// workers will not finish. +func (sf *subfetcher) wait() { + if sf.to == nil { + // Unable to open trie + return } + sf.to.wait() } -// abort interrupts the subfetcher immediately. It is safe to call abort multiple -// times but it is not thread safe. func (sf *subfetcher) abort() { - select { - case <-sf.stop: - default: - close(sf.stop) + if sf.to == nil { + // Unable to open trie + return + } + sf.to.abort() +} + +func (sf *subfetcher) skips() int { + if sf.to == nil { + // Unable to open trie + return 0 + } + return sf.to.skipCount() +} + +func (sf *subfetcher) copies() int { + if sf.to == nil { + // Unable to open trie + return 0 } - <-sf.term + return sf.to.copies } -// loop waits for new tasks to be scheduled and keeps loading them until it runs -// out of tasks or its underlying trie is retrieved for committing. -func (sf *subfetcher) loop() { - // No matter how the loop stops, signal anyone waiting that it's terminated - defer close(sf.term) +// trieOrchestrator is not thread-safe. +type trieOrchestrator struct { + sf *subfetcher + + // base is an unmodified Trie we keep for + // creating copies for each worker goroutine. + // + // We care more about quick copies than good copies + // because most (if not all) of the nodes that will be populated + // in the copy will come from the underlying triedb cache. 
Ones + // that don't come from this cache probably had to be fetched + // from disk anyways. + base Trie + baseLock sync.Mutex + + tasksAllowed bool + skips int // number of tasks skipped + pendingTasks [][]byte + taskLock sync.Mutex + + processingTasks sync.WaitGroup + + wake chan struct{} + stop chan struct{} + stopOnce sync.Once + loopTerm chan struct{} + + copies int + copyChan chan Trie + copySpawner chan struct{} +} +func newTrieOrchestrator(sf *subfetcher) *trieOrchestrator { // Start by opening the trie and stop processing if it fails + var ( + base Trie + err error + ) if sf.owner == (common.Hash{}) { - trie, err := sf.db.OpenTrie(sf.root) + base, err = sf.db.OpenTrie(sf.root) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) - return + return nil } - sf.trie = trie } else { - trie, err := sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root) + base, err = sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) - return + return nil } - sf.trie = trie } - // Trie opened successfully, keep prefetching items + // Instantiate trieOrchestrator + to := &trieOrchestrator{ + sf: sf, + base: base, + + tasksAllowed: true, + wake: make(chan struct{}, 1), + stop: make(chan struct{}), + loopTerm: make(chan struct{}), + + copyChan: make(chan Trie, sf.p.maxConcurrency), + copySpawner: make(chan struct{}, sf.p.maxConcurrency), + } + + // Create initial trie copy + to.copies++ + to.copySpawner <- struct{}{} + to.copyChan <- to.copyBase() + return to +} + +func (to *trieOrchestrator) copyBase() Trie { + to.baseLock.Lock() + defer to.baseLock.Unlock() + + return to.sf.db.CopyTrie(to.base) +} + +func (to *trieOrchestrator) skipCount() int { + to.taskLock.Lock() + defer to.taskLock.Unlock() + + return to.skips +} + +func (to *trieOrchestrator) enqueueTasks(tasks [][]byte) { + to.taskLock.Lock() + defer to.taskLock.Unlock() + + if len(tasks) == 0 { + return + } + + // Add tasks to [pendingTasks] + if !to.tasksAllowed { + to.skips += len(tasks) + return + } + to.processingTasks.Add(len(tasks)) + to.pendingTasks = append(to.pendingTasks, tasks...) 
+ + // Wake up processor + select { + case to.wake <- struct{}{}: + default: + } +} + +func (to *trieOrchestrator) handleStop(remaining int) { + to.taskLock.Lock() + to.skips += remaining + to.taskLock.Unlock() + to.processingTasks.Add(-remaining) +} + +func (to *trieOrchestrator) processTasks() { + defer close(to.loopTerm) + for { + // Determine if we should process or exit select { - case <-sf.wake: - // Subfetcher was woken up, retrieve any tasks to avoid spinning the lock - sf.lock.Lock() - tasks := sf.tasks - sf.tasks = nil - sf.lock.Unlock() - - // Prefetch any tasks until the loop is interrupted - for i, task := range tasks { + case <-to.wake: + case <-to.stop: + return + } + + // Get current tasks + to.taskLock.Lock() + tasks := to.pendingTasks + to.pendingTasks = nil + to.taskLock.Unlock() + + // Enqueue more work as soon as trie copies are available + lt := len(tasks) + for i := 0; i < lt; i++ { + // Try to stop as soon as possible, if channel is closed + remaining := lt - i + select { + case <-to.stop: + to.handleStop(remaining) + return + default: + } + + // Try to get an active copy first (select is non-deterministic, + // so we may end up creating a new copy when we don't need to) + var t Trie + select { + case t = <-to.copyChan: + default: + // Wait for an available copy or create one, if we weren't + // able to get a previously created copy select { - case <-sf.stop: - // If termination is requested, add any leftover back and return - sf.lock.Lock() - sf.tasks = append(sf.tasks, tasks[i:]...) - sf.lock.Unlock() + case <-to.stop: + to.handleStop(remaining) return - - case ch := <-sf.copy: - // Somebody wants a copy of the current trie, grant them - ch <- sf.db.CopyTrie(sf.trie) - - default: - // No termination request yet, prefetch the next entry - if _, ok := sf.seen[string(task)]; ok { - sf.dups++ - } else { - var err error - if len(task) == common.AddressLength { - _, err = sf.trie.GetAccount(common.BytesToAddress(task)) - } else { - _, err = sf.trie.GetStorage(sf.addr, task) - } - if err != nil { - log.Error("Trie prefetcher failed fetching", "root", sf.root, "err", err) - } - sf.seen[string(task)] = struct{}{} - } + case t = <-to.copyChan: + case to.copySpawner <- struct{}{}: + to.copies++ + t = to.copyBase() } } - case ch := <-sf.copy: - // Somebody wants a copy of the current trie, grant them - ch <- sf.db.CopyTrie(sf.trie) + // Enqueue work, unless stopped. + fTask := tasks[i] + f := func() { + // Perform task + var err error + if len(fTask) == common.AddressLength { + _, err = t.GetAccount(common.BytesToAddress(fTask)) + } else { + _, err = t.GetStorage(to.sf.addr, fTask) + } + if err != nil { + log.Error("Trie prefetcher failed fetching", "root", to.sf.root, "err", err) + } + to.processingTasks.Done() + + // Return copy when we are done with it, so someone else can use it + // + // channel is buffered and will not block + to.copyChan <- t } - case <-sf.stop: - // Termination is requested, abort and leave remaining tasks - return + // Enqueue task for processing (may spawn new goroutine + // if not at [maxConcurrency]) + // + // If workers are stopped before calling [Execute], this function may + // panic. + to.sf.p.workers.Execute(f) } } } + +func (to *trieOrchestrator) stopAcceptingTasks() { + to.taskLock.Lock() + defer to.taskLock.Unlock() + + if !to.tasksAllowed { + return + } + to.tasksAllowed = false + + // We don't clear [to.pendingTasks] here because + // it will be faster to prefetch them even though we + // are still waiting. 
+} + +// wait stops accepting new tasks and waits for ongoing tasks to complete. If +// wait is called, it is not necessary to call [abort]. +// +// It is safe to call wait multiple times. +func (to *trieOrchestrator) wait() { + // Prevent more tasks from being enqueued + to.stopAcceptingTasks() + + // Wait for processing tasks to complete + to.processingTasks.Wait() + + // Stop orchestrator loop + to.stopOnce.Do(func() { + close(to.stop) + }) + <-to.loopTerm +} + +// abort stops any ongoing tasks and shuts down the orchestrator loop. If abort +// is called, it is not necessary to call [wait]. +// +// It is safe to call abort multiple times. +func (to *trieOrchestrator) abort() { + // Prevent more tasks from being enqueued + to.stopAcceptingTasks() + + // Stop orchestrator loop + to.stopOnce.Do(func() { + close(to.stop) + }) + <-to.loopTerm + + // Capture any dangling pending tasks (processTasks + // may exit before enqueueing all pendingTasks) + to.taskLock.Lock() + pendingCount := len(to.pendingTasks) + to.skips += pendingCount + to.pendingTasks = nil + to.taskLock.Unlock() + to.processingTasks.Add(-pendingCount) + + // Wait for processing tasks to complete + to.processingTasks.Wait() +} diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go index 3c7d941839..588d251561 100644 --- a/core/state/trie_prefetcher_test.go +++ b/core/state/trie_prefetcher_test.go @@ -36,6 +36,8 @@ import ( "github.com/ethereum/go-ethereum/common" ) +const maxConcurrency = 4 + func filledStateDB() *StateDB { state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) @@ -56,7 +58,7 @@ func filledStateDB() *StateDB { func TestCopyAndClose(t *testing.T) { db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") + prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency) skey := common.HexToHash("aaa") prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) @@ -81,7 +83,7 @@ func TestCopyAndClose(t *testing.T) { func TestUseAfterClose(t *testing.T) { db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") + prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency) skey := common.HexToHash("aaa") prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) a := prefetcher.trie(common.Hash{}, db.originalRoot) @@ -97,7 +99,7 @@ func TestUseAfterClose(t *testing.T) { func TestCopyClose(t *testing.T) { db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") + prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency) skey := common.HexToHash("aaa") prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) cpy := prefetcher.copy() diff --git a/eth/backend.go b/eth/backend.go index 85b9aca042..fe218029d6 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -189,6 +189,7 @@ func New( TrieCleanRejournal: config.TrieCleanRejournal, TrieDirtyLimit: config.TrieDirtyCache, TrieDirtyCommitTarget: config.TrieDirtyCommitTarget, + TriePrefetcherParallelism: config.TriePrefetcherParallelism, Pruning: config.Pruning, AcceptorQueueLimit: config.AcceptorQueueLimit, CommitInterval: config.CommitInterval, @@ -203,6 +204,7 @@ func New( Preimages: config.Preimages, AcceptedCacheSize: config.AcceptedCacheSize, TxLookupLimit: config.TxLookupLimit, + 
SkipTxIndexing: config.SkipTxIndexing, } ) diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 10f2af1019..e5c9716788 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -53,18 +53,19 @@ var DefaultConfig = NewDefaultConfig() func NewDefaultConfig() Config { return Config{ - NetworkId: 1, - TrieCleanCache: 512, - TrieDirtyCache: 256, - TrieDirtyCommitTarget: 20, - SnapshotCache: 256, - AcceptedCacheSize: 32, - Miner: miner.Config{}, - TxPool: txpool.DefaultConfig, - RPCGasCap: 25000000, - RPCEVMTimeout: 5 * time.Second, - GPO: DefaultFullGPOConfig, - RPCTxFeeCap: 1, + NetworkId: 1, + TrieCleanCache: 512, + TrieDirtyCache: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 16, + SnapshotCache: 256, + AcceptedCacheSize: 32, + Miner: miner.Config{}, + TxPool: txpool.DefaultConfig, + RPCGasCap: 25000000, + RPCEVMTimeout: 5 * time.Second, + GPO: DefaultFullGPOConfig, + RPCTxFeeCap: 1, } } @@ -94,13 +95,14 @@ type Config struct { SkipBcVersionCheck bool `toml:"-"` // TrieDB and snapshot options - TrieCleanCache int - TrieCleanJournal string - TrieCleanRejournal time.Duration - TrieDirtyCache int - TrieDirtyCommitTarget int - SnapshotCache int - Preimages bool + TrieCleanCache int + TrieCleanJournal string + TrieCleanRejournal time.Duration + TrieDirtyCache int + TrieDirtyCommitTarget int + TriePrefetcherParallelism int + SnapshotCache int + Preimages bool // AcceptedCacheSize is the depth of accepted headers cache and accepted // logs cache at the accepted tip. @@ -156,4 +158,9 @@ type Config struct { // * 0: means no limit // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes TxLookupLimit uint64 + + // SkipTxIndexing skips indexing transactions. + // This is useful for validators that don't need to index transactions. + // TxLookupLimit can still be used to control unindexing old transactions. 
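// A hedged usage sketch (values illustrative, not from this patch): an
// RPC-light validator can stop writing new lookup entries while a nonzero
// TxLookupLimit still lets the unindexer prune entries that were written
// before the flag was enabled.
cfg := ethconfig.NewDefaultConfig()
cfg.SkipTxIndexing = true
cfg.TxLookupLimit = 1024 // hypothetical retention window for old entries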
+ SkipTxIndexing bool } diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 593cdfabd4..1224db22d7 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -87,10 +87,11 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i // Import the canonical chain cacheConfig := &core.CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - SnapshotLimit: 128, - Pruning: false, // Archive mode + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TriePrefetcherParallelism: 4, + SnapshotLimit: 128, + Pruning: false, // Archive mode } chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, backend.engine, vm.Config{}, common.Hash{}, false) if err != nil { diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go index 719f6f2044..200098e005 100644 --- a/eth/tracers/internal/tracetest/calltrace_test.go +++ b/eth/tracers/internal/tracetest/calltrace_test.go @@ -45,9 +45,6 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/rlp" - - // Force-load native, to trigger registration - _ "github.com/ava-labs/subnet-evm/eth/tracers/native" ) type callContext struct { @@ -297,6 +294,7 @@ func TestInternals(t *testing.T) { } return tr } + for _, tc := range []struct { name string code []byte @@ -314,13 +312,13 @@ func TestInternals(t *testing.T) { byte(vm.CALL), }, tracer: mkTracer("callTracer", nil), - want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0x6cbf","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`, + want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0xe01a","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`, }, { name: "Stack depletion in LOG0", code: []byte{byte(vm.LOG3)}, tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)), - want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0xc350","to":"0x00000000000000000000000000000000deadbeef","input":"0x","error":"stack underflow (0 \u003c=\u003e 5)","value":"0x0","type":"CALL"}`, + want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x13880","to":"0x00000000000000000000000000000000deadbeef","input":"0x","error":"stack underflow (0 \u003c=\u003e 5)","value":"0x0","type":"CALL"}`, }, { name: "Mem expansion in LOG0", @@ -333,11 +331,11 @@ func TestInternals(t *testing.T) { byte(vm.LOG0), }, tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)), - want: 
`{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}],"value":"0x0","type":"CALL"}`, + want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}],"value":"0x0","type":"CALL"}`, }, { // Leads to OOM on the prestate tracer - name: "Prestate-tracer - mem expansion in CREATE2", + name: "Prestate-tracer - CREATE2 OOM", code: []byte{ byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x0, @@ -351,41 +349,63 @@ func TestInternals(t *testing.T) { byte(vm.PUSH1), 0x0, byte(vm.LOG0), }, - tracer: mkTracer("prestateTracer", json.RawMessage(`{ "withLog": true }`)), - want: `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52640350"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600164ffffffffff60016000f560ff6000a0"}}`, + tracer: mkTracer("prestateTracer", nil), + want: `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52647880"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600164ffffffffff60016000f560ff6000a0"}}`, + }, + { + // CREATE2 which requires padding memory by prestate tracer + name: "Prestate-tracer - CREATE2 Memory padding", + code: []byte{ + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0x0, + byte(vm.MSTORE), + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0xff, + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0x0, + byte(vm.CREATE2), + byte(vm.PUSH1), 0xff, + byte(vm.PUSH1), 0x0, + byte(vm.LOG0), + }, + tracer: mkTracer("prestateTracer", nil), + want: `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52647880"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600160ff60016000f560ff6000a0"},"0x91ff9a805d36f54e3e272e230f3e3f5c1b330804":{"balance":"0x0"}}`, }, } { - _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), - core.GenesisAlloc{ - to: core.GenesisAccount{ - Code: tc.code, - }, - origin: core.GenesisAccount{ - Balance: big.NewInt(500000000000000), - }, - }, false) - evm := vm.NewEVM(context, txContext, 
statedb, params.TestPreSubnetEVMConfig, vm.Config{Tracer: tc.tracer}) - msg := &core.Message{ - To: &to, - From: origin, - Value: big.NewInt(0), - GasLimit: 50000, - GasPrice: big.NewInt(0), - GasFeeCap: big.NewInt(0), - GasTipCap: big.NewInt(0), - SkipAccountChecks: false, - } - st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(msg.GasLimit)) - if _, err := st.TransitionDb(); err != nil { - t.Fatalf("test %v: failed to execute transaction: %v", tc.name, err) - } - // Retrieve the trace result and compare against the expected - res, err := tc.tracer.GetResult() - if err != nil { - t.Fatalf("test %v: failed to retrieve trace result: %v", tc.name, err) - } - if string(res) != tc.want { - t.Fatalf("test %v: trace mismatch\n have: %v\n want: %v\n", tc.name, string(res), tc.want) - } + t.Run(tc.name, func(t *testing.T) { + _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), + core.GenesisAlloc{ + to: core.GenesisAccount{ + Code: tc.code, + }, + origin: core.GenesisAccount{ + Balance: big.NewInt(500000000000000), + }, + }, false) + + evm := vm.NewEVM(context, txContext, statedb, params.TestPreSubnetEVMConfig, vm.Config{Tracer: tc.tracer}) + msg := &core.Message{ + To: &to, + From: origin, + Value: big.NewInt(0), + GasLimit: 80000, + GasPrice: big.NewInt(0), + GasFeeCap: big.NewInt(0), + GasTipCap: big.NewInt(0), + SkipAccountChecks: false, + } + st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(msg.GasLimit)) + if _, err := st.TransitionDb(); err != nil { + t.Fatalf("test %v: failed to execute transaction: %v", tc.name, err) + } + // Retrieve the trace result and compare against the expected + res, err := tc.tracer.GetResult() + if err != nil { + t.Fatalf("test %v: failed to retrieve trace result: %v", tc.name, err) + } + if string(res) != tc.want { + t.Fatalf("test %v: trace mismatch\n have: %v\n want: %v\n", tc.name, string(res), tc.want) + } + }) } } diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index aaeaef5e86..2271c12627 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -38,6 +38,7 @@ import ( "github.com/ava-labs/subnet-evm/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" ) //go:generate go run github.com/fjl/gencodec -type callFrame -field-override callFrameMarshaling -out gen_callframe_json.go @@ -194,6 +195,7 @@ func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco data, err := tracers.GetMemoryCopyPadded(scope.Memory, int64(mStart.Uint64()), int64(mSize.Uint64())) if err != nil { // mSize was unrealistically large + log.Warn("failed to copy CREATE2 input", "err", err, "tracer", "callTracer", "offset", mStart, "size", mSize) return } diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go index 747fc555b0..b623797a60 100644 --- a/eth/tracers/native/prestate.go +++ b/eth/tracers/native/prestate.go @@ -37,6 +37,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" ) //go:generate go run github.com/fjl/gencodec -type account -field-override accountMarshaling -out gen_account_json.go @@ -175,7 +176,11 @@ func (t *prestateTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, case stackLen >= 4 && op == vm.CREATE2: offset := stackData[stackLen-2] size := stackData[stackLen-3] - init := scope.Memory.GetCopy(int64(offset.Uint64()), 
int64(size.Uint64())) + init, err := tracers.GetMemoryCopyPadded(scope.Memory, int64(offset.Uint64()), int64(size.Uint64())) + if err != nil { + log.Warn("failed to copy CREATE2 input", "err", err, "tracer", "prestateTracer", "offset", offset, "size", size) + return + } inithash := crypto.Keccak256(init) salt := stackData[stackLen-4] addr := crypto.CreateAddress2(caller, salt.Bytes32(), inithash) diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 4aa377f72c..e7711827cd 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -73,6 +73,7 @@ type Client interface { BlockByHash(context.Context, common.Hash) (*types.Block, error) BlockByNumber(context.Context, *big.Int) (*types.Block, error) BlockNumber(context.Context) (uint64, error) + BlockReceipts(context.Context, rpc.BlockNumberOrHash) ([]*types.Receipt, error) HeaderByHash(context.Context, common.Hash) (*types.Header, error) HeaderByNumber(context.Context, *big.Int) (*types.Header, error) TransactionByHash(context.Context, common.Hash) (tx *types.Transaction, isPending bool, err error) @@ -175,6 +176,16 @@ func (ec *client) BlockNumber(ctx context.Context) (uint64, error) { return uint64(result), err } +// BlockReceipts returns the receipts of a given block number or hash. +func (ec *client) BlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) { + var r []*types.Receipt + err := ec.c.CallContext(ctx, &r, "eth_getBlockReceipts", blockNrOrHash.String()) + if err == nil && r == nil { + return nil, interfaces.NotFound + } + return r, err +} + type rpcBlock struct { Hash common.Hash `json:"hash"` Transactions []rpcTransaction `json:"transactions"` diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index bae448634d..c0f3d7a69d 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -899,6 +899,34 @@ func (s *BlockChainAPI) GetStorageAt(ctx context.Context, address common.Address return res[:], state.Error() } +// GetBlockReceipts returns the block receipts for the given block hash or number or tag. +func (s *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]map[string]interface{}, error) { + block, err := s.b.BlockByNumberOrHash(ctx, blockNrOrHash) + if block == nil || err != nil { + // When the block doesn't exist, the RPC method should return JSON null + // as per specification. + return nil, nil + } + receipts, err := s.b.GetReceipts(ctx, block.Hash()) + if err != nil { + return nil, err + } + txs := block.Transactions() + if len(txs) != len(receipts) { + return nil, fmt.Errorf("receipts length mismatch: %d vs %d", len(txs), len(receipts)) + } + + // Derive the sender. + signer := types.MakeSigner(s.b.ChainConfig(), block.Number(), block.Time()) + + result := make([]map[string]interface{}, len(receipts)) + for i, receipt := range receipts { + result[i] = marshalReceipt(receipt, block.Hash(), block.NumberU64(), signer, txs[i], i) + } + + return result, nil +} + // OverrideAccount indicates the overriding fields of account during the execution // of a message call. // Note, state and stateDiff can't be specified at the same time. If state is @@ -1790,13 +1818,18 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common. // Derive the sender. signer := types.MakeSigner(s.b.ChainConfig(), header.Number, header.Time) + return marshalReceipt(receipt, blockHash, blockNumber, signer, tx, int(index)), nil +} + +// marshalReceipt marshals a transaction receipt into a JSON object. 
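// A hedged usage sketch of the BlockReceipts client method added above;
// import paths are assumptions, and the printed fields come from the standard
// types.Receipt.
import (
	"context"
	"fmt"

	"github.com/ava-labs/subnet-evm/ethclient"
	"github.com/ava-labs/subnet-evm/rpc"
)

func printBlockReceipts(ctx context.Context, c ethclient.Client, n rpc.BlockNumber) error {
	receipts, err := c.BlockReceipts(ctx, rpc.BlockNumberOrHashWithNumber(n))
	if err != nil {
		return err // interfaces.NotFound when the block is unknown
	}
	for _, r := range receipts {
		fmt.Printf("tx %s status=%d gasUsed=%d\n", r.TxHash.Hex(), r.Status, r.GasUsed)
	}
	return nil
}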
+func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber uint64, signer types.Signer, tx *types.Transaction, txIndex int) map[string]interface{} { from, _ := types.Sender(signer, tx) fields := map[string]interface{}{ "blockHash": blockHash, "blockNumber": hexutil.Uint64(blockNumber), - "transactionHash": hash, - "transactionIndex": hexutil.Uint64(index), + "transactionHash": tx.Hash(), + "transactionIndex": hexutil.Uint64(txIndex), "from": from, "to": tx.To(), "gasUsed": hexutil.Uint64(receipt.GasUsed), @@ -1821,7 +1854,7 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common. if receipt.ContractAddress != (common.Address{}) { fields["contractAddress"] = receipt.ContractAddress } - return fields, nil + return fields } // sign is a helper function that signs a transaction with the private key of the given address. diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 5f170b86ed..1dddcd8e95 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -32,6 +32,7 @@ import ( "crypto/ecdsa" "encoding/json" "errors" + "fmt" "hash" "math/big" "reflect" @@ -57,6 +58,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/event" + "github.com/stretchr/testify/require" "golang.org/x/crypto/sha3" ) @@ -244,7 +246,7 @@ func (b testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) return b.chain.GetHeaderByNumber(uint64(number)), nil } func (b testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { - panic("implement me") + return b.chain.GetHeaderByHash(hash), nil } func (b testBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { panic("implement me") @@ -259,13 +261,16 @@ func (b testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) return b.chain.GetBlockByNumber(uint64(number)), nil } func (b testBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - panic("implement me") + return b.chain.GetBlockByHash(hash), nil } func (b testBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) { if blockNr, ok := blockNrOrHash.Number(); ok { return b.BlockByNumber(ctx, blockNr) } - panic("implement me") + if blockHash, ok := blockNrOrHash.Hash(); ok { + return b.BlockByHash(ctx, blockHash) + } + panic("unknown type rpc.BlockNumberOrHash") } func (b testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { return b.chain.GetBlock(hash, uint64(number.Int64())).Body(), nil @@ -292,7 +297,12 @@ func (b testBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOr } func (b testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { panic("implement me") } func (b testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { - panic("implement me") + header, err := b.HeaderByHash(ctx, hash) + if header == nil || err != nil { + return nil, err + } + receipts := rawdb.ReadReceipts(b.db, hash, header.Number.Uint64(), header.Time, b.chain.Config()) + return receipts, nil } func (b testBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { panic("implement me") } func (b testBackend) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockContext *vm.BlockContext) 
(*vm.EVM, func() error) { @@ -746,3 +756,174 @@ func TestRPCMarshalBlock(t *testing.T) { } } } + +func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Hash) { + // Initialize test accounts + var ( + acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey) + acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey) + contract = common.HexToAddress("0000000000000000000000000000000000031ec7") + genesis = &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{ + acc1Addr: {Balance: big.NewInt(params.Ether)}, + acc2Addr: {Balance: big.NewInt(params.Ether)}, + // // SPDX-License-Identifier: GPL-3.0 + // pragma solidity >=0.7.0 <0.9.0; + // + // contract Token { + // event Transfer(address indexed from, address indexed to, uint256 value); + // function transfer(address to, uint256 value) public returns (bool) { + // emit Transfer(msg.sender, to, value); + // return true; + // } + // } + contract: {Balance: big.NewInt(params.Ether), Code: common.FromHex("0x608060405234801561001057600080fd5b506004361061002b5760003560e01c8063a9059cbb14610030575b600080fd5b61004a6004803603810190610045919061016a565b610060565b60405161005791906101c5565b60405180910390f35b60008273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040516100bf91906101ef565b60405180910390a36001905092915050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000610101826100d6565b9050919050565b610111816100f6565b811461011c57600080fd5b50565b60008135905061012e81610108565b92915050565b6000819050919050565b61014781610134565b811461015257600080fd5b50565b6000813590506101648161013e565b92915050565b60008060408385031215610181576101806100d1565b5b600061018f8582860161011f565b92505060206101a085828601610155565b9150509250929050565b60008115159050919050565b6101bf816101aa565b82525050565b60006020820190506101da60008301846101b6565b92915050565b6101e981610134565b82525050565b600060208201905061020460008301846101e0565b9291505056fea2646970667358221220b469033f4b77b9565ee84e0a2f04d496b18160d26034d54f9487e57788fd36d564736f6c63430008120033")}, + }, + } + signer = types.LatestSignerForChainID(params.TestChainConfig.ChainID) + txHashes = make([]common.Hash, genBlocks) + ) + backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + var ( + tx *types.Transaction + err error + ) + switch i { + case 0: + // transfer 1000wei + tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), types.HomesteadSigner{}, acc1Key) + case 1: + // create contract + tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: nil, Gas: 53100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040")}), signer, acc1Key) + case 2: + // with logs + // transfer(address to, uint256 value) + data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:]) + tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &contract, Gas: 60000, GasPrice: b.BaseFee(), Data: common.FromHex(data)}), signer, acc1Key) + case 3: + // dynamic fee with logs + // transfer(address to, uint256 
value) + data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:]) + fee := big.NewInt(500) + fee.Add(fee, b.BaseFee()) + tx, err = types.SignTx(types.NewTx(&types.DynamicFeeTx{Nonce: uint64(i), To: &contract, Gas: 60000, Value: big.NewInt(1), GasTipCap: big.NewInt(500), GasFeeCap: fee, Data: common.FromHex(data)}), signer, acc1Key) + case 4: + // access list with contract create + accessList := types.AccessList{{ + Address: contract, + StorageKeys: []common.Hash{{0}}, + }} + tx, err = types.SignTx(types.NewTx(&types.AccessListTx{Nonce: uint64(i), To: nil, Gas: 58100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040"), AccessList: accessList}), signer, acc1Key) + } + if err != nil { + t.Errorf("failed to sign tx: %v", err) + } + if tx != nil { + b.AddTx(tx) + txHashes[i] = tx.Hash() + } + }) + return backend, txHashes +} + +func TestRPCGetBlockReceipts(t *testing.T) { + t.Parallel() + + var ( + genBlocks = 5 + backend, _ = setupReceiptBackend(t, genBlocks) + api = NewBlockChainAPI(backend) + ) + blockHashes := make([]common.Hash, genBlocks+1) + ctx := context.Background() + for i := 0; i <= genBlocks; i++ { + header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i)) + if err != nil { + t.Errorf("failed to get block: %d err: %v", i, err) + } + blockHashes[i] = header.Hash() + } + + var testSuite = []struct { + test rpc.BlockNumberOrHash + want string + }{ + // 0. block without any txs(hash) + { + test: rpc.BlockNumberOrHashWithHash(blockHashes[0], false), + want: `[]`, + }, + // 1. block without any txs(number) + { + test: rpc.BlockNumberOrHashWithNumber(0), + want: `[]`, + }, + // 2. earliest tag + { + test: rpc.BlockNumberOrHashWithNumber(rpc.EarliestBlockNumber), + want: `[]`, + }, + // 3. latest tag + { + test: rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), + want: `[{"blockHash":"0x581862f55d194bd8945820d0d5f28c3a53a72fffa17c0e0376e43478c2e2a75e","blockNumber":"0x5","contractAddress":"0xfdaa97661a584d977b4d3abb5370766ff5b86a18","cumulativeGasUsed":"0xe01c","effectiveGasPrice":"0x5d21dba00","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0xe01c","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x1","to":null,"transactionHash":"0x8afe030574f663fe5096371d6f58a6287bfb3e0c73a5050220f5775a08e7abc9","transactionIndex":"0x0","type":"0x1"}]`, + }, + // 4. 
block with legacy transfer tx(hash) + { + test: rpc.BlockNumberOrHashWithHash(blockHashes[1], false), + want: `[{"blockHash":"0xcc27e155b6eadfa892992a8cd8adaf3c929a6ec6d98c4dfbc60258883c73568e","blockNumber":"0x1","contractAddress":null,"cumulativeGasUsed":"0x5208","effectiveGasPrice":"0x5d21dba00","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0x5208","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x1","to":"0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e","transactionHash":"0xdf92bc7c4c0341ecbdcd2a3ca7011fe9e21df4b8553bf0c8caabe6cb4a1aee26","transactionIndex":"0x0","type":"0x0"}]`, + }, + // 5. block with contract create tx(number) + { + test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(2)), + want: `[{"blockHash":"0x5dcac7b6d680226ef9429ad15f726aa051d422365adaae65247cec38b42788cd","blockNumber":"0x2","contractAddress":"0xae9bea628c4ce503dcfd7e305cab4e29e7476592","cumulativeGasUsed":"0xcf50","effectiveGasPrice":"0x5d21dba00","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0xcf50","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x1","to":null,"transactionHash":"0x22aa617165f83a9f8c191c2b7724ae43eeb1249bee06c98c03c7624c21d27dc8","transactionIndex":"0x0","type":"0x0"}]`, + }, + // 6. 
block with legacy contract call tx(hash) + { + test: rpc.BlockNumberOrHashWithHash(blockHashes[3], false), + want: `[{"blockHash":"0x2517532d72847621eb37804ae02257e8b3b9bf9668d3e6f7d20dbd24d7dfe863","blockNumber":"0x3","contractAddress":null,"cumulativeGasUsed":"0x5e28","effectiveGasPrice":"0x5d21dba00","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0x5e28","logs":[{"address":"0x0000000000000000000000000000000000031ec7","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x000000000000000000000000703c4b2bd70c169f5717101caee543299fc946c7","0x0000000000000000000000000000000000000000000000000000000000000003"],"data":"0x000000000000000000000000000000000000000000000000000000000000000d","blockNumber":"0x3","transactionHash":"0x7366a7738f47e32f5b6d292ca064b6b66f295d3931533a3745975be1191fccdf","transactionIndex":"0x0","blockHash":"0x2517532d72847621eb37804ae02257e8b3b9bf9668d3e6f7d20dbd24d7dfe863","logIndex":"0x0","removed":false}],"logsBloom":"0x00000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000800000000000000008000000000000000000000000000000000020000000080000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000400000000002000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000","status":"0x1","to":"0x0000000000000000000000000000000000031ec7","transactionHash":"0x7366a7738f47e32f5b6d292ca064b6b66f295d3931533a3745975be1191fccdf","transactionIndex":"0x0","type":"0x0"}]`, + }, + // 7. block with dynamic fee tx(number) + { + test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(4)), + want: `[{"blockHash":"0xf3ec30848127f600b396b9cf7ead62b3baacbdf0fbf32a419cc4837b362d4e33","blockNumber":"0x4","contractAddress":null,"cumulativeGasUsed":"0x538d","effectiveGasPrice":"0x5d21dbbf4","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0x538d","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x0","to":"0x0000000000000000000000000000000000031ec7","transactionHash":"0x4e1e9194ca6f9d4e1736e9e441f66104f273548ed6d91b236a5f9c2ea10fa06d","transactionIndex":"0x0","type":"0x2"}]`, + }, + // 8. block is empty + { + test: rpc.BlockNumberOrHashWithHash(common.Hash{}, false), + want: `null`, + }, + // 9. block is not found + { + test: rpc.BlockNumberOrHashWithHash(common.HexToHash("deadbeef"), false), + want: `null`, + }, + // 10. 
block is not found + { + test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(genBlocks + 1)), + want: `null`, + }, + } + + for i, tt := range testSuite { + var ( + result interface{} + err error + ) + result, err = api.GetBlockReceipts(context.Background(), tt.test) + if err != nil { + t.Errorf("test %d: want no error, have %v", i, err) + continue + } + data, err := json.Marshal(result) + if err != nil { + t.Errorf("test %d: json marshal error", i) + continue + } + want, have := tt.want, string(data) + require.JSONEqf(t, want, have, "test %d: json not match, want: %s, have: %s", i, want, have) + } +} diff --git a/params/avalanche_params.go b/params/avalanche_params.go index fbda5b01c0..0e7d65d197 100644 --- a/params/avalanche_params.go +++ b/params/avalanche_params.go @@ -4,6 +4,9 @@ package params const ( + DynamicFeeExtraDataSize = 80 + RollupWindow uint64 = 10 + WarpDefaultQuorumNumerator uint64 = 67 WarpQuorumNumeratorMinimum uint64 = 33 WarpQuorumDenominator uint64 = 100 diff --git a/params/config.go b/params/config.go index 991a4570dc..cd9821d95c 100644 --- a/params/config.go +++ b/params/config.go @@ -52,9 +52,6 @@ var ( TestInitialBaseFee int64 = 225_000_000_000 TestMaxBaseFee int64 = 225_000_000_000 - DynamicFeeExtraDataSize = 80 - RollupWindow uint64 = 10 - DefaultFeeConfig = commontype.FeeConfig{ GasLimit: big.NewInt(8_000_000), TargetBlockRate: 2, // in seconds diff --git a/peer/network_test.go b/peer/network_test.go index e8d61fff1f..71d06befc4 100644 --- a/peer/network_test.go +++ b/peer/network_test.go @@ -20,6 +20,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/subnet-evm/plugin/evm/message" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/plugin/evm/block.go b/plugin/evm/block.go index df6062573b..783b9ce05e 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -69,13 +69,13 @@ func (b *Block) Accept(context.Context) error { // precompiles are committed atomically with the vm's lastAcceptedKey. rules := b.vm.chainConfig.AvalancheRules(b.ethBlock.Number(), b.ethBlock.Timestamp()) sharedMemoryWriter := NewSharedMemoryWriter() - if err := b.handlePrecompileAccept(&rules, sharedMemoryWriter); err != nil { + if err := b.handlePrecompileAccept(rules, sharedMemoryWriter); err != nil { return err } + if err := vm.blockChain.Accept(b.ethBlock); err != nil { return fmt.Errorf("chain could not accept %s: %w", b.ID(), err) } - if err := vm.acceptedBlockDB.Put(lastAcceptedKey, b.id[:]); err != nil { return fmt.Errorf("failed to put %s as the last accepted block: %w", b.ID(), err) } @@ -97,7 +97,7 @@ func (b *Block) Accept(context.Context) error { // contract.Accepter // This function assumes that the Accept function will ONLY operate on state maintained in the VM's versiondb. // This ensures that any DB operations are performed atomically with marking the block as accepted. 
-func (b *Block) handlePrecompileAccept(rules *params.Rules, sharedMemoryWriter *sharedMemoryWriter) error { +func (b *Block) handlePrecompileAccept(rules params.Rules, sharedMemoryWriter *sharedMemoryWriter) error { // Short circuit early if there are no precompile accepters to execute if len(rules.AccepterPrecompiles) == 0 { return nil diff --git a/plugin/evm/block_test.go b/plugin/evm/block_test.go index d00193c76b..bf61ae1093 100644 --- a/plugin/evm/block_test.go +++ b/plugin/evm/block_test.go @@ -89,7 +89,7 @@ func TestHandlePrecompileAccept(t *testing.T) { // Call handlePrecompileAccept blk := vm.newBlock(ethBlock) - rules := ¶ms.Rules{ + rules := params.Rules{ AccepterPrecompiles: map[common.Address]precompileconfig.Accepter{ precompileAddr: mockAccepter, }, diff --git a/plugin/evm/config.go b/plugin/evm/config.go index 7f4ca902d6..8633c5a5b2 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/subnet-evm/core/txpool" "github.com/ava-labs/subnet-evm/eth" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/spf13/cast" ) @@ -21,6 +22,7 @@ const ( defaultTrieCleanCache = 512 defaultTrieDirtyCache = 512 defaultTrieDirtyCommitTarget = 20 + defaultTriePrefetcherParallelism = 16 defaultSnapshotCache = 256 defaultSyncableCommitInterval = defaultCommitInterval * 4 defaultSnapshotWait = false @@ -85,9 +87,9 @@ type Config struct { // Subnet EVM APIs SnowmanAPIEnabled bool `json:"snowman-api-enabled"` - WarpAPIEnabled bool `json:"warp-api-enabled"` AdminAPIEnabled bool `json:"admin-api-enabled"` AdminAPIDir string `json:"admin-api-dir"` + WarpAPIEnabled bool `json:"warp-api-enabled"` // EnabledEthAPIs is a list of Ethereum services that should be enabled // If none is specified, then we use the default list [defaultEnabledAPIs] @@ -103,12 +105,13 @@ type Config struct { RPCTxFeeCap float64 `json:"rpc-tx-fee-cap"` // Cache settings - TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB) - TrieCleanJournal string `json:"trie-clean-journal"` // Directory to use to save the trie clean cache (must be populated to enable journaling the trie clean cache) - TrieCleanRejournal Duration `json:"trie-clean-rejournal"` // Frequency to re-journal the trie clean cache to disk (minimum 1 minute, must be populated to enable journaling the trie clean cache) - TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB) - TrieDirtyCommitTarget int `json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB) - SnapshotCache int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB) + TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB) + TrieCleanJournal string `json:"trie-clean-journal"` // Directory to use to save the trie clean cache (must be populated to enable journaling the trie clean cache) + TrieCleanRejournal Duration `json:"trie-clean-rejournal"` // Frequency to re-journal the trie clean cache to disk (minimum 1 minute, must be populated to enable journaling the trie clean cache) + TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB) + TrieDirtyCommitTarget int `json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB) + TriePrefetcherParallelism int `json:"trie-prefetcher-parallelism"` // Max concurrent disk reads trie prefetcher should perform at once + SnapshotCache 
int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB) // Eth Settings Preimages bool `json:"preimages-enabled"` @@ -208,6 +211,17 @@ type Config struct { // * 0: means no limit // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes TxLookupLimit uint64 `json:"tx-lookup-limit"` + + // SkipTxIndexing skips indexing transactions. + // This is useful for validators that don't need to index transactions. + // TxLookupLimit can be still used to control unindexing old transactions. + SkipTxIndexing bool `json:"skip-tx-indexing"` + + // WarpOffChainMessages encodes off-chain messages (unrelated to any on-chain event ie. block or AddressedCall) + // that the node should be willing to sign. + // Note: only supports AddressedCall payloads as defined here: + // https://github.com/ava-labs/avalanchego/tree/7623ffd4be915a5185c9ed5e11fa9be15a6e1f00/vms/platformvm/warp/payload#addressedcall + WarpOffChainMessages []hexutil.Bytes `json:"warp-off-chain-messages"` } // EthAPIs returns an array of strings representing the Eth APIs that should be enabled @@ -244,9 +258,9 @@ func (c *Config) SetDefaults() { c.TrieCleanCache = defaultTrieCleanCache c.TrieDirtyCache = defaultTrieDirtyCache c.TrieDirtyCommitTarget = defaultTrieDirtyCommitTarget + c.TriePrefetcherParallelism = defaultTriePrefetcherParallelism c.SnapshotCache = defaultSnapshotCache c.AcceptorQueueLimit = defaultAcceptorQueueLimit - c.CommitInterval = defaultCommitInterval c.SnapshotWait = defaultSnapshotWait c.RegossipFrequency.Duration = defaultRegossipFrequency c.RegossipMaxTxs = defaultRegossipMaxTxs @@ -256,11 +270,12 @@ func (c *Config) SetDefaults() { c.PriorityRegossipTxsPerAddress = defaultPriorityRegossipTxsPerAddress c.OfflinePruningBloomFilterSize = defaultOfflinePruningBloomFilterSize c.LogLevel = defaultLogLevel + c.PopulateMissingTriesParallelism = defaultPopulateMissingTriesParallelism c.LogJSONFormat = defaultLogJSONFormat c.MaxOutboundActiveRequests = defaultMaxOutboundActiveRequests c.MaxOutboundActiveCrossChainRequests = defaultMaxOutboundActiveCrossChainRequests - c.PopulateMissingTriesParallelism = defaultPopulateMissingTriesParallelism c.StateSyncServerTrieCache = defaultStateSyncServerTrieCache + c.CommitInterval = defaultCommitInterval c.StateSyncCommitInterval = defaultSyncableCommitInterval c.StateSyncMinBlocks = defaultStateSyncMinBlocks c.StateSyncRequestSize = defaultStateSyncRequestSize @@ -303,6 +318,5 @@ func (c *Config) Validate() error { if c.Pruning && c.CommitInterval == 0 { return fmt.Errorf("cannot use commit interval of 0 with pruning enabled") } - return nil } diff --git a/plugin/evm/gossiper.go b/plugin/evm/gossiper.go index 578ed1d3ad..57d2cf5151 100644 --- a/plugin/evm/gossiper.go +++ b/plugin/evm/gossiper.go @@ -166,7 +166,7 @@ func (n *pushGossiper) queueExecutableTxs( } // queueRegossipTxs finds the best non-priority transactions in the mempool and adds up to -// [RegossipMaxTxs] of them to [txsToGossip]. +// [RegossipMaxTxs] of them to [ethTxsToGossip]. 
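Stepping back to the config additions above, a hypothetical node config fragment wiring the new knobs together; every value here is illustrative, and a real warp-off-chain-messages entry must be a hex-encoded AddressedCall payload rather than this placeholder:

package main

import "fmt"

func main() {
	// Hypothetical VM config JSON, passed to vm.Initialize as configBytes.
	// Values are illustrative; "0x1234" is a stand-in, not a valid
	// AddressedCall payload.
	const configJSON = `{
		"trie-prefetcher-parallelism": 16,
		"skip-tx-indexing": true,
		"tx-lookup-limit": 256,
		"warp-api-enabled": true,
		"warp-off-chain-messages": ["0x1234"]
	}`
	fmt.Println(configJSON)
}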
func (n *pushGossiper) queueRegossipTxs() types.Transactions { // Fetch all pending transactions pending := n.txPool.Pending(true) diff --git a/plugin/evm/message/codec.go b/plugin/evm/message/codec.go index e49838cd56..a51c932254 100644 --- a/plugin/evm/message/codec.go +++ b/plugin/evm/message/codec.go @@ -12,7 +12,7 @@ import ( const ( Version = uint16(0) - maxMessageSize = 1 * units.MiB + maxMessageSize = 2*units.MiB - 64*units.KiB // Subtract 64 KiB from p2p network cap to leave room for encoding overhead from AvalancheGo ) var ( @@ -22,7 +22,7 @@ var ( func init() { Codec = codec.NewManager(maxMessageSize) - c := linearcodec.NewDefault() + c := linearcodec.NewCustomMaxLength(maxMessageSize) errs := wrappers.Errs{} errs.Add( @@ -53,7 +53,7 @@ func init() { } CrossChainCodec = codec.NewManager(maxMessageSize) - ccc := linearcodec.NewDefault() + ccc := linearcodec.NewCustomMaxLength(maxMessageSize) errs = wrappers.Errs{} errs.Add( diff --git a/plugin/evm/message/message_test.go b/plugin/evm/message/message_test.go index 0a18fde784..85f0b97453 100644 --- a/plugin/evm/message/message_test.go +++ b/plugin/evm/message/message_test.go @@ -36,11 +36,11 @@ func TestMarshalTxs(t *testing.T) { assert.Equal(msg, parsedMsg.Txs) } -func TestTxsTooLarge(t *testing.T) { +func TestEthTxsTooLarge(t *testing.T) { assert := assert.New(t) builtMsg := EthTxsGossip{ - Txs: utils.RandomBytes(1024 * units.KiB), + Txs: utils.RandomBytes(maxMessageSize), } _, err := BuildGossipMessage(Codec, builtMsg) assert.Error(err) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 4abd15a008..b869374b68 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -419,6 +419,7 @@ func (vm *VM) Initialize( vm.ethConfig.SkipUpgradeCheck = vm.config.SkipUpgradeCheck vm.ethConfig.AcceptedCacheSize = vm.config.AcceptedCacheSize vm.ethConfig.TxLookupLimit = vm.config.TxLookupLimit + vm.ethConfig.SkipTxIndexing = vm.config.SkipTxIndexing // Create directory for offline pruning if len(vm.ethConfig.OfflinePruningDataDirectory) != 0 { @@ -749,8 +750,13 @@ func (vm *VM) setAppRequestHandlers() { Cache: vm.config.StateSyncServerTrieCache, }, ) - - networkHandler := newNetworkHandler(vm.blockChain, vm.chaindb, evmTrieDB, vm.warpBackend, vm.networkCodec) + networkHandler := newNetworkHandler( + vm.blockChain, + vm.chaindb, + evmTrieDB, + vm.warpBackend, + vm.networkCodec, + ) vm.Network.SetRequestHandler(networkHandler) } diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 257bd9b675..75ccc898ef 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -33,7 +33,6 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" - commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/validators" avalancheConstants "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" @@ -41,6 +40,8 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/components/chain" + commonEng "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/subnet-evm/accounts/abi" accountKeystore "github.com/ava-labs/subnet-evm/accounts/keystore" "github.com/ava-labs/subnet-evm/commontype" @@ -64,12 +65,11 @@ import ( "github.com/ava-labs/subnet-evm/utils" "github.com/ava-labs/subnet-evm/vmerrs" - avagoconstants "github.com/ava-labs/avalanchego/utils/constants" avalancheWarp 
"github.com/ava-labs/avalanchego/vms/platformvm/warp" ) var ( - testNetworkID uint32 = avagoconstants.UnitTestID + testNetworkID uint32 = avalancheConstants.UnitTestID testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} testMinGasPrice int64 = 225_000_000_000 @@ -204,14 +204,15 @@ func GenesisVM(t *testing.T, *commonEng.SenderTest, ) { vm := &VM{} - ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, genesisJSON) + ctx, db, genesisBytes, issuer, _ := setupGenesis(t, genesisJSON) + vm.p2pSender = &commonEng.FakeSender{} appSender := &commonEng.SenderTest{T: t} appSender.CantSendAppGossip = true appSender.SendAppGossipF = func(context.Context, []byte) error { return nil } err := vm.Initialize( context.Background(), ctx, - dbManager, + db, genesisBytes, []byte(upgradeJSON), []byte(configJSON), @@ -226,7 +227,7 @@ func GenesisVM(t *testing.T, require.NoError(t, vm.SetState(context.Background(), snow.NormalOp)) } - return issuer, vm, dbManager, appSender + return issuer, vm, db, appSender } func TestVMConfig(t *testing.T) { @@ -293,6 +294,11 @@ func TestVMUpgrades(t *testing.T) { genesis: genesisJSONSubnetEVM, expectedGasPrice: big.NewInt(0), }, + { + name: "DUpgrade", + genesis: genesisJSONDUpgrade, + expectedGasPrice: big.NewInt(0), + }, } for _, test := range genesisTests { t.Run(test.name, func(t *testing.T) { @@ -381,7 +387,7 @@ func issueAndAccept(t *testing.T, issuer <-chan commonEng.Message, vm *VM) snowm func TestBuildEthTxBlock(t *testing.T) { // reduce block gas cost - issuer, vm, dbManager, _ := GenesisVM(t, true, genesisJSONSubnetEVM, "{\"pruning-enabled\":true}", "") + issuer, vm, db, _ := GenesisVM(t, true, genesisJSONSubnetEVM, "{\"pruning-enabled\":true}", "") defer func() { if err := vm.Shutdown(context.Background()); err != nil { @@ -484,7 +490,7 @@ func TestBuildEthTxBlock(t *testing.T) { if err := restartedVM.Initialize( context.Background(), NewContext(), - dbManager, + db, genesisBytes, []byte(""), []byte("{\"pruning-enabled\":true}"), @@ -1985,14 +1991,14 @@ func TestConfigureLogLevel(t *testing.T) { for _, test := range configTests { t.Run(test.name, func(t *testing.T) { vm := &VM{} - ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, test.genesisJSON) + ctx, db, genesisBytes, issuer, _ := setupGenesis(t, test.genesisJSON) appSender := &commonEng.SenderTest{T: t} appSender.CantSendAppGossip = true appSender.SendAppGossipF = func(context.Context, []byte) error { return nil } err := vm.Initialize( context.Background(), ctx, - dbManager, + db, genesisBytes, []byte(""), []byte(test.logConfig), @@ -2356,11 +2362,11 @@ func TestVerifyManagerConfig(t *testing.T) { require.NoError(t, err) vm := &VM{} - ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, string(genesisJSON)) + ctx, db, genesisBytes, issuer, _ := setupGenesis(t, string(genesisJSON)) err = vm.Initialize( context.Background(), ctx, - dbManager, + db, genesisBytes, []byte(""), []byte(""), @@ -2387,11 +2393,11 @@ func TestVerifyManagerConfig(t *testing.T) { require.NoError(t, err) vm = &VM{} - ctx, dbManager, genesisBytes, issuer, _ = setupGenesis(t, string(genesisJSON)) + ctx, db, genesisBytes, issuer, _ = setupGenesis(t, string(genesisJSON)) err = vm.Initialize( context.Background(), ctx, - dbManager, + db, genesisBytes, upgradeBytesJSON, []byte(""), @@ -3045,7 +3051,7 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { metrics.Enabled = false defer func() { metrics.Enabled = true }() - issuer, vm, dbManager, appSender 
:= GenesisVM(t, true, genesisJSONPreSubnetEVM, "{\"pruning-enabled\":true}", "") + issuer, vm, db, appSender := GenesisVM(t, true, genesisJSONPreSubnetEVM, "{\"pruning-enabled\":true}", "") defer func() { if err := vm.Shutdown(context.Background()); err != nil { @@ -3089,12 +3095,12 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { require.NoError(t, err) // this will not be allowed - err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, appSender) + err = reinitVM.Initialize(context.Background(), vm.ctx, db, genesisWithUpgradeBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, appSender) require.ErrorContains(t, err, "mismatching SubnetEVM fork block timestamp in database") // try again with skip-upgrade-check config := []byte("{\"skip-upgrade-check\": true}") - err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, config, issuer, []*commonEng.Fx{}, appSender) + err = reinitVM.Initialize(context.Background(), vm.ctx, db, genesisWithUpgradeBytes, []byte{}, config, issuer, []*commonEng.Fx{}, appSender) require.NoError(t, err) require.NoError(t, reinitVM.Shutdown(context.Background())) } diff --git a/rpc/types.go b/rpc/types.go index 678cac8c77..6bc1a769d6 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -31,7 +31,6 @@ import ( "encoding/json" "fmt" "math" - "strconv" "strings" "github.com/ethereum/go-ethereum/common" @@ -102,7 +101,7 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error { case "pending": *bn = PendingBlockNumber return nil - // Include "finalized" as an option for compatibility with FinalizedBlockNumber from geth. + // Include "finalized" as an option for compatibility with FinalizedBlockNumber case "accepted", "finalized": *bn = AcceptedBlockNumber return nil @@ -238,7 +237,7 @@ func (bnh *BlockNumberOrHash) Number() (BlockNumber, bool) { func (bnh *BlockNumberOrHash) String() string { if bnh.BlockNumber != nil { - return strconv.Itoa(int(*bnh.BlockNumber)) + return bnh.BlockNumber.String() } if bnh.BlockHash != nil { return bnh.BlockHash.String() diff --git a/rpc/types_test.go b/rpc/types_test.go index 09041cdee7..a255c1e9f7 100644 --- a/rpc/types_test.go +++ b/rpc/types_test.go @@ -163,3 +163,24 @@ func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) { }) } } + +func TestBlockNumberOrHash_StringAndUnmarshal(t *testing.T) { + tests := []BlockNumberOrHash{ + BlockNumberOrHashWithNumber(math.MaxInt64), + BlockNumberOrHashWithNumber(PendingBlockNumber), + BlockNumberOrHashWithNumber(LatestBlockNumber), + BlockNumberOrHashWithNumber(EarliestBlockNumber), + BlockNumberOrHashWithNumber(32), + BlockNumberOrHashWithHash(common.Hash{0xaa}, false), + } + for _, want := range tests { + marshalled, _ := json.Marshal(want.String()) + var have BlockNumberOrHash + if err := json.Unmarshal(marshalled, &have); err != nil { + t.Fatalf("cannot unmarshal (%v): %v", string(marshalled), err) + } + if !reflect.DeepEqual(want, have) { + t.Fatalf("wrong result: have %v, want %v", have, want) + } + } +} diff --git a/scripts/versions.sh b/scripts/versions.sh index d7d978e229..f075dff984 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -5,4 +5,4 @@ AVALANCHE_VERSION=${AVALANCHE_VERSION:-'v1.10.18-rc.8'} GINKGO_VERSION=${GINKGO_VERSION:-'v2.2.0'} # This won't be used, but it's here to make code syncs easier -LATEST_CORETH_VERSION='0.12.7' +LATEST_CORETH_VERSION='0.12.10-rc.2' diff --git a/sync/client/client_test.go 
b/sync/client/client_test.go index 3c907b4a42..e373098d22 100644 --- a/sync/client/client_test.go +++ b/sync/client/client_test.go @@ -24,6 +24,7 @@ import ( clientstats "github.com/ava-labs/subnet-evm/sync/client/stats" "github.com/ava-labs/subnet-evm/sync/handlers" handlerstats "github.com/ava-labs/subnet-evm/sync/handlers/stats" + "github.com/ava-labs/subnet-evm/sync/syncutils" "github.com/ava-labs/subnet-evm/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -410,8 +411,8 @@ func TestGetLeafs(t *testing.T) { const leafsLimit = 1024 trieDB := trie.NewDatabase(memorydb.New()) - largeTrieRoot, largeTrieKeys, _ := trie.GenerateTrie(t, trieDB, 100_000, common.HashLength) - smallTrieRoot, _, _ := trie.GenerateTrie(t, trieDB, leafsLimit, common.HashLength) + largeTrieRoot, largeTrieKeys, _ := syncutils.GenerateTrie(t, trieDB, 100_000, common.HashLength) + smallTrieRoot, _, _ := syncutils.GenerateTrie(t, trieDB, leafsLimit, common.HashLength) handler := handlers.NewLeafsRequestHandler(trieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) client := NewClient(&ClientConfig{ @@ -781,7 +782,7 @@ func TestGetLeafsRetries(t *testing.T) { rand.Seed(1) trieDB := trie.NewDatabase(memorydb.New()) - root, _, _ := trie.GenerateTrie(t, trieDB, 100_000, common.HashLength) + root, _, _ := syncutils.GenerateTrie(t, trieDB, 100_000, common.HashLength) handler := handlers.NewLeafsRequestHandler(trieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) mockNetClient := &mockNetwork{} diff --git a/sync/handlers/block_request.go b/sync/handlers/block_request.go index 76203b78bc..a8fc070eb0 100644 --- a/sync/handlers/block_request.go +++ b/sync/handlers/block_request.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/subnet-evm/plugin/evm/message" "github.com/ava-labs/subnet-evm/sync/handlers/stats" @@ -17,9 +18,12 @@ import ( "github.com/ethereum/go-ethereum/log" ) -// parentLimit specifies how many parents to retrieve and send given a starting hash -// This value overrides any specified limit in blockRequest.Parents if it is greater than this value -const parentLimit = uint16(64) +const ( + // parentLimit specifies how many parents to retrieve and send given a starting hash + // This value overrides any specified limit in blockRequest.Parents if it is greater than this value + parentLimit = uint16(64) + targetMessageByteSize = units.MiB - units.KiB // Target total block bytes slightly under original network codec max size of 1MB +) // BlockRequestHandler is a peer.RequestHandler for message.BlockRequest // serving requested blocks starting at specified hash @@ -52,6 +56,7 @@ func (b *BlockRequestHandler) OnBlockRequest(ctx context.Context, nodeID ids.Nod parents = parentLimit } blocks := make([][]byte, 0, parents) + totalBytes := 0 // ensure metrics are captured properly on all return paths defer func() { @@ -84,7 +89,13 @@ func (b *BlockRequestHandler) OnBlockRequest(ctx context.Context, nodeID ids.Nod return nil, nil } + if buf.Len()+totalBytes > targetMessageByteSize && len(blocks) > 0 { + log.Debug("Skipping block due to max total bytes size", "totalBlockDataSize", totalBytes, "blockSize", buf.Len(), "maxTotalBytesSize", targetMessageByteSize) + break + } + blocks = append(blocks, buf.Bytes()) + totalBytes += buf.Len() hash = block.ParentHash() height-- } diff --git a/sync/handlers/block_request_test.go 
b/sync/handlers/block_request_test.go index c3886ce78a..4d8b9e8159 100644 --- a/sync/handlers/block_request_test.go +++ b/sync/handlers/block_request_test.go @@ -5,9 +5,11 @@ package handlers import ( "context" + "math/big" "testing" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/subnet-evm/consensus/dummy" "github.com/ava-labs/subnet-evm/core" "github.com/ava-labs/subnet-evm/core/types" @@ -16,31 +18,33 @@ import ( "github.com/ava-labs/subnet-evm/plugin/evm/message" "github.com/ava-labs/subnet-evm/sync/handlers/stats" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" ) -func TestBlockRequestHandler(t *testing.T) { - var gspec = &core.Genesis{ - Config: params.TestChainConfig, - } - memdb := memorydb.New() - genesis := gspec.MustCommit(memdb) - engine := dummy.NewETHFaker() - blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 96, 0, func(i int, b *core.BlockGen) {}) - if err != nil { - t.Fatal("unexpected error when generating test blockchain", err) - } +type blockRequestTest struct { + name string - assert.Len(t, blocks, 96) + // starting block, specify either Index or (hash+height) + startBlockIndex int + startBlockHash common.Hash + startBlockHeight uint64 + + requestedParents uint16 + expectedBlocks int + expectNilResponse bool + assertResponse func(t testing.TB, stats *stats.MockHandlerStats, b []byte) +} + +func executeBlockRequestTest(t testing.TB, test blockRequestTest, blocks []*types.Block) { + mockHandlerStats := &stats.MockHandlerStats{} // convert into map blocksDB := make(map[common.Hash]*types.Block, len(blocks)) for _, blk := range blocks { blocksDB[blk.Hash()] = blk } - - mockHandlerStats := &stats.MockHandlerStats{} blockProvider := &TestBlockProvider{ GetBlockFn: func(hash common.Hash, height uint64) *types.Block { blk, ok := blocksDB[hash] @@ -52,19 +56,64 @@ func TestBlockRequestHandler(t *testing.T) { } blockRequestHandler := NewBlockRequestHandler(blockProvider, message.Codec, mockHandlerStats) - tests := []struct { - name string + var blockRequest message.BlockRequest + if test.startBlockHash != (common.Hash{}) { + blockRequest.Hash = test.startBlockHash + blockRequest.Height = test.startBlockHeight + } else { + startingBlock := blocks[test.startBlockIndex] + blockRequest.Hash = startingBlock.Hash() + blockRequest.Height = startingBlock.NumberU64() + } + blockRequest.Parents = test.requestedParents - // starting block, specify either Index or (hash+height) - startBlockIndex int - startBlockHash common.Hash - startBlockHeight uint64 + responseBytes, err := blockRequestHandler.OnBlockRequest(context.Background(), ids.GenerateTestNodeID(), 1, blockRequest) + if err != nil { + t.Fatal("unexpected error during block request", err) + } + if test.assertResponse != nil { + test.assertResponse(t, mockHandlerStats, responseBytes) + } + + if test.expectNilResponse { + assert.Nil(t, responseBytes) + return + } + + assert.NotEmpty(t, responseBytes) + + var response message.BlockResponse + if _, err = message.Codec.Unmarshal(responseBytes, &response); err != nil { + t.Fatal("error unmarshalling", err) + } + assert.Len(t, response.Blocks, test.expectedBlocks) + + for _, blockBytes := range response.Blocks { + block := new(types.Block) + if err := rlp.DecodeBytes(blockBytes, block); err != nil { + t.Fatal("could not parse block", err) + } + assert.GreaterOrEqual(t, 
test.startBlockIndex, 0) + assert.Equal(t, blocks[test.startBlockIndex].Hash(), block.Hash()) + test.startBlockIndex-- + } + mockHandlerStats.Reset() +} + +func TestBlockRequestHandler(t *testing.T) { + var gspec = &core.Genesis{ + Config: params.TestChainConfig, + } + memdb := memorydb.New() + genesis := gspec.MustCommit(memdb) + engine := dummy.NewETHFaker() + blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 96, 0, func(i int, b *core.BlockGen) {}) + if err != nil { + t.Fatal("unexpected error when generating test blockchain", err) + } + assert.Len(t, blocks, 96) - requestedParents uint16 - expectedBlocks int - expectNilResponse bool - assertResponse func(t *testing.T, response []byte) - }{ + tests := []blockRequestTest{ { name: "handler_returns_blocks_as_requested", startBlockIndex: 64, @@ -89,55 +138,74 @@ func TestBlockRequestHandler(t *testing.T) { startBlockHeight: 1_000_000, requestedParents: 64, expectNilResponse: true, - assertResponse: func(t *testing.T, _ []byte) { + assertResponse: func(t testing.TB, mockHandlerStats *stats.MockHandlerStats, _ []byte) { assert.Equal(t, uint32(1), mockHandlerStats.MissingBlockHashCount) }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - var blockRequest message.BlockRequest - if test.startBlockHash != (common.Hash{}) { - blockRequest.Hash = test.startBlockHash - blockRequest.Height = test.startBlockHeight - } else { - startingBlock := blocks[test.startBlockIndex] - blockRequest.Hash = startingBlock.Hash() - blockRequest.Height = startingBlock.NumberU64() - } - blockRequest.Parents = test.requestedParents - - responseBytes, err := blockRequestHandler.OnBlockRequest(context.Background(), ids.GenerateTestNodeID(), 1, blockRequest) - if err != nil { - t.Fatal("unexpected error during block request", err) - } - if test.assertResponse != nil { - test.assertResponse(t, responseBytes) - } - - if test.expectNilResponse { - assert.Nil(t, responseBytes) - return - } + executeBlockRequestTest(t, test, blocks) + }) + } +} - assert.NotEmpty(t, responseBytes) +func TestBlockRequestHandlerLargeBlocks(t *testing.T) { + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + funds = big.NewInt(1000000000000000000) + gspec = &core.Genesis{ + Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, + Alloc: core.GenesisAlloc{addr1: {Balance: funds}}, + } + signer = types.LatestSigner(gspec.Config) + ) + memdb := memorydb.New() + genesis := gspec.MustCommit(memdb) + engine := dummy.NewETHFaker() + blocks, _, err := core.GenerateChain(gspec.Config, genesis, engine, memdb, 96, 0, func(i int, b *core.BlockGen) { + var data []byte + switch { + case i <= 32: + data = make([]byte, units.MiB) + default: + data = make([]byte, units.MiB/16) + } + tx, err := types.SignTx(types.NewTransaction(b.TxNonce(addr1), addr1, big.NewInt(10000), 4_215_304, nil, data), signer, key1) + if err != nil { + t.Fatal(err) + } + b.AddTx(tx) + }) + if err != nil { + t.Fatal("unexpected error when generating test blockchain", err) + } + assert.Len(t, blocks, 96) - var response message.BlockResponse - if _, err = message.Codec.Unmarshal(responseBytes, &response); err != nil { - t.Fatal("error unmarshalling", err) - } - assert.Len(t, response.Blocks, test.expectedBlocks) - - for _, blockBytes := range response.Blocks { - block := new(types.Block) - if err := rlp.DecodeBytes(blockBytes, block); err != nil { - t.Fatal("could not parse block", 
err) - } - assert.GreaterOrEqual(t, test.startBlockIndex, 0) - assert.Equal(t, blocks[test.startBlockIndex].Hash(), block.Hash()) - test.startBlockIndex-- - } - mockHandlerStats.Reset() + tests := []blockRequestTest{ + { + name: "handler_returns_blocks_as_requested", + startBlockIndex: 64, + requestedParents: 10, + expectedBlocks: 10, + }, + { + name: "handler_caps_blocks_size_limit", + startBlockIndex: 64, + requestedParents: 16, + expectedBlocks: 15, + }, + { + name: "handler_caps_blocks_size_limit_on_first_block", + startBlockIndex: 32, + requestedParents: 10, + expectedBlocks: 1, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + executeBlockRequestTest(t, test, blocks) }) } } diff --git a/sync/handlers/leafs_request_test.go b/sync/handlers/leafs_request_test.go index 6d374c10cd..ac1af61903 100644 --- a/sync/handlers/leafs_request_test.go +++ b/sync/handlers/leafs_request_test.go @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/subnet-evm/ethdb/memorydb" "github.com/ava-labs/subnet-evm/plugin/evm/message" "github.com/ava-labs/subnet-evm/sync/handlers/stats" + "github.com/ava-labs/subnet-evm/sync/syncutils" "github.com/ava-labs/subnet-evm/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -30,13 +31,17 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { memdb := memorydb.New() trieDB := trie.NewDatabase(memdb) - corruptedTrieRoot, _, _ := trie.GenerateTrie(t, trieDB, 100, common.HashLength) + corruptedTrieRoot, _, _ := syncutils.GenerateTrie(t, trieDB, 100, common.HashLength) + tr, err := trie.New(trie.TrieID(corruptedTrieRoot), trieDB) + if err != nil { + t.Fatal(err) + } // Corrupt [corruptedTrieRoot] - trie.CorruptTrie(t, trieDB, corruptedTrieRoot, 5) + syncutils.CorruptTrie(t, memdb, tr, 5) - largeTrieRoot, largeTrieKeys, _ := trie.GenerateTrie(t, trieDB, 10_000, common.HashLength) - smallTrieRoot, _, _ := trie.GenerateTrie(t, trieDB, 500, common.HashLength) - accountTrieRoot, accounts := trie.FillAccounts( + largeTrieRoot, largeTrieKeys, _ := syncutils.GenerateTrie(t, trieDB, 10_000, common.HashLength) + smallTrieRoot, _, _ := syncutils.GenerateTrie(t, trieDB, 500, common.HashLength) + accountTrieRoot, accounts := syncutils.FillAccounts( t, trieDB, common.Hash{}, diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go index d9a31aa9ee..dcf389b60a 100644 --- a/sync/statesync/sync_test.go +++ b/sync/statesync/sync_test.go @@ -22,6 +22,7 @@ import ( statesyncclient "github.com/ava-labs/subnet-evm/sync/client" "github.com/ava-labs/subnet-evm/sync/handlers" handlerstats "github.com/ava-labs/subnet-evm/sync/handlers/stats" + "github.com/ava-labs/subnet-evm/sync/syncutils" "github.com/ava-labs/subnet-evm/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -121,7 +122,7 @@ func TestSimpleSyncCases(t *testing.T) { prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { serverDB := memorydb.New() serverTrieDB := trie.NewDatabase(serverDB) - root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, nil) + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, nil) return memorydb.New(), serverDB, serverTrieDB, root }, }, @@ -129,7 +130,7 @@ func TestSimpleSyncCases(t *testing.T) { prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { serverDB := memorydb.New() serverTrieDB := trie.NewDatabase(serverDB) - root, _ := 
trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { if index%3 == 0 { codeBytes := make([]byte, 256) _, err := rand.Read(codeBytes) @@ -158,9 +159,9 @@ func TestSimpleSyncCases(t *testing.T) { prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { serverDB := memorydb.New() serverTrieDB := trie.NewDatabase(serverDB) - root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, func(t *testing.T, i int, account types.StateAccount) types.StateAccount { + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, func(t *testing.T, i int, account types.StateAccount) types.StateAccount { if i%5 == 0 { - account.Root, _, _ = trie.GenerateTrie(t, serverTrieDB, 16, common.HashLength) + account.Root, _, _ = syncutils.GenerateTrie(t, serverTrieDB, 16, common.HashLength) } return account @@ -180,7 +181,7 @@ func TestSimpleSyncCases(t *testing.T) { prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { serverDB := memorydb.New() serverTrieDB := trie.NewDatabase(serverDB) - root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccountsSmall, nil) + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, numAccountsSmall, nil) return memorydb.New(), serverDB, serverTrieDB, root }, GetLeafsIntercept: func(_ message.LeafsRequest, _ message.LeafsResponse) (message.LeafsResponse, error) { @@ -280,8 +281,8 @@ func TestResumeSyncLargeStorageTrieInterrupted(t *testing.T) { serverDB := memorydb.New() serverTrieDB := trie.NewDatabase(serverDB) - largeStorageRoot, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) - root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, 2000, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + largeStorageRoot, _, _ := syncutils.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, 2000, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { // Set the root for a single account if index == 10 { account.Root = largeStorageRoot @@ -312,16 +313,16 @@ func TestResumeSyncToNewRootAfterLargeStorageTrieInterrupted(t *testing.T) { serverDB := memorydb.New() serverTrieDB := trie.NewDatabase(serverDB) - largeStorageRoot1, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) - largeStorageRoot2, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) - root1, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, 2000, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + largeStorageRoot1, _, _ := syncutils.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) + largeStorageRoot2, _, _ := syncutils.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) + root1, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, 2000, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { // Set the root for a single account if index == 10 { account.Root = largeStorageRoot1 } return account }) - root2, _ := trie.FillAccounts(t, serverTrieDB, root1, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + root2, _ := syncutils.FillAccounts(t, serverTrieDB, root1, 
100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { if index == 20 { account.Root = largeStorageRoot2 } @@ -353,8 +354,8 @@ func TestResumeSyncLargeStorageTrieWithConsecutiveDuplicatesInterrupted(t *testi serverDB := memorydb.New() serverTrieDB := trie.NewDatabase(serverDB) - largeStorageRoot, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) - root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + largeStorageRoot, _, _ := syncutils.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { // Set the root for 2 successive accounts if index == 10 || index == 11 { account.Root = largeStorageRoot @@ -385,8 +386,8 @@ func TestResumeSyncLargeStorageTrieWithSpreadOutDuplicatesInterrupted(t *testing serverDB := memorydb.New() serverTrieDB := trie.NewDatabase(serverDB) - largeStorageRoot, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) - root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + largeStorageRoot, _, _ := syncutils.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { if index == 10 || index == 90 { account.Root = largeStorageRoot } @@ -465,7 +466,11 @@ func TestResyncNewRootAfterDeletes(t *testing.T) { continue } corruptedStorageRoots[acc.Root] = struct{}{} - trie.CorruptTrie(t, clientTrieDB, acc.Root, 2) + tr, err := trie.New(trie.TrieID(acc.Root), clientTrieDB) + if err != nil { + t.Fatal(err) + } + syncutils.CorruptTrie(t, clientDB, tr, 2) } if err := it.Err; err != nil { t.Fatal(err) @@ -475,7 +480,11 @@ func TestResyncNewRootAfterDeletes(t *testing.T) { "delete intermediate account trie nodes": { deleteBetweenSyncs: func(t *testing.T, root common.Hash, clientDB ethdb.Database) { clientTrieDB := trie.NewDatabase(clientDB) - trie.CorruptTrie(t, clientTrieDB, root, 5) + tr, err := trie.New(trie.TrieID(root), clientTrieDB) + if err != nil { + t.Fatal(err) + } + syncutils.CorruptTrie(t, clientDB, tr, 5) }, }, } { diff --git a/sync/statesync/test_sync.go b/sync/statesync/test_sync.go index 42646ed59f..a97204304e 100644 --- a/sync/statesync/test_sync.go +++ b/sync/statesync/test_sync.go @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/subnet-evm/core/state/snapshot" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/ethdb" + "github.com/ava-labs/subnet-evm/sync/syncutils" "github.com/ava-labs/subnet-evm/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -38,7 +39,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database } trieAccountLeaves := 0 - trie.AssertTrieConsistency(t, root, serverTrieDB, clientTrieDB, func(key, val []byte) error { + syncutils.AssertTrieConsistency(t, root, serverTrieDB, clientTrieDB, func(key, val []byte) error { trieAccountLeaves++ accHash := common.BytesToHash(key) var acc types.StateAccount @@ -73,7 +74,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database storageTrieLeavesCount := 0 // check storage trie and storage snapshot consistency - trie.AssertTrieConsistency(t, acc.Root, 
serverTrieDB, clientTrieDB, func(key, val []byte) error { + syncutils.AssertTrieConsistency(t, acc.Root, serverTrieDB, clientTrieDB, func(key, val []byte) error { storageTrieLeavesCount++ snapshotVal := rawdb.ReadStorageSnapshot(clientDB, accHash, common.BytesToHash(key)) assert.Equal(t, val, snapshotVal) @@ -89,7 +90,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database } func fillAccountsWithStorage(t *testing.T, serverDB ethdb.Database, serverTrieDB *trie.Database, root common.Hash, numAccounts int) common.Hash { - newRoot, _ := trie.FillAccounts(t, serverTrieDB, root, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + newRoot, _ := syncutils.FillAccounts(t, serverTrieDB, root, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { codeBytes := make([]byte, 256) _, err := rand.Read(codeBytes) if err != nil { @@ -102,7 +103,7 @@ func fillAccountsWithStorage(t *testing.T, serverDB ethdb.Database, serverTrieDB // now create state trie numKeys := 16 - account.Root, _, _ = trie.GenerateTrie(t, serverTrieDB, numKeys, common.HashLength) + account.Root, _, _ = syncutils.GenerateTrie(t, serverTrieDB, numKeys, common.HashLength) return account }) return newRoot @@ -119,18 +120,18 @@ func FillAccountsWithOverlappingStorage( ) (common.Hash, map[*keystore.Key]*types.StateAccount) { storageRoots := make([]common.Hash, 0, numOverlappingStorageRoots) for i := 0; i < numOverlappingStorageRoots; i++ { - storageRoot, _, _ := trie.GenerateTrie(t, trieDB, 16, common.HashLength) + storageRoot, _, _ := syncutils.GenerateTrie(t, trieDB, 16, common.HashLength) storageRoots = append(storageRoots, storageRoot) } storageRootIndex := 0 - return trie.FillAccounts(t, trieDB, root, numAccounts, func(t *testing.T, i int, account types.StateAccount) types.StateAccount { + return syncutils.FillAccounts(t, trieDB, root, numAccounts, func(t *testing.T, i int, account types.StateAccount) types.StateAccount { switch i % 3 { case 0: // unmodified account case 1: // account with overlapping storage root account.Root = storageRoots[storageRootIndex%numOverlappingStorageRoots] storageRootIndex++ case 2: // account with unique storage root - account.Root, _, _ = trie.GenerateTrie(t, trieDB, 16, common.HashLength) + account.Root, _, _ = syncutils.GenerateTrie(t, trieDB, 16, common.HashLength) } return account diff --git a/trie/test_trie.go b/sync/syncutils/test_trie.go similarity index 84% rename from trie/test_trie.go rename to sync/syncutils/test_trie.go index c61a8decc0..38aec32290 100644 --- a/trie/test_trie.go +++ b/sync/syncutils/test_trie.go @@ -1,7 +1,7 @@ // (c) 2021-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package trie +package syncutils import ( cryptoRand "crypto/rand" @@ -13,6 +13,8 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/subnet-evm/accounts/keystore" "github.com/ava-labs/subnet-evm/core/types" + "github.com/ava-labs/subnet-evm/ethdb" + "github.com/ava-labs/subnet-evm/trie" "github.com/ava-labs/subnet-evm/trie/trienode" "github.com/ethereum/go-ethereum/common" @@ -24,11 +26,11 @@ import ( // Returns the root of the generated trie, the slice of keys inserted into the trie in lexicographical // order, and the slice of corresponding values. 
// GenerateTrie reads from [rand] and the caller should call rand.Seed(n) for deterministic results -func GenerateTrie(t *testing.T, trieDB *Database, numKeys int, keySize int) (common.Hash, [][]byte, [][]byte) { +func GenerateTrie(t *testing.T, trieDB *trie.Database, numKeys int, keySize int) (common.Hash, [][]byte, [][]byte) { if keySize < wrappers.LongLen+1 { t.Fatal("key size must be at least 9 bytes (8 bytes for uint64 and 1 random byte)") } - testTrie := NewEmpty(trieDB) + testTrie := trie.NewEmpty(trieDB) keys, values := FillTrie(t, numKeys, keySize, testTrie) @@ -45,7 +47,7 @@ func GenerateTrie(t *testing.T, trieDB *Database, numKeys int, keySize int) (com // FillTrie fills a given trie with [numKeys] number of keys, each of size [keySize] // returns inserted keys and values // FillTrie reads from [rand] and the caller should call rand.Seed(n) for deterministic results -func FillTrie(t *testing.T, numKeys int, keySize int, testTrie *Trie) ([][]byte, [][]byte) { +func FillTrie(t *testing.T, numKeys int, keySize int, testTrie *trie.Trie) ([][]byte, [][]byte) { keys := make([][]byte, 0, numKeys) values := make([][]byte, 0, numKeys) @@ -70,18 +72,18 @@ func FillTrie(t *testing.T, numKeys int, keySize int, testTrie *Trie) ([][]byte, // AssertTrieConsistency ensures given trieDB [a] and [b] both have the same // non-empty trie at [root]. (all key/value pairs must be equal) -func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *Database, onLeaf func(key, val []byte) error) { - trieA, err := New(TrieID(root), a) +func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *trie.Database, onLeaf func(key, val []byte) error) { + trieA, err := trie.New(trie.TrieID(root), a) if err != nil { t.Fatalf("error creating trieA, root=%s, err=%v", root, err) } - trieB, err := New(TrieID(root), b) + trieB, err := trie.New(trie.TrieID(root), b) if err != nil { t.Fatalf("error creating trieB, root=%s, err=%v", root, err) } - itA := NewIterator(trieA.NodeIterator(nil)) - itB := NewIterator(trieB.NodeIterator(nil)) + itA := trie.NewIterator(trieA.NodeIterator(nil)) + itB := trie.NewIterator(trieB.NodeIterator(nil)) count := 0 for itA.Next() && itB.Next() { count++ @@ -100,16 +102,11 @@ func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *Database, onLea assert.Greater(t, count, 0) } -// CorruptTrie deletes every [n]th trie node from the trie given by [root] from the trieDB. -// Assumes that the trie given by root can be iterated without issue. -func CorruptTrie(t *testing.T, trieDB *Database, root common.Hash, n int) { - batch := trieDB.diskdb.NewBatch() - // next delete some trie nodes - tr, err := New(TrieID(root), trieDB) - if err != nil { - t.Fatal(err) - } - +// CorruptTrie deletes every [n]th trie node from the trie given by [tr] from the underlying [db]. +// Assumes [tr] can be iterated without issue. +func CorruptTrie(t *testing.T, diskdb ethdb.Batcher, tr *trie.Trie, n int) { + // Delete some trie nodes + batch := diskdb.NewBatch() nodeIt := tr.NodeIterator(nil) count := 0 for nodeIt.Next(true) { @@ -133,7 +130,7 @@ func CorruptTrie(t *testing.T, trieDB *Database, root common.Hash, n int) { // [onAccount] is called if non-nil (so the caller can modify the account before it is stored in the secure trie). // returns the new trie root and a map of funded keys to StateAccount structs. 
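As a usage sketch for the relocated helpers (the test name, trie size, and corruption stride here are arbitrary; FillAccounts itself continues right after this block):

package syncutils_test

import (
	"testing"

	"github.com/ava-labs/subnet-evm/ethdb/memorydb"
	"github.com/ava-labs/subnet-evm/sync/syncutils"
	"github.com/ava-labs/subnet-evm/trie"
	"github.com/ethereum/go-ethereum/common"
)

func TestGenerateAndCorruptTrie(t *testing.T) {
	diskdb := memorydb.New()
	trieDB := trie.NewDatabase(diskdb)

	// Build a deterministic 512-leaf trie with 32-byte keys.
	root, keys, _ := syncutils.GenerateTrie(t, trieDB, 512, common.HashLength)
	if len(keys) != 512 {
		t.Fatalf("expected 512 keys, got %d", len(keys))
	}

	// Open the trie and delete every 5th node from disk, mirroring how the
	// sync tests manufacture corrupted state with the new CorruptTrie signature.
	tr, err := trie.New(trie.TrieID(root), trieDB)
	if err != nil {
		t.Fatal(err)
	}
	syncutils.CorruptTrie(t, diskdb, tr, 5)
}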
func FillAccounts( - t *testing.T, trieDB *Database, root common.Hash, numAccounts int, + t *testing.T, trieDB *trie.Database, root common.Hash, numAccounts int, onAccount func(*testing.T, int, types.StateAccount) types.StateAccount, ) (common.Hash, map[*keystore.Key]*types.StateAccount) { var ( @@ -143,7 +140,7 @@ func FillAccounts( accounts = make(map[*keystore.Key]*types.StateAccount, numAccounts) ) - tr, err := NewStateTrie(TrieID(root), trieDB) + tr, err := trie.NewStateTrie(trie.TrieID(root), trieDB) if err != nil { t.Fatalf("error opening trie: %v", err) } diff --git a/trie/triedb/hashdb/database.go b/trie/triedb/hashdb/database.go index 6a88372adc..205326367d 100644 --- a/trie/triedb/hashdb/database.go +++ b/trie/triedb/hashdb/database.go @@ -48,10 +48,11 @@ var ( memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil) memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil) - memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil) - memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil) - memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil) - memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil) + memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil) + memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil) + memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil) + memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil) + memcacheDirtySizeGauge = metrics.NewRegisteredGaugeFloat64("trie/memcache/dirty/size", nil) memcacheDirtyChildSizeGauge = metrics.NewRegisteredGaugeFloat64("trie/memcache/dirty/childsize", nil) memcacheDirtyNodesGauge = metrics.NewRegisteredGauge("trie/memcache/dirty/nodes", nil) @@ -220,7 +221,7 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) { // Content unavailable in memory, attempt to retrieve from disk enc := rawdb.ReadLegacyTrieNode(db.diskdb, hash) - if len(enc) > 0 { + if len(enc) != 0 { if db.cleans != nil { db.cleans.Set(hash[:], enc) memcacheCleanMissMeter.Mark(1) @@ -287,9 +288,9 @@ func (db *Database) Dereference(root common.Hash) { log.Error("Attempted to dereference the trie cache meta root") return } - db.lock.Lock() defer db.lock.Unlock() + nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now() db.dereference(root) @@ -464,6 +465,7 @@ func (db *Database) Cap(limit common.StorageSize) error { log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), "flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) + return nil } @@ -522,6 +524,7 @@ func (db *Database) Commit(node common.Hash, report bool) error { // Reset the garbage collection statistics db.gcnodes, db.gcsize, db.gctime = 0, 0, 0 db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0 + return nil } diff --git a/utils/bounded_workers.go b/utils/bounded_workers.go new file mode 100644 index 0000000000..806f923fd4 --- /dev/null +++ b/utils/bounded_workers.go @@ -0,0 +1,81 @@ +// (c) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+
+package utils
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+type BoundedWorkers struct {
+	workerCount        atomic.Int32
+	workerSpawner      chan struct{}
+	outstandingWorkers sync.WaitGroup
+
+	work      chan func()
+	workClose sync.Once
+}
+
+// NewBoundedWorkers returns an instance of [BoundedWorkers] that
+// will spawn up to [max] goroutines.
+func NewBoundedWorkers(max int) *BoundedWorkers {
+	return &BoundedWorkers{
+		workerSpawner: make(chan struct{}, max),
+		work:          make(chan func()),
+	}
+}
+
+// startWorker creates a new goroutine to execute [f] immediately and then keeps the goroutine
+// alive to continue executing new work.
+func (b *BoundedWorkers) startWorker(f func()) {
+	b.workerCount.Add(1)
+	b.outstandingWorkers.Add(1)
+
+	go func() {
+		defer b.outstandingWorkers.Done()
+
+		if f != nil {
+			f()
+		}
+		for f := range b.work {
+			f()
+		}
+	}()
+}
+
+// Execute runs the given function on an existing goroutine waiting for more work, or on a
+// new goroutine if fewer than [max] workers have been spawned; otherwise it blocks until
+// a worker becomes available.
+//
+// Execute must not be called after Wait, otherwise it might panic.
+func (b *BoundedWorkers) Execute(f func()) {
+	// Ensure we feed idle workers first
+	select {
+	case b.work <- f:
+		return
+	default:
+	}
+
+	// Fallback to waiting for an idle worker or allocating
+	// a new worker (if we aren't yet at max concurrency)
+	select {
+	case b.work <- f:
+	case b.workerSpawner <- struct{}{}:
+		b.startWorker(f)
+	}
+}
+
+// Wait returns after all enqueued work finishes and all goroutines have exited.
+// Wait returns the number of workers that were spawned during the run.
+//
+// Wait can only be called after ALL calls to [Execute] have returned.
+//
+// It is safe to call Wait multiple times but not safe to call [Execute]
+// after [Wait] has been called.
+func (b *BoundedWorkers) Wait() int {
+	b.workClose.Do(func() {
+		close(b.work)
+	})
+	b.outstandingWorkers.Wait()
+	return int(b.workerCount.Load())
+}

From 6478754b8ff742fcf0f17dcb67f81dcb854c721e Mon Sep 17 00:00:00 2001
From: Ceyhun Onur
Date: Fri, 5 Jan 2024 14:57:54 +0300
Subject: [PATCH 8/8] add prefetcher to worker

---
 miner/worker.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/miner/worker.go b/miner/worker.go
index 00023d8607..9e179c4d85 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -195,6 +195,13 @@ func (w *worker) commitNewWork(predicateContext *precompileconfig.PredicateConte
 	if err != nil {
 		return nil, fmt.Errorf("failed to create new current environment: %w", err)
 	}
+	// Ensure we always stop the prefetcher after block building is complete.
+	defer func() {
+		if env.state == nil {
+			return
+		}
+		env.state.StopPrefetcher()
+	}()
 	// Configure any upgrades that should go into effect during this block.
 	err = core.ApplyUpgrades(w.chainConfig, &parent.Time, types.NewBlockWithHeader(header), env.state)
 	if err != nil {
@@ -231,6 +238,7 @@ func (w *worker) createCurrentEnvironment(predicateContext *precompileconfig.Pre
 	if err != nil {
 		return nil, err
 	}
+	state.StartPrefetcher("miner", w.eth.BlockChain().CacheConfig().TriePrefetcherParallelism)
 	return &environment{
 		signer:  types.MakeSigner(w.chainConfig, header.Number, header.Time),
 		state:   state,
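
The two prefetcher hunks above are deliberately paired: createCurrentEnvironment starts the trie prefetcher as soon as the state copy exists, and commitNewWork installs a guarded defer so StopPrefetcher runs on every exit path once the environment has been created, including the error returns later in the function. The nil check on env.state keeps the defer safe even if the environment is only partially constructed.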
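
Returning to the [BoundedWorkers] helper added earlier in this series, the following is a minimal usage sketch, not code from the patches themselves: the import path assumes the subnet-evm module layout, and the worker and task counts are illustrative only.

package main

import (
	"fmt"
	"sync/atomic"

	"github.com/ava-labs/subnet-evm/utils"
)

func main() {
	// Spawn at most 4 goroutines, no matter how many tasks are enqueued.
	workers := utils.NewBoundedWorkers(4)

	var completed atomic.Int64
	for i := 0; i < 100; i++ {
		// Execute hands the task to an idle worker, spawns a new worker
		// (while under the max), or blocks until one frees up.
		workers.Execute(func() {
			completed.Add(1)
		})
	}

	// Safe here: every Execute call above has returned. Wait reports
	// how many workers were actually spawned.
	spawned := workers.Wait()
	fmt.Printf("completed %d tasks with %d workers\n", completed.Load(), spawned)
}

The unbuffered work channel plus the bounded workerSpawner channel is what provides backpressure: once [max] workers exist and none are idle, the second select in Execute blocks, so callers can never enqueue unbounded work.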