diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 87c79fcfe2..9e89d2d0da 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -33,3 +33,36 @@ Please make sure your contributions adhere to our coding guidelines:
 
 Before you submit a feature request, please check and make sure that it isn't possible through some other means.
 
+## Mocks
+
+Mocks are auto-generated using [mockgen](https://pkg.go.dev/go.uber.org/mock/mockgen) and `//go:generate` commands in the code.
+
+* To **re-generate all mocks**, use the command below from the root of the project:
+
+  ```sh
+  go generate -run "go.uber.org/mock/mockgen" ./...
+  ```
+
+* To **add** an interface that needs a corresponding mock generated:
+  * if the file `mocks_generate_test.go` exists in the package where the interface is located, either:
+    * modify its `//go:generate go run go.uber.org/mock/mockgen` line to also generate a mock for your interface, typically by appending your interface name to its comma-separated interface list (preferred); or
+    * add another `//go:generate go run go.uber.org/mock/mockgen` line to generate a mock for your interface with its own specific mock generation settings.
+  * if the file `mocks_generate_test.go` does not exist in the package where the interface is located, create it with content (adapt as needed):
+
+    ```go
+    // Copyright (C) 2025-2025, Ava Labs, Inc. All rights reserved.
+    // See the file LICENSE for licensing terms.
+
+    package mypackage
+
+    //go:generate go run go.uber.org/mock/mockgen -package=${GOPACKAGE} -destination=mocks_test.go . YourInterface
+    ```
+
+  Notes:
+  1. Ideally, generate all mocks to `mocks_test.go` in the package that uses them, and do not export mocks to other packages. This reduces package dependencies, reduces production code pollution, and encourages locally defined, narrow interfaces.
+  1. Prefer reflect mode over source mode when generating mocks, unless you need a mock for an unexported interface, which should be rare.
+* To **remove** an interface from having a corresponding mock generated:
+  1. Edit the `mocks_generate_test.go` file in the directory where the interface is defined.
+  1. If the `//go:generate` mockgen command line:
+     * generates a mock file for multiple interfaces, remove your interface from the line;
+     * generates a mock file only for the interface, remove the entire line. If the file is then empty, remove `mocks_generate_test.go` as well.
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 8200c0597f..7751a8146f 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -3,3 +3,7 @@
 ## How this works
 
 ## How this was tested
+
+## Does this need documentation?
+
+## Does this need a RELEASES.md update?
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 46bbd58745..3adc2a853c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -32,7 +32,6 @@ jobs:
      - uses: actions/setup-go@v5
        with:
          go-version: "~1.22.8"
-          check-latest: true
      - name: change avalanchego dep
        if: ${{ github.event_name == 'workflow_dispatch' }}
        run: |
@@ -74,7 +73,6 @@ jobs:
      - uses: actions/setup-go@v5
        with:
          go-version: "~1.22.8"
-          check-latest: true
      - name: change avalanchego dep
        if: ${{ github.event_name == 'workflow_dispatch' }}
        run: |
@@ -86,6 +84,17 @@
        run: echo "TIMEOUT=1200s" >> "$GITHUB_ENV"
      - run: go mod download
        shell: bash
+      - name: go mod tidy
+        run: |
+          go mod tidy
+          git diff --exit-code
+      - name: Mocks are up to date
+        shell: bash
+        run: |
+          grep -lr -E '^// Code generated by MockGen\. DO NOT EDIT\.$' . 
| xargs -r rm
+          go generate -run "go.uber.org/mock/mockgen" ./...
+          git add --intent-to-add --all
+          git diff --exit-code
      - run: ./scripts/build.sh evm
        shell: bash
      - run: ./scripts/build_test.sh
@@ -113,7 +122,6 @@ jobs:
      - uses: actions/setup-go@v5
        with:
          go-version: "~1.22.8"
-          check-latest: true
      - name: Run e2e tests
        run: E2E_SERIAL=1 ./scripts/tests.e2e.sh
        shell: bash
diff --git a/.github/workflows/sync-subnet-evm-branch.yml b/.github/workflows/sync-subnet-evm-branch.yml
index 97e8554ef8..e053aec587 100644
--- a/.github/workflows/sync-subnet-evm-branch.yml
+++ b/.github/workflows/sync-subnet-evm-branch.yml
@@ -17,4 +17,3 @@ jobs:
      - uses: actions/setup-go@v5
        with:
          go-version: "~1.22.8"
-          check-latest: true
diff --git a/README.md b/README.md
index 69a11a6c43..40cd3a8407 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
 
 [Avalanche](https://docs.avax.network/intro) is a network composed of multiple blockchains. Each blockchain is an instance of a Virtual Machine (VM), much like an object in an object-oriented language is an instance of a class. That is, the VM defines the behavior of the blockchain.
 
-Coreth (from core Ethereum) is the [Virtual Machine (VM)](https://docs.avax.network/learn/avalanche/virtual-machines) that defines the Contract Chain (C-Chain).
+Coreth (from core Ethereum) is the [Virtual Machine (VM)](https://docs.avax.network/learn/virtual-machines) that defines the Contract Chain (C-Chain).
 This chain implements the Ethereum Virtual Machine and supports Solidity smart contracts as well as most other Ethereum client functionality.
 
 ## Building
diff --git a/RELEASES.md b/RELEASES.md
index 64a6fb87ee..bce253816e 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -1,5 +1,18 @@
 # Release Notes
 
+## [v0.14.1](https://github.com/ava-labs/coreth/releases/tag/v0.14.1)
+- Remove the eth_getAssetBalance API that was used to query ANT balances (deprecated since v0.10.0).
+- Remove the legacy gossip handler and metrics (deprecated since v0.10.0).
+- Refactor trie_prefetcher.go to be structurally similar to [upstream](https://github.com/ethereum/go-ethereum/tree/v1.13.14).
+
+## [v0.14.0](https://github.com/ava-labs/coreth/releases/tag/v0.14.0)
+- Minor version update to correspond to avalanchego v1.12.0 / Etna.
+- Remove the unused historical opcodes CALLEX and BALANCEMC.
+- Remove unused pre-AP2 handling of the genesis contract.
+- Fix tx size tracking in block building.
+- Test fixes.
+- Update Go version to 1.22.
+
 ## [v0.13.8](https://github.com/ava-labs/coreth/releases/tag/v0.13.8)
 - Update geth dependency to v1.13.14
 - eupgrade: lowering the base fee to 1 nAVAX
diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go
index 685b034d14..e1311c47d9 100644
--- a/accounts/abi/bind/util_test.go
+++ b/accounts/abi/bind/util_test.go
@@ -142,7 +142,7 @@ func TestWaitDeployedCornerCases(t *testing.T) {
 	// Create a transaction that is not mined.
 	
tx = types.NewContractCreation(1, big.NewInt(0), 3000000, gasPrice, common.FromHex(code)) - tx, _ = types.SignTx(tx, types.LatestSigner(params.TestChainConfig), testKey) + tx, _ = types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1337)), testKey) go func() { contextCanceled := errors.New("context canceled") @@ -151,6 +151,8 @@ func TestWaitDeployedCornerCases(t *testing.T) { } }() - backend.Client().SendTransaction(ctx, tx) + if err := backend.Client().SendTransaction(ctx, tx); err != nil { + t.Fatalf("Failed to send transaction: %s", err) + } cancel() } diff --git a/consensus/dummy/README.md b/consensus/dummy/README.md index 23ff20072a..cca69cf239 100644 --- a/consensus/dummy/README.md +++ b/consensus/dummy/README.md @@ -18,7 +18,7 @@ The dynamic fee algorithm aims to adjust the base fee to handle network congesti - EIP-1559 is intended for Ethereum where a block is produced roughly every 10s - C-Chain typically produces blocks every 2 seconds, but the dynamic fee algorithm needs to handle the case that the network quiesces and there are no blocks for a long period of time -- Since C-Chain produces blocks at a different cadence, it adapts EIP-1559 to sum the amount of gas consumed within a 10 second interval instead of using only the amount of gas consumed in the parent block +- Since C-Chain produces blocks at a different cadence, it adapts EIP-1559 to sum the amount of gas consumed within a 10-second interval instead of using only the amount of gas consumed in the parent block ## Consensus Engine Callbacks diff --git a/core/blockchain.go b/core/blockchain.go index 1f94fe845d..6998fbb57a 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -179,9 +179,9 @@ func (c *CacheConfig) triedbConfig() *triedb.Config { config := &triedb.Config{Preimages: c.Preimages} if c.StateScheme == rawdb.HashScheme || c.StateScheme == "" { config.DBOverride = hashdb.Config{ - CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, - StatsPrefix: trieCleanCacheStatsNamespace, - ReferenceRoot: true, // Automatically reference root nodes when an update is made + CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, + StatsPrefix: trieCleanCacheStatsNamespace, + ReferenceRootAtomicallyOnUpdate: true, // Automatically reference root nodes when an update is made }.BackendConstructor } if c.StateScheme == rawdb.PathScheme { @@ -1282,16 +1282,6 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { blockContentValidationTimer.Inc(time.Since(substart).Milliseconds()) // No validation errors for the block - var activeState *state.StateDB - defer func() { - // The chain importer is starting and stopping trie prefetchers. If a bad - // block or other error is hit however, an early return may not properly - // terminate the background threads. This defer ensures that we clean up - // and dangling prefetcher, without deferring each and holding on live refs. 
- if activeState != nil { - activeState.StopPrefetcher() - } - }() // Retrieve the parent block to determine which root to build state on substart = time.Now() @@ -1310,8 +1300,8 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { blockStateInitTimer.Inc(time.Since(substart).Milliseconds()) // Enable prefetching to pull in trie node paths while processing transactions - statedb.StartPrefetcher("chain", bc.cacheConfig.TriePrefetcherParallelism) - activeState = statedb + statedb.StartPrefetcher("chain", state.WithConcurrentWorkers(bc.cacheConfig.TriePrefetcherParallelism)) + defer statedb.StopPrefetcher() // Process block using the parent state as reference point pstart := time.Now() @@ -1669,10 +1659,8 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block) } // Enable prefetching to pull in trie node paths while processing transactions - statedb.StartPrefetcher("chain", bc.cacheConfig.TriePrefetcherParallelism) - defer func() { - statedb.StopPrefetcher() - }() + statedb.StartPrefetcher("chain", state.WithConcurrentWorkers(bc.cacheConfig.TriePrefetcherParallelism)) + defer statedb.StopPrefetcher() // Process previously stored block receipts, _, usedGas, err := bc.processor.Process(current, parent.Header(), statedb, vm.Config{}) diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index 96199417ee..70465df4b9 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -247,6 +247,20 @@ func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { return bc.HasState(block.Root()) } +// ContractCodeWithPrefix retrieves a blob of data associated with a contract +// hash either from ephemeral in-memory cache, or from persistent storage. +// +// If the code doesn't exist in the in-memory cache, check the storage with +// new code scheme. +func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) { + type codeReader interface { + ContractCodeWithPrefix(address common.Address, codeHash common.Hash) ([]byte, error) + } + // TODO(rjl493456442) The associated account address is also required + // in Verkle scheme. Fix it once snap-sync is supported for Verkle. + return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Address{}, hash) +} + // State returns a new mutable state based on the current HEAD block. func (bc *BlockChain) State() (*state.StateDB, error) { return bc.StateAt(bc.CurrentBlock().Root) diff --git a/core/state/database.go b/core/state/database.go index b810bf2c3d..dd6dd2f096 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -171,6 +171,10 @@ func (db *cachingDB) ContractCode(address common.Address, codeHash common.Hash) return nil, errors.New("not found") } +func (db *cachingDB) ContractCodeWithPrefix(address common.Address, codeHash common.Hash) ([]byte, error) { + return db.ContractCode(address, codeHash) +} + // ContractCodeSize retrieves a particular contracts code's size. func (db *cachingDB) ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) { if cached, ok := db.codeSizeCache.Get(codeHash); ok { diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 659c31195e..a71ba5601b 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -52,7 +52,7 @@ const ( // stateBloomFilePrefix is the filename prefix of state bloom filter. stateBloomFilePrefix = "statebloom" - // stateBloomFilePrefix is the filename suffix of state bloom filter. 
+ // stateBloomFileSuffix is the filename suffix of state bloom filter. stateBloomFileSuffix = "bf.gz" // stateBloomFileTempSuffix is the filename suffix of state bloom filter diff --git a/core/state/statedb.go b/core/state/statedb.go index 887da56899..a1c278342e 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -83,20 +83,13 @@ func (wp *workerPool) Done() { wp.BoundedWorkers.Wait() } -func withConcurrentWorkers(prefetchers int) ethstate.PrefetcherOption { +func WithConcurrentWorkers(prefetchers int) ethstate.PrefetcherOption { pool := &workerPool{ BoundedWorkers: utils.NewBoundedWorkers(prefetchers), } return ethstate.WithWorkerPools(func() ethstate.WorkerPool { return pool }) } -// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the -// state trie concurrently while the state is mutated so that when we reach the -// commit phase, most of the needed data is already hot. -func (s *StateDB) StartPrefetcher(namespace string, maxConcurrency int) { - s.StateDB.StartPrefetcher(namespace, withConcurrentWorkers(maxConcurrency)) -} - // Retrieve the balance from the given address or 0 if object not found func (s *StateDB) GetBalanceMultiCoin(addr common.Address, coinID common.Hash) *big.Int { NormalizeCoinID(&coinID) diff --git a/core/test_blockchain.go b/core/test_blockchain.go index dacccd2e94..254d19ff43 100644 --- a/core/test_blockchain.go +++ b/core/test_blockchain.go @@ -1502,7 +1502,7 @@ func checkTxIndicesHelper(t *testing.T, expectedTail *uint64, indexedFrom uint64 require.EventuallyWithTf(t, func(c *assert.CollectT) { stored = *rawdb.ReadTxIndexTail(db) - require.Equalf(t, tailValue, stored, "expected tail to be %d, found %d", tailValue, stored) + assert.Equalf(c, tailValue, stored, "expected tail to be %d, found %d", tailValue, stored) }, 30*time.Second, 500*time.Millisecond, "expected tail to be %d eventually", tailValue) } diff --git a/core/types/account.go b/core/types/account.go index efc0927770..8965845107 100644 --- a/core/types/account.go +++ b/core/types/account.go @@ -17,71 +17,13 @@ package types import ( - "bytes" - "encoding/hex" - "encoding/json" - "fmt" - "math/big" - - "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/common/hexutil" - "github.com/ava-labs/libevm/common/math" + ethtypes "github.com/ava-labs/libevm/core/types" ) -//go:generate go run github.com/fjl/gencodec -type Account -field-override accountMarshaling -out gen_account.go - // Account represents an Ethereum account and its attached data. // This type is used to specify accounts in the genesis block state, and // is also useful for JSON encoding/decoding of accounts. -type Account struct { - Code []byte `json:"code,omitempty"` - Storage map[common.Hash]common.Hash `json:"storage,omitempty"` - Balance *big.Int `json:"balance" gencodec:"required"` - Nonce uint64 `json:"nonce,omitempty"` - - // used in tests - PrivateKey []byte `json:"secretKey,omitempty"` -} - -type accountMarshaling struct { - Code hexutil.Bytes - Balance *math.HexOrDecimal256 - Nonce math.HexOrDecimal64 - Storage map[storageJSON]storageJSON - PrivateKey hexutil.Bytes -} - -// storageJSON represents a 256 bit byte array, but allows less than 256 bits when -// unmarshaling from hex. 
-type storageJSON common.Hash - -func (h *storageJSON) UnmarshalText(text []byte) error { - text = bytes.TrimPrefix(text, []byte("0x")) - if len(text) > 64 { - return fmt.Errorf("too many hex characters in storage key/value %q", text) - } - offset := len(h) - len(text)/2 // pad on the left - if _, err := hex.Decode(h[offset:], text); err != nil { - return fmt.Errorf("invalid hex storage key/value %q", text) - } - return nil -} - -func (h storageJSON) MarshalText() ([]byte, error) { - return hexutil.Bytes(h[:]).MarshalText() -} +type Account = ethtypes.Account // GenesisAlloc specifies the initial state of a genesis block. -type GenesisAlloc map[common.Address]Account - -func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error { - m := make(map[common.UnprefixedAddress]Account) - if err := json.Unmarshal(data, &m); err != nil { - return err - } - *ga = make(GenesisAlloc) - for addr, a := range m { - (*ga)[common.Address(addr)] = a - } - return nil -} +type GenesisAlloc = ethtypes.GenesisAlloc diff --git a/core/types/gen_account.go b/core/types/gen_account.go deleted file mode 100644 index c3c7fb3fdf..0000000000 --- a/core/types/gen_account.go +++ /dev/null @@ -1,73 +0,0 @@ -// Code generated by github.com/fjl/gencodec. DO NOT EDIT. - -package types - -import ( - "encoding/json" - "errors" - "math/big" - - "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/common/hexutil" - "github.com/ava-labs/libevm/common/math" -) - -var _ = (*accountMarshaling)(nil) - -// MarshalJSON marshals as JSON. -func (a Account) MarshalJSON() ([]byte, error) { - type Account struct { - Code hexutil.Bytes `json:"code,omitempty"` - Storage map[storageJSON]storageJSON `json:"storage,omitempty"` - Balance *math.HexOrDecimal256 `json:"balance" gencodec:"required"` - Nonce math.HexOrDecimal64 `json:"nonce,omitempty"` - PrivateKey hexutil.Bytes `json:"secretKey,omitempty"` - } - var enc Account - enc.Code = a.Code - if a.Storage != nil { - enc.Storage = make(map[storageJSON]storageJSON, len(a.Storage)) - for k, v := range a.Storage { - enc.Storage[storageJSON(k)] = storageJSON(v) - } - } - enc.Balance = (*math.HexOrDecimal256)(a.Balance) - enc.Nonce = math.HexOrDecimal64(a.Nonce) - enc.PrivateKey = a.PrivateKey - return json.Marshal(&enc) -} - -// UnmarshalJSON unmarshals from JSON. 
-func (a *Account) UnmarshalJSON(input []byte) error { - type Account struct { - Code *hexutil.Bytes `json:"code,omitempty"` - Storage map[storageJSON]storageJSON `json:"storage,omitempty"` - Balance *math.HexOrDecimal256 `json:"balance" gencodec:"required"` - Nonce *math.HexOrDecimal64 `json:"nonce,omitempty"` - PrivateKey *hexutil.Bytes `json:"secretKey,omitempty"` - } - var dec Account - if err := json.Unmarshal(input, &dec); err != nil { - return err - } - if dec.Code != nil { - a.Code = *dec.Code - } - if dec.Storage != nil { - a.Storage = make(map[common.Hash]common.Hash, len(dec.Storage)) - for k, v := range dec.Storage { - a.Storage[common.Hash(k)] = common.Hash(v) - } - } - if dec.Balance == nil { - return errors.New("missing required field 'balance' for Account") - } - a.Balance = (*big.Int)(dec.Balance) - if dec.Nonce != nil { - a.Nonce = uint64(*dec.Nonce) - } - if dec.PrivateKey != nil { - a.PrivateKey = *dec.PrivateKey - } - return nil -} diff --git a/eth/protocols/snap/discovery.go b/eth/protocols/snap/discovery.go new file mode 100644 index 0000000000..dece58eacc --- /dev/null +++ b/eth/protocols/snap/discovery.go @@ -0,0 +1,32 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package snap + +import ( + "github.com/ava-labs/libevm/rlp" +) + +// enrEntry is the ENR entry which advertises `snap` protocol on the discovery. +type enrEntry struct { + // Ignore additional fields (for forward compatibility). + Rest []rlp.RawValue `rlp:"tail"` +} + +// ENRKey implements enr.Entry. +func (e enrEntry) ENRKey() string { + return "snap" +} diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go new file mode 100644 index 0000000000..4ceb4a1abc --- /dev/null +++ b/eth/protocols/snap/handler.go @@ -0,0 +1,654 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package snap + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/sync/handlers" + "github.com/ava-labs/coreth/sync/handlers/stats" + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/types" + "github.com/ava-labs/libevm/log" + "github.com/ava-labs/libevm/p2p" + "github.com/ava-labs/libevm/p2p/enode" + "github.com/ava-labs/libevm/p2p/enr" + "github.com/ava-labs/libevm/rlp" + "github.com/ava-labs/libevm/trie" + "github.com/ava-labs/libevm/trie/trienode" +) + +const ( + maxLeavesLimit = uint16(2048) + stateKeyLength = common.HashLength +) + +const ( + // softResponseLimit is the target maximum size of replies to data retrievals. + softResponseLimit = 1 * 1024 * 1024 + + // maxCodeLookups is the maximum number of bytecodes to serve. This number is + // there to limit the number of disk lookups. + maxCodeLookups = 1024 + + // stateLookupSlack defines the ratio by how much a state response can exceed + // the requested limit in order to try and avoid breaking up contracts into + // multiple packages and proving them. + stateLookupSlack = 0.1 + + // maxTrieNodeLookups is the maximum number of state trie nodes to serve. This + // number is there to limit the number of disk lookups. + maxTrieNodeLookups = 1024 + + // maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes. + // If we spend too much time, then it's a fairly high chance of timing out + // at the remote side, which means all the work is in vain. + maxTrieNodeTimeSpent = 5 * time.Second +) + +// Handler is a callback to invoke from an outside runner after the boilerplate +// exchanges have passed. +type Handler func(peer *Peer) error + +// Backend defines the data retrieval methods to serve remote requests and the +// callback methods to invoke on remote deliveries. +type Backend interface { + // Chain retrieves the blockchain object to serve data. + Chain() *core.BlockChain + + // RunPeer is invoked when a peer joins on the `eth` protocol. The handler + // should do any peer maintenance work, handshakes and validations. If all + // is passed, control should be given back to the `handler` to process the + // inbound messages going forward. + RunPeer(peer *Peer, handler Handler) error + + // PeerInfo retrieves all known `snap` information about a peer. + PeerInfo(id enode.ID) interface{} + + // Handle is a callback to be invoked when a data packet is received from + // the remote peer. Only packets not consumed by the protocol handler will + // be forwarded to the backend. + Handle(peer *Peer, packet Packet) error +} + +// MakeProtocols constructs the P2P protocol definitions for `snap`. +func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol { + // Filter the discovery iterator for nodes advertising snap support. 
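+	// (enode.Filter keeps only the nodes whose ENR record carries a decodable
+	// `snap` entry, dropping everything else from the dial candidates.)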
+ dnsdisc = enode.Filter(dnsdisc, func(n *enode.Node) bool { + var snap enrEntry + return n.Load(&snap) == nil + }) + + protocols := make([]p2p.Protocol, len(ProtocolVersions)) + for i, version := range ProtocolVersions { + version := version // Closure + + protocols[i] = p2p.Protocol{ + Name: ProtocolName, + Version: version, + Length: protocolLengths[version], + Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { + return backend.RunPeer(NewPeer(version, p, rw), func(peer *Peer) error { + return Handle(backend, peer) + }) + }, + NodeInfo: func() interface{} { + return nodeInfo(backend.Chain()) + }, + PeerInfo: func(id enode.ID) interface{} { + return backend.PeerInfo(id) + }, + Attributes: []enr.Entry{&enrEntry{}}, + DialCandidates: dnsdisc, + } + } + return protocols +} + +// Handle is the callback invoked to manage the life cycle of a `snap` peer. +// When this function terminates, the peer is disconnected. +func Handle(backend Backend, peer *Peer) error { + for { + if err := HandleMessage(backend, peer); err != nil { + peer.Log().Debug("Message handling failed in `snap`", "err", err) + return err + } + } +} + +// HandleMessage is invoked whenever an inbound message is received from a +// remote peer on the `snap` protocol. The remote connection is torn down upon +// returning any error. +func HandleMessage(backend Backend, peer *Peer) error { + // Read the next message from the remote peer, and ensure it's fully consumed + msg, err := peer.rw.ReadMsg() + if err != nil { + return err + } + if msg.Size > maxMessageSize { + return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize) + } + defer msg.Discard() + start := time.Now() + // Track the amount of time it takes to serve the request and run the handler + if metrics.Enabled { + h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) + defer func(start time.Time) { + sampler := func() metrics.Sample { + return metrics.ResettingSample( + metrics.NewExpDecaySample(1028, 0.015), + ) + } + metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds()) + }(start) + } + // Handle the message depending on its contents + switch { + case msg.Code == GetAccountRangeMsg: + // Decode the account retrieval request + var req GetAccountRangePacket + if err := msg.Decode(&req); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + // Service the request, potentially returning nothing in case of errors + accounts, proofs := ServiceGetAccountRangeQuery(backend.Chain(), &req) + + // Send back anything accumulated (or empty in case of errors) + return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ + ID: req.ID, + Accounts: accounts, + Proof: proofs, + }) + + case msg.Code == AccountRangeMsg: + // A range of accounts arrived to one of our previous requests + res := new(AccountRangePacket) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + // Ensure the range is monotonically increasing + for i := 1; i < len(res.Accounts); i++ { + if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 { + return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:]) + } + } + requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID) + + return backend.Handle(peer, res) + + case msg.Code == GetStorageRangesMsg: + // Decode the storage retrieval request + var req GetStorageRangesPacket + 
if err := msg.Decode(&req); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + // Service the request, potentially returning nothing in case of errors + slots, proofs := ServiceGetStorageRangesQuery(backend.Chain(), &req) + + // Send back anything accumulated (or empty in case of errors) + return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ + ID: req.ID, + Slots: slots, + Proof: proofs, + }) + + case msg.Code == StorageRangesMsg: + // A range of storage slots arrived to one of our previous requests + res := new(StorageRangesPacket) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + // Ensure the ranges are monotonically increasing + for i, slots := range res.Slots { + for j := 1; j < len(slots); j++ { + if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 { + return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:]) + } + } + } + requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID) + + return backend.Handle(peer, res) + + case msg.Code == GetByteCodesMsg: + // Decode bytecode retrieval request + var req GetByteCodesPacket + if err := msg.Decode(&req); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + // Service the request, potentially returning nothing in case of errors + codes := ServiceGetByteCodesQuery(backend.Chain(), &req) + + // Send back anything accumulated (or empty in case of errors) + return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{ + ID: req.ID, + Codes: codes, + }) + + case msg.Code == ByteCodesMsg: + // A batch of byte codes arrived to one of our previous requests + res := new(ByteCodesPacket) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID) + + return backend.Handle(peer, res) + + case msg.Code == GetTrieNodesMsg: + // Decode trie node retrieval request + var req GetTrieNodesPacket + if err := msg.Decode(&req); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + // Service the request, potentially returning nothing in case of errors + nodes, err := ServiceGetTrieNodesQuery(backend.Chain(), &req, start) + if err != nil { + return err + } + // Send back anything accumulated (or empty in case of errors) + return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ + ID: req.ID, + Nodes: nodes, + }) + + case msg.Code == TrieNodesMsg: + // A batch of trie nodes arrived to one of our previous requests + res := new(TrieNodesPacket) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID) + + return backend.Handle(peer, res) + + default: + return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code) + } +} + +// ServiceGetAccountRangeQuery assembles the response to an account range query. +// It is exposed to allow external packages to test protocol behavior. 
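+//
+// A minimal caller sketch (illustrative only; chain and root are placeholders
+// for an initialized blockchain and a served state root):
+//
+//	req := &GetAccountRangePacket{
+//		ID:    1,
+//		Root:  root,
+//		Limit: common.MaxHash,
+//		Bytes: softResponseLimit,
+//	}
+//	accounts, proofs := ServiceGetAccountRangeQuery(chain, req)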
+func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePacket) ([]*AccountData, [][]byte) { + if req.Bytes > softResponseLimit { + req.Bytes = softResponseLimit + } + // Retrieve the requested state and bail out if non existent + tr, err := trie.New(trie.StateTrieID(req.Root), chain.TrieDB()) + if err != nil { + log.Debug("Failed to open account trie", "root", req.Root, "err", err) + return nil, nil + } + //nodeIt, err := tr.NodeIterator(req.Origin[:]) + //if err != nil { + // log.Debug("Failed to iterate over account range", "origin", req.Origin, "err", err) + // return nil, nil + //} + //it := trie.NewIterator(nodeIt) + // XXX: restore these + // Patching in the existing response mechanism for now + leafsRequest := &message.LeafsRequest{ + Root: req.Root, + Start: req.Origin[:], + End: req.Limit[:], + NodeType: message.StateTrieNode, + } + leafsResponse := &message.LeafsResponse{} + handlerStats := stats.NewNoopHandlerStats() + handler := handlers.NewResponseBuilder( + leafsRequest, leafsResponse, tr, chain.Snapshots(), stateKeyLength, maxLeavesLimit, handlerStats, + ) + responseTime := 1200 * time.Millisecond // 2s is the AppRequest timeout, better be conservative here + ctx, cancel := context.WithTimeout(context.Background(), responseTime) + defer cancel() // don't leak a goroutine + handler.HandleRequest(ctx) + + //it, err := chain.Snapshots().AccountIterator(req.Root, req.Origin, false) + //if err != nil { + // return nil, nil + //} + // Iterate over the requested range and pile accounts up + var ( + accounts []*AccountData + size uint64 + last common.Hash + ) + for i, leafResponseKey := range leafsResponse.Keys { + //for it.Next() { + //hash, account := it.Hash(), common.CopyBytes(it.Account()) + //hash, account := common.BytesToHash(it.Key), it.Value + hash, account := common.BytesToHash(leafResponseKey), leafsResponse.Vals[i] + acc := new(types.StateAccount) + if err := rlp.DecodeBytes(account, &acc); err != nil { + log.Warn("Failed to unmarshal account", "hash", hash, "err", err) + continue + } + account = types.SlimAccountRLP(*acc) + + // Track the returned interval for the Merkle proofs + last = hash + + // Assemble the reply item + size += uint64(common.HashLength + len(account)) + accounts = append(accounts, &AccountData{ + Hash: hash, + Body: account, + }) + // If we've exceeded the request threshold, abort + if bytes.Compare(hash[:], req.Limit[:]) >= 0 { + break + } + if size > req.Bytes { + break + } + } + //it.Release() + + // Generate the Merkle proofs for the first and last account + proof := trienode.NewProofSet() + if err := tr.Prove(req.Origin[:], proof); err != nil { + log.Warn("Failed to prove account range", "origin", req.Origin, "err", err) + return nil, nil + } + if last != (common.Hash{}) { + if err := tr.Prove(last[:], proof); err != nil { + log.Warn("Failed to prove account range", "last", last, "err", err) + return nil, nil + } + } + var proofs [][]byte + for _, blob := range proof.List() { + proofs = append(proofs, blob) + } + return accounts, proofs +} + +func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesPacket) ([][]*StorageData, [][]byte) { + if req.Bytes > softResponseLimit { + req.Bytes = softResponseLimit + } + // TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set? 
+ // TODO(karalabe): - Logging locally is not ideal as remote faults annoy the local user + // TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional) + + // Calculate the hard limit at which to abort, even if mid storage trie + hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack)) + + // Retrieve storage ranges until the packet limit is reached + var ( + slots [][]*StorageData + proofs [][]byte + size uint64 + ) + for _, account := range req.Accounts { + // If we've exceeded the requested data limit, abort without opening + // a new storage range (that we'd need to prove due to exceeded size) + if size >= req.Bytes { + break + } + // The first account might start from a different origin and end sooner + var origin common.Hash + if len(req.Origin) > 0 { + origin, req.Origin = common.BytesToHash(req.Origin), nil + } + var limit = common.MaxHash + if len(req.Limit) > 0 { + limit, req.Limit = common.BytesToHash(req.Limit), nil + } + + // XXX: put this back + accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), chain.TrieDB()) + if err != nil { + return nil, nil + } + acc, err := accTrie.GetAccountByHash(account) + if err != nil || acc == nil { + return nil, nil + } + id := trie.StorageTrieID(req.Root, account, acc.Root) + stTrie, err := trie.New(id, chain.TrieDB()) // XXX: put this back (was trie.NewStateTrie) + if err != nil { + return nil, nil + } + // Patching in the existing response mechanism for now + leafsRequest := &message.LeafsRequest{ + Root: acc.Root, + Account: account, + Start: origin[:], + End: limit[:], + NodeType: message.StateTrieNode, + } + leafsResponse := &message.LeafsResponse{} + handlerStats := stats.NewNoopHandlerStats() + handler := handlers.NewResponseBuilder( + leafsRequest, leafsResponse, stTrie, chain.Snapshots(), stateKeyLength, maxLeavesLimit, handlerStats, + ) + responseTime := 1200 * time.Millisecond // 2s is the AppRequest timeout, better be conservative here + ctx, cancel := context.WithTimeout(context.Background(), responseTime) + defer cancel() // don't leak a goroutine + handler.HandleRequest(ctx) + + // XXX: put this back + // Retrieve the requested state and bail out if non existent + //it, err := chain.Snapshots().StorageIterator(req.Root, account, origin) + //if err != nil { + // return nil, nil + //} + //nodeIt, err := stTrie.NodeIterator(origin[:]) + //if err != nil { + // log.Debug("Failed to iterate over storage range", "origin", origin, "err", err) + // return nil, nil + //} + //it := trie.NewIterator(nodeIt) + + // Iterate over the requested range and pile slots up + var ( + storage []*StorageData + last common.Hash + abort bool + ) + //for it.Next() { + for i, leafResponseKey := range leafsResponse.Keys { + if size >= hardLimit { + abort = true + break + } + //hash, slot := it.Hash(), common.CopyBytes(it.Slot()) + //hash, slot := common.BytesToHash(it.Key), common.CopyBytes(it.Value) + hash, slot := common.BytesToHash(leafResponseKey), leafsResponse.Vals[i] + + // Track the returned interval for the Merkle proofs + last = hash + + // Assemble the reply item + size += uint64(common.HashLength + len(slot)) + storage = append(storage, &StorageData{ + Hash: hash, + Body: slot, + }) + // If we've exceeded the request threshold, abort + if bytes.Compare(hash[:], limit[:]) >= 0 { + break + } + } + abort = abort || leafsResponse.More + + if len(storage) > 0 { + slots = append(slots, storage) + } + //it.Release() + + // Generate the Merkle proofs for the first and last storage 
slot, but + // only if the response was capped. If the entire storage trie included + // in the response, no need for any proofs. + if origin != (common.Hash{}) || (abort && len(storage) > 0) { + // Request started at a non-zero hash or was capped prematurely, add + // the endpoint Merkle proofs + proof := trienode.NewProofSet() + if err := stTrie.Prove(origin[:], proof); err != nil { + log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err) + return nil, nil + } + if last != (common.Hash{}) { + if err := stTrie.Prove(last[:], proof); err != nil { + log.Warn("Failed to prove storage range", "last", last, "err", err) + return nil, nil + } + } + for _, blob := range proof.List() { + proofs = append(proofs, blob) + } + // Proof terminates the reply as proofs are only added if a node + // refuses to serve more data (exception when a contract fetch is + // finishing, but that's that). + break + } + } + return slots, proofs +} + +// ServiceGetByteCodesQuery assembles the response to a byte codes query. +// It is exposed to allow external packages to test protocol behavior. +func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [][]byte { + if req.Bytes > softResponseLimit { + req.Bytes = softResponseLimit + } + if len(req.Hashes) > maxCodeLookups { + req.Hashes = req.Hashes[:maxCodeLookups] + } + // Retrieve bytecodes until the packet size limit is reached + var ( + codes [][]byte + bytes uint64 + ) + for _, hash := range req.Hashes { + if hash == types.EmptyCodeHash { + // Peers should not request the empty code, but if they do, at + // least sent them back a correct response without db lookups + codes = append(codes, []byte{}) + } else if blob, err := chain.ContractCodeWithPrefix(hash); err == nil { + codes = append(codes, blob) + bytes += uint64(len(blob)) + } + if bytes > req.Bytes { + break + } + } + return codes +} + +// ServiceGetTrieNodesQuery assembles the response to a trie nodes query. +// It is exposed to allow external packages to test protocol behavior. +func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, start time.Time) ([][]byte, error) { + if req.Bytes > softResponseLimit { + req.Bytes = softResponseLimit + } + // Make sure we have the state associated with the request + triedb := chain.TrieDB() + + accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), triedb) + if err != nil { + // We don't have the requested state available, bail out + return nil, nil + } + // The 'snap' might be nil, in which case we cannot serve storage slots. + snap := chain.Snapshots().Snapshot(req.Root) + // Retrieve trie nodes until the packet size limit is reached + var ( + nodes [][]byte + bytes uint64 + loads int // Trie hash expansions to count database reads + ) + for _, pathset := range req.Paths { + switch len(pathset) { + case 0: + // Ensure we penalize invalid requests + return nil, fmt.Errorf("%w: zero-item pathset requested", errBadRequest) + + case 1: + // If we're only retrieving an account trie node, fetch it directly + blob, resolved, err := accTrie.GetNode(pathset[0]) + loads += resolved // always account database reads, even for failures + if err != nil { + break + } + nodes = append(nodes, blob) + bytes += uint64(len(blob)) + + default: + var stRoot common.Hash + // Storage slots requested, open the storage trie and retrieve from there + if snap == nil { + // We don't have the requested state snapshotted yet (or it is stale), + // but can look up the account via the trie instead. 
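+				// (A trie lookup walks nodes from the root, so it is slower than a
+				// snapshot read; the fixed loads estimate below accounts for that.)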
+ account, err := accTrie.GetAccountByHash(common.BytesToHash(pathset[0])) + loads += 8 // We don't know the exact cost of lookup, this is an estimate + if err != nil || account == nil { + break + } + stRoot = account.Root + } else { + account, err := snap.Account(common.BytesToHash(pathset[0])) + loads++ // always account database reads, even for failures + if err != nil || account == nil { + break + } + stRoot = common.BytesToHash(account.Root) + } + id := trie.StorageTrieID(req.Root, common.BytesToHash(pathset[0]), stRoot) + stTrie, err := trie.NewStateTrie(id, triedb) + loads++ // always account database reads, even for failures + if err != nil { + break + } + for _, path := range pathset[1:] { + blob, resolved, err := stTrie.GetNode(path) + loads += resolved // always account database reads, even for failures + if err != nil { + break + } + nodes = append(nodes, blob) + bytes += uint64(len(blob)) + + // Sanity check limits to avoid DoS on the store trie loads + if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent { + break + } + } + } + // Abort request processing if we've exceeded our limits + if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent { + break + } + } + return nodes, nil +} + +// NodeInfo represents a short summary of the `snap` sub-protocol metadata +// known about the host peer. +type NodeInfo struct{} + +// nodeInfo retrieves some `snap` protocol metadata about the running host node. +func nodeInfo(chain *core.BlockChain) *NodeInfo { + return &NodeInfo{} +} diff --git a/eth/protocols/snap/handler_fuzzing_test.go b/eth/protocols/snap/handler_fuzzing_test.go new file mode 100644 index 0000000000..91b9aad6fb --- /dev/null +++ b/eth/protocols/snap/handler_fuzzing_test.go @@ -0,0 +1,166 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package snap + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/big" + "testing" + "time" + + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/types" + "github.com/ava-labs/libevm/core/vm" + "github.com/ava-labs/libevm/p2p" + "github.com/ava-labs/libevm/p2p/enode" + "github.com/ava-labs/libevm/params" + "github.com/ava-labs/libevm/rlp" + fuzz "github.com/google/gofuzz" +) + +func FuzzARange(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + doFuzz(data, &GetAccountRangePacket{}, GetAccountRangeMsg) + }) +} + +func FuzzSRange(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + doFuzz(data, &GetStorageRangesPacket{}, GetStorageRangesMsg) + }) +} + +func FuzzByteCodes(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + doFuzz(data, &GetByteCodesPacket{}, GetByteCodesMsg) + }) +} + +func FuzzTrieNodes(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + doFuzz(data, &GetTrieNodesPacket{}, GetTrieNodesMsg) + }) +} + +func doFuzz(input []byte, obj interface{}, code int) { + bc := getChain() + defer bc.Stop() + fuzz.NewFromGoFuzz(input).Fuzz(obj) + var data []byte + switch p := obj.(type) { + case *GetTrieNodesPacket: + p.Root = trieRoot + data, _ = rlp.EncodeToBytes(obj) + default: + data, _ = rlp.EncodeToBytes(obj) + } + cli := &dummyRW{ + code: uint64(code), + data: data, + } + peer := NewFakePeer(65, "gazonk01", cli) + err := HandleMessage(&dummyBackend{bc}, peer) + switch { + case err == nil && cli.writeCount != 1: + panic(fmt.Sprintf("Expected 1 response, got %d", cli.writeCount)) + case err != nil && cli.writeCount != 0: + panic(fmt.Sprintf("Expected 0 response, got %d", cli.writeCount)) + } +} + +var trieRoot common.Hash + +func getChain() *core.BlockChain { + ga := make(types.GenesisAlloc, 1000) + var a = make([]byte, 20) + var mkStorage = func(k, v int) (common.Hash, common.Hash) { + var kB = make([]byte, 32) + var vB = make([]byte, 32) + binary.LittleEndian.PutUint64(kB, uint64(k)) + binary.LittleEndian.PutUint64(vB, uint64(v)) + return common.BytesToHash(kB), common.BytesToHash(vB) + } + storage := make(map[common.Hash]common.Hash) + for i := 0; i < 10; i++ { + k, v := mkStorage(i, i) + storage[k] = v + } + for i := 0; i < 1000; i++ { + binary.LittleEndian.PutUint64(a, uint64(i+0xff)) + acc := types.Account{Balance: big.NewInt(int64(i))} + if i%2 == 1 { + acc.Storage = storage + } + ga[common.BytesToAddress(a)] = acc + } + gspec := &core.Genesis{ + Config: params.TestChainConfig, + Alloc: ga, + } + _, blocks, _, err := core.GenerateChainWithGenesis(gspec, dummy.NewFaker(), 2, 10, func(i int, gen *core.BlockGen) {}) + cacheConf := &core.CacheConfig{ + TrieCleanLimit: 0, + TrieDirtyLimit: 0, + // TrieTimeLimit: 5 * time.Minute, + // TrieCleanNoPrefetch: true, + SnapshotLimit: 100, + SnapshotWait: true, + } + if err != nil { + panic(err) + } + trieRoot = blocks[len(blocks)-1].Root() + bc, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), cacheConf, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + if _, err := bc.InsertChain(blocks); err != nil { + panic(err) + } + return bc +} + +type dummyBackend struct { + chain *core.BlockChain +} + +func (d *dummyBackend) Chain() *core.BlockChain { return d.chain } +func (d *dummyBackend) RunPeer(*Peer, Handler) error { return nil } +func (d *dummyBackend) PeerInfo(enode.ID) interface{} { return "Foo" } +func (d *dummyBackend) Handle(*Peer, 
Packet) error    { return nil }
+
+type dummyRW struct {
+	code       uint64
+	data       []byte
+	writeCount int
+}
+
+func (d *dummyRW) ReadMsg() (p2p.Msg, error) {
+	return p2p.Msg{
+		Code:       d.code,
+		Payload:    bytes.NewReader(d.data),
+		ReceivedAt: time.Now(),
+		Size:       uint32(len(d.data)),
+	}, nil
+}
+
+func (d *dummyRW) WriteMsg(msg p2p.Msg) error {
+	d.writeCount++
+	return nil
+}
diff --git a/eth/protocols/snap/metrics.go b/eth/protocols/snap/metrics.go
new file mode 100644
index 0000000000..378a4cab8d
--- /dev/null
+++ b/eth/protocols/snap/metrics.go
@@ -0,0 +1,57 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snap
+
+import (
+	metrics "github.com/ava-labs/libevm/metrics"
+)
+
+var (
+	ingressRegistrationErrorName = "eth/protocols/snap/ingress/registration/error"
+	egressRegistrationErrorName  = "eth/protocols/snap/egress/registration/error"
+
+	IngressRegistrationErrorMeter = metrics.NewRegisteredMeter(ingressRegistrationErrorName, nil)
+	EgressRegistrationErrorMeter  = metrics.NewRegisteredMeter(egressRegistrationErrorName, nil)
+
+	// deletionGauge is the metric to track how many trie node deletions
+	// are performed in total during the sync process.
+	deletionGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/delete", nil)
+
+	// lookupGauge is the metric to track how many trie node lookups are
+	// performed to determine if a node needs to be deleted.
+	lookupGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/lookup", nil)
+
+	// boundaryAccountNodesGauge is the metric to track how many boundary trie
+	// nodes in the account trie are met.
+	boundaryAccountNodesGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/boundary/account", nil)
+
+	// boundaryStorageNodesGauge is the metric to track how many boundary trie
+	// nodes in storage tries are met.
+	boundaryStorageNodesGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/boundary/storage", nil)
+
+	// smallStorageGauge is the metric to track how many storages are small enough
+	// to be retrieved in one or two requests.
+	smallStorageGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/small", nil)
+
+	// largeStorageGauge is the metric to track how many storages are large enough
+	// to be retrieved concurrently.
+	largeStorageGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/large", nil)
+
+	// skipStorageHealingGauge is the metric to track how many storages are retrieved
+	// in multiple requests but healing is not necessary.
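+	// (A storage trie that spans multiple responses normally needs a healing
+	// pass for its boundary nodes; this counts the tries where the fetched
+	// ranges lined up and no healing was needed.)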
+ skipStorageHealingGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/noheal", nil) +) diff --git a/eth/protocols/snap/peer.go b/eth/protocols/snap/peer.go new file mode 100644 index 0000000000..89eb0d10dc --- /dev/null +++ b/eth/protocols/snap/peer.go @@ -0,0 +1,133 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package snap + +import ( + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/log" + "github.com/ava-labs/libevm/p2p" +) + +// Peer is a collection of relevant information we have about a `snap` peer. +type Peer struct { + id string // Unique ID for the peer, cached + + *p2p.Peer // The embedded P2P package peer + rw p2p.MsgReadWriter // Input/output streams for snap + version uint // Protocol version negotiated + + logger log.Logger // Contextual logger with the peer id injected +} + +// NewPeer creates a wrapper for a network connection and negotiated protocol +// version. +func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer { + id := p.ID().String() + return &Peer{ + id: id, + Peer: p, + rw: rw, + version: version, + logger: log.New("peer", id[:8]), + } +} + +// NewFakePeer creates a fake snap peer without a backing p2p peer, for testing purposes. +func NewFakePeer(version uint, id string, rw p2p.MsgReadWriter) *Peer { + return &Peer{ + id: id, + rw: rw, + version: version, + logger: log.New("peer", id[:8]), + } +} + +// ID retrieves the peer's unique identifier. +func (p *Peer) ID() string { + return p.id +} + +// Version retrieves the peer's negotiated `snap` protocol version. +func (p *Peer) Version() uint { + return p.version +} + +// Log overrides the P2P logger with the higher level one containing only the id. +func (p *Peer) Log() log.Logger { + return p.logger +} + +// RequestAccountRange fetches a batch of accounts rooted in a specific account +// trie, starting with the origin. +func (p *Peer) RequestAccountRange(id uint64, root common.Hash, origin, limit common.Hash, bytes uint64) error { + p.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes)) + + requestTracker.Track(p.id, p.version, GetAccountRangeMsg, AccountRangeMsg, id) + return p2p.Send(p.rw, GetAccountRangeMsg, &GetAccountRangePacket{ + ID: id, + Root: root, + Origin: origin, + Limit: limit, + Bytes: bytes, + }) +} + +// RequestStorageRanges fetches a batch of storage slots belonging to one or more +// accounts. If slots from only one account is requested, an origin marker may also +// be used to retrieve from there. 
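+//
+// An illustrative single-account call (id, root, account, origin and limit are
+// placeholders for caller-supplied values):
+//
+//	err := p.RequestStorageRanges(id, root, []common.Hash{account}, origin, limit, softResponseLimit)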
+func (p *Peer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
+	if len(accounts) == 1 && origin != nil {
+		p.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
+	} else {
+		p.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
+	}
+	requestTracker.Track(p.id, p.version, GetStorageRangesMsg, StorageRangesMsg, id)
+	return p2p.Send(p.rw, GetStorageRangesMsg, &GetStorageRangesPacket{
+		ID:       id,
+		Root:     root,
+		Accounts: accounts,
+		Origin:   origin,
+		Limit:    limit,
+		Bytes:    bytes,
+	})
+}
+
+// RequestByteCodes fetches a batch of bytecodes by hash.
+func (p *Peer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
+	p.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
+
+	requestTracker.Track(p.id, p.version, GetByteCodesMsg, ByteCodesMsg, id)
+	return p2p.Send(p.rw, GetByteCodesMsg, &GetByteCodesPacket{
+		ID:     id,
+		Hashes: hashes,
+		Bytes:  bytes,
+	})
+}
+
+// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
+// a specific state trie.
+func (p *Peer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
+	p.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
+
+	requestTracker.Track(p.id, p.version, GetTrieNodesMsg, TrieNodesMsg, id)
+	return p2p.Send(p.rw, GetTrieNodesMsg, &GetTrieNodesPacket{
+		ID:    id,
+		Root:  root,
+		Paths: paths,
+		Bytes: bytes,
+	})
+}
diff --git a/eth/protocols/snap/protocol.go b/eth/protocols/snap/protocol.go
new file mode 100644
index 0000000000..d615a08e0e
--- /dev/null
+++ b/eth/protocols/snap/protocol.go
@@ -0,0 +1,218 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snap
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/ava-labs/libevm/common"
+	"github.com/ava-labs/libevm/core/types"
+	"github.com/ava-labs/libevm/rlp"
+)
+
+// Constants to match up protocol versions and messages
+const (
+	SNAP1 = 1
+)
+
+// ProtocolName is the official short name of the `snap` protocol used during
+// devp2p capability negotiation.
+const ProtocolName = "snap"
+
+// ProtocolVersions are the supported versions of the `snap` protocol (first
+// is primary).
+var ProtocolVersions = []uint{SNAP1}
+
+// protocolLengths are the number of implemented messages corresponding to
+// different protocol versions.
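+// SNAP1 carries eight request/response message codes (GetAccountRangeMsg 0x00
+// through TrieNodesMsg 0x07 below), hence its length of 8.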
+var protocolLengths = map[uint]uint64{SNAP1: 8}
+
+// maxMessageSize is the maximum cap on the size of a protocol message.
+const maxMessageSize = 10 * 1024 * 1024
+
+const (
+ GetAccountRangeMsg = 0x00
+ AccountRangeMsg = 0x01
+ GetStorageRangesMsg = 0x02
+ StorageRangesMsg = 0x03
+ GetByteCodesMsg = 0x04
+ ByteCodesMsg = 0x05
+ GetTrieNodesMsg = 0x06
+ TrieNodesMsg = 0x07
+)
+
+var (
+ errMsgTooLarge = errors.New("message too long")
+ errDecode = errors.New("invalid message")
+ errInvalidMsgCode = errors.New("invalid message code")
+ errBadRequest = errors.New("bad request")
+)
+
+// Packet represents a p2p message in the `snap` protocol.
+type Packet interface {
+ Name() string // Name returns a string corresponding to the message type.
+ Kind() byte // Kind returns the message type.
+}
+
+// GetAccountRangePacket represents an account query.
+type GetAccountRangePacket struct {
+ ID uint64 // Request ID to match up responses with
+ Root common.Hash // Root hash of the account trie to serve
+ Origin common.Hash // Hash of the first account to retrieve
+ Limit common.Hash // Hash of the last account to retrieve
+ Bytes uint64 // Soft limit at which to stop returning data
+}
+
+// AccountRangePacket represents an account query response.
+type AccountRangePacket struct {
+ ID uint64 // ID of the request this is a response for
+ Accounts []*AccountData // List of consecutive accounts from the trie
+ Proof [][]byte // List of trie nodes proving the account range
+}
+
+// AccountData represents a single account in a query response.
+type AccountData struct {
+ Hash common.Hash // Hash of the account
+ Body rlp.RawValue // Account body in slim format
+}
+
+// Unpack retrieves the accounts from the range packet and converts from slim
+// wire representation to consensus format. The returned data is RLP encoded
+// since it's expected to be serialized to disk without further interpretation.
+//
+// Note, this method does a round of RLP decoding and reencoding, so only use it
+// once and cache the results if need be. Ideally discard the packet afterwards
+// to not double the memory use.
+func (p *AccountRangePacket) Unpack() ([]common.Hash, [][]byte, error) {
+ var (
+ hashes = make([]common.Hash, len(p.Accounts))
+ accounts = make([][]byte, len(p.Accounts))
+ )
+ for i, acc := range p.Accounts {
+ val, err := types.FullAccountRLP(acc.Body)
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid account %x: %v", acc.Body, err)
+ }
+ hashes[i], accounts[i] = acc.Hash, val
+ }
+ return hashes, accounts, nil
+}
+
+// GetStorageRangesPacket represents a storage slot query.
+type GetStorageRangesPacket struct {
+ ID uint64 // Request ID to match up responses with
+ Root common.Hash // Root hash of the account trie to serve
+ Accounts []common.Hash // Account hashes of the storage tries to serve
+ Origin []byte // Hash of the first storage slot to retrieve (large contract mode)
+ Limit []byte // Hash of the last storage slot to retrieve (large contract mode)
+ Bytes uint64 // Soft limit at which to stop returning data
+}
+
+// StorageRangesPacket represents a storage slot query response.
+type StorageRangesPacket struct {
+ ID uint64 // ID of the request this is a response for
+ Slots [][]*StorageData // Lists of consecutive storage slots for the requested accounts
+ Proof [][]byte // Merkle proofs for the *last* slot range, if it's incomplete
+}
+
+// StorageData represents a single storage slot in a query response.
+type StorageData struct { + Hash common.Hash // Hash of the storage slot + Body []byte // Data content of the slot +} + +// Unpack retrieves the storage slots from the range packet and returns them in +// a split flat format that's more consistent with the internal data structures. +func (p *StorageRangesPacket) Unpack() ([][]common.Hash, [][][]byte) { + var ( + hashset = make([][]common.Hash, len(p.Slots)) + slotset = make([][][]byte, len(p.Slots)) + ) + for i, slots := range p.Slots { + hashset[i] = make([]common.Hash, len(slots)) + slotset[i] = make([][]byte, len(slots)) + for j, slot := range slots { + hashset[i][j] = slot.Hash + slotset[i][j] = slot.Body + } + } + return hashset, slotset +} + +// GetByteCodesPacket represents a contract bytecode query. +type GetByteCodesPacket struct { + ID uint64 // Request ID to match up responses with + Hashes []common.Hash // Code hashes to retrieve the code for + Bytes uint64 // Soft limit at which to stop returning data +} + +// ByteCodesPacket represents a contract bytecode query response. +type ByteCodesPacket struct { + ID uint64 // ID of the request this is a response for + Codes [][]byte // Requested contract bytecodes +} + +// GetTrieNodesPacket represents a state trie node query. +type GetTrieNodesPacket struct { + ID uint64 // Request ID to match up responses with + Root common.Hash // Root hash of the account trie to serve + Paths []TrieNodePathSet // Trie node hashes to retrieve the nodes for + Bytes uint64 // Soft limit at which to stop returning data +} + +// TrieNodePathSet is a list of trie node paths to retrieve. A naive way to +// represent trie nodes would be a simple list of `account || storage` path +// segments concatenated, but that would be very wasteful on the network. +// +// Instead, this array special cases the first element as the path in the +// account trie and the remaining elements as paths in the storage trie. To +// address an account node, the slice should have a length of 1 consisting +// of only the account path. There's no need to be able to address both an +// account node and a storage node in the same request as it cannot happen +// that a slot is accessed before the account path is fully expanded. +type TrieNodePathSet [][]byte + +// TrieNodesPacket represents a state trie node query response. 
+type TrieNodesPacket struct {
+ ID uint64 // ID of the request this is a response for
+ Nodes [][]byte // Requested state trie nodes
+}
+
+func (*GetAccountRangePacket) Name() string { return "GetAccountRange" }
+func (*GetAccountRangePacket) Kind() byte { return GetAccountRangeMsg }
+
+func (*AccountRangePacket) Name() string { return "AccountRange" }
+func (*AccountRangePacket) Kind() byte { return AccountRangeMsg }
+
+func (*GetStorageRangesPacket) Name() string { return "GetStorageRanges" }
+func (*GetStorageRangesPacket) Kind() byte { return GetStorageRangesMsg }
+
+func (*StorageRangesPacket) Name() string { return "StorageRanges" }
+func (*StorageRangesPacket) Kind() byte { return StorageRangesMsg }
+
+func (*GetByteCodesPacket) Name() string { return "GetByteCodes" }
+func (*GetByteCodesPacket) Kind() byte { return GetByteCodesMsg }
+
+func (*ByteCodesPacket) Name() string { return "ByteCodes" }
+func (*ByteCodesPacket) Kind() byte { return ByteCodesMsg }
+
+func (*GetTrieNodesPacket) Name() string { return "GetTrieNodes" }
+func (*GetTrieNodesPacket) Kind() byte { return GetTrieNodesMsg }
+
+func (*TrieNodesPacket) Name() string { return "TrieNodes" }
+func (*TrieNodesPacket) Kind() byte { return TrieNodesMsg }
diff --git a/eth/protocols/snap/range.go b/eth/protocols/snap/range.go
new file mode 100644
index 0000000000..ea7ceeb9e4
--- /dev/null
+++ b/eth/protocols/snap/range.go
@@ -0,0 +1,81 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snap
+
+import (
+ "math/big"
+
+ "github.com/ava-labs/libevm/common"
+ "github.com/holiman/uint256"
+)
+
+// hashRange is a utility to handle ranges of hashes: split up the hash-space
+// into sections and 'walk' over those sections.
+type hashRange struct {
+ current *uint256.Int
+ step *uint256.Int
+}
+
+// newHashRange creates a new hashRange, initiated at the start position,
+// and with the step set to fill the desired 'num' chunks.
+func newHashRange(start common.Hash, num uint64) *hashRange {
+ left := new(big.Int).Sub(hashSpace, start.Big())
+ step := new(big.Int).Div(
+ new(big.Int).Add(left, new(big.Int).SetUint64(num-1)),
+ new(big.Int).SetUint64(num),
+ )
+ step256 := new(uint256.Int)
+ step256.SetFromBig(step)
+
+ return &hashRange{
+ current: new(uint256.Int).SetBytes32(start[:]),
+ step: step256,
+ }
+}
+
+// Next pushes the hash range to the next interval.
+func (r *hashRange) Next() bool {
+ next, overflow := new(uint256.Int).AddOverflow(r.current, r.step)
+ if overflow {
+ return false
+ }
+ r.current = next
+ return true
+}
+
+// Start returns the first hash in the current interval.
+func (r *hashRange) Start() common.Hash {
+ return r.current.Bytes32()
+}
+
+// End returns the last hash in the current interval.
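+// If the range is not evenly divisible, the final interval is clamped to
+// common.MaxHash and is therefore shorter than the preceding ones.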
+func (r *hashRange) End() common.Hash {
+ // If the end overflows (non divisible range), return a shorter interval
+ next, overflow := new(uint256.Int).AddOverflow(r.current, r.step)
+ if overflow {
+ return common.MaxHash
+ }
+ return next.SubUint64(next, 1).Bytes32()
+}
+
+// incHash returns the next hash, in lexicographical order (a.k.a. plus one)
+func incHash(h common.Hash) common.Hash {
+ var a uint256.Int
+ a.SetBytes32(h[:])
+ a.AddUint64(&a, 1)
+ return common.Hash(a.Bytes32())
+}
diff --git a/eth/protocols/snap/range_test.go b/eth/protocols/snap/range_test.go
new file mode 100644
index 0000000000..7aca87d778
--- /dev/null
+++ b/eth/protocols/snap/range_test.go
@@ -0,0 +1,143 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snap
+
+import (
+ "testing"
+
+ "github.com/ava-labs/libevm/common"
+)
+
+// Tests that given a starting hash and a density, the hash ranger can correctly
+// split up the remaining hash space into a fixed number of chunks.
+func TestHashRanges(t *testing.T) {
+ tests := []struct {
+ head common.Hash
+ chunks uint64
+ starts []common.Hash
+ ends []common.Hash
+ }{
+ // Simple test case to split the entire hash range into 4 chunks
+ {
+ head: common.Hash{},
+ chunks: 4,
+ starts: []common.Hash{
+ {},
+ common.HexToHash("0x4000000000000000000000000000000000000000000000000000000000000000"),
+ common.HexToHash("0x8000000000000000000000000000000000000000000000000000000000000000"),
+ common.HexToHash("0xc000000000000000000000000000000000000000000000000000000000000000"),
+ },
+ ends: []common.Hash{
+ common.HexToHash("0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ common.HexToHash("0xbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ common.MaxHash,
+ },
+ },
+ // Split a divisible part of the hash range up into 2 chunks
+ {
+ head: common.HexToHash("0x2000000000000000000000000000000000000000000000000000000000000000"),
+ chunks: 2,
+ starts: []common.Hash{
+ {},
+ common.HexToHash("0x9000000000000000000000000000000000000000000000000000000000000000"),
+ },
+ ends: []common.Hash{
+ common.HexToHash("0x8fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ common.MaxHash,
+ },
+ },
+ // Split the entire hash range into a non divisible 3 chunks
+ {
+ head: common.Hash{},
+ chunks: 3,
+ starts: []common.Hash{
+ {},
+ common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555556"),
+ common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
+ },
+ ends: []common.Hash{
+ common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
+ common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"),
+ common.MaxHash,
+ },
+ },
+ // Split a part of hash range into a non divisible 3 chunks
+ {
+ head: common.HexToHash("0x2000000000000000000000000000000000000000000000000000000000000000"),
+ chunks: 3,
+ starts: []common.Hash{
+ {},
+ common.HexToHash("0x6aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"),
+ common.HexToHash("0xb555555555555555555555555555555555555555555555555555555555555556"),
+ },
+ ends: []common.Hash{
+ common.HexToHash("0x6aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
+ common.HexToHash("0xb555555555555555555555555555555555555555555555555555555555555555"),
+ common.MaxHash,
+ },
+ },
+ // Split a part of hash range into a non divisible 3 chunks, but with a
+ // meaningful space size for manual verification.
+ // - The head being 0xff...f0, we have 16 hashes left in the space
+ // - Chunking up 16 into 3 pieces is 5.(3), but we need the ceil of 6 to avoid a micro-last-chunk
+ // - Since the range is not divisible, the last interval will be shorter, capped at 0xff...f
+ // - The chunk ranges thus need to be [..0, ..5], [..6, ..b], [..c, ..f]
+ {
+ head: common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0"),
+ chunks: 3,
+ starts: []common.Hash{
+ {},
+ common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6"),
+ common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc"),
+ },
+ ends: []common.Hash{
+ common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5"),
+ common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb"),
+ common.MaxHash,
+ },
+ },
+ }
+ for i, tt := range tests {
+ r := newHashRange(tt.head, tt.chunks)
+
+ var (
+ starts = []common.Hash{{}}
+ ends = []common.Hash{r.End()}
+ )
+ for r.Next() {
+ starts = append(starts, r.Start())
+ ends = append(ends, r.End())
+ }
+ if len(starts) != len(tt.starts) {
+ t.Errorf("test %d: starts count mismatch: have %d, want %d", i, len(starts), len(tt.starts))
+ }
+ for j := 0; j < len(starts) && j < len(tt.starts); j++ {
+ if starts[j] != tt.starts[j] {
+ t.Errorf("test %d, start %d: hash mismatch: have %x, want %x", i, j, starts[j], tt.starts[j])
+ }
+ }
+ if len(ends) != len(tt.ends) {
+ t.Errorf("test %d: ends count mismatch: have %d, want %d", i, len(ends), len(tt.ends))
+ }
+ for j := 0; j < len(ends) && j < len(tt.ends); j++ {
+ if ends[j] != tt.ends[j] {
+ t.Errorf("test %d, end %d: hash mismatch: have %x, want %x", i, j, ends[j], tt.ends[j])
+ }
+ }
+ }
+}
diff --git a/eth/protocols/snap/sort_test.go b/eth/protocols/snap/sort_test.go
new file mode 100644
index 0000000000..2d51de44d9
--- /dev/null
+++ b/eth/protocols/snap/sort_test.go
@@ -0,0 +1,101 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+ +package snap + +import ( + "bytes" + "fmt" + "testing" + + "github.com/ava-labs/libevm/common" +) + +func hexToNibbles(s string) []byte { + if len(s) >= 2 && s[0] == '0' && s[1] == 'x' { + s = s[2:] + } + var s2 []byte + for _, ch := range []byte(s) { + s2 = append(s2, '0') + s2 = append(s2, ch) + } + return common.Hex2Bytes(string(s2)) +} + +func TestRequestSorting(t *testing.T) { + // - Path 0x9 -> {0x19} + // - Path 0x99 -> {0x0099} + // - Path 0x01234567890123456789012345678901012345678901234567890123456789019 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x19} + // - Path 0x012345678901234567890123456789010123456789012345678901234567890199 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x0099} + var f = func(path string) string { + data := hexToNibbles(path) + return string(data) + } + var ( + hashes []common.Hash + paths []string + ) + for _, x := range []string{ + "0x9", + "0x012345678901234567890123456789010123456789012345678901234567890195", + "0x012345678901234567890123456789010123456789012345678901234567890197", + "0x012345678901234567890123456789010123456789012345678901234567890196", + "0x99", + "0x012345678901234567890123456789010123456789012345678901234567890199", + "0x01234567890123456789012345678901012345678901234567890123456789019", + "0x0123456789012345678901234567890101234567890123456789012345678901", + "0x01234567890123456789012345678901012345678901234567890123456789010", + "0x01234567890123456789012345678901012345678901234567890123456789011", + } { + paths = append(paths, f(x)) + hashes = append(hashes, common.Hash{}) + } + _, _, syncPaths, pathsets := sortByAccountPath(paths, hashes) + { + var b = new(bytes.Buffer) + for i := 0; i < len(syncPaths); i++ { + fmt.Fprintf(b, "\n%d. paths %x", i, syncPaths[i]) + } + want := ` +0. paths [0099] +1. paths [0123456789012345678901234567890101234567890123456789012345678901 00] +2. paths [0123456789012345678901234567890101234567890123456789012345678901 0095] +3. paths [0123456789012345678901234567890101234567890123456789012345678901 0096] +4. paths [0123456789012345678901234567890101234567890123456789012345678901 0097] +5. paths [0123456789012345678901234567890101234567890123456789012345678901 0099] +6. paths [0123456789012345678901234567890101234567890123456789012345678901 10] +7. paths [0123456789012345678901234567890101234567890123456789012345678901 11] +8. paths [0123456789012345678901234567890101234567890123456789012345678901 19] +9. paths [19]` + if have := b.String(); have != want { + t.Errorf("have:%v\nwant:%v\n", have, want) + } + } + { + var b = new(bytes.Buffer) + for i := 0; i < len(pathsets); i++ { + fmt.Fprintf(b, "\n%d. pathset %x", i, pathsets[i]) + } + want := ` +0. pathset [0099] +1. pathset [0123456789012345678901234567890101234567890123456789012345678901 00 0095 0096 0097 0099 10 11 19] +2. pathset [19]` + if have := b.String(); have != want { + t.Errorf("have:%v\nwant:%v\n", have, want) + } + } +} diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go new file mode 100644 index 0000000000..d47580bc17 --- /dev/null +++ b/eth/protocols/snap/sync.go @@ -0,0 +1,3210 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snap
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ gomath "math"
+ "math/big"
+ "math/rand"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/common/math"
+ "github.com/ava-labs/libevm/core/rawdb"
+ "github.com/ava-labs/libevm/core/state"
+ "github.com/ava-labs/libevm/core/types"
+ "github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/event"
+ "github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/p2p/msgrate"
+ "github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "golang.org/x/crypto/sha3"
+)
+
+const (
+ // minRequestSize is the minimum number of bytes to request from a remote peer.
+ // This number is used as the low cap for account and storage range requests.
+ // Bytecode and trienode are limited inherently by item count (1).
+ minRequestSize = 64 * 1024
+
+ // maxRequestSize is the maximum number of bytes to request from a remote peer.
+ // This number is used as the high cap for account and storage range requests.
+ // Bytecode and trienode are limited more explicitly by the caps below.
+ maxRequestSize = 512 * 1024
+
+ // maxCodeRequestCount is the maximum number of bytecode blobs to request in a
+ // single query. If this number is too low, we're not filling responses fully
+ // and waste round trip times. If it's too high, we're capping responses and
+ // waste bandwidth.
+ //
+ // Deployed bytecodes are currently capped at 24KB, so the minimum request
+ // size should be maxRequestSize / 24K. Assuming that most contracts do not
+ // come close to that, requesting 4x should be a good approximation.
+ maxCodeRequestCount = maxRequestSize / (24 * 1024) * 4
+
+ // maxTrieRequestCount is the maximum number of trie node blobs to request in
+ // a single query. If this number is too low, we're not filling responses fully
+ // and waste round trip times. If it's too high, we're capping responses and
+ // waste bandwidth.
+ maxTrieRequestCount = maxRequestSize / 512
+
+ // trienodeHealRateMeasurementImpact is the impact a single measurement has on
+ // the local node's trienode processing capacity. A value closer to 0 reacts
+ // slower to sudden changes, but it is also more stable against temporary hiccups.
+ trienodeHealRateMeasurementImpact = 0.005
+
+ // minTrienodeHealThrottle is the minimum divisor for throttling trie node
+ // heal requests to avoid overloading the local node and excessively expanding
+ // the state trie breadth wise.
+ minTrienodeHealThrottle = 1
+
+ // maxTrienodeHealThrottle is the maximum divisor for throttling trie node
+ // heal requests to avoid overloading the local node and excessively expanding
+ // the state trie breadth wise.
+ maxTrienodeHealThrottle = maxTrieRequestCount
+
+ // trienodeHealThrottleIncrease is the multiplier for the throttle when the
+ // rate of arriving data is higher than the rate of processing it.
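+ // Both factors adjust a divisor bounded by minTrienodeHealThrottle and
+ // maxTrienodeHealThrottle, scaling down how much heal data is asked for
+ // per request.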
+ trienodeHealThrottleIncrease = 1.33
+
+ // trienodeHealThrottleDecrease is the divisor for the throttle when the
+ // rate of arriving data is lower than the rate of processing it.
+ trienodeHealThrottleDecrease = 1.25
+)
+
+var (
+ // accountConcurrency is the number of chunks to split the account trie into
+ // to allow concurrent retrievals.
+ accountConcurrency = 16
+
+ // storageConcurrency is the number of chunks to split a large contract
+ // storage trie into to allow concurrent retrievals.
+ storageConcurrency = 16
+)
+
+// ErrCancelled is returned from snap syncing if the operation was prematurely
+// terminated.
+var ErrCancelled = errors.New("sync cancelled")
+
+// accountRequest tracks a pending account range request to ensure responses are
+// to actual requests and to validate any security constraints.
+//
+// Concurrency note: account requests and responses are handled concurrently from
+// the main runloop to allow Merkle proof verifications on the peer's thread and
+// to drop on invalid response. The request struct must contain all the data to
+// construct the response without accessing runloop internals (i.e. task). That
+// is only included to allow the runloop to match a response to the task being
+// synced without having yet another set of maps.
+type accountRequest struct {
+ peer string // Peer to which this request is assigned
+ id uint64 // Request ID of this request
+ time time.Time // Timestamp when the request was sent
+
+ deliver chan *accountResponse // Channel to deliver successful response on
+ revert chan *accountRequest // Channel to deliver request failure on
+ cancel chan struct{} // Channel to track sync cancellation
+ timeout *time.Timer // Timer to track delivery timeout
+ stale chan struct{} // Channel to signal the request was dropped
+
+ origin common.Hash // First account requested to allow continuation checks
+ limit common.Hash // Last account requested to allow non-overlapping chunking
+
+ task *accountTask // Task which this request is filling (only access fields through the runloop!!)
+}
+
+// accountResponse is an already Merkle-verified remote response to an account
+// range request. It contains the subtrie for the requested account range and
+// the database that's going to be filled with the internal nodes on commit.
+type accountResponse struct {
+ task *accountTask // Task which this request is filling
+
+ hashes []common.Hash // Account hashes in the returned range
+ accounts []*types.StateAccount // Expanded accounts in the returned range
+
+ cont bool // Whether the account range has a continuation
+}
+
+// bytecodeRequest tracks a pending bytecode request to ensure responses are to
+// actual requests and to validate any security constraints.
+//
+// Concurrency note: bytecode requests and responses are handled concurrently from
+// the main runloop to allow Keccak256 hash verifications on the peer's thread and
+// to drop on invalid response. The request struct must contain all the data to
+// construct the response without accessing runloop internals (i.e. task). That
+// is only included to allow the runloop to match a response to the task being
+// synced without having yet another set of maps.
+type bytecodeRequest struct {
+ peer string // Peer to which this request is assigned
+ id uint64 // Request ID of this request
+ time time.Time // Timestamp when the request was sent
+
+ deliver chan *bytecodeResponse // Channel to deliver successful response on
+ revert chan *bytecodeRequest // Channel to deliver request failure on
+ cancel chan struct{} // Channel to track sync cancellation
+ timeout *time.Timer // Timer to track delivery timeout
+ stale chan struct{} // Channel to signal the request was dropped
+
+ hashes []common.Hash // Bytecode hashes to validate responses
+ task *accountTask // Task which this request is filling (only access fields through the runloop!!)
+}
+
+// bytecodeResponse is an already verified remote response to a bytecode request.
+type bytecodeResponse struct {
+ task *accountTask // Task which this request is filling
+
+ hashes []common.Hash // Hashes of the bytecode to avoid double hashing
+ codes [][]byte // Actual bytecodes to store into the database (nil = missing)
+}
+
+// storageRequest tracks a pending storage ranges request to ensure responses are
+// to actual requests and to validate any security constraints.
+//
+// Concurrency note: storage requests and responses are handled concurrently from
+// the main runloop to allow Merkle proof verifications on the peer's thread and
+// to drop on invalid response. The request struct must contain all the data to
+// construct the response without accessing runloop internals (i.e. tasks). That
+// is only included to allow the runloop to match a response to the task being
+// synced without having yet another set of maps.
+type storageRequest struct {
+ peer string // Peer to which this request is assigned
+ id uint64 // Request ID of this request
+ time time.Time // Timestamp when the request was sent
+
+ deliver chan *storageResponse // Channel to deliver successful response on
+ revert chan *storageRequest // Channel to deliver request failure on
+ cancel chan struct{} // Channel to track sync cancellation
+ timeout *time.Timer // Timer to track delivery timeout
+ stale chan struct{} // Channel to signal the request was dropped
+
+ accounts []common.Hash // Account hashes to validate responses
+ roots []common.Hash // Storage roots to validate responses
+
+ origin common.Hash // First storage slot requested to allow continuation checks
+ limit common.Hash // Last storage slot requested to allow non-overlapping chunking
+
+ mainTask *accountTask // Task which this response belongs to (only access fields through the runloop!!)
+ subTask *storageTask // Task which this response is filling (only access fields through the runloop!!)
+}
+
+// storageResponse is an already Merkle-verified remote response to a storage
+// range request. It contains the subtries for the requested storage ranges and
+// the databases that are going to be filled with the internal nodes on commit.
+type storageResponse struct { + mainTask *accountTask // Task which this response belongs to + subTask *storageTask // Task which this response is filling + + accounts []common.Hash // Account hashes requested, may be only partially filled + roots []common.Hash // Storage roots requested, may be only partially filled + + hashes [][]common.Hash // Storage slot hashes in the returned range + slots [][][]byte // Storage slot values in the returned range + + cont bool // Whether the last storage range has a continuation +} + +// trienodeHealRequest tracks a pending state trie request to ensure responses +// are to actual requests and to validate any security constraints. +// +// Concurrency note: trie node requests and responses are handled concurrently from +// the main runloop to allow Keccak256 hash verifications on the peer's thread and +// to drop on invalid response. The request struct must contain all the data to +// construct the response without accessing runloop internals (i.e. task). That +// is only included to allow the runloop to match a response to the task being +// synced without having yet another set of maps. +type trienodeHealRequest struct { + peer string // Peer to which this request is assigned + id uint64 // Request ID of this request + time time.Time // Timestamp when the request was sent + + deliver chan *trienodeHealResponse // Channel to deliver successful response on + revert chan *trienodeHealRequest // Channel to deliver request failure on + cancel chan struct{} // Channel to track sync cancellation + timeout *time.Timer // Timer to track delivery timeout + stale chan struct{} // Channel to signal the request was dropped + + paths []string // Trie node paths for identifying trie node + hashes []common.Hash // Trie node hashes to validate responses + + task *healTask // Task which this request is filling (only access fields through the runloop!!) +} + +// trienodeHealResponse is an already verified remote response to a trie node request. +type trienodeHealResponse struct { + task *healTask // Task which this request is filling + + paths []string // Paths of the trie nodes + hashes []common.Hash // Hashes of the trie nodes to avoid double hashing + nodes [][]byte // Actual trie nodes to store into the database (nil = missing) +} + +// bytecodeHealRequest tracks a pending bytecode request to ensure responses are to +// actual requests and to validate any security constraints. +// +// Concurrency note: bytecode requests and responses are handled concurrently from +// the main runloop to allow Keccak256 hash verifications on the peer's thread and +// to drop on invalid response. The request struct must contain all the data to +// construct the response without accessing runloop internals (i.e. task). That +// is only included to allow the runloop to match a response to the task being +// synced without having yet another set of maps. 
+type bytecodeHealRequest struct {
+ peer string // Peer to which this request is assigned
+ id uint64 // Request ID of this request
+ time time.Time // Timestamp when the request was sent
+
+ deliver chan *bytecodeHealResponse // Channel to deliver successful response on
+ revert chan *bytecodeHealRequest // Channel to deliver request failure on
+ cancel chan struct{} // Channel to track sync cancellation
+ timeout *time.Timer // Timer to track delivery timeout
+ stale chan struct{} // Channel to signal the request was dropped
+
+ hashes []common.Hash // Bytecode hashes to validate responses
+ task *healTask // Task which this request is filling (only access fields through the runloop!!)
+}
+
+// bytecodeHealResponse is an already verified remote response to a bytecode request.
+type bytecodeHealResponse struct {
+ task *healTask // Task which this request is filling
+
+ hashes []common.Hash // Hashes of the bytecode to avoid double hashing
+ codes [][]byte // Actual bytecodes to store into the database (nil = missing)
+}
+
+// accountTask represents the sync task for a chunk of the account snapshot.
+type accountTask struct {
+ // These fields get serialized to leveldb on shutdown
+ Next common.Hash // Next account to sync in this interval
+ Last common.Hash // Last account to sync in this interval
+ SubTasks map[common.Hash][]*storageTask // Storage intervals needing fetching for large contracts
+
+ // These fields are internals used during runtime
+ req *accountRequest // Pending request to fill this task
+ res *accountResponse // Validated response filling this task
+ pend int // Number of pending subtasks for this round
+
+ needCode []bool // Flags whether the filling accounts need code retrieval
+ needState []bool // Flags whether the filling accounts need storage retrieval
+ needHeal []bool // Flags whether the filling accounts' state was chunked and needs healing
+
+ codeTasks map[common.Hash]struct{} // Code hashes that need retrieval
+ stateTasks map[common.Hash]common.Hash // Account hashes->roots that need full state retrieval
+
+ genBatch ethdb.Batch // Batch used by the node generator
+ genTrie *trie.StackTrie // Node generator from storage slots
+
+ done bool // Flag whether the task can be removed
+}
+
+// storageTask represents the sync task for a chunk of the storage snapshot.
+type storageTask struct {
+ Next common.Hash // Next storage slot to sync in this interval
+ Last common.Hash // Last storage slot to sync in this interval
+
+ // These fields are internals used during runtime
+ root common.Hash // Storage root hash for this instance
+ req *storageRequest // Pending request to fill this task
+
+ genBatch ethdb.Batch // Batch used by the node generator
+ genTrie *trie.StackTrie // Node generator from storage slots
+
+ done bool // Flag whether the task can be removed
+}
+
+// healTask represents the sync task for healing the snap-synced chunk boundaries.
+type healTask struct {
+ scheduler *trie.Sync // State trie sync scheduler defining the tasks
+
+ trieTasks map[string]common.Hash // Set of trie node tasks currently queued for retrieval, indexed by node path
+ codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval, indexed by code hash
+}
+
+// SyncProgress is a database entry to allow suspending and resuming a snapshot state
+// sync. As opposed to full and fast sync, there is no way to restart a suspended
+// snap sync without prior knowledge of the suspension point.
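+// The struct is JSON-encoded and stored with the snapshot sync status in the
+// database; see loadSyncStatus and saveSyncStatus for the round trip.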
+type SyncProgress struct {
+ Tasks []*accountTask // The suspended account tasks (contract tasks within)
+
+ // Status report during syncing phase
+ AccountSynced uint64 // Number of accounts downloaded
+ AccountBytes common.StorageSize // Number of account trie bytes persisted to disk
+ BytecodeSynced uint64 // Number of bytecodes downloaded
+ BytecodeBytes common.StorageSize // Number of bytecode bytes downloaded
+ StorageSynced uint64 // Number of storage slots downloaded
+ StorageBytes common.StorageSize // Number of storage trie bytes persisted to disk
+
+ // Status report during healing phase
+ TrienodeHealSynced uint64 // Number of state trie nodes downloaded
+ TrienodeHealBytes common.StorageSize // Number of state trie bytes persisted to disk
+ BytecodeHealSynced uint64 // Number of bytecodes downloaded
+ BytecodeHealBytes common.StorageSize // Number of bytecodes persisted to disk
+}
+
+// SyncPending is analogous to SyncProgress, but it's used to report on pending
+// ephemeral sync progress that doesn't get persisted into the database.
+type SyncPending struct {
+ TrienodeHeal uint64 // Number of state trie nodes pending
+ BytecodeHeal uint64 // Number of bytecodes pending
+}
+
+// SyncPeer abstracts out the methods required for a peer to be synced against
+// with the goal of allowing the construction of mock peers without the full
+// blown networking.
+type SyncPeer interface {
+ // ID retrieves the peer's unique identifier.
+ ID() string
+
+ // RequestAccountRange fetches a batch of accounts rooted in a specific account
+ // trie, starting with the origin.
+ RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error
+
+ // RequestStorageRanges fetches a batch of storage slots belonging to one or
+ // more accounts. If slots from only one account are requested, an origin marker
+ // may also be used to retrieve from there.
+ RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error
+
+ // RequestByteCodes fetches a batch of bytecodes by hash.
+ RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error
+
+ // RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
+ // a specific state trie.
+ RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error
+
+ // Log retrieves the peer's own contextual logger.
+ Log() log.Logger
+}
+
+// Syncer is an Ethereum account and storage trie syncer based on snapshots and
+// the snap protocol. Its purpose is to download all the accounts and storage
+// slots from remote peers and reassemble chunks of the state trie, on top of
+// which a state sync can be run to fix any gaps / overlaps.
+// +// Every network request has a variety of failure events: +// - The peer disconnects after task assignment, failing to send the request +// - The peer disconnects after sending the request, before delivering on it +// - The peer remains connected, but does not deliver a response in time +// - The peer delivers a stale response after a previous timeout +// - The peer delivers a refusal to serve the requested state +type Syncer struct { + db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup) + scheme string // Node scheme used in node database + + root common.Hash // Current state trie root being synced + tasks []*accountTask // Current account task set being synced + snapped bool // Flag to signal that snap phase is done + healer *healTask // Current state healing task being executed + update chan struct{} // Notification channel for possible sync progression + + peers map[string]SyncPeer // Currently active peers to download from + peerJoin *event.Feed // Event feed to react to peers joining + peerDrop *event.Feed // Event feed to react to peers dropping + rates *msgrate.Trackers // Message throughput rates for peers + + // Request tracking during syncing phase + statelessPeers map[string]struct{} // Peers that failed to deliver state data + accountIdlers map[string]struct{} // Peers that aren't serving account requests + bytecodeIdlers map[string]struct{} // Peers that aren't serving bytecode requests + storageIdlers map[string]struct{} // Peers that aren't serving storage requests + + accountReqs map[uint64]*accountRequest // Account requests currently running + bytecodeReqs map[uint64]*bytecodeRequest // Bytecode requests currently running + storageReqs map[uint64]*storageRequest // Storage requests currently running + + accountSynced uint64 // Number of accounts downloaded + accountBytes common.StorageSize // Number of account trie bytes persisted to disk + bytecodeSynced uint64 // Number of bytecodes downloaded + bytecodeBytes common.StorageSize // Number of bytecode bytes downloaded + storageSynced uint64 // Number of storage slots downloaded + storageBytes common.StorageSize // Number of storage trie bytes persisted to disk + + extProgress *SyncProgress // progress that can be exposed to external caller. 
+ + // Request tracking during healing phase + trienodeHealIdlers map[string]struct{} // Peers that aren't serving trie node requests + bytecodeHealIdlers map[string]struct{} // Peers that aren't serving bytecode requests + + trienodeHealReqs map[uint64]*trienodeHealRequest // Trie node requests currently running + bytecodeHealReqs map[uint64]*bytecodeHealRequest // Bytecode requests currently running + + trienodeHealRate float64 // Average heal rate for processing trie node data + trienodeHealPend atomic.Uint64 // Number of trie nodes currently pending for processing + trienodeHealThrottle float64 // Divisor for throttling the amount of trienode heal data requested + trienodeHealThrottled time.Time // Timestamp the last time the throttle was updated + + trienodeHealSynced uint64 // Number of state trie nodes downloaded + trienodeHealBytes common.StorageSize // Number of state trie bytes persisted to disk + trienodeHealDups uint64 // Number of state trie nodes already processed + trienodeHealNops uint64 // Number of state trie nodes not requested + bytecodeHealSynced uint64 // Number of bytecodes downloaded + bytecodeHealBytes common.StorageSize // Number of bytecodes persisted to disk + bytecodeHealDups uint64 // Number of bytecodes already processed + bytecodeHealNops uint64 // Number of bytecodes not requested + + stateWriter ethdb.Batch // Shared batch writer used for persisting raw states + accountHealed uint64 // Number of accounts downloaded during the healing stage + accountHealedBytes common.StorageSize // Number of raw account bytes persisted to disk during the healing stage + storageHealed uint64 // Number of storage slots downloaded during the healing stage + storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage + + startTime time.Time // Time instance when snapshot sync started + logTime time.Time // Time instance when status was last reported + + pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown + lock sync.RWMutex // Protects fields that can change outside of sync (peers, reqs, root) +} + +// NewSyncer creates a new snapshot syncer to download the Ethereum state over the +// snap protocol. +func NewSyncer(db ethdb.KeyValueStore, scheme string) *Syncer { + return &Syncer{ + db: db, + scheme: scheme, + + peers: make(map[string]SyncPeer), + peerJoin: new(event.Feed), + peerDrop: new(event.Feed), + rates: msgrate.NewTrackers(log.New("proto", "snap")), + update: make(chan struct{}, 1), + + accountIdlers: make(map[string]struct{}), + storageIdlers: make(map[string]struct{}), + bytecodeIdlers: make(map[string]struct{}), + + accountReqs: make(map[uint64]*accountRequest), + storageReqs: make(map[uint64]*storageRequest), + bytecodeReqs: make(map[uint64]*bytecodeRequest), + + trienodeHealIdlers: make(map[string]struct{}), + bytecodeHealIdlers: make(map[string]struct{}), + + trienodeHealReqs: make(map[uint64]*trienodeHealRequest), + bytecodeHealReqs: make(map[uint64]*bytecodeHealRequest), + trienodeHealThrottle: maxTrienodeHealThrottle, // Tune downward instead of insta-filling with junk + stateWriter: db.NewBatch(), + + extProgress: new(SyncProgress), + } +} + +// Register injects a new data source into the syncer's peerset. 
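+// The new peer starts out idle for every request type, and a notification is
+// sent to any running sync so that tasks can be assigned to it.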
+func (s *Syncer) Register(peer SyncPeer) error {
+ // Make sure the peer is not registered yet
+ id := peer.ID()
+
+ s.lock.Lock()
+ if _, ok := s.peers[id]; ok {
+ log.Error("Snap peer already registered", "id", id)
+
+ s.lock.Unlock()
+ return errors.New("already registered")
+ }
+ s.peers[id] = peer
+ s.rates.Track(id, msgrate.NewTracker(s.rates.MeanCapacities(), s.rates.MedianRoundTrip()))
+
+ // Mark the peer as idle, even if no sync is running
+ s.accountIdlers[id] = struct{}{}
+ s.storageIdlers[id] = struct{}{}
+ s.bytecodeIdlers[id] = struct{}{}
+ s.trienodeHealIdlers[id] = struct{}{}
+ s.bytecodeHealIdlers[id] = struct{}{}
+ s.lock.Unlock()
+
+ // Notify any active syncs that a new peer can be assigned data
+ s.peerJoin.Send(id)
+ return nil
+}
+
+// Unregister removes a data source from the syncer's peerset.
+func (s *Syncer) Unregister(id string) error {
+ // Remove all traces of the peer from the registry
+ s.lock.Lock()
+ if _, ok := s.peers[id]; !ok {
+ log.Error("Snap peer not registered", "id", id)
+
+ s.lock.Unlock()
+ return errors.New("not registered")
+ }
+ delete(s.peers, id)
+ s.rates.Untrack(id)
+
+ // Remove status markers, even if no sync is running
+ delete(s.statelessPeers, id)
+
+ delete(s.accountIdlers, id)
+ delete(s.storageIdlers, id)
+ delete(s.bytecodeIdlers, id)
+ delete(s.trienodeHealIdlers, id)
+ delete(s.bytecodeHealIdlers, id)
+ s.lock.Unlock()
+
+ // Notify any active syncs that pending requests need to be reverted
+ s.peerDrop.Send(id)
+ return nil
+}
+
+// Sync starts (or resumes a previous) sync cycle to iterate over a state trie
+// with the given root and reconstruct the nodes based on the snapshot leaves.
+// Previously downloaded segments will not be redownloaded or fixed, rather any
+// errors will be healed after the leaves are fully accumulated.
+func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
+ // Move the trie root from any previous value, revert stateless markers for
+ // any peers and initialize the syncer if it was not yet run
+ s.lock.Lock()
+ s.root = root
+ s.healer = &healTask{
+ scheduler: state.NewStateSync(root, s.db, s.onHealState, s.scheme),
+ trieTasks: make(map[string]common.Hash),
+ codeTasks: make(map[common.Hash]struct{}),
+ }
+ s.statelessPeers = make(map[string]struct{})
+ s.lock.Unlock()
+
+ if s.startTime == (time.Time{}) {
+ s.startTime = time.Now()
+ }
+ // Retrieve the previous sync status from LevelDB and abort if already synced
+ s.loadSyncStatus()
+ if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
+ log.Debug("Snapshot sync already completed")
+ return nil
+ }
+ defer func() { // Persist any progress, independent of failure
+ for _, task := range s.tasks {
+ s.forwardAccountTask(task)
+ }
+ s.cleanAccountTasks()
+ s.saveSyncStatus()
+ }()
+
+ log.Debug("Starting snapshot sync cycle", "root", root)
+
+ // Flush out the last committed raw states
+ defer func() {
+ if s.stateWriter.ValueSize() > 0 {
+ s.stateWriter.Write()
+ s.stateWriter.Reset()
+ }
+ }()
+ defer s.report(true)
+ // commit any trie- and bytecode-healing data.
+ defer s.commitHealer(true) + + // Whether sync completed or not, disregard any future packets + defer func() { + log.Debug("Terminating snapshot sync cycle", "root", root) + s.lock.Lock() + s.accountReqs = make(map[uint64]*accountRequest) + s.storageReqs = make(map[uint64]*storageRequest) + s.bytecodeReqs = make(map[uint64]*bytecodeRequest) + s.trienodeHealReqs = make(map[uint64]*trienodeHealRequest) + s.bytecodeHealReqs = make(map[uint64]*bytecodeHealRequest) + s.lock.Unlock() + }() + // Keep scheduling sync tasks + peerJoin := make(chan string, 16) + peerJoinSub := s.peerJoin.Subscribe(peerJoin) + defer peerJoinSub.Unsubscribe() + + peerDrop := make(chan string, 16) + peerDropSub := s.peerDrop.Subscribe(peerDrop) + defer peerDropSub.Unsubscribe() + + // Create a set of unique channels for this sync cycle. We need these to be + // ephemeral so a data race doesn't accidentally deliver something stale on + // a persistent channel across syncs (yup, this happened) + var ( + accountReqFails = make(chan *accountRequest) + storageReqFails = make(chan *storageRequest) + bytecodeReqFails = make(chan *bytecodeRequest) + accountResps = make(chan *accountResponse) + storageResps = make(chan *storageResponse) + bytecodeResps = make(chan *bytecodeResponse) + trienodeHealReqFails = make(chan *trienodeHealRequest) + bytecodeHealReqFails = make(chan *bytecodeHealRequest) + trienodeHealResps = make(chan *trienodeHealResponse) + bytecodeHealResps = make(chan *bytecodeHealResponse) + ) + for { + // Remove all completed tasks and terminate sync if everything's done + s.cleanStorageTasks() + s.cleanAccountTasks() + if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 { + return nil + } + // Assign all the data retrieval tasks to any free peers + s.assignAccountTasks(accountResps, accountReqFails, cancel) + s.assignBytecodeTasks(bytecodeResps, bytecodeReqFails, cancel) + s.assignStorageTasks(storageResps, storageReqFails, cancel) + + if len(s.tasks) == 0 { + // Sync phase done, run heal phase + s.assignTrienodeHealTasks(trienodeHealResps, trienodeHealReqFails, cancel) + s.assignBytecodeHealTasks(bytecodeHealResps, bytecodeHealReqFails, cancel) + } + // Update sync progress + s.lock.Lock() + s.extProgress = &SyncProgress{ + AccountSynced: s.accountSynced, + AccountBytes: s.accountBytes, + BytecodeSynced: s.bytecodeSynced, + BytecodeBytes: s.bytecodeBytes, + StorageSynced: s.storageSynced, + StorageBytes: s.storageBytes, + TrienodeHealSynced: s.trienodeHealSynced, + TrienodeHealBytes: s.trienodeHealBytes, + BytecodeHealSynced: s.bytecodeHealSynced, + BytecodeHealBytes: s.bytecodeHealBytes, + } + s.lock.Unlock() + // Wait for something to happen + select { + case <-s.update: + // Something happened (new peer, delivery, timeout), recheck tasks + case <-peerJoin: + // A new peer joined, try to schedule it new tasks + case id := <-peerDrop: + s.revertRequests(id) + case <-cancel: + return ErrCancelled + + case req := <-accountReqFails: + s.revertAccountRequest(req) + case req := <-bytecodeReqFails: + s.revertBytecodeRequest(req) + case req := <-storageReqFails: + s.revertStorageRequest(req) + case req := <-trienodeHealReqFails: + s.revertTrienodeHealRequest(req) + case req := <-bytecodeHealReqFails: + s.revertBytecodeHealRequest(req) + + case res := <-accountResps: + s.processAccountResponse(res) + case res := <-bytecodeResps: + s.processBytecodeResponse(res) + case res := <-storageResps: + s.processStorageResponse(res) + case res := <-trienodeHealResps: + s.processTrienodeHealResponse(res) + case res := 
<-bytecodeHealResps: + s.processBytecodeHealResponse(res) + } + // Report stats if something meaningful happened + s.report(false) + } +} + +// cleanPath is used to remove the dangling nodes in the stackTrie. +func (s *Syncer) cleanPath(batch ethdb.Batch, owner common.Hash, path []byte) { + if owner == (common.Hash{}) && rawdb.ExistsAccountTrieNode(s.db, path) { + rawdb.DeleteAccountTrieNode(batch, path) + deletionGauge.Inc(1) + } + if owner != (common.Hash{}) && rawdb.ExistsStorageTrieNode(s.db, owner, path) { + rawdb.DeleteStorageTrieNode(batch, owner, path) + deletionGauge.Inc(1) + } + lookupGauge.Inc(1) +} + +// loadSyncStatus retrieves a previously aborted sync status from the database, +// or generates a fresh one if none is available. +func (s *Syncer) loadSyncStatus() { + var progress SyncProgress + + if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil { + if err := json.Unmarshal(status, &progress); err != nil { + log.Error("Failed to decode snap sync status", "err", err) + } else { + for _, task := range progress.Tasks { + log.Debug("Scheduled account sync task", "from", task.Next, "last", task.Last) + } + s.tasks = progress.Tasks + for _, task := range s.tasks { + task := task // closure for task.genBatch in the stacktrie writer callback + + task.genBatch = ethdb.HookedBatch{ + Batch: s.db.NewBatch(), + OnPut: func(key []byte, value []byte) { + s.accountBytes += common.StorageSize(len(key) + len(value)) + }, + } + options := trie.NewStackTrieOptions() + options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(task.genBatch, common.Hash{}, path, hash, blob, s.scheme) + }) + if s.scheme == rawdb.PathScheme { + // Configure the dangling node cleaner and also filter out boundary nodes + // only in the context of the path scheme. Deletion is forbidden in the + // hash scheme, as it can disrupt state completeness. + options = options.WithCleaner(func(path []byte) { + s.cleanPath(task.genBatch, common.Hash{}, path) + }) + // Skip the left boundary if it's not the first range. + // Skip the right boundary if it's not the last range. + options = options.WithSkipBoundary(task.Next != (common.Hash{}), task.Last != common.MaxHash, boundaryAccountNodesGauge) + } + task.genTrie = trie.NewStackTrie(options) + for accountHash, subtasks := range task.SubTasks { + for _, subtask := range subtasks { + subtask := subtask // closure for subtask.genBatch in the stacktrie writer callback + + subtask.genBatch = ethdb.HookedBatch{ + Batch: s.db.NewBatch(), + OnPut: func(key []byte, value []byte) { + s.storageBytes += common.StorageSize(len(key) + len(value)) + }, + } + owner := accountHash // local assignment for stacktrie writer closure + options := trie.NewStackTrieOptions() + options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(subtask.genBatch, owner, path, hash, blob, s.scheme) + }) + if s.scheme == rawdb.PathScheme { + // Configure the dangling node cleaner and also filter out boundary nodes + // only in the context of the path scheme. Deletion is forbidden in the + // hash scheme, as it can disrupt state completeness. + options = options.WithCleaner(func(path []byte) { + s.cleanPath(subtask.genBatch, owner, path) + }) + // Skip the left boundary if it's not the first range. + // Skip the right boundary if it's not the last range. 
+ options = options.WithSkipBoundary(subtask.Next != common.Hash{}, subtask.Last != common.MaxHash, boundaryStorageNodesGauge) + } + subtask.genTrie = trie.NewStackTrie(options) + } + } + } + s.lock.Lock() + defer s.lock.Unlock() + + s.snapped = len(s.tasks) == 0 + + s.accountSynced = progress.AccountSynced + s.accountBytes = progress.AccountBytes + s.bytecodeSynced = progress.BytecodeSynced + s.bytecodeBytes = progress.BytecodeBytes + s.storageSynced = progress.StorageSynced + s.storageBytes = progress.StorageBytes + + s.trienodeHealSynced = progress.TrienodeHealSynced + s.trienodeHealBytes = progress.TrienodeHealBytes + s.bytecodeHealSynced = progress.BytecodeHealSynced + s.bytecodeHealBytes = progress.BytecodeHealBytes + return + } + } + // Either we've failed to decode the previous state, or there was none. + // Start a fresh sync by chunking up the account range and scheduling + // them for retrieval. + s.tasks = nil + s.accountSynced, s.accountBytes = 0, 0 + s.bytecodeSynced, s.bytecodeBytes = 0, 0 + s.storageSynced, s.storageBytes = 0, 0 + s.trienodeHealSynced, s.trienodeHealBytes = 0, 0 + s.bytecodeHealSynced, s.bytecodeHealBytes = 0, 0 + + var next common.Hash + step := new(big.Int).Sub( + new(big.Int).Div( + new(big.Int).Exp(common.Big2, common.Big256, nil), + big.NewInt(int64(accountConcurrency)), + ), common.Big1, + ) + for i := 0; i < accountConcurrency; i++ { + last := common.BigToHash(new(big.Int).Add(next.Big(), step)) + if i == accountConcurrency-1 { + // Make sure we don't overflow if the step is not a proper divisor + last = common.MaxHash + } + batch := ethdb.HookedBatch{ + Batch: s.db.NewBatch(), + OnPut: func(key []byte, value []byte) { + s.accountBytes += common.StorageSize(len(key) + len(value)) + }, + } + options := trie.NewStackTrieOptions() + options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(batch, common.Hash{}, path, hash, blob, s.scheme) + }) + if s.scheme == rawdb.PathScheme { + // Configure the dangling node cleaner and also filter out boundary nodes + // only in the context of the path scheme. Deletion is forbidden in the + // hash scheme, as it can disrupt state completeness. + options = options.WithCleaner(func(path []byte) { + s.cleanPath(batch, common.Hash{}, path) + }) + // Skip the left boundary if it's not the first range. + // Skip the right boundary if it's not the last range. + options = options.WithSkipBoundary(next != common.Hash{}, last != common.MaxHash, boundaryAccountNodesGauge) + } + s.tasks = append(s.tasks, &accountTask{ + Next: next, + Last: last, + SubTasks: make(map[common.Hash][]*storageTask), + genBatch: batch, + genTrie: trie.NewStackTrie(options), + }) + log.Debug("Created account sync task", "from", next, "last", last) + next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1)) + } +} + +// saveSyncStatus marshals the remaining sync tasks into leveldb. 
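+// It is called when a sync cycle terminates so that a subsequent Sync
+// invocation can resume from the persisted task boundaries via loadSyncStatus.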
+func (s *Syncer) saveSyncStatus() { + // Serialize any partial progress to disk before spinning down + for _, task := range s.tasks { + if err := task.genBatch.Write(); err != nil { + log.Error("Failed to persist account slots", "err", err) + } + for _, subtasks := range task.SubTasks { + for _, subtask := range subtasks { + if err := subtask.genBatch.Write(); err != nil { + log.Error("Failed to persist storage slots", "err", err) + } + } + } + } + // Store the actual progress markers + progress := &SyncProgress{ + Tasks: s.tasks, + AccountSynced: s.accountSynced, + AccountBytes: s.accountBytes, + BytecodeSynced: s.bytecodeSynced, + BytecodeBytes: s.bytecodeBytes, + StorageSynced: s.storageSynced, + StorageBytes: s.storageBytes, + TrienodeHealSynced: s.trienodeHealSynced, + TrienodeHealBytes: s.trienodeHealBytes, + BytecodeHealSynced: s.bytecodeHealSynced, + BytecodeHealBytes: s.bytecodeHealBytes, + } + status, err := json.Marshal(progress) + if err != nil { + panic(err) // This can only fail during implementation + } + rawdb.WriteSnapshotSyncStatus(s.db, status) +} + +// Progress returns the snap sync status statistics. +func (s *Syncer) Progress() (*SyncProgress, *SyncPending) { + s.lock.Lock() + defer s.lock.Unlock() + pending := new(SyncPending) + if s.healer != nil { + pending.TrienodeHeal = uint64(len(s.healer.trieTasks)) + pending.BytecodeHeal = uint64(len(s.healer.codeTasks)) + } + return s.extProgress, pending +} + +// cleanAccountTasks removes account range retrieval tasks that have already been +// completed. +func (s *Syncer) cleanAccountTasks() { + // If the sync was already done before, don't even bother + if len(s.tasks) == 0 { + return + } + // Sync wasn't finished previously, check for any task that can be finalized + for i := 0; i < len(s.tasks); i++ { + if s.tasks[i].done { + s.tasks = append(s.tasks[:i], s.tasks[i+1:]...) + i-- + } + } + // If everything was just finalized just, generate the account trie and start heal + if len(s.tasks) == 0 { + s.lock.Lock() + s.snapped = true + s.lock.Unlock() + + // Push the final sync report + s.reportSyncProgress(true) + } +} + +// cleanStorageTasks iterates over all the account tasks and storage sub-tasks +// within, cleaning any that have been completed. +func (s *Syncer) cleanStorageTasks() { + for _, task := range s.tasks { + for account, subtasks := range task.SubTasks { + // Remove storage range retrieval tasks that completed + for j := 0; j < len(subtasks); j++ { + if subtasks[j].done { + subtasks = append(subtasks[:j], subtasks[j+1:]...) + j-- + } + } + if len(subtasks) > 0 { + task.SubTasks[account] = subtasks + continue + } + // If all storage chunks are done, mark the account as done too + for j, hash := range task.res.hashes { + if hash == account { + task.needState[j] = false + } + } + delete(task.SubTasks, account) + task.pend-- + + // If this was the last pending task, forward the account task + if task.pend == 0 { + s.forwardAccountTask(task) + } + } + } +} + +// assignAccountTasks attempts to match idle peers to pending account range +// retrievals. 
+func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *accountRequest, cancel chan struct{}) { + s.lock.Lock() + defer s.lock.Unlock() + + // Sort the peers by download capacity to use faster ones if many available + idlers := &capacitySort{ + ids: make([]string, 0, len(s.accountIdlers)), + caps: make([]int, 0, len(s.accountIdlers)), + } + targetTTL := s.rates.TargetTimeout() + for id := range s.accountIdlers { + if _, ok := s.statelessPeers[id]; ok { + continue + } + idlers.ids = append(idlers.ids, id) + idlers.caps = append(idlers.caps, s.rates.Capacity(id, AccountRangeMsg, targetTTL)) + } + if len(idlers.ids) == 0 { + return + } + sort.Sort(sort.Reverse(idlers)) + + // Iterate over all the tasks and try to find a pending one + for _, task := range s.tasks { + // Skip any tasks already filling + if task.req != nil || task.res != nil { + continue + } + // Task pending retrieval, try to find an idle peer. If no such peer + // exists, we probably assigned tasks for all (or they are stateless). + // Abort the entire assignment mechanism. + if len(idlers.ids) == 0 { + return + } + var ( + idle = idlers.ids[0] + peer = s.peers[idle] + cap = idlers.caps[0] + ) + idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:] + + // Matched a pending task to an idle peer, allocate a unique request id + var reqid uint64 + for { + reqid = uint64(rand.Int63()) + if reqid == 0 { + continue + } + if _, ok := s.accountReqs[reqid]; ok { + continue + } + break + } + // Generate the network query and send it to the peer + req := &accountRequest{ + peer: idle, + id: reqid, + time: time.Now(), + deliver: success, + revert: fail, + cancel: cancel, + stale: make(chan struct{}), + origin: task.Next, + limit: task.Last, + task: task, + } + req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() { + peer.Log().Debug("Account range request timed out", "reqid", reqid) + s.rates.Update(idle, AccountRangeMsg, 0, 0) + s.scheduleRevertAccountRequest(req) + }) + s.accountReqs[reqid] = req + delete(s.accountIdlers, idle) + + s.pend.Add(1) + go func(root common.Hash) { + defer s.pend.Done() + + // Attempt to send the remote request and revert if it fails + if cap > maxRequestSize { + cap = maxRequestSize + } + if cap < minRequestSize { // Don't bother with peers below a bare minimum performance + cap = minRequestSize + } + if err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, uint64(cap)); err != nil { + peer.Log().Debug("Failed to request account range", "err", err) + s.scheduleRevertAccountRequest(req) + } + }(s.root) + + // Inject the request into the task to block further assignments + task.req = req + } +} + +// assignBytecodeTasks attempts to match idle peers to pending code retrievals. 
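The assigners here sort idle peers through a `capacitySort` helper defined elsewhere in this file. A minimal stand-in consistent with how it is used above, two parallel slices sorted as one unit via `sort.Interface`, might look like:

```go
package main

import (
	"fmt"
	"sort"
)

// capacitySort keeps peer ids and their measured capacities in lockstep.
// The real type is defined elsewhere in this file; this mirrors its usage.
type capacitySort struct {
	ids  []string
	caps []int
}

func (s *capacitySort) Len() int           { return len(s.ids) }
func (s *capacitySort) Less(i, j int) bool { return s.caps[i] < s.caps[j] }
func (s *capacitySort) Swap(i, j int) {
	s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
	s.caps[i], s.caps[j] = s.caps[j], s.caps[i]
}

func main() {
	idlers := &capacitySort{
		ids:  []string{"peer-a", "peer-b", "peer-c"},
		caps: []int{10, 512, 128},
	}
	sort.Sort(sort.Reverse(idlers))      // highest capacity first
	fmt.Println(idlers.ids, idlers.caps) // [peer-b peer-c peer-a] [512 128 10]
}
```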
+func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan *bytecodeRequest, cancel chan struct{}) { + s.lock.Lock() + defer s.lock.Unlock() + + // Sort the peers by download capacity to use faster ones if many available + idlers := &capacitySort{ + ids: make([]string, 0, len(s.bytecodeIdlers)), + caps: make([]int, 0, len(s.bytecodeIdlers)), + } + targetTTL := s.rates.TargetTimeout() + for id := range s.bytecodeIdlers { + if _, ok := s.statelessPeers[id]; ok { + continue + } + idlers.ids = append(idlers.ids, id) + idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL)) + } + if len(idlers.ids) == 0 { + return + } + sort.Sort(sort.Reverse(idlers)) + + // Iterate over all the tasks and try to find a pending one + for _, task := range s.tasks { + // Skip any tasks not in the bytecode retrieval phase + if task.res == nil { + continue + } + // Skip tasks that are already retrieving (or done with) all codes + if len(task.codeTasks) == 0 { + continue + } + // Task pending retrieval, try to find an idle peer. If no such peer + // exists, we probably assigned tasks for all (or they are stateless). + // Abort the entire assignment mechanism. + if len(idlers.ids) == 0 { + return + } + var ( + idle = idlers.ids[0] + peer = s.peers[idle] + cap = idlers.caps[0] + ) + idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:] + + // Matched a pending task to an idle peer, allocate a unique request id + var reqid uint64 + for { + reqid = uint64(rand.Int63()) + if reqid == 0 { + continue + } + if _, ok := s.bytecodeReqs[reqid]; ok { + continue + } + break + } + // Generate the network query and send it to the peer + if cap > maxCodeRequestCount { + cap = maxCodeRequestCount + } + hashes := make([]common.Hash, 0, cap) + for hash := range task.codeTasks { + delete(task.codeTasks, hash) + hashes = append(hashes, hash) + if len(hashes) >= cap { + break + } + } + req := &bytecodeRequest{ + peer: idle, + id: reqid, + time: time.Now(), + deliver: success, + revert: fail, + cancel: cancel, + stale: make(chan struct{}), + hashes: hashes, + task: task, + } + req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() { + peer.Log().Debug("Bytecode request timed out", "reqid", reqid) + s.rates.Update(idle, ByteCodesMsg, 0, 0) + s.scheduleRevertBytecodeRequest(req) + }) + s.bytecodeReqs[reqid] = req + delete(s.bytecodeIdlers, idle) + + s.pend.Add(1) + go func() { + defer s.pend.Done() + + // Attempt to send the remote request and revert if it fails + if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil { + log.Debug("Failed to request bytecodes", "err", err) + s.scheduleRevertBytecodeRequest(req) + } + }() + } +} + +// assignStorageTasks attempts to match idle peers to pending storage range +// retrievals. 
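Each assigner allocates request ids with the same retry loop: draw random 63-bit values until one is nonzero and not already in flight. In isolation the pattern looks like this (a sketch, not the file's code; zero is treated as "no request"):

```go
package main

import (
	"fmt"
	"math/rand"
)

// allocateID mirrors the retry loop used by the assigners above: ids must be
// nonzero and unique among the currently tracked requests.
func allocateID(inflight map[uint64]struct{}) uint64 {
	for {
		id := uint64(rand.Int63())
		if id == 0 {
			continue // zero reads as "no request"
		}
		if _, ok := inflight[id]; ok {
			continue // collision with an in-flight request
		}
		return id
	}
}

func main() {
	inflight := map[uint64]struct{}{42: {}}
	id := allocateID(inflight)
	inflight[id] = struct{}{}
	fmt.Println("allocated request id:", id)
}
```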
+func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *storageRequest, cancel chan struct{}) { + s.lock.Lock() + defer s.lock.Unlock() + + // Sort the peers by download capacity to use faster ones if many available + idlers := &capacitySort{ + ids: make([]string, 0, len(s.storageIdlers)), + caps: make([]int, 0, len(s.storageIdlers)), + } + targetTTL := s.rates.TargetTimeout() + for id := range s.storageIdlers { + if _, ok := s.statelessPeers[id]; ok { + continue + } + idlers.ids = append(idlers.ids, id) + idlers.caps = append(idlers.caps, s.rates.Capacity(id, StorageRangesMsg, targetTTL)) + } + if len(idlers.ids) == 0 { + return + } + sort.Sort(sort.Reverse(idlers)) + + // Iterate over all the tasks and try to find a pending one + for _, task := range s.tasks { + // Skip any tasks not in the storage retrieval phase + if task.res == nil { + continue + } + // Skip tasks that are already retrieving (or done with) all small states + if len(task.SubTasks) == 0 && len(task.stateTasks) == 0 { + continue + } + // Task pending retrieval, try to find an idle peer. If no such peer + // exists, we probably assigned tasks for all (or they are stateless). + // Abort the entire assignment mechanism. + if len(idlers.ids) == 0 { + return + } + var ( + idle = idlers.ids[0] + peer = s.peers[idle] + cap = idlers.caps[0] + ) + idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:] + + // Matched a pending task to an idle peer, allocate a unique request id + var reqid uint64 + for { + reqid = uint64(rand.Int63()) + if reqid == 0 { + continue + } + if _, ok := s.storageReqs[reqid]; ok { + continue + } + break + } + // Generate the network query and send it to the peer. If there are + // large contract tasks pending, complete those before diving into + // even more new contracts. + if cap > maxRequestSize { + cap = maxRequestSize + } + if cap < minRequestSize { // Don't bother with peers below a bare minimum performance + cap = minRequestSize + } + storageSets := cap / 1024 + + var ( + accounts = make([]common.Hash, 0, storageSets) + roots = make([]common.Hash, 0, storageSets) + subtask *storageTask + ) + for account, subtasks := range task.SubTasks { + for _, st := range subtasks { + // Skip any subtasks already filling + if st.req != nil { + continue + } + // Found an incomplete storage chunk, schedule it + accounts = append(accounts, account) + roots = append(roots, st.root) + subtask = st + break // Large contract chunks are downloaded individually + } + if subtask != nil { + break // Large contract chunks are downloaded individually + } + } + if subtask == nil { + // No large contract required retrieval, but small ones available + for account, root := range task.stateTasks { + delete(task.stateTasks, account) + + accounts = append(accounts, account) + roots = append(roots, root) + + if len(accounts) >= storageSets { + break + } + } + } + // If nothing was found, it means this task is actually already fully + // retrieving, but large contracts are hard to detect. Skip to the next. 
+ if len(accounts) == 0 { + continue + } + req := &storageRequest{ + peer: idle, + id: reqid, + time: time.Now(), + deliver: success, + revert: fail, + cancel: cancel, + stale: make(chan struct{}), + accounts: accounts, + roots: roots, + mainTask: task, + subTask: subtask, + } + if subtask != nil { + req.origin = subtask.Next + req.limit = subtask.Last + } + req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() { + peer.Log().Debug("Storage request timed out", "reqid", reqid) + s.rates.Update(idle, StorageRangesMsg, 0, 0) + s.scheduleRevertStorageRequest(req) + }) + s.storageReqs[reqid] = req + delete(s.storageIdlers, idle) + + s.pend.Add(1) + go func(root common.Hash) { + defer s.pend.Done() + + // Attempt to send the remote request and revert if it fails + var origin, limit []byte + if subtask != nil { + origin, limit = req.origin[:], req.limit[:] + } + if err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, uint64(cap)); err != nil { + log.Debug("Failed to request storage", "err", err) + s.scheduleRevertStorageRequest(req) + } + }(s.root) + + // Inject the request into the subtask to block further assignments + if subtask != nil { + subtask.req = req + } + } +} + +// assignTrienodeHealTasks attempts to match idle peers to trie node requests to +// heal any trie errors caused by the snap sync's chunked retrieval model. +func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fail chan *trienodeHealRequest, cancel chan struct{}) { + s.lock.Lock() + defer s.lock.Unlock() + + // Sort the peers by download capacity to use faster ones if many available + idlers := &capacitySort{ + ids: make([]string, 0, len(s.trienodeHealIdlers)), + caps: make([]int, 0, len(s.trienodeHealIdlers)), + } + targetTTL := s.rates.TargetTimeout() + for id := range s.trienodeHealIdlers { + if _, ok := s.statelessPeers[id]; ok { + continue + } + idlers.ids = append(idlers.ids, id) + idlers.caps = append(idlers.caps, s.rates.Capacity(id, TrieNodesMsg, targetTTL)) + } + if len(idlers.ids) == 0 { + return + } + sort.Sort(sort.Reverse(idlers)) + + // Iterate over pending tasks and try to find a peer to retrieve with + for len(s.healer.trieTasks) > 0 || s.healer.scheduler.Pending() > 0 { + // If there are not enough trie tasks queued to fully assign, fill the + // queue from the state sync scheduler. The trie synced schedules these + // together with bytecodes, so we need to queue them combined. + var ( + have = len(s.healer.trieTasks) + len(s.healer.codeTasks) + want = maxTrieRequestCount + maxCodeRequestCount + ) + if have < want { + paths, hashes, codes := s.healer.scheduler.Missing(want - have) + for i, path := range paths { + s.healer.trieTasks[path] = hashes[i] + } + for _, hash := range codes { + s.healer.codeTasks[hash] = struct{}{} + } + } + // If all the heal tasks are bytecodes or already downloading, bail + if len(s.healer.trieTasks) == 0 { + return + } + // Task pending retrieval, try to find an idle peer. If no such peer + // exists, we probably assigned tasks for all (or they are stateless). + // Abort the entire assignment mechanism. 
+ if len(idlers.ids) == 0 { + return + } + var ( + idle = idlers.ids[0] + peer = s.peers[idle] + cap = idlers.caps[0] + ) + idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:] + + // Matched a pending task to an idle peer, allocate a unique request id + var reqid uint64 + for { + reqid = uint64(rand.Int63()) + if reqid == 0 { + continue + } + if _, ok := s.trienodeHealReqs[reqid]; ok { + continue + } + break + } + // Generate the network query and send it to the peer + if cap > maxTrieRequestCount { + cap = maxTrieRequestCount + } + cap = int(float64(cap) / s.trienodeHealThrottle) + if cap <= 0 { + cap = 1 + } + var ( + hashes = make([]common.Hash, 0, cap) + paths = make([]string, 0, cap) + pathsets = make([]TrieNodePathSet, 0, cap) + ) + for path, hash := range s.healer.trieTasks { + delete(s.healer.trieTasks, path) + + paths = append(paths, path) + hashes = append(hashes, hash) + if len(paths) >= cap { + break + } + } + // Group requests by account hash + paths, hashes, _, pathsets = sortByAccountPath(paths, hashes) + req := &trienodeHealRequest{ + peer: idle, + id: reqid, + time: time.Now(), + deliver: success, + revert: fail, + cancel: cancel, + stale: make(chan struct{}), + paths: paths, + hashes: hashes, + task: s.healer, + } + req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() { + peer.Log().Debug("Trienode heal request timed out", "reqid", reqid) + s.rates.Update(idle, TrieNodesMsg, 0, 0) + s.scheduleRevertTrienodeHealRequest(req) + }) + s.trienodeHealReqs[reqid] = req + delete(s.trienodeHealIdlers, idle) + + s.pend.Add(1) + go func(root common.Hash) { + defer s.pend.Done() + + // Attempt to send the remote request and revert if it fails + if err := peer.RequestTrieNodes(reqid, root, pathsets, maxRequestSize); err != nil { + log.Debug("Failed to request trienode healers", "err", err) + s.scheduleRevertTrienodeHealRequest(req) + } + }(s.root) + } +} + +// assignBytecodeHealTasks attempts to match idle peers to bytecode requests to +// heal any trie errors caused by the snap sync's chunked retrieval model. +func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fail chan *bytecodeHealRequest, cancel chan struct{}) { + s.lock.Lock() + defer s.lock.Unlock() + + // Sort the peers by download capacity to use faster ones if many available + idlers := &capacitySort{ + ids: make([]string, 0, len(s.bytecodeHealIdlers)), + caps: make([]int, 0, len(s.bytecodeHealIdlers)), + } + targetTTL := s.rates.TargetTimeout() + for id := range s.bytecodeHealIdlers { + if _, ok := s.statelessPeers[id]; ok { + continue + } + idlers.ids = append(idlers.ids, id) + idlers.caps = append(idlers.caps, s.rates.Capacity(id, ByteCodesMsg, targetTTL)) + } + if len(idlers.ids) == 0 { + return + } + sort.Sort(sort.Reverse(idlers)) + + // Iterate over pending tasks and try to find a peer to retrieve with + for len(s.healer.codeTasks) > 0 || s.healer.scheduler.Pending() > 0 { + // If there are not enough trie tasks queued to fully assign, fill the + // queue from the state sync scheduler. The trie synced schedules these + // together with trie nodes, so we need to queue them combined. 
+ var ( + have = len(s.healer.trieTasks) + len(s.healer.codeTasks) + want = maxTrieRequestCount + maxCodeRequestCount + ) + if have < want { + paths, hashes, codes := s.healer.scheduler.Missing(want - have) + for i, path := range paths { + s.healer.trieTasks[path] = hashes[i] + } + for _, hash := range codes { + s.healer.codeTasks[hash] = struct{}{} + } + } + // If all the heal tasks are trienodes or already downloading, bail + if len(s.healer.codeTasks) == 0 { + return + } + // Task pending retrieval, try to find an idle peer. If no such peer + // exists, we probably assigned tasks for all (or they are stateless). + // Abort the entire assignment mechanism. + if len(idlers.ids) == 0 { + return + } + var ( + idle = idlers.ids[0] + peer = s.peers[idle] + cap = idlers.caps[0] + ) + idlers.ids, idlers.caps = idlers.ids[1:], idlers.caps[1:] + + // Matched a pending task to an idle peer, allocate a unique request id + var reqid uint64 + for { + reqid = uint64(rand.Int63()) + if reqid == 0 { + continue + } + if _, ok := s.bytecodeHealReqs[reqid]; ok { + continue + } + break + } + // Generate the network query and send it to the peer + if cap > maxCodeRequestCount { + cap = maxCodeRequestCount + } + hashes := make([]common.Hash, 0, cap) + for hash := range s.healer.codeTasks { + delete(s.healer.codeTasks, hash) + + hashes = append(hashes, hash) + if len(hashes) >= cap { + break + } + } + req := &bytecodeHealRequest{ + peer: idle, + id: reqid, + time: time.Now(), + deliver: success, + revert: fail, + cancel: cancel, + stale: make(chan struct{}), + hashes: hashes, + task: s.healer, + } + req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() { + peer.Log().Debug("Bytecode heal request timed out", "reqid", reqid) + s.rates.Update(idle, ByteCodesMsg, 0, 0) + s.scheduleRevertBytecodeHealRequest(req) + }) + s.bytecodeHealReqs[reqid] = req + delete(s.bytecodeHealIdlers, idle) + + s.pend.Add(1) + go func() { + defer s.pend.Done() + + // Attempt to send the remote request and revert if it fails + if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil { + log.Debug("Failed to request bytecode healers", "err", err) + s.scheduleRevertBytecodeHealRequest(req) + } + }() + } +} + +// revertRequests locates all the currently pending requests from a particular +// peer and reverts them, rescheduling for others to fulfill. 
+func (s *Syncer) revertRequests(peer string) { + // Gather the requests first, revertals need the lock too + s.lock.Lock() + var accountReqs []*accountRequest + for _, req := range s.accountReqs { + if req.peer == peer { + accountReqs = append(accountReqs, req) + } + } + var bytecodeReqs []*bytecodeRequest + for _, req := range s.bytecodeReqs { + if req.peer == peer { + bytecodeReqs = append(bytecodeReqs, req) + } + } + var storageReqs []*storageRequest + for _, req := range s.storageReqs { + if req.peer == peer { + storageReqs = append(storageReqs, req) + } + } + var trienodeHealReqs []*trienodeHealRequest + for _, req := range s.trienodeHealReqs { + if req.peer == peer { + trienodeHealReqs = append(trienodeHealReqs, req) + } + } + var bytecodeHealReqs []*bytecodeHealRequest + for _, req := range s.bytecodeHealReqs { + if req.peer == peer { + bytecodeHealReqs = append(bytecodeHealReqs, req) + } + } + s.lock.Unlock() + + // Revert all the requests matching the peer + for _, req := range accountReqs { + s.revertAccountRequest(req) + } + for _, req := range bytecodeReqs { + s.revertBytecodeRequest(req) + } + for _, req := range storageReqs { + s.revertStorageRequest(req) + } + for _, req := range trienodeHealReqs { + s.revertTrienodeHealRequest(req) + } + for _, req := range bytecodeHealReqs { + s.revertBytecodeHealRequest(req) + } +} + +// scheduleRevertAccountRequest asks the event loop to clean up an account range +// request and return all failed retrieval tasks to the scheduler for reassignment. +func (s *Syncer) scheduleRevertAccountRequest(req *accountRequest) { + select { + case req.revert <- req: + // Sync event loop notified + case <-req.cancel: + // Sync cycle got cancelled + case <-req.stale: + // Request already reverted + } +} + +// revertAccountRequest cleans up an account range request and returns all failed +// retrieval tasks to the scheduler for reassignment. +// +// Note, this needs to run on the event runloop thread to reschedule to idle peers. +// On peer threads, use scheduleRevertAccountRequest. +func (s *Syncer) revertAccountRequest(req *accountRequest) { + log.Debug("Reverting account request", "peer", req.peer, "reqid", req.id) + select { + case <-req.stale: + log.Trace("Account request already reverted", "peer", req.peer, "reqid", req.id) + return + default: + } + close(req.stale) + + // Remove the request from the tracked set + s.lock.Lock() + delete(s.accountReqs, req.id) + s.lock.Unlock() + + // If there's a timeout timer still running, abort it and mark the account + // task as not-pending, ready for rescheduling + req.timeout.Stop() + if req.task.req == req { + req.task.req = nil + } +} + +// scheduleRevertBytecodeRequest asks the event loop to clean up a bytecode request +// and return all failed retrieval tasks to the scheduler for reassignment. +func (s *Syncer) scheduleRevertBytecodeRequest(req *bytecodeRequest) { + select { + case req.revert <- req: + // Sync event loop notified + case <-req.cancel: + // Sync cycle got cancelled + case <-req.stale: + // Request already reverted + } +} + +// revertBytecodeRequest cleans up a bytecode request and returns all failed +// retrieval tasks to the scheduler for reassignment. +// +// Note, this needs to run on the event runloop thread to reschedule to idle peers. +// On peer threads, use scheduleRevertBytecodeRequest. 
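The `scheduleRevert*`/`revert*` pairs around this point all share one idiom: peer threads only ever hand a request back to the event loop, and the event loop closes the `stale` channel exactly once, so any later revert becomes a no-op. A stripped-down sketch with a hypothetical `request` type:

```go
package main

import "fmt"

type request struct {
	revert chan *request
	cancel chan struct{}
	stale  chan struct{}
}

// scheduleRevert is what peer threads call: notify the event loop, unless the
// sync was cancelled or the request is already dead.
func scheduleRevert(req *request) {
	select {
	case req.revert <- req: // event loop notified
	case <-req.cancel: // sync cycle cancelled
	case <-req.stale: // request already reverted
	}
}

// revert runs on the event loop and must be idempotent: closing the stale
// channel exactly once marks the request as dead for everyone else.
func revert(req *request) {
	select {
	case <-req.stale:
		return // already reverted
	default:
	}
	close(req.stale)
	fmt.Println("reverted")
}

func main() {
	req := &request{
		revert: make(chan *request, 1),
		cancel: make(chan struct{}),
		stale:  make(chan struct{}),
	}
	scheduleRevert(req) // buffered channel: no goroutine needed here
	revert(<-req.revert)
	revert(req) // no-op the second time
}
```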
+func (s *Syncer) revertBytecodeRequest(req *bytecodeRequest) { + log.Debug("Reverting bytecode request", "peer", req.peer) + select { + case <-req.stale: + log.Trace("Bytecode request already reverted", "peer", req.peer, "reqid", req.id) + return + default: + } + close(req.stale) + + // Remove the request from the tracked set + s.lock.Lock() + delete(s.bytecodeReqs, req.id) + s.lock.Unlock() + + // If there's a timeout timer still running, abort it and mark the code + // retrievals as not-pending, ready for rescheduling + req.timeout.Stop() + for _, hash := range req.hashes { + req.task.codeTasks[hash] = struct{}{} + } +} + +// scheduleRevertStorageRequest asks the event loop to clean up a storage range +// request and return all failed retrieval tasks to the scheduler for reassignment. +func (s *Syncer) scheduleRevertStorageRequest(req *storageRequest) { + select { + case req.revert <- req: + // Sync event loop notified + case <-req.cancel: + // Sync cycle got cancelled + case <-req.stale: + // Request already reverted + } +} + +// revertStorageRequest cleans up a storage range request and returns all failed +// retrieval tasks to the scheduler for reassignment. +// +// Note, this needs to run on the event runloop thread to reschedule to idle peers. +// On peer threads, use scheduleRevertStorageRequest. +func (s *Syncer) revertStorageRequest(req *storageRequest) { + log.Debug("Reverting storage request", "peer", req.peer) + select { + case <-req.stale: + log.Trace("Storage request already reverted", "peer", req.peer, "reqid", req.id) + return + default: + } + close(req.stale) + + // Remove the request from the tracked set + s.lock.Lock() + delete(s.storageReqs, req.id) + s.lock.Unlock() + + // If there's a timeout timer still running, abort it and mark the storage + // task as not-pending, ready for rescheduling + req.timeout.Stop() + if req.subTask != nil { + req.subTask.req = nil + } else { + for i, account := range req.accounts { + req.mainTask.stateTasks[account] = req.roots[i] + } + } +} + +// scheduleRevertTrienodeHealRequest asks the event loop to clean up a trienode heal +// request and return all failed retrieval tasks to the scheduler for reassignment. +func (s *Syncer) scheduleRevertTrienodeHealRequest(req *trienodeHealRequest) { + select { + case req.revert <- req: + // Sync event loop notified + case <-req.cancel: + // Sync cycle got cancelled + case <-req.stale: + // Request already reverted + } +} + +// revertTrienodeHealRequest cleans up a trienode heal request and returns all +// failed retrieval tasks to the scheduler for reassignment. +// +// Note, this needs to run on the event runloop thread to reschedule to idle peers. +// On peer threads, use scheduleRevertTrienodeHealRequest. 
+func (s *Syncer) revertTrienodeHealRequest(req *trienodeHealRequest) { + log.Debug("Reverting trienode heal request", "peer", req.peer) + select { + case <-req.stale: + log.Trace("Trienode heal request already reverted", "peer", req.peer, "reqid", req.id) + return + default: + } + close(req.stale) + + // Remove the request from the tracked set + s.lock.Lock() + delete(s.trienodeHealReqs, req.id) + s.lock.Unlock() + + // If there's a timeout timer still running, abort it and mark the trie node + // retrievals as not-pending, ready for rescheduling + req.timeout.Stop() + for i, path := range req.paths { + req.task.trieTasks[path] = req.hashes[i] + } +} + +// scheduleRevertBytecodeHealRequest asks the event loop to clean up a bytecode heal +// request and return all failed retrieval tasks to the scheduler for reassignment. +func (s *Syncer) scheduleRevertBytecodeHealRequest(req *bytecodeHealRequest) { + select { + case req.revert <- req: + // Sync event loop notified + case <-req.cancel: + // Sync cycle got cancelled + case <-req.stale: + // Request already reverted + } +} + +// revertBytecodeHealRequest cleans up a bytecode heal request and returns all +// failed retrieval tasks to the scheduler for reassignment. +// +// Note, this needs to run on the event runloop thread to reschedule to idle peers. +// On peer threads, use scheduleRevertBytecodeHealRequest. +func (s *Syncer) revertBytecodeHealRequest(req *bytecodeHealRequest) { + log.Debug("Reverting bytecode heal request", "peer", req.peer) + select { + case <-req.stale: + log.Trace("Bytecode heal request already reverted", "peer", req.peer, "reqid", req.id) + return + default: + } + close(req.stale) + + // Remove the request from the tracked set + s.lock.Lock() + delete(s.bytecodeHealReqs, req.id) + s.lock.Unlock() + + // If there's a timeout timer still running, abort it and mark the code + // retrievals as not-pending, ready for rescheduling + req.timeout.Stop() + for _, hash := range req.hashes { + req.task.codeTasks[hash] = struct{}{} + } +} + +// processAccountResponse integrates an already validated account range response +// into the account tasks. +func (s *Syncer) processAccountResponse(res *accountResponse) { + // Switch the task from pending to filling + res.task.req = nil + res.task.res = res + + // Ensure that the response doesn't overflow into the subsequent task + last := res.task.Last.Big() + for i, hash := range res.hashes { + // Mark the range complete if the last is already included. + // Keep iteration to delete the extra states if exists. + cmp := hash.Big().Cmp(last) + if cmp == 0 { + res.cont = false + continue + } + if cmp > 0 { + // Chunk overflown, cut off excess + res.hashes = res.hashes[:i] + res.accounts = res.accounts[:i] + res.cont = false // Mark range completed + break + } + } + // Iterate over all the accounts and assemble which ones need further sub- + // filling before the entire account range can be persisted. 
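The boundary trimming in `processAccountResponse` above can be isolated: a hash equal to the task's `Last` completes the range, and anything past it is cut off. A sketch using `big.Int` stand-ins for 256-bit hashes:

```go
package main

import (
	"fmt"
	"math/big"
)

// trim drops everything beyond last and reports whether the range continues.
func trim(hashes []*big.Int, last *big.Int) ([]*big.Int, bool) {
	cont := true
	for i, h := range hashes {
		switch cmp := h.Cmp(last); {
		case cmp == 0:
			cont = false // boundary delivered: the range is complete
		case cmp > 0:
			return hashes[:i], false // overflow into the next task: cut off
		}
	}
	return hashes, cont
}

func main() {
	last := big.NewInt(100)
	out, cont := trim([]*big.Int{big.NewInt(10), big.NewInt(100), big.NewInt(150)}, last)
	fmt.Println(len(out), cont) // 2 false
}
```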
+ res.task.needCode = make([]bool, len(res.accounts)) + res.task.needState = make([]bool, len(res.accounts)) + res.task.needHeal = make([]bool, len(res.accounts)) + + res.task.codeTasks = make(map[common.Hash]struct{}) + res.task.stateTasks = make(map[common.Hash]common.Hash) + + resumed := make(map[common.Hash]struct{}) + + res.task.pend = 0 + for i, account := range res.accounts { + // Check if the account is a contract with an unknown code + if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) { + if !rawdb.HasCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)) { + res.task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{} + res.task.needCode[i] = true + res.task.pend++ + } + } + // Check if the account is a contract with an unknown storage trie + if account.Root != types.EmptyRootHash { + //if !ethrawdb.HasTrieNode(s.db, res.hashes[i], nil, account.Root, s.scheme) { + // If there was a previous large state retrieval in progress, + // don't restart it from scratch. This happens if a sync cycle + // is interrupted and resumed later. However, *do* update the + // previous root hash. + if subtasks, ok := res.task.SubTasks[res.hashes[i]]; ok { + log.Debug("Resuming large storage retrieval", "account", res.hashes[i], "root", account.Root) + for _, subtask := range subtasks { + subtask.root = account.Root + } + res.task.needHeal[i] = true + resumed[res.hashes[i]] = struct{}{} + } else { + res.task.stateTasks[res.hashes[i]] = account.Root + } + res.task.needState[i] = true + res.task.pend++ + } + //} + } + // Delete any subtasks that have been aborted but not resumed. This may undo + // some progress if a new peer gives us less accounts than an old one, but for + // now we have to live with that. + for hash := range res.task.SubTasks { + if _, ok := resumed[hash]; !ok { + log.Debug("Aborting suspended storage retrieval", "account", hash) + delete(res.task.SubTasks, hash) + } + } + // If the account range contained no contracts, or all have been fully filled + // beforehand, short circuit storage filling and forward to the next task + if res.task.pend == 0 { + s.forwardAccountTask(res.task) + return + } + // Some accounts are incomplete, leave as is for the storage and contract + // task assigners to pick up and fill +} + +// processBytecodeResponse integrates an already validated bytecode response +// into the account tasks. 
+func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) { + batch := s.db.NewBatch() + + var ( + codes uint64 + ) + for i, hash := range res.hashes { + code := res.codes[i] + + // If the bytecode was not delivered, reschedule it + if code == nil { + res.task.codeTasks[hash] = struct{}{} + continue + } + // Code was delivered, mark it not needed any more + for j, account := range res.task.res.accounts { + if res.task.needCode[j] && hash == common.BytesToHash(account.CodeHash) { + res.task.needCode[j] = false + res.task.pend-- + } + } + // Push the bytecode into a database batch + codes++ + rawdb.WriteCode(batch, hash, code) + } + bytes := common.StorageSize(batch.ValueSize()) + if err := batch.Write(); err != nil { + log.Crit("Failed to persist bytecodes", "err", err) + } + s.bytecodeSynced += codes + s.bytecodeBytes += bytes + + log.Debug("Persisted set of bytecodes", "count", codes, "bytes", bytes) + + // If this delivery completed the last pending task, forward the account task + // to the next chunk + if res.task.pend == 0 { + s.forwardAccountTask(res.task) + return + } + // Some accounts are still incomplete, leave as is for the storage and contract + // task assigners to pick up and fill. +} + +// processStorageResponse integrates an already validated storage response +// into the account tasks. +func (s *Syncer) processStorageResponse(res *storageResponse) { + // Switch the subtask from pending to idle + if res.subTask != nil { + res.subTask.req = nil + } + batch := ethdb.HookedBatch{ + Batch: s.db.NewBatch(), + OnPut: func(key []byte, value []byte) { + s.storageBytes += common.StorageSize(len(key) + len(value)) + }, + } + var ( + slots int + oldStorageBytes = s.storageBytes + ) + // Iterate over all the accounts and reconstruct their storage tries from the + // delivered slots + for i, account := range res.accounts { + // If the account was not delivered, reschedule it + if i >= len(res.hashes) { + res.mainTask.stateTasks[account] = res.roots[i] + continue + } + // State was delivered, if complete mark as not needed any more, otherwise + // mark the account as needing healing + for j, hash := range res.mainTask.res.hashes { + if account != hash { + continue + } + acc := res.mainTask.res.accounts[j] + + // If the packet contains multiple contract storage slots, all + // but the last are surely complete. The last contract may be + // chunked, so check it's continuation flag. + if res.subTask == nil && res.mainTask.needState[j] && (i < len(res.hashes)-1 || !res.cont) { + res.mainTask.needState[j] = false + res.mainTask.pend-- + smallStorageGauge.Inc(1) + } + // If the last contract was chunked, mark it as needing healing + // to avoid writing it out to disk prematurely. + if res.subTask == nil && !res.mainTask.needHeal[j] && i == len(res.hashes)-1 && res.cont { + res.mainTask.needHeal[j] = true + } + // If the last contract was chunked, we need to switch to large + // contract handling mode + if res.subTask == nil && i == len(res.hashes)-1 && res.cont { + // If we haven't yet started a large-contract retrieval, create + // the subtasks for it within the main account task + if tasks, ok := res.mainTask.SubTasks[account]; !ok { + var ( + keys = res.hashes[i] + chunks = uint64(storageConcurrency) + lastKey common.Hash + ) + if len(keys) > 0 { + lastKey = keys[len(keys)-1] + } + // If the number of slots remaining is low, decrease the + // number of chunks. Somewhere on the order of 10-15K slots + // fit into a packet of 500KB. 
A key/slot pair is maximum 64 + // bytes, so pessimistically maxRequestSize/64 = 8K. + // + // Chunk so that at least 2 packets are needed to fill a task. + if estimate, err := estimateRemainingSlots(len(keys), lastKey); err == nil { + if n := estimate / (2 * (maxRequestSize / 64)); n+1 < chunks { + chunks = n + 1 + } + log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "remaining", estimate, "chunks", chunks) + } else { + log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "chunks", chunks) + } + r := newHashRange(lastKey, chunks) + if chunks == 1 { + smallStorageGauge.Inc(1) + } else { + largeStorageGauge.Inc(1) + } + // Our first task is the one that was just filled by this response. + batch := ethdb.HookedBatch{ + Batch: s.db.NewBatch(), + OnPut: func(key []byte, value []byte) { + s.storageBytes += common.StorageSize(len(key) + len(value)) + }, + } + owner := account // local assignment for stacktrie writer closure + options := trie.NewStackTrieOptions() + options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(batch, owner, path, hash, blob, s.scheme) + }) + if s.scheme == rawdb.PathScheme { + options = options.WithCleaner(func(path []byte) { + s.cleanPath(batch, owner, path) + }) + // Keep the left boundary as it's the first range. + // Skip the right boundary if it's not the last range. + options = options.WithSkipBoundary(false, r.End() != common.MaxHash, boundaryStorageNodesGauge) + } + tasks = append(tasks, &storageTask{ + Next: common.Hash{}, + Last: r.End(), + root: acc.Root, + genBatch: batch, + genTrie: trie.NewStackTrie(options), + }) + for r.Next() { + batch := ethdb.HookedBatch{ + Batch: s.db.NewBatch(), + OnPut: func(key []byte, value []byte) { + s.storageBytes += common.StorageSize(len(key) + len(value)) + }, + } + options := trie.NewStackTrieOptions() + options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(batch, owner, path, hash, blob, s.scheme) + }) + if s.scheme == rawdb.PathScheme { + // Configure the dangling node cleaner and also filter out boundary nodes + // only in the context of the path scheme. Deletion is forbidden in the + // hash scheme, as it can disrupt state completeness. + options = options.WithCleaner(func(path []byte) { + s.cleanPath(batch, owner, path) + }) + // Skip the left boundary as it's not the first range + // Skip the right boundary if it's not the last range. + options = options.WithSkipBoundary(true, r.End() != common.MaxHash, boundaryStorageNodesGauge) + } + tasks = append(tasks, &storageTask{ + Next: r.Start(), + Last: r.End(), + root: acc.Root, + genBatch: batch, + genTrie: trie.NewStackTrie(options), + }) + } + for _, task := range tasks { + log.Debug("Created storage sync task", "account", account, "root", acc.Root, "from", task.Next, "last", task.Last) + } + res.mainTask.SubTasks[account] = tasks + + // Since we've just created the sub-tasks, this response + // is surely for the first one (zero origin) + res.subTask = tasks[0] + } + } + // If we're in large contract delivery mode, forward the subtask + if res.subTask != nil { + // Ensure the response doesn't overflow into the subsequent task + last := res.subTask.Last.Big() + // Find the first overflowing key. 
While at it, mark res as complete + // if we find the range to include or pass the 'last' + index := sort.Search(len(res.hashes[i]), func(k int) bool { + cmp := res.hashes[i][k].Big().Cmp(last) + if cmp >= 0 { + res.cont = false + } + return cmp > 0 + }) + if index >= 0 { + // cut off excess + res.hashes[i] = res.hashes[i][:index] + res.slots[i] = res.slots[i][:index] + } + // Forward the relevant storage chunk (even if created just now) + if res.cont { + res.subTask.Next = incHash(res.hashes[i][len(res.hashes[i])-1]) + } else { + res.subTask.done = true + } + } + } + // Iterate over all the complete contracts, reconstruct the trie nodes and + // push them to disk. If the contract is chunked, the trie nodes will be + // reconstructed later. + slots += len(res.hashes[i]) + + if i < len(res.hashes)-1 || res.subTask == nil { + // no need to make local reassignment of account: this closure does not outlive the loop + options := trie.NewStackTrieOptions() + options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(batch, account, path, hash, blob, s.scheme) + }) + if s.scheme == rawdb.PathScheme { + // Configure the dangling node cleaner only in the context of the + // path scheme. Deletion is forbidden in the hash scheme, as it can + // disrupt state completeness. + // + // Notably, boundary nodes can be also kept because the whole storage + // trie is complete. + options = options.WithCleaner(func(path []byte) { + s.cleanPath(batch, account, path) + }) + } + tr := trie.NewStackTrie(options) + for j := 0; j < len(res.hashes[i]); j++ { + tr.Update(res.hashes[i][j][:], res.slots[i][j]) + } + tr.Commit() + } + // Persist the received storage segments. These flat state maybe + // outdated during the sync, but it can be fixed later during the + // snapshot generation. + for j := 0; j < len(res.hashes[i]); j++ { + rawdb.WriteStorageSnapshot(batch, account, res.hashes[i][j], res.slots[i][j]) + + // If we're storing large contracts, generate the trie nodes + // on the fly to not trash the gluing points + if i == len(res.hashes)-1 && res.subTask != nil { + res.subTask.genTrie.Update(res.hashes[i][j][:], res.slots[i][j]) + } + } + } + // Large contracts could have generated new trie nodes, flush them to disk + if res.subTask != nil { + if res.subTask.done { + root := res.subTask.genTrie.Commit() + if err := res.subTask.genBatch.Write(); err != nil { + log.Error("Failed to persist stack slots", "err", err) + } + res.subTask.genBatch.Reset() + + // If the chunk's root is an overflown but full delivery, + // clear the heal request. 
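The chunk-count heuristic a few steps above is easy to sanity-check numerically. The sketch below assumes `maxRequestSize = 512 * 1024` and `storageConcurrency = 16` (both constants are defined elsewhere in this file): one packet pessimistically holds `maxRequestSize/64 = 8192` slots, and each chunk should need at least two packets to fill.

```go
package main

import "fmt"

func chunkCount(estimatedSlots uint64) uint64 {
	const (
		maxRequestSize     = 512 * 1024 // bytes per response packet (assumed)
		storageConcurrency = 16         // default chunk count (assumed)
	)
	chunks := uint64(storageConcurrency)
	// A key/slot pair is at most ~64 bytes, so one packet holds roughly
	// maxRequestSize/64 slots. Aim for at least 2 packets per chunk.
	if n := estimatedSlots / (2 * (maxRequestSize / 64)); n+1 < chunks {
		chunks = n + 1
	}
	return chunks
}

func main() {
	for _, est := range []uint64{1_000, 20_000, 1_000_000} {
		fmt.Printf("%8d slots -> %d chunks\n", est, chunkCount(est))
	}
}
```

Small contracts collapse to a single chunk, while anything above roughly 250K estimated slots keeps the full default concurrency.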
+ accountHash := res.accounts[len(res.accounts)-1] + if root == res.subTask.root && rawdb.HasStorageTrieNode(s.db, accountHash, nil, root) { + for i, account := range res.mainTask.res.hashes { + if account == accountHash { + res.mainTask.needHeal[i] = false + skipStorageHealingGauge.Inc(1) + } + } + } + } + if res.subTask.genBatch.ValueSize() > ethdb.IdealBatchSize { + if err := res.subTask.genBatch.Write(); err != nil { + log.Error("Failed to persist stack slots", "err", err) + } + res.subTask.genBatch.Reset() + } + } + // Flush anything written just now and update the stats + if err := batch.Write(); err != nil { + log.Crit("Failed to persist storage slots", "err", err) + } + s.storageSynced += uint64(slots) + + log.Debug("Persisted set of storage slots", "accounts", len(res.hashes), "slots", slots, "bytes", s.storageBytes-oldStorageBytes) + + // If this delivery completed the last pending task, forward the account task + // to the next chunk + if res.mainTask.pend == 0 { + s.forwardAccountTask(res.mainTask) + return + } + // Some accounts are still incomplete, leave as is for the storage and contract + // task assigners to pick up and fill. +} + +// processTrienodeHealResponse integrates an already validated trienode response +// into the healer tasks. +func (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) { + var ( + start = time.Now() + fills int + ) + for i, hash := range res.hashes { + node := res.nodes[i] + + // If the trie node was not delivered, reschedule it + if node == nil { + res.task.trieTasks[res.paths[i]] = res.hashes[i] + continue + } + fills++ + + // Push the trie node into the state syncer + s.trienodeHealSynced++ + s.trienodeHealBytes += common.StorageSize(len(node)) + + err := s.healer.scheduler.ProcessNode(trie.NodeSyncResult{Path: res.paths[i], Data: node}) + switch err { + case nil: + case trie.ErrAlreadyProcessed: + s.trienodeHealDups++ + case trie.ErrNotRequested: + s.trienodeHealNops++ + default: + log.Error("Invalid trienode processed", "hash", hash, "err", err) + } + } + s.commitHealer(false) + + // Calculate the processing rate of one filled trie node + rate := float64(fills) / (float64(time.Since(start)) / float64(time.Second)) + + // Update the currently measured trienode queueing and processing throughput. + // + // The processing rate needs to be updated uniformly independent if we've + // processed 1x100 trie nodes or 100x1 to keep the rate consistent even in + // the face of varying network packets. As such, we cannot just measure the + // time it took to process N trie nodes and update once, we need one update + // per trie node. 
+	//
+	// Naively, that would be:
+	//
+	//   for i := 0; i < fills; i++ {
+	//     healRate = (1-measurementImpact)*oldRate + measurementImpact*newRate
+	//   }
+	//
+	// Applying that recurrence fills times telescopes into a closed form, so
+	// a single update suffices:
+	//
+	//   healRate = (1-impact)^fills*(oldRate-newRate) + newRate
+	s.trienodeHealRate = gomath.Pow(1-trienodeHealRateMeasurementImpact, float64(fills))*(s.trienodeHealRate-rate) + rate
+
+	pending := s.trienodeHealPend.Load()
+	if time.Since(s.trienodeHealThrottled) > time.Second {
+		// Periodically adjust the trie node throttler
+		if float64(pending) > 2*s.trienodeHealRate {
+			s.trienodeHealThrottle *= trienodeHealThrottleIncrease
+		} else {
+			s.trienodeHealThrottle /= trienodeHealThrottleDecrease
+		}
+		if s.trienodeHealThrottle > maxTrienodeHealThrottle {
+			s.trienodeHealThrottle = maxTrienodeHealThrottle
+		} else if s.trienodeHealThrottle < minTrienodeHealThrottle {
+			s.trienodeHealThrottle = minTrienodeHealThrottle
+		}
+		s.trienodeHealThrottled = time.Now()
+
+		log.Debug("Updated trie node heal throttler", "rate", s.trienodeHealRate, "pending", pending, "throttle", s.trienodeHealThrottle)
+	}
+}
+
+func (s *Syncer) commitHealer(force bool) {
+	if !force && s.healer.scheduler.MemSize() < ethdb.IdealBatchSize {
+		return
+	}
+	batch := s.db.NewBatch()
+	if err := s.healer.scheduler.Commit(batch); err != nil {
+		log.Error("Failed to commit healing data", "err", err)
+	}
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed to persist healing data", "err", err)
+	}
+	log.Debug("Persisted set of healing data", "type", "trienodes", "bytes", common.StorageSize(batch.ValueSize()))
+}
+
+// processBytecodeHealResponse integrates an already validated bytecode response
+// into the healer tasks.
+func (s *Syncer) processBytecodeHealResponse(res *bytecodeHealResponse) {
+	for i, hash := range res.hashes {
+		node := res.codes[i]
+
+		// If the bytecode was not delivered, reschedule it
+		if node == nil {
+			res.task.codeTasks[hash] = struct{}{}
+			continue
+		}
+		// Push the bytecode into the state syncer
+		s.bytecodeHealSynced++
+		s.bytecodeHealBytes += common.StorageSize(len(node))
+
+		err := s.healer.scheduler.ProcessCode(trie.CodeSyncResult{Hash: hash, Data: node})
+		switch err {
+		case nil:
+		case trie.ErrAlreadyProcessed:
+			s.bytecodeHealDups++
+		case trie.ErrNotRequested:
+			s.bytecodeHealNops++
+		default:
+			log.Error("Invalid bytecode processed", "hash", hash, "err", err)
+		}
+	}
+	s.commitHealer(false)
+}
+
+// forwardAccountTask takes a filled account task and persists anything available
+// into the database, after which it forwards the next account marker so that the
+// task's next chunk may be filled.
+func (s *Syncer) forwardAccountTask(task *accountTask) {
+	// Remove any pending delivery
+	res := task.res
+	if res == nil {
+		return // nothing to forward
+	}
+	task.res = nil
+
+	// Persist the received account segments. These flat states may be
+	// outdated during the sync, but they can be fixed later during
+	// snapshot generation.
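The closed-form rate update restored above should match applying the per-node EMA `fills` times. A quick numeric check, with 0.005 as an assumed measurement impact (the real constant lives elsewhere in this file):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	const mi = 0.005 // assumed per-node measurement impact
	var (
		oldRate = 1000.0 // previous healing rate
		sample  = 200.0  // rate measured from this response
		fills   = 100    // trie nodes processed
	)
	// Naive form: apply the EMA once per processed node.
	naive := oldRate
	for i := 0; i < fills; i++ {
		naive = (1-mi)*naive + mi*sample
	}
	// Closed form: rate = (1-mi)^fills*(oldRate-sample) + sample.
	closed := math.Pow(1-mi, float64(fills))*(oldRate-sample) + sample
	fmt.Printf("naive=%.6f closed=%.6f\n", naive, closed)
}
```

Both prints agree to floating-point precision, which is why the loop can be replaced by one `Pow` call.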
+	oldAccountBytes := s.accountBytes
+
+	batch := ethdb.HookedBatch{
+		Batch: s.db.NewBatch(),
+		OnPut: func(key []byte, value []byte) {
+			s.accountBytes += common.StorageSize(len(key) + len(value))
+		},
+	}
+	for i, hash := range res.hashes {
+		if task.needCode[i] || task.needState[i] {
+			break
+		}
+		slim := types.SlimAccountRLP(*res.accounts[i])
+		rawdb.WriteAccountSnapshot(batch, hash, slim)
+
+		// If the task is complete, drop it into the stack trie to generate
+		// account trie nodes for it
+		if !task.needHeal[i] {
+			full, err := types.FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted
+			if err != nil {
+				panic(err) // Really shouldn't ever happen
+			}
+			task.genTrie.Update(hash[:], full)
+		}
+	}
+	// Flush anything written just now and update the stats
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed to persist accounts", "err", err)
+	}
+	s.accountSynced += uint64(len(res.accounts))
+
+	// Task filling persisted, push the chunk marker forward to the first
+	// account still missing data.
+	for i, hash := range res.hashes {
+		if task.needCode[i] || task.needState[i] {
+			return
+		}
+		task.Next = incHash(hash)
+	}
+	// All accounts marked as complete, track if the entire task is done
+	task.done = !res.cont
+
+	// Stack trie could have generated trie nodes, push them to disk (we need
+	// to flush after finalizing task.done). It's fine even if we crash and
+	// lose this write, as it will only cause more data to be downloaded
+	// during heal.
+	if task.done {
+		task.genTrie.Commit()
+	}
+	if task.genBatch.ValueSize() > ethdb.IdealBatchSize || task.done {
+		if err := task.genBatch.Write(); err != nil {
+			log.Error("Failed to persist stack account", "err", err)
+		}
+		task.genBatch.Reset()
+	}
+	log.Debug("Persisted range of accounts", "accounts", len(res.accounts), "bytes", s.accountBytes-oldAccountBytes)
+}
+
+// OnAccounts is a callback method to invoke when a range of accounts is
+// received from a remote peer.
+func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, accounts [][]byte, proof [][]byte) error {
+	size := common.StorageSize(len(hashes) * common.HashLength)
+	for _, account := range accounts {
+		size += common.StorageSize(len(account))
+	}
+	for _, node := range proof {
+		size += common.StorageSize(len(node))
+	}
+	logger := peer.Log().New("reqid", id)
+	logger.Trace("Delivering range of accounts", "hashes", len(hashes), "accounts", len(accounts), "proofs", len(proof), "bytes", size)
+
+	// Whether or not the response is valid, we can mark the peer as idle and
+	// notify the scheduler to assign a new task. If the response is invalid,
+	// we'll drop the peer in a bit.
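`forwardAccountTask` advances `task.Next` using `incHash`, a helper defined elsewhere in this file. Behaviorally it treats a 32-byte hash as a 256-bit big-endian integer and adds one; a sketch of that behavior (wrapping at the top of the space is an assumption here):

```go
package main

import "fmt"

// incHash advances a 32-byte hash by one, treating it as a 256-bit
// big-endian integer.
func incHash(h [32]byte) [32]byte {
	for i := len(h) - 1; i >= 0; i-- {
		h[i]++
		if h[i] != 0 {
			break // no carry to propagate
		}
	}
	return h
}

func main() {
	var h [32]byte
	h[31] = 0xff
	fmt.Printf("%x\n", incHash(h)) // ends in ...0100
}
```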
+ defer func() { + s.lock.Lock() + defer s.lock.Unlock() + if _, ok := s.peers[peer.ID()]; ok { + s.accountIdlers[peer.ID()] = struct{}{} + } + select { + case s.update <- struct{}{}: + default: + } + }() + s.lock.Lock() + // Ensure the response is for a valid request + req, ok := s.accountReqs[id] + if !ok { + // Request stale, perhaps the peer timed out but came through in the end + logger.Warn("Unexpected account range packet") + s.lock.Unlock() + return nil + } + delete(s.accountReqs, id) + s.rates.Update(peer.ID(), AccountRangeMsg, time.Since(req.time), int(size)) + + // Clean up the request timeout timer, we'll see how to proceed further based + // on the actual delivered content + if !req.timeout.Stop() { + // The timeout is already triggered, and this request will be reverted+rescheduled + s.lock.Unlock() + return nil + } + // Response is valid, but check if peer is signalling that it does not have + // the requested data. For account range queries that means the state being + // retrieved was either already pruned remotely, or the peer is not yet + // synced to our head. + if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 { + logger.Debug("Peer rejected account range request", "root", s.root) + s.statelessPeers[peer.ID()] = struct{}{} + s.lock.Unlock() + + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertAccountRequest(req) + return nil + } + root := s.root + s.lock.Unlock() + + // Reconstruct a partial trie from the response and verify it + keys := make([][]byte, len(hashes)) + for i, key := range hashes { + keys[i] = common.CopyBytes(key[:]) + } + nodes := make(trienode.ProofList, len(proof)) + for i, node := range proof { + nodes[i] = node + } + cont, err := trie.VerifyRangeProof(root, req.origin[:], keys, accounts, nodes.Set()) + if err != nil { + logger.Warn("Account range failed proof", "err", err) + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertAccountRequest(req) + return err + } + accs := make([]*types.StateAccount, len(accounts)) + for i, account := range accounts { + acc := new(types.StateAccount) + if err := rlp.DecodeBytes(account, acc); err != nil { + panic(err) // We created these blobs, we must be able to decode them + } + accs[i] = acc + } + response := &accountResponse{ + task: req.task, + hashes: hashes, + accounts: accs, + cont: cont, + } + select { + case req.deliver <- response: + case <-req.cancel: + case <-req.stale: + } + return nil +} + +// OnByteCodes is a callback method to invoke when a batch of contract +// bytes codes are received from a remote peer. +func (s *Syncer) OnByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error { + s.lock.RLock() + syncing := !s.snapped + s.lock.RUnlock() + + if syncing { + return s.onByteCodes(peer, id, bytecodes) + } + return s.onHealByteCodes(peer, id, bytecodes) +} + +// onByteCodes is a callback method to invoke when a batch of contract +// bytes codes are received from a remote peer in the syncing phase. +func (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error { + var size common.StorageSize + for _, code := range bytecodes { + size += common.StorageSize(len(code)) + } + logger := peer.Log().New("reqid", id) + logger.Trace("Delivering set of bytecodes", "bytecodes", len(bytecodes), "bytes", size) + + // Whether or not the response is valid, we can mark the peer as idle and + // notify the scheduler to assign a new task. If the response is invalid, + // we'll drop the peer in a bit. 
+ defer func() { + s.lock.Lock() + defer s.lock.Unlock() + if _, ok := s.peers[peer.ID()]; ok { + s.bytecodeIdlers[peer.ID()] = struct{}{} + } + select { + case s.update <- struct{}{}: + default: + } + }() + s.lock.Lock() + // Ensure the response is for a valid request + req, ok := s.bytecodeReqs[id] + if !ok { + // Request stale, perhaps the peer timed out but came through in the end + logger.Warn("Unexpected bytecode packet") + s.lock.Unlock() + return nil + } + delete(s.bytecodeReqs, id) + s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes)) + + // Clean up the request timeout timer, we'll see how to proceed further based + // on the actual delivered content + if !req.timeout.Stop() { + // The timeout is already triggered, and this request will be reverted+rescheduled + s.lock.Unlock() + return nil + } + + // Response is valid, but check if peer is signalling that it does not have + // the requested data. For bytecode range queries that means the peer is not + // yet synced. + if len(bytecodes) == 0 { + logger.Debug("Peer rejected bytecode request") + s.statelessPeers[peer.ID()] = struct{}{} + s.lock.Unlock() + + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertBytecodeRequest(req) + return nil + } + s.lock.Unlock() + + // Cross reference the requested bytecodes with the response to find gaps + // that the serving node is missing + hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hash := make([]byte, 32) + + codes := make([][]byte, len(req.hashes)) + for i, j := 0, 0; i < len(bytecodes); i++ { + // Find the next hash that we've been served, leaving misses with nils + hasher.Reset() + hasher.Write(bytecodes[i]) + hasher.Read(hash) + + for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) { + j++ + } + if j < len(req.hashes) { + codes[j] = bytecodes[i] + j++ + continue + } + // We've either ran out of hashes, or got unrequested data + logger.Warn("Unexpected bytecodes", "count", len(bytecodes)-i) + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertBytecodeRequest(req) + return errors.New("unexpected bytecode") + } + // Response validated, send it to the scheduler for filling + response := &bytecodeResponse{ + task: req.task, + hashes: req.hashes, + codes: codes, + } + select { + case req.deliver <- response: + case <-req.cancel: + case <-req.stale: + } + return nil +} + +// OnStorage is a callback method to invoke when ranges of storage slots +// are received from a remote peer. +func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slots [][][]byte, proof [][]byte) error { + // Gather some trace stats to aid in debugging issues + var ( + hashCount int + slotCount int + size common.StorageSize + ) + for _, hashset := range hashes { + size += common.StorageSize(common.HashLength * len(hashset)) + hashCount += len(hashset) + } + for _, slotset := range slots { + for _, slot := range slotset { + size += common.StorageSize(len(slot)) + } + slotCount += len(slotset) + } + for _, node := range proof { + size += common.StorageSize(len(node)) + } + logger := peer.Log().New("reqid", id) + logger.Trace("Delivering ranges of storage slots", "accounts", len(hashes), "hashes", hashCount, "slots", slotCount, "proofs", len(proof), "size", size) + + // Whether or not the response is valid, we can mark the peer as idle and + // notify the scheduler to assign a new task. If the response is invalid, + // we'll drop the peer in a bit. 
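The two-pointer matching in `onByteCodes` above depends on responses preserving request order: each delivered blob is hashed and matched against the next requested hash, leaving `nil` gaps for anything the peer skipped. A self-contained sketch of the same matching, using `golang.org/x/crypto/sha3` for Keccak-256:

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// match pairs delivered blobs with requested hashes, leaving nil for gaps.
// It relies on the response preserving the order of the request.
func match(reqHashes [][]byte, blobs [][]byte) ([][]byte, bool) {
	out := make([][]byte, len(reqHashes))
	j := 0
	for _, blob := range blobs {
		hasher := sha3.NewLegacyKeccak256()
		hasher.Write(blob)
		hash := hasher.Sum(nil)

		for j < len(reqHashes) && !bytes.Equal(hash, reqHashes[j]) {
			j++ // requested item skipped by the peer: leave a nil gap
		}
		if j >= len(reqHashes) {
			return nil, false // ran out of hashes: unrequested data
		}
		out[j] = blob
		j++
	}
	return out, true
}

func main() {
	blob := []byte{0x60, 0x80}
	hasher := sha3.NewLegacyKeccak256()
	hasher.Write(blob)
	delivered := hasher.Sum(nil)

	codes, ok := match([][]byte{{0xaa}, delivered}, [][]byte{blob})
	fmt.Println(ok, codes[0] == nil, codes[1] != nil) // true true true
}
```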
+ defer func() { + s.lock.Lock() + defer s.lock.Unlock() + if _, ok := s.peers[peer.ID()]; ok { + s.storageIdlers[peer.ID()] = struct{}{} + } + select { + case s.update <- struct{}{}: + default: + } + }() + s.lock.Lock() + // Ensure the response is for a valid request + req, ok := s.storageReqs[id] + if !ok { + // Request stale, perhaps the peer timed out but came through in the end + logger.Warn("Unexpected storage ranges packet") + s.lock.Unlock() + return nil + } + delete(s.storageReqs, id) + s.rates.Update(peer.ID(), StorageRangesMsg, time.Since(req.time), int(size)) + + // Clean up the request timeout timer, we'll see how to proceed further based + // on the actual delivered content + if !req.timeout.Stop() { + // The timeout is already triggered, and this request will be reverted+rescheduled + s.lock.Unlock() + return nil + } + + // Reject the response if the hash sets and slot sets don't match, or if the + // peer sent more data than requested. + if len(hashes) != len(slots) { + s.lock.Unlock() + s.scheduleRevertStorageRequest(req) // reschedule request + logger.Warn("Hash and slot set size mismatch", "hashset", len(hashes), "slotset", len(slots)) + return errors.New("hash and slot set size mismatch") + } + if len(hashes) > len(req.accounts) { + s.lock.Unlock() + s.scheduleRevertStorageRequest(req) // reschedule request + logger.Warn("Hash set larger than requested", "hashset", len(hashes), "requested", len(req.accounts)) + return errors.New("hash set larger than requested") + } + // Response is valid, but check if peer is signalling that it does not have + // the requested data. For storage range queries that means the state being + // retrieved was either already pruned remotely, or the peer is not yet + // synced to our head. + if len(hashes) == 0 && len(proof) == 0 { + logger.Debug("Peer rejected storage request") + s.statelessPeers[peer.ID()] = struct{}{} + s.lock.Unlock() + s.scheduleRevertStorageRequest(req) // reschedule request + return nil + } + s.lock.Unlock() + + // Reconstruct the partial tries from the response and verify them + var cont bool + + // If a proof was attached while the response is empty, it indicates that the + // requested range specified with 'origin' is empty. Construct an empty state + // response locally to finalize the range. + if len(hashes) == 0 && len(proof) > 0 { + hashes = append(hashes, []common.Hash{}) + slots = append(slots, [][]byte{}) + } + for i := 0; i < len(hashes); i++ { + // Convert the keys and proofs into an internal format + keys := make([][]byte, len(hashes[i])) + for j, key := range hashes[i] { + keys[j] = common.CopyBytes(key[:]) + } + nodes := make(trienode.ProofList, 0, len(proof)) + if i == len(hashes)-1 { + for _, node := range proof { + nodes = append(nodes, node) + } + } + var err error + if len(nodes) == 0 { + // No proof has been attached, the response must cover the entire key + // space and hash to the origin root. 
+ _, err = trie.VerifyRangeProof(req.roots[i], nil, keys, slots[i], nil)
+ if err != nil {
+ s.scheduleRevertStorageRequest(req) // reschedule request
+ logger.Warn("Storage slots failed proof", "err", err)
+ return err
+ }
+ } else {
+ // A proof was attached, the response is only partial, check that the
+ // returned data is indeed part of the storage trie
+ proofdb := nodes.Set()
+
+ cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], keys, slots[i], proofdb)
+ if err != nil {
+ s.scheduleRevertStorageRequest(req) // reschedule request
+ logger.Warn("Storage range failed proof", "err", err)
+ return err
+ }
+ }
+ }
+ // Partial tries reconstructed, send them to the scheduler for storage filling
+ response := &storageResponse{
+ mainTask: req.mainTask,
+ subTask: req.subTask,
+ accounts: req.accounts,
+ roots: req.roots,
+ hashes: hashes,
+ slots: slots,
+ cont: cont,
+ }
+ select {
+ case req.deliver <- response:
+ case <-req.cancel:
+ case <-req.stale:
+ }
+ return nil
+}
+
+// OnTrieNodes is a callback method to invoke when a batch of trie nodes
+// are received from a remote peer.
+func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error {
+ var size common.StorageSize
+ for _, node := range trienodes {
+ size += common.StorageSize(len(node))
+ }
+ logger := peer.Log().New("reqid", id)
+ logger.Trace("Delivering set of healing trienodes", "trienodes", len(trienodes), "bytes", size)
+
+ // Whether or not the response is valid, we can mark the peer as idle and
+ // notify the scheduler to assign a new task. If the response is invalid,
+ // we'll drop the peer in a bit.
+ defer func() {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ if _, ok := s.peers[peer.ID()]; ok {
+ s.trienodeHealIdlers[peer.ID()] = struct{}{}
+ }
+ select {
+ case s.update <- struct{}{}:
+ default:
+ }
+ }()
+ s.lock.Lock()
+ // Ensure the response is for a valid request
+ req, ok := s.trienodeHealReqs[id]
+ if !ok {
+ // Request stale, perhaps the peer timed out but came through in the end
+ logger.Warn("Unexpected trienode heal packet")
+ s.lock.Unlock()
+ return nil
+ }
+ delete(s.trienodeHealReqs, id)
+ s.rates.Update(peer.ID(), TrieNodesMsg, time.Since(req.time), len(trienodes))
+
+ // Clean up the request timeout timer, we'll see how to proceed further based
+ // on the actual delivered content
+ if !req.timeout.Stop() {
+ // The timeout is already triggered, and this request will be reverted+rescheduled
+ s.lock.Unlock()
+ return nil
+ }
+
+ // Response is valid, but check if peer is signalling that it does not have
+ // the requested data. For trienode heal queries that means the peer is not
+ // yet synced.
+ if len(trienodes) == 0 { + logger.Debug("Peer rejected trienode heal request") + s.statelessPeers[peer.ID()] = struct{}{} + s.lock.Unlock() + + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertTrienodeHealRequest(req) + return nil + } + s.lock.Unlock() + + // Cross reference the requested trienodes with the response to find gaps + // that the serving node is missing + var ( + hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState) + hash = make([]byte, 32) + nodes = make([][]byte, len(req.hashes)) + fills uint64 + ) + for i, j := 0, 0; i < len(trienodes); i++ { + // Find the next hash that we've been served, leaving misses with nils + hasher.Reset() + hasher.Write(trienodes[i]) + hasher.Read(hash) + + for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) { + j++ + } + if j < len(req.hashes) { + nodes[j] = trienodes[i] + fills++ + j++ + continue + } + // We've either ran out of hashes, or got unrequested data + logger.Warn("Unexpected healing trienodes", "count", len(trienodes)-i) + + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertTrienodeHealRequest(req) + return errors.New("unexpected healing trienode") + } + // Response validated, send it to the scheduler for filling + s.trienodeHealPend.Add(fills) + defer func() { + s.trienodeHealPend.Add(^(fills - 1)) + }() + response := &trienodeHealResponse{ + paths: req.paths, + task: req.task, + hashes: req.hashes, + nodes: nodes, + } + select { + case req.deliver <- response: + case <-req.cancel: + case <-req.stale: + } + return nil +} + +// onHealByteCodes is a callback method to invoke when a batch of contract +// bytes codes are received from a remote peer in the healing phase. +func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error { + var size common.StorageSize + for _, code := range bytecodes { + size += common.StorageSize(len(code)) + } + logger := peer.Log().New("reqid", id) + logger.Trace("Delivering set of healing bytecodes", "bytecodes", len(bytecodes), "bytes", size) + + // Whether or not the response is valid, we can mark the peer as idle and + // notify the scheduler to assign a new task. If the response is invalid, + // we'll drop the peer in a bit. + defer func() { + s.lock.Lock() + defer s.lock.Unlock() + if _, ok := s.peers[peer.ID()]; ok { + s.bytecodeHealIdlers[peer.ID()] = struct{}{} + } + select { + case s.update <- struct{}{}: + default: + } + }() + s.lock.Lock() + // Ensure the response is for a valid request + req, ok := s.bytecodeHealReqs[id] + if !ok { + // Request stale, perhaps the peer timed out but came through in the end + logger.Warn("Unexpected bytecode heal packet") + s.lock.Unlock() + return nil + } + delete(s.bytecodeHealReqs, id) + s.rates.Update(peer.ID(), ByteCodesMsg, time.Since(req.time), len(bytecodes)) + + // Clean up the request timeout timer, we'll see how to proceed further based + // on the actual delivered content + if !req.timeout.Stop() { + // The timeout is already triggered, and this request will be reverted+rescheduled + s.lock.Unlock() + return nil + } + + // Response is valid, but check if peer is signalling that it does not have + // the requested data. For bytecode range queries that means the peer is not + // yet synced. 
+ if len(bytecodes) == 0 {
+ logger.Debug("Peer rejected bytecode heal request")
+ s.statelessPeers[peer.ID()] = struct{}{}
+ s.lock.Unlock()
+
+ // Signal this request as failed, and ready for rescheduling
+ s.scheduleRevertBytecodeHealRequest(req)
+ return nil
+ }
+ s.lock.Unlock()
+
+ // Cross reference the requested bytecodes with the response to find gaps
+ // that the serving node is missing
+ hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
+ hash := make([]byte, 32)
+
+ codes := make([][]byte, len(req.hashes))
+ for i, j := 0, 0; i < len(bytecodes); i++ {
+ // Find the next hash that we've been served, leaving misses with nils
+ hasher.Reset()
+ hasher.Write(bytecodes[i])
+ hasher.Read(hash)
+
+ for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {
+ j++
+ }
+ if j < len(req.hashes) {
+ codes[j] = bytecodes[i]
+ j++
+ continue
+ }
+ // We've either ran out of hashes, or got unrequested data
+ logger.Warn("Unexpected healing bytecodes", "count", len(bytecodes)-i)
+ // Signal this request as failed, and ready for rescheduling
+ s.scheduleRevertBytecodeHealRequest(req)
+ return errors.New("unexpected healing bytecode")
+ }
+ // Response validated, send it to the scheduler for filling
+ response := &bytecodeHealResponse{
+ task: req.task,
+ hashes: req.hashes,
+ codes: codes,
+ }
+ select {
+ case req.deliver <- response:
+ case <-req.cancel:
+ case <-req.stale:
+ }
+ return nil
+}
+
+// onHealState is a callback method to invoke when a flat state (account
+// or storage slot) is downloaded during the healing stage. The flat states
+// can be persisted blindly and can be fixed later in the generation stage.
+// Note it is not concurrency safe; callers must handle concurrent access.
+func (s *Syncer) onHealState(paths [][]byte, value []byte) error {
+ if len(paths) == 1 {
+ var account types.StateAccount
+ if err := rlp.DecodeBytes(value, &account); err != nil {
+ return nil // Returning the error here would drop the remote peer
+ }
+ blob := types.SlimAccountRLP(account)
+ rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob)
+ s.accountHealed += 1
+ s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob))
+ }
+ if len(paths) == 2 {
+ rawdb.WriteStorageSnapshot(s.stateWriter, common.BytesToHash(paths[0]), common.BytesToHash(paths[1]), value)
+ s.storageHealed += 1
+ s.storageHealedBytes += common.StorageSize(1 + 2*common.HashLength + len(value))
+ }
+ if s.stateWriter.ValueSize() > ethdb.IdealBatchSize {
+ s.stateWriter.Write() // It's fine to ignore the error here
+ s.stateWriter.Reset()
+ }
+ return nil
+}
+
+// hashSpace is the total size of the 256 bit hash space for accounts.
+var hashSpace = new(big.Int).Exp(common.Big2, common.Big256, nil)
+
+// report calculates various status reports and provides it to the user.
+func (s *Syncer) report(force bool) {
+ if len(s.tasks) > 0 {
+ s.reportSyncProgress(force)
+ return
+ }
+ s.reportHealProgress(force)
+}
+
+// reportSyncProgress calculates various status reports and provides it to the user.
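+// The estimate is a linear extrapolation: the synced byte count is scaled by
+// the ratio of the full 256-bit hash space to the portion already filled,
+// i.e. estBytes ~= synced * hashSpace / (hashSpace - gaps), and the ETA
+// scales the elapsed time by that same ratio.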
+func (s *Syncer) reportSyncProgress(force bool) { + // Don't report all the events, just occasionally + if !force && time.Since(s.logTime) < 8*time.Second { + return + } + // Don't report anything until we have a meaningful progress + synced := s.accountBytes + s.bytecodeBytes + s.storageBytes + if synced == 0 { + return + } + accountGaps := new(big.Int) + for _, task := range s.tasks { + accountGaps.Add(accountGaps, new(big.Int).Sub(task.Last.Big(), task.Next.Big())) + } + accountFills := new(big.Int).Sub(hashSpace, accountGaps) + if accountFills.BitLen() == 0 { + return + } + s.logTime = time.Now() + estBytes := float64(new(big.Int).Div( + new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace), + accountFills, + ).Uint64()) + // Don't report anything until we have a meaningful progress + if estBytes < 1.0 { + return + } + elapsed := time.Since(s.startTime) + estTime := elapsed / time.Duration(synced) * time.Duration(estBytes) + + // Create a mega progress report + var ( + progress = fmt.Sprintf("%.2f%%", float64(synced)*100/estBytes) + accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountSynced), s.accountBytes.TerminalString()) + storage = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageSynced), s.storageBytes.TerminalString()) + bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeSynced), s.bytecodeBytes.TerminalString()) + ) + log.Info("Syncing: state download in progress", "synced", progress, "state", synced, + "accounts", accounts, "slots", storage, "codes", bytecode, "eta", common.PrettyDuration(estTime-elapsed)) +} + +// reportHealProgress calculates various status reports and provides it to the user. +func (s *Syncer) reportHealProgress(force bool) { + // Don't report all the events, just occasionally + if !force && time.Since(s.logTime) < 8*time.Second { + return + } + s.logTime = time.Now() + + // Create a mega progress report + var ( + trienode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.trienodeHealSynced), s.trienodeHealBytes.TerminalString()) + bytecode = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.bytecodeHealSynced), s.bytecodeHealBytes.TerminalString()) + accounts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.accountHealed), s.accountHealedBytes.TerminalString()) + storage = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(s.storageHealed), s.storageHealedBytes.TerminalString()) + ) + log.Info("Syncing: state healing in progress", "accounts", accounts, "slots", storage, + "codes", bytecode, "nodes", trienode, "pending", s.healer.scheduler.Pending()) +} + +// estimateRemainingSlots tries to determine roughly how many slots are left in +// a contract storage, based on the number of keys and the last hash. This method +// assumes that the hashes are lexicographically ordered and evenly distributed. +func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) { + if last == (common.Hash{}) { + return 0, errors.New("last hash empty") + } + space := new(big.Int).Mul(math.MaxBig256, big.NewInt(int64(hashes))) + space.Div(space, last.Big()) + if !space.IsUint64() { + // Gigantic address space probably due to too few or malicious slots + return 0, errors.New("too few slots for estimation") + } + return space.Uint64() - uint64(hashes), nil +} + +// capacitySort implements the Sort interface, allowing sorting by peer message +// throughput. Note, callers should use sort.Reverse to get the desired effect +// of highest capacity being at the front. 
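+//
+// A typical use (illustrative) is:
+//
+//	sort.Sort(sort.Reverse(&capacitySort{ids: ids, caps: caps}))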
+type capacitySort struct {
+ ids []string
+ caps []int
+}
+
+func (s *capacitySort) Len() int {
+ return len(s.ids)
+}
+
+func (s *capacitySort) Less(i, j int) bool {
+ return s.caps[i] < s.caps[j]
+}
+
+func (s *capacitySort) Swap(i, j int) {
+ s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
+ s.caps[i], s.caps[j] = s.caps[j], s.caps[i]
+}
+
+// healRequestSort implements the Sort interface, allowing sorting trienode
+// heal requests, which is a prerequisite for merging storage requests.
+type healRequestSort struct {
+ paths []string
+ hashes []common.Hash
+ syncPaths []trie.SyncPath
+}
+
+func (t *healRequestSort) Len() int {
+ return len(t.hashes)
+}
+
+func (t *healRequestSort) Less(i, j int) bool {
+ a := t.syncPaths[i]
+ b := t.syncPaths[j]
+ switch bytes.Compare(a[0], b[0]) {
+ case -1:
+ return true
+ case 1:
+ return false
+ }
+ // identical first part
+ if len(a) < len(b) {
+ return true
+ }
+ if len(b) < len(a) {
+ return false
+ }
+ if len(a) == 2 {
+ return bytes.Compare(a[1], b[1]) < 0
+ }
+ return false
+}
+
+func (t *healRequestSort) Swap(i, j int) {
+ t.paths[i], t.paths[j] = t.paths[j], t.paths[i]
+ t.hashes[i], t.hashes[j] = t.hashes[j], t.hashes[i]
+ t.syncPaths[i], t.syncPaths[j] = t.syncPaths[j], t.syncPaths[i]
+}
+
+// Merge merges the pathsets, so that several storage requests concerning the
+// same account are merged into one, to reduce bandwidth.
+// Note: this operation is moot if t has not first been sorted.
+func (t *healRequestSort) Merge() []TrieNodePathSet {
+ var result []TrieNodePathSet
+ for _, path := range t.syncPaths {
+ pathset := TrieNodePathSet(path)
+ if len(path) == 1 {
+ // It's an account reference.
+ result = append(result, pathset)
+ } else {
+ // It's a storage reference.
+ end := len(result) - 1
+ if len(result) == 0 || !bytes.Equal(pathset[0], result[end][0]) {
+ // The account doesn't match last, create a new entry.
+ result = append(result, pathset)
+ } else {
+ // It's the same account as the previous one, add to the storage
+ // paths of that request.
+ result[end] = append(result[end], pathset[1])
+ }
+ }
+ }
+ return result
+}
+
+// sortByAccountPath takes hashes and paths, and sorts them. After that, it generates
+// the TrieNodePaths and merges paths which belong to the same account path.
+func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common.Hash, []trie.SyncPath, []TrieNodePathSet) {
+ var syncPaths []trie.SyncPath
+ for _, path := range paths {
+ syncPaths = append(syncPaths, trie.NewSyncPath([]byte(path)))
+ }
+ n := &healRequestSort{paths, hashes, syncPaths}
+ sort.Sort(n)
+ pathsets := n.Merge()
+ return n.paths, n.hashes, n.syncPaths, pathsets
+}
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
new file mode 100644
index 0000000000..056a6bfa1a
--- /dev/null
+++ b/eth/protocols/snap/sync_test.go
@@ -0,0 +1,1971 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snap
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/binary"
+ "fmt"
+ "math/big"
+ mrand "math/rand"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ava-labs/coreth/core/rawdb"
+ "github.com/ava-labs/coreth/triedb/pathdb"
+ "github.com/ava-labs/libevm/common"
+ "github.com/ava-labs/libevm/core/types"
+ "github.com/ava-labs/libevm/crypto"
+ "github.com/ava-labs/libevm/ethdb"
+ "github.com/ava-labs/libevm/log"
+ "github.com/ava-labs/libevm/rlp"
+ "github.com/ava-labs/libevm/trie"
+ "github.com/ava-labs/libevm/trie/testutil"
+ "github.com/ava-labs/libevm/trie/trienode"
+ "github.com/ava-labs/libevm/triedb"
+ "github.com/holiman/uint256"
+ "golang.org/x/crypto/sha3"
+ "golang.org/x/exp/slices"
+)
+
+func TestHashing(t *testing.T) {
+ t.Parallel()
+
+ var bytecodes = make([][]byte, 10)
+ for i := 0; i < len(bytecodes); i++ {
+ buf := make([]byte, 100)
+ rand.Read(buf)
+ bytecodes[i] = buf
+ }
+ var want, got string
+ var old = func() {
+ hasher := sha3.NewLegacyKeccak256()
+ for i := 0; i < len(bytecodes); i++ {
+ hasher.Reset()
+ hasher.Write(bytecodes[i])
+ hash := hasher.Sum(nil)
+ got = fmt.Sprintf("%v\n%v", got, hash)
+ }
+ }
+ var new = func() {
+ hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
+ var hash = make([]byte, 32)
+ for i := 0; i < len(bytecodes); i++ {
+ hasher.Reset()
+ hasher.Write(bytecodes[i])
+ hasher.Read(hash)
+ want = fmt.Sprintf("%v\n%v", want, hash)
+ }
+ }
+ old()
+ new()
+ if want != got {
+ t.Errorf("want\n%v\ngot\n%v\n", want, got)
+ }
+}
+
+func BenchmarkHashing(b *testing.B) {
+ var bytecodes = make([][]byte, 10000)
+ for i := 0; i < len(bytecodes); i++ {
+ buf := make([]byte, 100)
+ rand.Read(buf)
+ bytecodes[i] = buf
+ }
+ var old = func() {
+ hasher := sha3.NewLegacyKeccak256()
+ for i := 0; i < len(bytecodes); i++ {
+ hasher.Reset()
+ hasher.Write(bytecodes[i])
+ hasher.Sum(nil)
+ }
+ }
+ var new = func() {
+ hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
+ var hash = make([]byte, 32)
+ for i := 0; i < len(bytecodes); i++ {
+ hasher.Reset()
+ hasher.Write(bytecodes[i])
+ hasher.Read(hash)
+ }
+ }
+ b.Run("old", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ old()
+ }
+ })
+ b.Run("new", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ new()
+ }
+ })
+}
+
+type (
+ accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
+ storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
+ trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
+ codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
+)
+
+type testPeer struct {
+ id string
+ test *testing.T
+ remote *Syncer
+ logger log.Logger
+ accountTrie *trie.Trie
+ accountValues []*kv
+ storageTries map[common.Hash]*trie.Trie
+ storageValues map[common.Hash][]*kv
+
+ accountRequestHandler accountHandlerFunc
+ storageRequestHandler storageHandlerFunc
+ trieRequestHandler trieHandlerFunc
+ codeRequestHandler codeHandlerFunc
+ term func()
+
+ // counters
+ nAccountRequests int
+ nStorageRequests int
+ nBytecodeRequests int
+ nTrienodeRequests int
+}
+
+func newTestPeer(id string, t *testing.T, term func()) *testPeer {
+ peer := &testPeer{
+ id: id,
+ test: t,
+ logger:
log.New("id", id), + accountRequestHandler: defaultAccountRequestHandler, + trieRequestHandler: defaultTrieRequestHandler, + storageRequestHandler: defaultStorageRequestHandler, + codeRequestHandler: defaultCodeRequestHandler, + term: term, + } + //stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true)) + //peer.logger.SetHandler(stderrHandler) + return peer +} + +func (t *testPeer) setStorageTries(tries map[common.Hash]*trie.Trie) { + t.storageTries = make(map[common.Hash]*trie.Trie) + for root, trie := range tries { + t.storageTries[root] = trie.Copy() + } +} + +func (t *testPeer) ID() string { return t.id } +func (t *testPeer) Log() log.Logger { return t.logger } + +func (t *testPeer) Stats() string { + return fmt.Sprintf(`Account requests: %d +Storage requests: %d +Bytecode requests: %d +Trienode requests: %d +`, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests) +} + +func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error { + t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes)) + t.nAccountRequests++ + go t.accountRequestHandler(t, id, root, origin, limit, bytes) + return nil +} + +func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error { + t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes)) + t.nTrienodeRequests++ + go t.trieRequestHandler(t, id, root, paths, bytes) + return nil +} + +func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error { + t.nStorageRequests++ + if len(accounts) == 1 && origin != nil { + t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes)) + } else { + t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes)) + } + go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes) + return nil +} + +func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error { + t.nBytecodeRequests++ + t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes)) + go t.codeRequestHandler(t, id, hashes, bytes) + return nil +} + +// defaultTrieRequestHandler is a well-behaving handler for trie healing requests +func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error { + // Pass the response + var nodes [][]byte + for _, pathset := range paths { + switch len(pathset) { + case 1: + blob, _, err := t.accountTrie.GetNode(pathset[0]) + if err != nil { + t.logger.Info("Error handling req", "error", err) + break + } + nodes = append(nodes, blob) + default: + account := t.storageTries[(common.BytesToHash(pathset[0]))] + for _, path := range pathset[1:] { + blob, _, err := account.GetNode(path) + if err != nil { + t.logger.Info("Error handling req", "error", err) + break + } + nodes = append(nodes, blob) + } + } + } + t.remote.OnTrieNodes(t, requestId, nodes) + return nil +} + +// defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests +func 
defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap) + if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil { + t.test.Errorf("Remote side rejected our delivery: %v", err) + t.term() + return err + } + return nil +} + +func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) { + var size uint64 + if limit == (common.Hash{}) { + limit = common.MaxHash + } + for _, entry := range t.accountValues { + if size > cap { + break + } + if bytes.Compare(origin[:], entry.k) <= 0 { + keys = append(keys, common.BytesToHash(entry.k)) + vals = append(vals, entry.v) + size += uint64(32 + len(entry.v)) + } + // If we've exceeded the request threshold, abort + if bytes.Compare(entry.k, limit[:]) >= 0 { + break + } + } + // Unless we send the entire trie, we need to supply proofs + // Actually, we need to supply proofs either way! This seems to be an implementation + // quirk in go-ethereum + proof := trienode.NewProofSet() + if err := t.accountTrie.Prove(origin[:], proof); err != nil { + t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err) + } + if len(keys) > 0 { + lastK := (keys[len(keys)-1])[:] + if err := t.accountTrie.Prove(lastK, proof); err != nil { + t.logger.Error("Could not prove last item", "error", err) + } + } + for _, blob := range proof.List() { + proofs = append(proofs, blob) + } + return keys, vals, proofs +} + +// defaultStorageRequestHandler is a well-behaving storage request handler +func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error { + hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max) + if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil { + t.test.Errorf("Remote side rejected our delivery: %v", err) + t.term() + } + return nil +} + +func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { + var bytecodes [][]byte + for _, h := range hashes { + bytecodes = append(bytecodes, getCodeByHash(h)) + } + if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { + t.test.Errorf("Remote side rejected our delivery: %v", err) + t.term() + } + return nil +} + +func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { + var size uint64 + for _, account := range accounts { + // The first account might start from a different origin and end sooner + var originHash common.Hash + if len(origin) > 0 { + originHash = common.BytesToHash(origin) + } + var limitHash = common.MaxHash + if len(limit) > 0 { + limitHash = common.BytesToHash(limit) + } + var ( + keys []common.Hash + vals [][]byte + abort bool + ) + for _, entry := range t.storageValues[account] { + if size >= max { + abort = true + break + } + if bytes.Compare(entry.k, originHash[:]) < 0 { + continue + } + keys = append(keys, common.BytesToHash(entry.k)) + vals = append(vals, entry.v) + size += uint64(32 + len(entry.v)) + if bytes.Compare(entry.k, limitHash[:]) >= 0 { + break + } + } + if len(keys) > 0 { + hashes = append(hashes, keys) + slots = append(slots, vals) + } + // Generate the Merkle 
proofs for the first and last storage slot, but
+ // only if the response was capped. If the entire storage trie is included
+ // in the response, no proofs are needed.
+ if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
+ // If we're aborting, we need to prove the first and last item
+ // This terminates the response (and thus the loop)
+ proof := trienode.NewProofSet()
+ stTrie := t.storageTries[account]
+
+ // Here's a potential gotcha: when constructing the proof, we cannot
+ // use the 'origin' slice directly, but must use the full 32-byte
+ // hash form.
+ if err := stTrie.Prove(originHash[:], proof); err != nil {
+ t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
+ }
+ if len(keys) > 0 {
+ lastK := (keys[len(keys)-1])[:]
+ if err := stTrie.Prove(lastK, proof); err != nil {
+ t.logger.Error("Could not prove last item", "error", err)
+ }
+ }
+ for _, blob := range proof.List() {
+ proofs = append(proofs, blob)
+ }
+ break
+ }
+ }
+ return hashes, slots, proofs
+}
+
+// createStorageRequestResponseAlwaysProve tests a cornercase, where the peer always
+// supplies the proof for the last account, even if it is 'complete'.
+func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
+ var size uint64
+ max = max * 3 / 4
+
+ var origin common.Hash
+ if len(bOrigin) > 0 {
+ origin = common.BytesToHash(bOrigin)
+ }
+ var exit bool
+ for i, account := range accounts {
+ var keys []common.Hash
+ var vals [][]byte
+ for _, entry := range t.storageValues[account] {
+ if bytes.Compare(entry.k, origin[:]) < 0 {
+ exit = true
+ }
+ keys = append(keys, common.BytesToHash(entry.k))
+ vals = append(vals, entry.v)
+ size += uint64(32 + len(entry.v))
+ if size > max {
+ exit = true
+ }
+ }
+ if i == len(accounts)-1 {
+ exit = true
+ }
+ hashes = append(hashes, keys)
+ slots = append(slots, vals)
+
+ if exit {
+ // If we're aborting, we need to prove the first and last item
+ // This terminates the response (and thus the loop)
+ proof := trienode.NewProofSet()
+ stTrie := t.storageTries[account]
+
+ // Here's a potential gotcha: when constructing the proof, we cannot
+ // use the 'origin' slice directly, but must use the full 32-byte
+ // hash form.
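+ // (Editor's note: in this handler 'origin' is already a common.Hash, so
+ // origin[:] is the full 32-byte key form the prover expects.)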
+ if err := stTrie.Prove(origin[:], proof); err != nil {
+ t.logger.Error("Could not prove inexistence of origin", "origin", origin,
+ "error", err)
+ }
+ if len(keys) > 0 {
+ lastK := (keys[len(keys)-1])[:]
+ if err := stTrie.Prove(lastK, proof); err != nil {
+ t.logger.Error("Could not prove last item", "error", err)
+ }
+ }
+ for _, blob := range proof.List() {
+ proofs = append(proofs, blob)
+ }
+ break
+ }
+ }
+ return hashes, slots, proofs
+}
+
+// emptyRequestAccountRangeFn rejects AccountRangeRequests
+func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
+ t.remote.OnAccounts(t, requestId, nil, nil, nil)
+ return nil
+}
+
+func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
+ return nil
+}
+
+func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
+ t.remote.OnTrieNodes(t, requestId, nil)
+ return nil
+}
+
+func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
+ return nil
+}
+
+func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
+ t.remote.OnStorage(t, requestId, nil, nil, nil)
+ return nil
+}
+
+func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
+ return nil
+}
+
+func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
+ hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
+ if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
+ t.test.Errorf("Remote side rejected our delivery: %v", err)
+ t.term()
+ }
+ return nil
+}
+
+//func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
+// var bytecodes [][]byte
+// t.remote.OnByteCodes(t, id, bytecodes)
+// return nil
+//}
+
+func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
+ var bytecodes [][]byte
+ for _, h := range hashes {
+ // Send back the hashes
+ bytecodes = append(bytecodes, h[:])
+ }
+ if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
+ t.logger.Info("remote error on delivery (as expected)", "error", err)
+ // Mimic the real-life handler, which drops a peer on errors
+ t.remote.Unregister(t.id)
+ }
+ return nil
+}
+
+func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
+ var bytecodes [][]byte
+ for _, h := range hashes[:1] {
+ bytecodes = append(bytecodes, getCodeByHash(h))
+ }
+ // Missing bytecode can be retrieved again, no error expected
+ if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
+ t.test.Errorf("Remote side rejected our delivery: %v", err)
+ t.term()
+ }
+ return nil
+}
+
+// starvingStorageRequestHandler is a somewhat well-behaved storage handler, but it caps the returned results to be very small
+func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
+ return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
+}
+
+func
starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500) +} + +//func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { +// return defaultAccountRequestHandler(t, requestId-1, root, origin, 500) +//} + +func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error { + hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap) + if len(proofs) > 0 { + proofs = proofs[1:] + } + if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil { + t.logger.Info("remote error on delivery (as expected)", "error", err) + // Mimic the real-life handler, which drops a peer on errors + t.remote.Unregister(t.id) + } + return nil +} + +// corruptStorageRequestHandler doesn't provide good proofs +func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { + hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max) + if len(proofs) > 0 { + proofs = proofs[1:] + } + if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil { + t.logger.Info("remote error on delivery (as expected)", "error", err) + // Mimic the real-life handler, which drops a peer on errors + t.remote.Unregister(t.id) + } + return nil +} + +func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { + hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max) + if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil { + t.logger.Info("remote error on delivery (as expected)", "error", err) + // Mimic the real-life handler, which drops a peer on errors + t.remote.Unregister(t.id) + } + return nil +} + +// TestSyncBloatedProof tests a scenario where we provide only _one_ value, but +// also ship the entire trie inside the proof. 
If the attack is successful,
+// the remote side does not do any follow-up requests
+func TestSyncBloatedProof(t *testing.T) {
+ t.Parallel()
+
+ testSyncBloatedProof(t, rawdb.HashScheme)
+ testSyncBloatedProof(t, rawdb.PathScheme)
+}
+
+func testSyncBloatedProof(t *testing.T, scheme string) {
+ var (
+ once sync.Once
+ cancel = make(chan struct{})
+ term = func() {
+ once.Do(func() {
+ close(cancel)
+ })
+ }
+ )
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
+ source := newTestPeer("source", t, term)
+ source.accountTrie = sourceAccountTrie.Copy()
+ source.accountValues = elems
+
+ source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
+ var (
+ proofs [][]byte
+ keys []common.Hash
+ vals [][]byte
+ )
+ // The values
+ for _, entry := range t.accountValues {
+ if bytes.Compare(entry.k, origin[:]) < 0 {
+ continue
+ }
+ if bytes.Compare(entry.k, limit[:]) > 0 {
+ continue
+ }
+ keys = append(keys, common.BytesToHash(entry.k))
+ vals = append(vals, entry.v)
+ }
+ // The proofs
+ proof := trienode.NewProofSet()
+ if err := t.accountTrie.Prove(origin[:], proof); err != nil {
+ t.logger.Error("Could not prove origin", "origin", origin, "error", err)
+ }
+ // The bloat: add proof of every single element
+ for _, entry := range t.accountValues {
+ if err := t.accountTrie.Prove(entry.k, proof); err != nil {
+ t.logger.Error("Could not prove item", "error", err)
+ }
+ }
+ // And remove one item from the elements
+ if len(keys) > 2 {
+ keys = append(keys[:1], keys[2:]...)
+ vals = append(vals[:1], vals[2:]...)
+ }
+ for _, blob := range proof.List() {
+ proofs = append(proofs, blob)
+ }
+ if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
+ t.logger.Info("remote error on delivery (as expected)", "error", err)
+ t.term()
+ // This is actually correct, signal to exit the test successfully
+ }
+ return nil
+ }
+ syncer := setupSyncer(nodeScheme, source)
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
+ t.Fatal("No error returned from incomplete/cancelled sync")
+ }
+}
+
+func setupSyncer(scheme string, peers ...*testPeer) *Syncer {
+ stateDb := rawdb.NewMemoryDatabase()
+ syncer := NewSyncer(stateDb, scheme)
+ for _, peer := range peers {
+ syncer.Register(peer)
+ peer.remote = syncer
+ }
+ return syncer
+}
+
+// TestSync tests a basic sync with one peer
+func TestSync(t *testing.T) {
+ t.Parallel()
+
+ testSync(t, rawdb.HashScheme)
+ testSync(t, rawdb.PathScheme)
+}
+
+func testSync(t *testing.T, scheme string) {
+ var (
+ once sync.Once
+ cancel = make(chan struct{})
+ term = func() {
+ once.Do(func() {
+ close(cancel)
+ })
+ }
+ )
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
+
+ mkSource := func(name string) *testPeer {
+ source := newTestPeer(name, t, term)
+ source.accountTrie = sourceAccountTrie.Copy()
+ source.accountValues = elems
+ return source
+ }
+ syncer := setupSyncer(nodeScheme, mkSource("source"))
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
+}
+
+// TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
+// panic within the prover
+func TestSyncTinyTriePanic(t *testing.T) {
+ t.Parallel()
+
+ testSyncTinyTriePanic(t, rawdb.HashScheme)
+ testSyncTinyTriePanic(t, rawdb.PathScheme)
+}
+
+func testSyncTinyTriePanic(t *testing.T, scheme string) {
+ var (
+ once sync.Once
+ cancel = make(chan struct{})
+ term = func() {
+ once.Do(func() {
+ close(cancel)
+ })
+ }
+ )
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1, scheme)
+
+ mkSource := func(name string) *testPeer {
+ source := newTestPeer(name, t, term)
+ source.accountTrie = sourceAccountTrie.Copy()
+ source.accountValues = elems
+ return source
+ }
+ syncer := setupSyncer(nodeScheme, mkSource("source"))
+ done := checkStall(t, term)
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ close(done)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
+}
+
+// TestMultiSync tests a basic sync with multiple peers
+func TestMultiSync(t *testing.T) {
+ t.Parallel()
+
+ testMultiSync(t, rawdb.HashScheme)
+ testMultiSync(t, rawdb.PathScheme)
+}
+
+func testMultiSync(t *testing.T, scheme string) {
+ var (
+ once sync.Once
+ cancel = make(chan struct{})
+ term = func() {
+ once.Do(func() {
+ close(cancel)
+ })
+ }
+ )
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme)
+
+ mkSource := func(name string) *testPeer {
+ source := newTestPeer(name, t, term)
+ source.accountTrie = sourceAccountTrie.Copy()
+ source.accountValues = elems
+ return source
+ }
+ syncer := setupSyncer(nodeScheme, mkSource("sourceA"), mkSource("sourceB"))
+ done := checkStall(t, term)
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ close(done)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
+}
+
+// TestSyncWithStorage tests basic sync using accounts + storage + code
+func TestSyncWithStorage(t *testing.T) {
+ t.Parallel()
+
+ testSyncWithStorage(t, rawdb.HashScheme)
+ testSyncWithStorage(t, rawdb.PathScheme)
+}
+
+func testSyncWithStorage(t *testing.T, scheme string) {
+ var (
+ once sync.Once
+ cancel = make(chan struct{})
+ term = func() {
+ once.Do(func() {
+ close(cancel)
+ })
+ }
+ )
+ sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false, false)
+
+ mkSource := func(name string) *testPeer {
+ source := newTestPeer(name, t, term)
+ source.accountTrie = sourceAccountTrie.Copy()
+ source.accountValues = elems
+ source.setStorageTries(storageTries)
+ source.storageValues = storageElems
+ return source
+ }
+ syncer := setupSyncer(scheme, mkSource("sourceA"))
+ done := checkStall(t, term)
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ close(done)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
+}
+
+// TestMultiSyncManyUseless contains one good peer, and many that don't return anything valuable at all
+func TestMultiSyncManyUseless(t *testing.T) {
+ t.Parallel()
+
+ testMultiSyncManyUseless(t, rawdb.HashScheme)
+ testMultiSyncManyUseless(t, rawdb.PathScheme)
+}
+
+func testMultiSyncManyUseless(t *testing.T, scheme string) {
+ var (
+ once sync.Once
+ cancel = make(chan struct{})
+ term = func() {
+ once.Do(func() {
+ close(cancel)
+ })
+ }
+ )
+ sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
+
+ mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
+ source := newTestPeer(name, t, term)
+ source.accountTrie = sourceAccountTrie.Copy()
+ source.accountValues = elems
+ source.setStorageTries(storageTries)
+ source.storageValues = storageElems
+
+ if !noAccount {
+ source.accountRequestHandler = emptyRequestAccountRangeFn
+ }
+ if !noStorage {
+ source.storageRequestHandler = emptyStorageRequestHandler
+ }
+ if !noTrieNode {
+ source.trieRequestHandler = emptyTrieRequestHandler
+ }
+ return source
+ }
+
+ syncer := setupSyncer(
+ scheme,
+ mkSource("full", true, true, true),
+ mkSource("noAccounts", false, true, true),
+ mkSource("noStorage", true, false, true),
+ mkSource("noTrie", true, true, false),
+ )
+ done := checkStall(t, term)
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ close(done)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
+}
+
+// TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many that don't return anything valuable at all
+func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
+ t.Parallel()
+
+ testMultiSyncManyUselessWithLowTimeout(t, rawdb.HashScheme)
+ testMultiSyncManyUselessWithLowTimeout(t, rawdb.PathScheme)
+}
+
+func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) {
+ var (
+ once sync.Once
+ cancel = make(chan struct{})
+ term = func() {
+ once.Do(func() {
+ close(cancel)
+ })
+ }
+ )
+ sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
+
+ mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
+ source := newTestPeer(name, t, term)
+ source.accountTrie = sourceAccountTrie.Copy()
+ source.accountValues = elems
+ source.setStorageTries(storageTries)
+ source.storageValues = storageElems
+
+ if !noAccount {
+ source.accountRequestHandler = emptyRequestAccountRangeFn
+ }
+ if !noStorage {
+ source.storageRequestHandler = emptyStorageRequestHandler
+ }
+ if !noTrieNode {
+ source.trieRequestHandler = emptyTrieRequestHandler
+ }
+ return source
+ }
+
+ syncer := setupSyncer(
+ scheme,
+ mkSource("full", true, true, true),
+ mkSource("noAccounts", false, true, true),
+ mkSource("noStorage", true, false, true),
+ mkSource("noTrie", true, true, false),
+ )
+ // We're setting the timeout to very low, to increase the chance of the timeout
+ // being triggered. This was previously a cause of panic, when a response
+ // arrived at the same time as a timeout was triggered.
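+ // (Editor's note: OverrideTTLLimit caps the adaptive request timeout, so
+ // at one millisecond virtually every in-flight request races its own
+ // expiry, exercising the revert/reschedule path.)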
+ syncer.rates.OverrideTTLLimit = time.Millisecond
+
+ done := checkStall(t, term)
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ close(done)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
+}
+
+// TestMultiSyncManyUnresponsive contains one good peer, and many that don't respond at all
+func TestMultiSyncManyUnresponsive(t *testing.T) {
+ t.Parallel()
+
+ testMultiSyncManyUnresponsive(t, rawdb.HashScheme)
+ testMultiSyncManyUnresponsive(t, rawdb.PathScheme)
+}
+
+func testMultiSyncManyUnresponsive(t *testing.T, scheme string) {
+ var (
+ once sync.Once
+ cancel = make(chan struct{})
+ term = func() {
+ once.Do(func() {
+ close(cancel)
+ })
+ }
+ )
+ sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
+
+ mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
+ source := newTestPeer(name, t, term)
+ source.accountTrie = sourceAccountTrie.Copy()
+ source.accountValues = elems
+ source.setStorageTries(storageTries)
+ source.storageValues = storageElems
+
+ if !noAccount {
+ source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
+ }
+ if !noStorage {
+ source.storageRequestHandler = nonResponsiveStorageRequestHandler
+ }
+ if !noTrieNode {
+ source.trieRequestHandler = nonResponsiveTrieRequestHandler
+ }
+ return source
+ }
+
+ syncer := setupSyncer(
+ scheme,
+ mkSource("full", true, true, true),
+ mkSource("noAccounts", false, true, true),
+ mkSource("noStorage", true, false, true),
+ mkSource("noTrie", true, true, false),
+ )
+ // We're setting the timeout to very low, to make the test run a bit faster
+ syncer.rates.OverrideTTLLimit = time.Millisecond
+
+ done := checkStall(t, term)
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ close(done)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
+}
+
+func checkStall(t *testing.T, term func()) chan struct{} {
+ testDone := make(chan struct{})
+ go func() {
+ select {
+ case <-time.After(10 * time.Minute): // TODO(karalabe): Make tests smaller, this is too much
+ t.Log("Sync stalled")
+ term()
+ case <-testDone:
+ return
+ }
+ }()
+ return testDone
+}
+
+// TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
+// account trie has a few boundary elements.
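+// The boundary elements sit exactly on the borders the syncer uses when it
+// splits the account range into concurrent tasks (see makeBoundaryAccountTrie
+// below), exercising the range-proof handling at those edges.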
+func TestSyncBoundaryAccountTrie(t *testing.T) { + t.Parallel() + + testSyncBoundaryAccountTrie(t, rawdb.HashScheme) + testSyncBoundaryAccountTrie(t, rawdb.PathScheme) +} + +func testSyncBoundaryAccountTrie(t *testing.T, scheme string) { + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(scheme, 3000) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = sourceAccountTrie.Copy() + source.accountValues = elems + return source + } + syncer := setupSyncer( + nodeScheme, + mkSource("peer-a"), + mkSource("peer-b"), + ) + done := checkStall(t, term) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) +} + +// TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is +// consistently returning very small results +func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { + t.Parallel() + + testSyncNoStorageAndOneCappedPeer(t, rawdb.HashScheme) + testSyncNoStorageAndOneCappedPeer(t, rawdb.PathScheme) +} + +func testSyncNoStorageAndOneCappedPeer(t *testing.T, scheme string) { + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme) + + mkSource := func(name string, slow bool) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = sourceAccountTrie.Copy() + source.accountValues = elems + + if slow { + source.accountRequestHandler = starvingAccountRequestHandler + } + return source + } + + syncer := setupSyncer( + nodeScheme, + mkSource("nice-a", false), + mkSource("nice-b", false), + mkSource("nice-c", false), + mkSource("capped", true), + ) + done := checkStall(t, term) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) +} + +// TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver +// code requests properly. +func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { + t.Parallel() + + testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.HashScheme) + testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.PathScheme) +} + +func testSyncNoStorageAndOneCodeCorruptPeer(t *testing.T, scheme string) { + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme) + + mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = sourceAccountTrie.Copy() + source.accountValues = elems + source.codeRequestHandler = codeFn + return source + } + // One is capped, one is corrupt. 
If we don't use a capped one, there's a 50%
+ // chance that the full set of codes requested is sent only to the
+ // non-corrupt peer, which delivers everything in one go, and makes the
+ // test moot
+ syncer := setupSyncer(
+ nodeScheme,
+ mkSource("capped", cappedCodeRequestHandler),
+ mkSource("corrupt", corruptCodeRequestHandler),
+ )
+ done := checkStall(t, term)
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ close(done)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
+}
+
+func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
+ t.Parallel()
+
+ testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.HashScheme)
+ testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.PathScheme)
+}
+
+func testSyncNoStorageAndOneAccountCorruptPeer(t *testing.T, scheme string) {
+ var (
+ once sync.Once
+ cancel = make(chan struct{})
+ term = func() {
+ once.Do(func() {
+ close(cancel)
+ })
+ }
+ )
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
+
+ mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
+ source := newTestPeer(name, t, term)
+ source.accountTrie = sourceAccountTrie.Copy()
+ source.accountValues = elems
+ source.accountRequestHandler = accFn
+ return source
+ }
+ // One is capped, one is corrupt. If we don't use a capped one, there's a 50%
+ // chance that the full set of accounts requested is sent only to the
+ // non-corrupt peer, which delivers everything in one go, and makes the
+ // test moot
+ syncer := setupSyncer(
+ nodeScheme,
+ mkSource("capped", defaultAccountRequestHandler),
+ mkSource("corrupt", corruptAccountRequestHandler),
+ )
+ done := checkStall(t, term)
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ close(done)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
+}
+
+// TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
+// one by one
+func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
+ t.Parallel()
+
+ testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.HashScheme)
+ testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.PathScheme)
+}
+
+func testSyncNoStorageAndOneCodeCappedPeer(t *testing.T, scheme string) {
+ var (
+ once sync.Once
+ cancel = make(chan struct{})
+ term = func() {
+ once.Do(func() {
+ close(cancel)
+ })
+ }
+ )
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme)
+
+ mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
+ source := newTestPeer(name, t, term)
+ source.accountTrie = sourceAccountTrie.Copy()
+ source.accountValues = elems
+ source.codeRequestHandler = codeFn
+ return source
+ }
+ // Count how many times it's invoked. Remember, there are only 8 unique hashes,
+ // so it shouldn't be more than that
+ var counter int
+ syncer := setupSyncer(
+ nodeScheme,
+ mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
+ counter++
+ return cappedCodeRequestHandler(t, id, hashes, max)
+ }),
+ )
+ done := checkStall(t, term)
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ close(done)
+
+ // There are only 8 unique hashes, and 3K accounts. However, the code
+ // deduplication is per request batch. If it were a perfect global dedup,
+ // we would expect only 8 requests. If there were no dedup, there would be
+ // 3k requests.
+ // We expect somewhere below 100 requests for these 8 unique hashes. But + // the number can be flaky, so don't limit it so strictly. + if threshold := 100; counter > threshold { + t.Logf("Error, expected < %d invocations, got %d", threshold, counter) + } + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) +} + +// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the +// storage trie has a few boundary elements. +func TestSyncBoundaryStorageTrie(t *testing.T) { + t.Parallel() + + testSyncBoundaryStorageTrie(t, rawdb.HashScheme) + testSyncBoundaryStorageTrie(t, rawdb.PathScheme) +} + +func testSyncBoundaryStorageTrie(t *testing.T, scheme string) { + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true, false) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = sourceAccountTrie.Copy() + source.accountValues = elems + source.setStorageTries(storageTries) + source.storageValues = storageElems + return source + } + syncer := setupSyncer( + scheme, + mkSource("peer-a"), + mkSource("peer-b"), + ) + done := checkStall(t, term) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) +} + +// TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is +// consistently returning very small results +func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { + t.Parallel() + + testSyncWithStorageAndOneCappedPeer(t, rawdb.HashScheme) + testSyncWithStorageAndOneCappedPeer(t, rawdb.PathScheme) +} + +func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) { + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false, false) + + mkSource := func(name string, slow bool) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = sourceAccountTrie.Copy() + source.accountValues = elems + source.setStorageTries(storageTries) + source.storageValues = storageElems + + if slow { + source.storageRequestHandler = starvingStorageRequestHandler + } + return source + } + + syncer := setupSyncer( + scheme, + mkSource("nice-a", false), + mkSource("slow", true), + ) + done := checkStall(t, term) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) +} + +// TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is +// sometimes sending bad proofs +func TestSyncWithStorageAndCorruptPeer(t *testing.T) { + t.Parallel() + + testSyncWithStorageAndCorruptPeer(t, rawdb.HashScheme) + testSyncWithStorageAndCorruptPeer(t, rawdb.PathScheme) +} + +func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) { + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false) + + mkSource := func(name string, handler storageHandlerFunc) *testPeer { + source := 
newTestPeer(name, t, term)
+ source.accountTrie = sourceAccountTrie.Copy()
+ source.accountValues = elems
+ source.setStorageTries(storageTries)
+ source.storageValues = storageElems
+ source.storageRequestHandler = handler
+ return source
+ }
+
+ syncer := setupSyncer(
+ scheme,
+ mkSource("nice-a", defaultStorageRequestHandler),
+ mkSource("nice-b", defaultStorageRequestHandler),
+ mkSource("nice-c", defaultStorageRequestHandler),
+ mkSource("corrupt", corruptStorageRequestHandler),
+ )
+ done := checkStall(t, term)
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ close(done)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
+}
+
+func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
+ t.Parallel()
+
+ testSyncWithStorageAndNonProvingPeer(t, rawdb.HashScheme)
+ testSyncWithStorageAndNonProvingPeer(t, rawdb.PathScheme)
+}
+
+func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
+ var (
+ once sync.Once
+ cancel = make(chan struct{})
+ term = func() {
+ once.Do(func() {
+ close(cancel)
+ })
+ }
+ )
+ sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
+
+ mkSource := func(name string, handler storageHandlerFunc) *testPeer {
+ source := newTestPeer(name, t, term)
+ source.accountTrie = sourceAccountTrie.Copy()
+ source.accountValues = elems
+ source.setStorageTries(storageTries)
+ source.storageValues = storageElems
+ source.storageRequestHandler = handler
+ return source
+ }
+ syncer := setupSyncer(
+ scheme,
+ mkSource("nice-a", defaultStorageRequestHandler),
+ mkSource("nice-b", defaultStorageRequestHandler),
+ mkSource("nice-c", defaultStorageRequestHandler),
+ mkSource("corrupt", noProofStorageRequestHandler),
+ )
+ done := checkStall(t, term)
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ close(done)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
+}
+
+// TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against
+// a peer who insists on delivering full storage sets _and_ proofs. This triggered
+// an error, where the recipient erroneously clipped the boundary nodes, but
+// did not mark the account for healing.
+func TestSyncWithStorageMisbehavingProve(t *testing.T) {
+ t.Parallel()
+
+ testSyncWithStorageMisbehavingProve(t, rawdb.HashScheme)
+ testSyncWithStorageMisbehavingProve(t, rawdb.PathScheme)
+}
+
+func testSyncWithStorageMisbehavingProve(t *testing.T, scheme string) {
+ var (
+ once sync.Once
+ cancel = make(chan struct{})
+ term = func() {
+ once.Do(func() {
+ close(cancel)
+ })
+ }
+ )
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(scheme, 10, 30, false)
+
+ mkSource := func(name string) *testPeer {
+ source := newTestPeer(name, t, term)
+ source.accountTrie = sourceAccountTrie.Copy()
+ source.accountValues = elems
+ source.setStorageTries(storageTries)
+ source.storageValues = storageElems
+ source.storageRequestHandler = proofHappyStorageRequestHandler
+ return source
+ }
+ syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
+ if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+ t.Fatalf("sync failed: %v", err)
+ }
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
+}
+
+// TestSyncWithUnevenStorage tests sync where the storage trie is not even,
+// and has a few empty ranges.
+func TestSyncWithUnevenStorage(t *testing.T) { + t.Parallel() + + testSyncWithUnevenStorage(t, rawdb.HashScheme) + testSyncWithUnevenStorage(t, rawdb.PathScheme) +} + +func testSyncWithUnevenStorage(t *testing.T, scheme string) { + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + accountTrie, accounts, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 256, false, false, true) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = accountTrie.Copy() + source.accountValues = accounts + source.setStorageTries(storageTries) + source.storageValues = storageElems + source.storageRequestHandler = func(t *testPeer, reqId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { + return defaultStorageRequestHandler(t, reqId, root, accounts, origin, limit, 128) // retrieve storage in large mode + } + return source + } + syncer := setupSyncer(scheme, mkSource("source")) + if err := syncer.Sync(accountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + verifyTrie(scheme, syncer.db, accountTrie.Hash(), t) +} + +type kv struct { + k, v []byte +} + +func (k *kv) cmp(other *kv) int { + return bytes.Compare(k.k, other.k) +} + +func key32(i uint64) []byte { + key := make([]byte, 32) + binary.LittleEndian.PutUint64(key, i) + return key +} + +var ( + codehashes = []common.Hash{ + crypto.Keccak256Hash([]byte{0}), + crypto.Keccak256Hash([]byte{1}), + crypto.Keccak256Hash([]byte{2}), + crypto.Keccak256Hash([]byte{3}), + crypto.Keccak256Hash([]byte{4}), + crypto.Keccak256Hash([]byte{5}), + crypto.Keccak256Hash([]byte{6}), + crypto.Keccak256Hash([]byte{7}), + } +) + +// getCodeHash returns a pseudo-random code hash +func getCodeHash(i uint64) []byte { + h := codehashes[int(i)%len(codehashes)] + return common.CopyBytes(h[:]) +} + +// getCodeByHash convenience function to lookup the code from the code hash +func getCodeByHash(hash common.Hash) []byte { + if hash == types.EmptyCodeHash { + return nil + } + for i, h := range codehashes { + if h == hash { + return []byte{byte(i)} + } + } + return nil +} + +// makeAccountTrieNoStorage spits out a trie, along with the leafs +func makeAccountTrieNoStorage(n int, scheme string) (string, *trie.Trie, []*kv) { + var ( + db = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) + accTrie = trie.NewEmpty(db) + entries []*kv + ) + for i := uint64(1); i <= uint64(n); i++ { + value, _ := rlp.EncodeToBytes(&types.StateAccount{ + Nonce: i, + Balance: uint256.NewInt(i), + Root: types.EmptyRootHash, + CodeHash: getCodeHash(i), + }) + key := key32(i) + elem := &kv{key, value} + accTrie.MustUpdate(elem.k, elem.v) + entries = append(entries, elem) + } + slices.SortFunc(entries, (*kv).cmp) + + // Commit the state changes into db and re-create the trie + // for accessing later. + root, nodes, _ := accTrie.Commit(false) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + + accTrie, _ = trie.New(trie.StateTrieID(root), db) + return db.Scheme(), accTrie, entries +} + +// makeBoundaryAccountTrie constructs an account trie. Instead of filling +// accounts normally, this function will fill a few accounts which have +// boundary hash. 
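+// A "boundary hash" here is one lying exactly on the edge of an
+// accountConcurrency-sized fetch range: the 2^256 key space is split into
+// equal chunks and one account is placed at the upper bound of each chunk.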
+func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
+	var (
+		entries    []*kv
+		boundaries []common.Hash
+
+		db      = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
+		accTrie = trie.NewEmpty(db)
+	)
+	// Initialize boundaries
+	var next common.Hash
+	step := new(big.Int).Sub(
+		new(big.Int).Div(
+			new(big.Int).Exp(common.Big2, common.Big256, nil),
+			big.NewInt(int64(accountConcurrency)),
+		), common.Big1,
+	)
+	for i := 0; i < accountConcurrency; i++ {
+		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
+		if i == accountConcurrency-1 {
+			last = common.MaxHash
+		}
+		boundaries = append(boundaries, last)
+		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
+	}
+	// Fill boundary accounts
+	for i := 0; i < len(boundaries); i++ {
+		value, _ := rlp.EncodeToBytes(&types.StateAccount{
+			Nonce:    uint64(0),
+			Balance:  uint256.NewInt(uint64(i)),
+			Root:     types.EmptyRootHash,
+			CodeHash: getCodeHash(uint64(i)),
+		})
+		elem := &kv{boundaries[i].Bytes(), value}
+		accTrie.MustUpdate(elem.k, elem.v)
+		entries = append(entries, elem)
+	}
+	// Fill other accounts if required
+	for i := uint64(1); i <= uint64(n); i++ {
+		value, _ := rlp.EncodeToBytes(&types.StateAccount{
+			Nonce:    i,
+			Balance:  uint256.NewInt(i),
+			Root:     types.EmptyRootHash,
+			CodeHash: getCodeHash(i),
+		})
+		elem := &kv{key32(i), value}
+		accTrie.MustUpdate(elem.k, elem.v)
+		entries = append(entries, elem)
+	}
+	slices.SortFunc(entries, (*kv).cmp)
+
+	// Commit the state changes into db and re-create the trie
+	// for accessing later.
+	root, nodes, _ := accTrie.Commit(false)
+	db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+
+	accTrie, _ = trie.New(trie.StateTrieID(root), db)
+	return db.Scheme(), accTrie, entries
+}
+
+// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
+// has a unique storage set.
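+// Uniqueness comes from seeding each storage trie with the owning
+// account's index (see makeStorageTrieWithSeed), so no two accounts end up
+// sharing a storage root.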
+func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots int, code bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) { + var ( + db = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) + accTrie = trie.NewEmpty(db) + entries []*kv + storageRoots = make(map[common.Hash]common.Hash) + storageTries = make(map[common.Hash]*trie.Trie) + storageEntries = make(map[common.Hash][]*kv) + nodes = trienode.NewMergedNodeSet() + ) + // Create n accounts in the trie + for i := uint64(1); i <= uint64(accounts); i++ { + key := key32(i) + codehash := types.EmptyCodeHash.Bytes() + if code { + codehash = getCodeHash(i) + } + // Create a storage trie + stRoot, stNodes, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db) + nodes.Merge(stNodes) + + value, _ := rlp.EncodeToBytes(&types.StateAccount{ + Nonce: i, + Balance: uint256.NewInt(i), + Root: stRoot, + CodeHash: codehash, + }) + elem := &kv{key, value} + accTrie.MustUpdate(elem.k, elem.v) + entries = append(entries, elem) + + storageRoots[common.BytesToHash(key)] = stRoot + storageEntries[common.BytesToHash(key)] = stEntries + } + slices.SortFunc(entries, (*kv).cmp) + + // Commit account trie + root, set, _ := accTrie.Commit(true) + nodes.Merge(set) + + // Commit gathered dirty nodes into database + db.Update(root, types.EmptyRootHash, 0, nodes, nil) + + // Re-create tries with new root + accTrie, _ = trie.New(trie.StateTrieID(root), db) + for i := uint64(1); i <= uint64(accounts); i++ { + key := key32(i) + id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)]) + trie, _ := trie.New(id, db) + storageTries[common.BytesToHash(key)] = trie + } + return db.Scheme(), accTrie, entries, storageTries, storageEntries +} + +// makeAccountTrieWithStorage spits out a trie, along with the leafs +func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool, uneven bool) (*trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) { + var ( + db = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) + accTrie = trie.NewEmpty(db) + entries []*kv + storageRoots = make(map[common.Hash]common.Hash) + storageTries = make(map[common.Hash]*trie.Trie) + storageEntries = make(map[common.Hash][]*kv) + nodes = trienode.NewMergedNodeSet() + ) + // Create n accounts in the trie + for i := uint64(1); i <= uint64(accounts); i++ { + key := key32(i) + codehash := types.EmptyCodeHash.Bytes() + if code { + codehash = getCodeHash(i) + } + // Make a storage trie + var ( + stRoot common.Hash + stNodes *trienode.NodeSet + stEntries []*kv + ) + if boundary { + stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db) + } else if uneven { + stRoot, stNodes, stEntries = makeUnevenStorageTrie(common.BytesToHash(key), slots, db) + } else { + stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db) + } + nodes.Merge(stNodes) + + value, _ := rlp.EncodeToBytes(&types.StateAccount{ + Nonce: i, + Balance: uint256.NewInt(i), + Root: stRoot, + CodeHash: codehash, + }) + elem := &kv{key, value} + accTrie.MustUpdate(elem.k, elem.v) + entries = append(entries, elem) + + // we reuse the same one for all accounts + storageRoots[common.BytesToHash(key)] = stRoot + storageEntries[common.BytesToHash(key)] = stEntries + } + slices.SortFunc(entries, (*kv).cmp) + + // Commit account trie + root, set, _ := accTrie.Commit(true) + nodes.Merge(set) + + 
// Commit gathered dirty nodes into database
+	db.Update(root, types.EmptyRootHash, 0, nodes, nil)
+
+	// Re-create tries with new root
+	accTrie, err := trie.New(trie.StateTrieID(root), db)
+	if err != nil {
+		panic(err)
+	}
+	for i := uint64(1); i <= uint64(accounts); i++ {
+		key := key32(i)
+		id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
+		trie, err := trie.New(id, db)
+		if err != nil {
+			panic(err)
+		}
+		storageTries[common.BytesToHash(key)] = trie
+	}
+	return accTrie, entries, storageTries, storageEntries
+}
+
+// makeStorageTrieWithSeed fills a storage trie with n items, returning the
+// not-yet-committed trie and the sorted entries. The seeds can be used to ensure
+// that tries are unique.
+func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
+	trie, _ := trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
+	var entries []*kv
+	for i := uint64(1); i <= n; i++ {
+		// store 'x' at slot 'x'
+		slotValue := key32(i + seed)
+		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
+
+		slotKey := key32(i)
+		key := crypto.Keccak256Hash(slotKey[:])
+
+		elem := &kv{key[:], rlpSlotValue}
+		trie.MustUpdate(elem.k, elem.v)
+		entries = append(entries, elem)
+	}
+	slices.SortFunc(entries, (*kv).cmp)
+	root, nodes, _ := trie.Commit(false)
+	return root, nodes, entries
+}
+
+// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
+// storage slots normally, this function will fill a few slots which have
+// boundary hash.
+func makeBoundaryStorageTrie(owner common.Hash, n int, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
+	var (
+		entries    []*kv
+		boundaries []common.Hash
+		trie, _    = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
+	)
+	// Initialize boundaries
+	var next common.Hash
+	step := new(big.Int).Sub(
+		new(big.Int).Div(
+			new(big.Int).Exp(common.Big2, common.Big256, nil),
+			big.NewInt(int64(accountConcurrency)),
+		), common.Big1,
+	)
+	for i := 0; i < accountConcurrency; i++ {
+		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
+		if i == accountConcurrency-1 {
+			last = common.MaxHash
+		}
+		boundaries = append(boundaries, last)
+		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
+	}
+	// Fill boundary slots
+	for i := 0; i < len(boundaries); i++ {
+		key := boundaries[i]
+		val := []byte{0xde, 0xad, 0xbe, 0xef}
+
+		elem := &kv{key[:], val}
+		trie.MustUpdate(elem.k, elem.v)
+		entries = append(entries, elem)
+	}
+	// Fill other slots if required
+	for i := uint64(1); i <= uint64(n); i++ {
+		slotKey := key32(i)
+		key := crypto.Keccak256Hash(slotKey[:])
+
+		slotValue := key32(i)
+		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
+
+		elem := &kv{key[:], rlpSlotValue}
+		trie.MustUpdate(elem.k, elem.v)
+		entries = append(entries, elem)
+	}
+	slices.SortFunc(entries, (*kv).cmp)
+	root, nodes, _ := trie.Commit(false)
+	return root, nodes, entries
+}
+
+// makeUnevenStorageTrie constructs a storage trie with states distributed
+// unevenly across different key ranges.
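+// The chosen prefixes are drawn from 0x00-0x0e (mrand.Intn(15)), so the
+// top of the key space is never populated, matching the comment below that
+// the last range is deliberately left empty.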
+func makeUnevenStorageTrie(owner common.Hash, slots int, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) { + var ( + entries []*kv + tr, _ = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db) + chosen = make(map[byte]struct{}) + ) + for i := 0; i < 3; i++ { + var n int + for { + n = mrand.Intn(15) // the last range is set empty deliberately + if _, ok := chosen[byte(n)]; ok { + continue + } + chosen[byte(n)] = struct{}{} + break + } + for j := 0; j < slots/3; j++ { + key := append([]byte{byte(n)}, testutil.RandBytes(31)...) + val, _ := rlp.EncodeToBytes(testutil.RandBytes(32)) + + elem := &kv{key, val} + tr.MustUpdate(elem.k, elem.v) + entries = append(entries, elem) + } + } + slices.SortFunc(entries, (*kv).cmp) + root, nodes, _ := tr.Commit(false) + return root, nodes, entries +} + +func verifyTrie(scheme string, db ethdb.KeyValueStore, root common.Hash, t *testing.T) { + t.Helper() + triedb := triedb.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme)) + accTrie, err := trie.New(trie.StateTrieID(root), triedb) + if err != nil { + t.Fatal(err) + } + accounts, slots := 0, 0 + accIt := trie.NewIterator(accTrie.MustNodeIterator(nil)) + for accIt.Next() { + var acc types.StateAccount + if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil { + log.Crit("Invalid account encountered during snapshot creation", "err", err) + } + accounts++ + if acc.Root != types.EmptyRootHash { + id := trie.StorageTrieID(root, common.BytesToHash(accIt.Key), acc.Root) + storeTrie, err := trie.NewStateTrie(id, triedb) + if err != nil { + t.Fatal(err) + } + storeIt := trie.NewIterator(storeTrie.MustNodeIterator(nil)) + for storeIt.Next() { + slots++ + } + if err := storeIt.Err; err != nil { + t.Fatal(err) + } + } + } + if err := accIt.Err; err != nil { + t.Fatal(err) + } + t.Logf("accounts: %d, slots: %d", accounts, slots) +} + +// TestSyncAccountPerformance tests how efficient the snap algo is at minimizing +// state healing +func TestSyncAccountPerformance(t *testing.T) { + //t.Parallel() + + testSyncAccountPerformance(t, rawdb.HashScheme) + testSyncAccountPerformance(t, rawdb.PathScheme) +} + +func testSyncAccountPerformance(t *testing.T, scheme string) { + // Set the account concurrency to 1. This _should_ result in the + // range root to become correct, and there should be no healing needed + defer func(old int) { accountConcurrency = old }(accountConcurrency) + accountConcurrency = 1 + + var ( + once sync.Once + cancel = make(chan struct{}) + term = func() { + once.Do(func() { + close(cancel) + }) + } + ) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, term) + source.accountTrie = sourceAccountTrie.Copy() + source.accountValues = elems + return source + } + src := mkSource("source") + syncer := setupSyncer(nodeScheme, src) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) + // The trie root will always be requested, since it is added when the snap + // sync cycle starts. When popping the queue, we do not look it up again. + // Doing so would bring this number down to zero in this artificial testcase, + // but only add extra IO for no reason in practice. 
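+	// Hence the source peer is expected to serve exactly one trie node
+	// (heal) request: the root itself.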
+ if have, want := src.nTrienodeRequests, 1; have != want { + fmt.Print(src.Stats()) + t.Errorf("trie node heal requests wrong, want %d, have %d", want, have) + } +} + +func TestSlotEstimation(t *testing.T) { + for i, tc := range []struct { + last common.Hash + count int + want uint64 + }{ + { + // Half the space + common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + 100, + 100, + }, + { + // 1 / 16th + common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + 100, + 1500, + }, + { + // Bit more than 1 / 16th + common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"), + 100, + 1499, + }, + { + // Almost everything + common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"), + 100, + 6, + }, + { + // Almost nothing -- should lead to error + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), + 1, + 0, + }, + { + // Nothing -- should lead to error + common.Hash{}, + 100, + 0, + }, + } { + have, _ := estimateRemainingSlots(tc.count, tc.last) + if want := tc.want; have != want { + t.Errorf("test %d: have %d want %d", i, have, want) + } + } +} + +func newDbConfig(scheme string) *triedb.Config { + if scheme == rawdb.HashScheme { + return &triedb.Config{} + } + return &triedb.Config{DBOverride: pathdb.Defaults.BackendConstructor} +} diff --git a/eth/protocols/snap/tracker.go b/eth/protocols/snap/tracker.go new file mode 100644 index 0000000000..f4b6b779df --- /dev/null +++ b/eth/protocols/snap/tracker.go @@ -0,0 +1,26 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package snap + +import ( + "time" + + "github.com/ava-labs/libevm/p2p/tracker" +) + +// requestTracker is a singleton tracker for request times. +var requestTracker = tracker.New(ProtocolName, time.Minute) diff --git a/eth/state_accessor.go b/eth/state_accessor.go index 37363f4e50..ea5afa3b85 100644 --- a/eth/state_accessor.go +++ b/eth/state_accessor.go @@ -172,8 +172,9 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u if err != nil { return nil, nil, fmt.Errorf("state reset after block %d failed: %v", current.NumberU64(), err) } - // Note: In subnet-evm, the state reference is held by passing true to [statedb.Commit]. - // Drop the parent state to prevent accumulating too many nodes in memory. + // Hold the state reference and also drop the parent state + // to prevent accumulating too many nodes in memory. 
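+		// Referencing the new root pins it in the trie database; the
+		// previous root is released via Dereference below.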
+ tdb.Reference(root, common.Hash{}) if parent != (common.Hash{}) { tdb.Dereference(parent) } diff --git a/go.mod b/go.mod index 2cb61bc575..afca046650 100644 --- a/go.mod +++ b/go.mod @@ -4,11 +4,12 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 - github.com/ava-labs/avalanchego v1.12.0-initial-poc.9.0.20241129191047-0a4ba1bb7045 + github.com/ava-labs/avalanchego v1.12.1-0.20250107220127-32f58b4fa9c8 github.com/ava-labs/libevm v1.13.14-0.1.0.rc-2 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set/v2 v2.1.0 + github.com/google/gofuzz v1.2.0 github.com/gorilla/rpc v1.2.0 github.com/gorilla/websocket v1.5.0 github.com/hashicorp/go-bexpr v0.1.10 @@ -29,12 +30,13 @@ require ( github.com/tyler-smith/go-bip39 v1.1.0 github.com/urfave/cli/v2 v2.25.7 go.uber.org/goleak v1.3.0 - go.uber.org/mock v0.4.0 - golang.org/x/crypto v0.26.0 + go.uber.org/mock v0.5.0 + golang.org/x/crypto v0.31.0 golang.org/x/exp v0.0.0-20231127185646-65229373498e - golang.org/x/sync v0.8.0 - golang.org/x/sys v0.24.0 + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.28.0 golang.org/x/time v0.3.0 + golang.org/x/tools v0.22.0 google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -121,11 +123,10 @@ require ( go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + golang.org/x/mod v0.18.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect gonum.org/v1/gonum v0.11.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed // indirect diff --git a/go.sum b/go.sum index 50b81abf9f..2c5b99975c 100644 --- a/go.sum +++ b/go.sum @@ -56,8 +56,8 @@ github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.12.0-initial-poc.9.0.20241129191047-0a4ba1bb7045 h1:oOR9jYAlNm3FgWWatjCZIeOslLlmmbnCrqAErQDHngk= -github.com/ava-labs/avalanchego v1.12.0-initial-poc.9.0.20241129191047-0a4ba1bb7045/go.mod h1:tubX8cWRjtVe6s6sm/2xeljfuhEISuJiVzvOiPiXSuI= +github.com/ava-labs/avalanchego v1.12.1-0.20250107220127-32f58b4fa9c8 h1:qN3MOBHB//Ynhgt5Vys3iVe42Sr0EWSeN18VL3ecXzE= +github.com/ava-labs/avalanchego v1.12.1-0.20250107220127-32f58b4fa9c8/go.mod h1:2B7+E5neLvkOr2zursGhebjU26d4AfB7RazPxBs8hHg= github.com/ava-labs/libevm v1.13.14-0.1.0.rc-2 h1:CVbn0hSsPCl6gCkTCnqwuN4vtJgdVbkCqLXzYAE7qF8= github.com/ava-labs/libevm v1.13.14-0.1.0.rc-2/go.mod h1:yBctIV/wnxXTF38h95943jvpuk4aj07TrjbpoGor6LQ= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= @@ -590,8 +590,8 @@ go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lI 
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= -go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= @@ -609,8 +609,8 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -649,8 +649,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -695,8 +695,8 @@ golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod 
h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -718,8 +718,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -786,12 +786,12 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -802,8 +802,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod 
h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -865,8 +865,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/miner/worker.go b/miner/worker.go index 0187a39712..b73190f9e3 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -265,14 +265,15 @@ func (w *worker) commitNewWork(predicateContext *precompileconfig.PredicateConte } func (w *worker) createCurrentEnvironment(predicateContext *precompileconfig.PredicateContext, parent *types.Header, header *types.Header, tstart time.Time) (*environment, error) { - state, err := w.chain.StateAt(parent.Root) + currentState, err := w.chain.StateAt(parent.Root) if err != nil { return nil, err } - state.StartPrefetcher("miner", w.eth.BlockChain().CacheConfig().TriePrefetcherParallelism) + numPrefetchers := w.chain.CacheConfig().TriePrefetcherParallelism + currentState.StartPrefetcher("miner", state.WithConcurrentWorkers(numPrefetchers)) return &environment{ signer: types.MakeSigner(w.chainConfig, header.Number, header.Time), - state: state, + state: currentState, parent: parent, header: header, tcount: 0, diff --git a/params/avalanche_params.go b/params/avalanche_params.go index afd7feb74d..93b66eb99a 100644 --- a/params/avalanche_params.go +++ b/params/avalanche_params.go @@ -6,7 +6,6 @@ package params import ( "math/big" - "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/coreth/predicate" ) @@ -17,8 +16,6 @@ const ( LaunchMinGasPrice int64 = 470 * GWei ApricotPhase1MinGasPrice int64 = 225 * GWei - AvalancheAtomicTxFee = units.MilliAvax - ApricotPhase1GasLimit uint64 = 8_000_000 CortinaGasLimit uint64 = 15_000_000 @@ -37,9 +34,6 @@ const ( // After Durango, the extra data past the dynamic fee rollup window represents predicate results. DynamicFeeExtraDataSize = predicate.DynamicFeeExtraDataSize RollupWindow uint64 = 10 - - // The base cost to charge per atomic transaction. Added in Apricot Phase 5. 
- AtomicTxBaseCost uint64 = 10_000 ) // The atomic gas limit specifies the maximum amount of gas that can be consumed by the atomic diff --git a/peer/network.go b/peer/network.go index a82de85932..8c5d6d95b7 100644 --- a/peer/network.go +++ b/peer/network.go @@ -56,9 +56,6 @@ type Network interface { // by calling OnPeerConnected for each peer Shutdown() - // SetGossipHandler sets the provided gossip handler as the gossip handler - SetGossipHandler(handler message.GossipHandler) - // SetRequestHandler sets the provided request handler as the request handler SetRequestHandler(handler message.RequestHandler) @@ -73,6 +70,10 @@ type Network interface { NewClient(protocol uint64, options ...p2p.ClientOption) *p2p.Client // AddHandler registers a server handler for an application protocol AddHandler(protocol uint64, handler p2p.Handler) error + + // AddConnector adds a listener for Connected/Disconnected events. + // When a connector is added it will be called for all currently connected peers. + AddConnector(validators.Connector) error } // network is an implementation of Network that processes message requests for @@ -87,9 +88,9 @@ type network struct { appSender common.AppSender // avalanchego AppSender for sending messages codec codec.Manager // Codec used for parsing messages appRequestHandler message.RequestHandler // maps request type => handler - gossipHandler message.GossipHandler // maps gossip type => handler peers *peerTracker // tracking of peers & bandwidth appStats stats.RequestHandlerStats // Provide request handler metrics + connectors []validators.Connector // List of connectors to notify on Connected/Disconnected events // Set to true when Shutdown is called, after which all operations on this // struct are no-ops. @@ -110,13 +111,28 @@ func NewNetwork(p2pNetwork *p2p.Network, appSender common.AppSender, codec codec outstandingRequestHandlers: make(map[uint32]message.ResponseHandler), activeAppRequests: semaphore.NewWeighted(maxActiveAppRequests), p2pNetwork: p2pNetwork, - gossipHandler: message.NoopMempoolGossipHandler{}, appRequestHandler: message.NoopRequestHandler{}, peers: NewPeerTracker(), appStats: stats.NewRequestHandlerStats(), } } +func (n *network) AddConnector(connector validators.Connector) error { + n.lock.Lock() + defer n.lock.Unlock() + + n.connectors = append(n.connectors, connector) + + // Notify the connector of all currently connected peers + for peerID, peer := range n.peers.peers { + if err := connector.Connected(context.Background(), peerID, peer.version); err != nil { + return fmt.Errorf("failed to notify connector of connected peer %s: %w", peerID, err) + } + } + + return nil +} + // SendAppRequestAny synchronously sends request to an arbitrary peer with a // node version greater than or equal to minVersion. If minVersion is nil, // the request will be sent to any peer regardless of their version. @@ -345,14 +361,7 @@ func (n *network) markRequestFulfilled(requestID uint32) (message.ResponseHandle // from a peer. An error returned by this function is treated as fatal by the // engine. 
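+// With the legacy gossip handler removed, every AppGossip payload is now
+// forwarded directly to the avalanchego p2p SDK network.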
func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) error { - var gossipMsg message.GossipMessage - if _, err := n.codec.Unmarshal(gossipBytes, &gossipMsg); err != nil { - log.Debug("forwarding AppGossip to SDK network", "nodeID", nodeID, "gossipLen", len(gossipBytes), "err", err) - return n.p2pNetwork.AppGossip(ctx, nodeID, gossipBytes) - } - - log.Debug("processing AppGossip from node", "nodeID", nodeID, "msg", gossipMsg) - return gossipMsg.Handle(n.gossipHandler, nodeID) + return n.p2pNetwork.AppGossip(ctx, nodeID, gossipBytes) } // Connected adds the given nodeID to the peer list so that it can receive messages @@ -371,6 +380,12 @@ func (n *network) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion n.peers.Connected(nodeID, nodeVersion) } + for _, connector := range n.connectors { + if err := connector.Connected(ctx, nodeID, nodeVersion); err != nil { + return fmt.Errorf("failed to notify connector of connected peer %s: %w", nodeID, err) + } + } + return n.p2pNetwork.Connected(ctx, nodeID, nodeVersion) } @@ -389,6 +404,12 @@ func (n *network) Disconnected(ctx context.Context, nodeID ids.NodeID) error { n.peers.Disconnected(nodeID) } + for _, connector := range n.connectors { + if err := connector.Disconnected(ctx, nodeID); err != nil { + return fmt.Errorf("failed to notify connector of disconnected peer %s: %w", nodeID, err) + } + } + return n.p2pNetwork.Disconnected(ctx, nodeID) } @@ -407,13 +428,6 @@ func (n *network) Shutdown() { n.closed.Set(true) // mark network as closed } -func (n *network) SetGossipHandler(handler message.GossipHandler) { - n.lock.Lock() - defer n.lock.Unlock() - - n.gossipHandler = handler -} - func (n *network) SetRequestHandler(handler message.RequestHandler) { n.lock.Lock() defer n.lock.Unlock() diff --git a/peer/network_test.go b/peer/network_test.go index c792cf9064..6f357be874 100644 --- a/peer/network_test.go +++ b/peer/network_test.go @@ -30,6 +30,10 @@ import ( "github.com/ava-labs/avalanchego/version" ) +const ( + codecVersion uint16 = 0 +) + var ( defaultPeerVersion = &version.Application{ Major: 1, @@ -46,9 +50,7 @@ var ( _ message.RequestHandler = &HelloGreetingRequestHandler{} _ message.RequestHandler = &testRequestHandler{} - _ common.AppSender = testAppSender{} - _ message.GossipMessage = HelloGossip{} - _ message.GossipHandler = &testGossipHandler{} + _ common.AppSender = testAppSender{} _ p2p.Handler = &testSDKHandler{} ) @@ -393,7 +395,7 @@ func TestRequestMinVersion(t *testing.T) { go func() { time.Sleep(200 * time.Millisecond) atomic.AddUint32(&callNum, 1) - responseBytes, err := codecManager.Marshal(message.Version, TestMessage{Message: "this is a response"}) + responseBytes, err := codecManager.Marshal(codecVersion, TestMessage{Message: "this is a response"}) if err != nil { panic(err) } @@ -503,7 +505,6 @@ func TestHandleInvalidMessages(t *testing.T) { p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") require.NoError(t, err) clientNetwork := NewNetwork(p2pNetwork, sender, codecManager, ids.EmptyNodeID, 1) - clientNetwork.SetGossipHandler(message.NoopMempoolGossipHandler{}) clientNetwork.SetRequestHandler(&testRequestHandler{}) assert.NoError(t, clientNetwork.Connected(context.Background(), nodeID, defaultPeerVersion)) @@ -511,7 +512,8 @@ func TestHandleInvalidMessages(t *testing.T) { defer clientNetwork.Shutdown() // Ensure a valid gossip message sent as any App specific message type does not trigger a fatal error - gossipMsg, err := 
buildGossip(codecManager, HelloGossip{Msg: "hello there!"}) + marshaller := helloGossipMarshaller{codec: codecManager} + gossipMsg, err := marshaller.MarshalGossip(&HelloGossip{Msg: "hello there!"}) assert.NoError(t, err) // Ensure a valid request message sent as any App specific message type does not trigger a fatal error @@ -552,7 +554,6 @@ func TestNetworkPropagatesRequestHandlerError(t *testing.T) { p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") require.NoError(t, err) clientNetwork := NewNetwork(p2pNetwork, sender, codecManager, ids.EmptyNodeID, 1) - clientNetwork.SetGossipHandler(message.NoopMempoolGossipHandler{}) clientNetwork.SetRequestHandler(&testRequestHandler{err: errors.New("fail")}) // Return an error from the request handler assert.NoError(t, clientNetwork.Connected(context.Background(), nodeID, defaultPeerVersion)) @@ -615,18 +616,14 @@ func buildCodec(t *testing.T, types ...interface{}) codec.Manager { for _, typ := range types { assert.NoError(t, c.RegisterType(typ)) } - assert.NoError(t, codecManager.RegisterCodec(message.Version, c)) + assert.NoError(t, codecManager.RegisterCodec(codecVersion, c)) return codecManager } // marshalStruct is a helper method used to marshal an object as `interface{}` // so that the codec is able to include the TypeID in the resulting bytes func marshalStruct(codec codec.Manager, obj interface{}) ([]byte, error) { - return codec.Marshal(message.Version, &obj) -} - -func buildGossip(codec codec.Manager, msg message.GossipMessage) ([]byte, error) { - return codec.Marshal(message.Version, &msg) + return codec.Marshal(codecVersion, &obj) } type testAppSender struct { @@ -696,11 +693,11 @@ type HelloGreetingRequestHandler struct { } func (h *HelloGreetingRequestHandler) HandleHelloRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, request *HelloRequest) ([]byte, error) { - return h.codec.Marshal(message.Version, HelloResponse{Response: "Hi"}) + return h.codec.Marshal(codecVersion, HelloResponse{Response: "Hi"}) } func (h *HelloGreetingRequestHandler) HandleGreetingRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, request *GreetingRequest) ([]byte, error) { - return h.codec.Marshal(message.Version, GreetingResponse{Greet: "Hey there"}) + return h.codec.Marshal(codecVersion, GreetingResponse{Greet: "Hey there"}) } type TestMessage struct { @@ -719,34 +716,22 @@ type HelloGossip struct { Msg string `serialize:"true"` } -func (h HelloGossip) Handle(handler message.GossipHandler, nodeID ids.NodeID) error { - return handler.HandleEthTxs(nodeID, message.EthTxsGossip{}) +func (tx *HelloGossip) GossipID() ids.ID { + return ids.FromStringOrPanic(tx.Msg) } -func (h HelloGossip) String() string { - return fmt.Sprintf("HelloGossip(%s)", h.Msg) -} - -func (h HelloGossip) Bytes() []byte { - // no op - return nil -} - -type testGossipHandler struct { - received bool - nodeID ids.NodeID +type helloGossipMarshaller struct { + codec codec.Manager } -func (t *testGossipHandler) HandleAtomicTx(nodeID ids.NodeID, msg message.AtomicTxGossip) error { - t.received = true - t.nodeID = nodeID - return nil +func (g helloGossipMarshaller) MarshalGossip(tx *HelloGossip) ([]byte, error) { + return g.codec.Marshal(0, tx) } -func (t *testGossipHandler) HandleEthTxs(nodeID ids.NodeID, msg message.EthTxsGossip) error { - t.received = true - t.nodeID = nodeID - return nil +func (g helloGossipMarshaller) UnmarshalGossip(bytes []byte) (*HelloGossip, error) { + h := &HelloGossip{} + _, err := 
g.codec.Unmarshal(bytes, h) + return h, err } type testRequestHandler struct { diff --git a/plugin/evm/admin.go b/plugin/evm/admin.go index 746b6d8250..bd3308e7ee 100644 --- a/plugin/evm/admin.go +++ b/plugin/evm/admin.go @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/utils/profiler" + "github.com/ava-labs/coreth/plugin/evm/client" "github.com/ava-labs/libevm/log" ) @@ -65,11 +66,7 @@ func (p *Admin) LockProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) err return p.profiler.LockProfile() } -type SetLogLevelArgs struct { - Level string `json:"level"` -} - -func (p *Admin) SetLogLevel(_ *http.Request, args *SetLogLevelArgs, reply *api.EmptyReply) error { +func (p *Admin) SetLogLevel(_ *http.Request, args *client.SetLogLevelArgs, reply *api.EmptyReply) error { log.Info("EVM: SetLogLevel called", "logLevel", args.Level) p.vm.ctx.Lock.Lock() @@ -81,11 +78,7 @@ func (p *Admin) SetLogLevel(_ *http.Request, args *SetLogLevelArgs, reply *api.E return nil } -type ConfigReply struct { - Config *Config `json:"config"` -} - -func (p *Admin) GetVMConfig(_ *http.Request, _ *struct{}, reply *ConfigReply) error { +func (p *Admin) GetVMConfig(_ *http.Request, _ *struct{}, reply *client.ConfigReply) error { reply.Config = &p.vm.config return nil } diff --git a/plugin/evm/service.go b/plugin/evm/api.go similarity index 79% rename from plugin/evm/service.go rename to plugin/evm/api.go index e043c10c77..d9a36a787f 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/api.go @@ -12,11 +12,12 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/client" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/common/hexutil" "github.com/ava-labs/libevm/log" @@ -91,24 +92,11 @@ func (service *AvaxAPI) Version(r *http.Request, _ *struct{}, reply *VersionRepl return nil } -// ExportKeyArgs are arguments for ExportKey -type ExportKeyArgs struct { - api.UserPass - Address string `json:"address"` -} - -// ExportKeyReply is the response for ExportKey -type ExportKeyReply struct { - // The decrypted PrivateKey for the Address provided in the arguments - PrivateKey *secp256k1.PrivateKey `json:"privateKey"` - PrivateKeyHex string `json:"privateKeyHex"` -} - // ExportKey returns a private key from the provided user -func (service *AvaxAPI) ExportKey(r *http.Request, args *ExportKeyArgs, reply *ExportKeyReply) error { +func (service *AvaxAPI) ExportKey(r *http.Request, args *client.ExportKeyArgs, reply *client.ExportKeyReply) error { log.Info("EVM: ExportKey called") - address, err := ParseEthAddress(args.Address) + address, err := client.ParseEthAddress(args.Address) if err != nil { return fmt.Errorf("couldn't parse %s to address: %s", args.Address, err) } @@ -131,21 +119,15 @@ func (service *AvaxAPI) ExportKey(r *http.Request, args *ExportKeyArgs, reply *E return nil } -// ImportKeyArgs are arguments for ImportKey -type ImportKeyArgs struct { - api.UserPass - PrivateKey *secp256k1.PrivateKey `json:"privateKey"` -} - // ImportKey adds a private key to the provided user -func (service *AvaxAPI) ImportKey(r *http.Request, args *ImportKeyArgs, reply *api.JSONAddress) error { +func (service *AvaxAPI) ImportKey(r 
*http.Request, args *client.ImportKeyArgs, reply *api.JSONAddress) error { log.Info("EVM: ImportKey called", "username", args.Username) if args.PrivateKey == nil { return errMissingPrivateKey } - reply.Address = GetEthAddress(args.PrivateKey).Hex() + reply.Address = args.PrivateKey.EthAddress().Hex() service.vm.ctx.Lock.Lock() defer service.vm.ctx.Lock.Unlock() @@ -163,28 +145,14 @@ func (service *AvaxAPI) ImportKey(r *http.Request, args *ImportKeyArgs, reply *a return nil } -// ImportArgs are arguments for passing into Import requests -type ImportArgs struct { - api.UserPass - - // Fee that should be used when creating the tx - BaseFee *hexutil.Big `json:"baseFee"` - - // Chain the funds are coming from - SourceChain string `json:"sourceChain"` - - // The address that will receive the imported funds - To common.Address `json:"to"` -} - // ImportAVAX is a deprecated name for Import. -func (service *AvaxAPI) ImportAVAX(_ *http.Request, args *ImportArgs, response *api.JSONTxID) error { +func (service *AvaxAPI) ImportAVAX(_ *http.Request, args *client.ImportArgs, response *api.JSONTxID) error { return service.Import(nil, args, response) } // Import issues a transaction to import AVAX from the X-chain. The AVAX // must have already been exported from the X-Chain. -func (service *AvaxAPI) Import(_ *http.Request, args *ImportArgs, response *api.JSONTxID) error { +func (service *AvaxAPI) Import(_ *http.Request, args *client.ImportArgs, response *api.JSONTxID) error { log.Info("EVM: ImportAVAX called") chainID, err := service.vm.ctx.BCLookup.Lookup(args.SourceChain) @@ -228,48 +196,22 @@ func (service *AvaxAPI) Import(_ *http.Request, args *ImportArgs, response *api. if err := service.vm.mempool.AddLocalTx(tx); err != nil { return err } - service.vm.atomicTxPushGossiper.Add(&GossipAtomicTx{tx}) + service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) return nil } -// ExportAVAXArgs are the arguments to ExportAVAX -type ExportAVAXArgs struct { - api.UserPass - - // Fee that should be used when creating the tx - BaseFee *hexutil.Big `json:"baseFee"` - - // Amount of asset to send - Amount json.Uint64 `json:"amount"` - - // Chain the funds are going to. Optional. Used if To address does not - // include the chainID. - TargetChain string `json:"targetChain"` - - // ID of the address that will receive the AVAX. This address may include - // the chainID, which is used to determine what the destination chain is. 
- To string `json:"to"` -} - // ExportAVAX exports AVAX from the C-Chain to the X-Chain // It must be imported on the X-Chain to complete the transfer -func (service *AvaxAPI) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, response *api.JSONTxID) error { - return service.Export(nil, &ExportArgs{ +func (service *AvaxAPI) ExportAVAX(_ *http.Request, args *client.ExportAVAXArgs, response *api.JSONTxID) error { + return service.Export(nil, &client.ExportArgs{ ExportAVAXArgs: *args, AssetID: service.vm.ctx.AVAXAssetID.String(), }, response) } -// ExportArgs are the arguments to Export -type ExportArgs struct { - ExportAVAXArgs - // AssetID of the tokens - AssetID string `json:"assetID"` -} - // Export exports an asset from the C-Chain to the X-Chain // It must be imported on the X-Chain to complete the transfer -func (service *AvaxAPI) Export(_ *http.Request, args *ExportArgs, response *api.JSONTxID) error { +func (service *AvaxAPI) Export(_ *http.Request, args *client.ExportArgs, response *api.JSONTxID) error { log.Info("EVM: Export called") assetID, err := service.parseAssetID(args.AssetID) @@ -338,7 +280,7 @@ func (service *AvaxAPI) Export(_ *http.Request, args *ExportArgs, response *api. if err := service.vm.mempool.AddLocalTx(tx); err != nil { return err } - service.vm.atomicTxPushGossiper.Add(&GossipAtomicTx{tx}) + service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) return nil } @@ -401,7 +343,7 @@ func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply reply.UTXOs = make([]string, len(utxos)) for i, utxo := range utxos { - b, err := service.vm.codec.Marshal(codecVersion, utxo) + b, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { return fmt.Errorf("problem marshalling UTXO: %w", err) } @@ -432,11 +374,11 @@ func (service *AvaxAPI) IssueTx(r *http.Request, args *api.FormattedTx, response return fmt.Errorf("problem decoding transaction: %w", err) } - tx := &Tx{} - if _, err := service.vm.codec.Unmarshal(txBytes, tx); err != nil { + tx := &atomic.Tx{} + if _, err := atomic.Codec.Unmarshal(txBytes, tx); err != nil { return fmt.Errorf("problem parsing transaction: %w", err) } - if err := tx.Sign(service.vm.codec, nil); err != nil { + if err := tx.Sign(atomic.Codec, nil); err != nil { return fmt.Errorf("problem initializing transaction: %w", err) } @@ -448,18 +390,12 @@ func (service *AvaxAPI) IssueTx(r *http.Request, args *api.FormattedTx, response if err := service.vm.mempool.AddLocalTx(tx); err != nil { return err } - service.vm.atomicTxPushGossiper.Add(&GossipAtomicTx{tx}) + service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) return nil } -// GetAtomicTxStatusReply defines the GetAtomicTxStatus replies returned from the API -type GetAtomicTxStatusReply struct { - Status Status `json:"status"` - BlockHeight *json.Uint64 `json:"blockHeight,omitempty"` -} - // GetAtomicTxStatus returns the status of the specified transaction -func (service *AvaxAPI) GetAtomicTxStatus(r *http.Request, args *api.JSONTxID, reply *GetAtomicTxStatusReply) error { +func (service *AvaxAPI) GetAtomicTxStatus(r *http.Request, args *api.JSONTxID, reply *client.GetAtomicTxStatusReply) error { log.Info("EVM: GetAtomicTxStatus called", "txID", args.TxID) if args.TxID == ids.Empty { @@ -472,13 +408,13 @@ func (service *AvaxAPI) GetAtomicTxStatus(r *http.Request, args *api.JSONTxID, r _, status, height, _ := service.vm.getAtomicTx(args.TxID) reply.Status = status - if status == Accepted { + if status == atomic.Accepted { // Since chain state 
updates run asynchronously with VM block acceptance, // avoid returning [Accepted] until the chain state reaches the block // containing the atomic tx. lastAccepted := service.vm.blockChain.LastAcceptedBlock() if height > lastAccepted.NumberU64() { - reply.Status = Processing + reply.Status = atomic.Processing return nil } @@ -509,7 +445,7 @@ func (service *AvaxAPI) GetAtomicTx(r *http.Request, args *api.GetTxArgs, reply return err } - if status == Unknown { + if status == atomic.Unknown { return fmt.Errorf("could not find tx %s", args.TxID) } @@ -519,7 +455,7 @@ func (service *AvaxAPI) GetAtomicTx(r *http.Request, args *api.GetTxArgs, reply } reply.Tx = txBytes reply.Encoding = args.Encoding - if status == Accepted { + if status == atomic.Accepted { // Since chain state updates run asynchronously with VM block acceptance, // avoid returning [Accepted] until the chain state reaches the block // containing the atomic tx. diff --git a/plugin/evm/codec.go b/plugin/evm/atomic/codec.go similarity index 91% rename from plugin/evm/codec.go rename to plugin/evm/atomic/codec.go index e4c38761e3..3376eeb049 100644 --- a/plugin/evm/codec.go +++ b/plugin/evm/atomic/codec.go @@ -1,9 +1,10 @@ // (c) 2019-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( + "errors" "fmt" "github.com/ava-labs/avalanchego/codec" @@ -12,8 +13,14 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -// Codec does serialization and deserialization -var Codec codec.Manager +const CodecVersion = uint16(0) + +var ( + // Codec does serialization and deserialization + Codec codec.Manager + + errMissingAtomicTxs = errors.New("cannot build a block with non-empty extra data and zero atomic transactions") +) func init() { Codec = codec.NewDefaultManager() @@ -35,7 +42,7 @@ func init() { lc.RegisterType(&secp256k1fx.Credential{}), lc.RegisterType(&secp256k1fx.Input{}), lc.RegisterType(&secp256k1fx.OutputOwners{}), - Codec.RegisterCodec(codecVersion, lc), + Codec.RegisterCodec(CodecVersion, lc), ) if errs.Errored() { panic(errs.Err) diff --git a/plugin/evm/export_tx.go b/plugin/evm/atomic/export_tx.go similarity index 57% rename from plugin/evm/export_tx.go rename to plugin/evm/atomic/export_tx.go index 79f98af435..0b092ee03a 100644 --- a/plugin/evm/export_tx.go +++ b/plugin/evm/atomic/export_tx.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "context" @@ -9,15 +9,13 @@ import ( "fmt" "math/big" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/params/extras" "github.com/holiman/uint256" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils" + avalancheutils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/math" @@ -31,10 +29,15 @@ import ( ) var ( - _ UnsignedAtomicTx = &UnsignedExportTx{} - _ secp256k1fx.UnsignedTx = &UnsignedExportTx{} - errExportNonAVAXInputBanff = errors.New("export input cannot contain non-AVAX in Banff") - errExportNonAVAXOutputBanff = errors.New("export output cannot contain non-AVAX in Banff") + _ UnsignedAtomicTx = &UnsignedExportTx{} + _ secp256k1fx.UnsignedTx = &UnsignedExportTx{} + ErrExportNonAVAXInputBanff = errors.New("export input cannot contain non-AVAX in Banff") + ErrExportNonAVAXOutputBanff = errors.New("export output cannot contain non-AVAX in Banff") + ErrNoExportOutputs = errors.New("tx has no export outputs") + errPublicKeySignatureMismatch = errors.New("signature doesn't match public key") + errOverflowExport = errors.New("overflow when computing export amount + txFee") + errInsufficientFunds = errors.New("insufficient funds") + errInvalidNonce = errors.New("invalid nonce") ) // UnsignedExportTx is an unsigned ExportTx @@ -74,13 +77,13 @@ func (utx *UnsignedExportTx) Verify( ) error { switch { case utx == nil: - return errNilTx + return ErrNilTx case len(utx.ExportedOutputs) == 0: - return errNoExportOutputs + return ErrNoExportOutputs case utx.NetworkID != ctx.NetworkID: - return errWrongNetworkID + return ErrWrongNetworkID case ctx.ChainID != utx.BlockchainID: - return errWrongBlockchainID + return ErrWrongChainID } // Make sure that the tx has a valid peer chain ID @@ -88,11 +91,11 @@ func (utx *UnsignedExportTx) Verify( // Note that SameSubnet verifies that [tx.DestinationChain] isn't this // chain's ID if err := verify.SameSubnet(context.TODO(), ctx, utx.DestinationChain); err != nil { - return errWrongChainID + return ErrWrongChainID } } else { if utx.DestinationChain != ctx.XChainID { - return errWrongChainID + return ErrWrongChainID } } @@ -101,7 +104,7 @@ func (utx *UnsignedExportTx) Verify( return err } if rules.IsBanff && in.AssetID != ctx.AVAXAssetID { - return errExportNonAVAXInputBanff + return ErrExportNonAVAXInputBanff } } @@ -111,17 +114,17 @@ func (utx *UnsignedExportTx) Verify( } assetID := out.AssetID() if assetID != ctx.AVAXAssetID && utx.DestinationChain == constants.PlatformChainID { - return errWrongChainID + return ErrWrongChainID } if rules.IsBanff && assetID != ctx.AVAXAssetID { - return errExportNonAVAXOutputBanff + return ErrExportNonAVAXOutputBanff } } if !avax.IsSortedTransferableOutputs(utx.ExportedOutputs, Codec) { - return errOutputsNotSorted + return ErrOutputsNotSorted } - if rules.IsApricotPhase1 && !utils.IsSortedAndUnique(utx.Ins) { - return errInputsNotSortedUnique + if rules.IsApricotPhase1 && !avalancheutils.IsSortedAndUnique(utx.Ins) { + return ErrInputsNotSortedUnique } return nil @@ -139,7 +142,7 @@ func (utx *UnsignedExportTx) GasUsed(fixedFee bool) (uint64, error) { return 0, err } if fixedFee { - cost, err = math.Add64(cost, params.AtomicTxBaseCost) + cost, err = math.Add64(cost, AtomicTxBaseCost) if 
err != nil { return 0, err } @@ -177,13 +180,14 @@ func (utx *UnsignedExportTx) Burned(assetID ids.ID) (uint64, error) { // SemanticVerify this transaction is valid. func (utx *UnsignedExportTx) SemanticVerify( - vm *VM, + backend *Backend, stx *Tx, - _ *Block, + parent AtomicBlockContext, baseFee *big.Int, - rules extras.Rules, ) error { - if err := utx.Verify(vm.ctx, rules); err != nil { + ctx := backend.Ctx + rules := backend.Rules + if err := utx.Verify(ctx, rules); err != nil { return err } @@ -200,10 +204,10 @@ func (utx *UnsignedExportTx) SemanticVerify( if err != nil { return err } - fc.Produce(vm.ctx.AVAXAssetID, txFee) + fc.Produce(ctx.AVAXAssetID, txFee) // Apply fees to export transactions before Apricot Phase 3 default: - fc.Produce(vm.ctx.AVAXAssetID, params.AvalancheAtomicTxFee) + fc.Produce(ctx.AVAXAssetID, AvalancheAtomicTxFee) } for _, out := range utx.ExportedOutputs { fc.Produce(out.AssetID(), out.Output().Amount()) @@ -232,11 +236,11 @@ func (utx *UnsignedExportTx) SemanticVerify( if len(cred.Sigs) != 1 { return fmt.Errorf("expected one signature for EVM Input Credential, but found: %d", len(cred.Sigs)) } - pubKey, err := vm.secpCache.RecoverPublicKey(utx.Bytes(), cred.Sigs[0][:]) + pubKey, err := backend.SecpCache.RecoverPublicKey(utx.Bytes(), cred.Sigs[0][:]) if err != nil { return err } - if input.Address != PublicKeyToEthAddress(pubKey) { + if input.Address != pubKey.EthAddress() { return errPublicKeySignatureMismatch } } @@ -259,7 +263,7 @@ func (utx *UnsignedExportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { Out: out.Out, } - utxoBytes, err := Codec.Marshal(codecVersion, utxo) + utxoBytes, err := Codec.Marshal(CodecVersion, utxo) if err != nil { return ids.ID{}, nil, err } @@ -278,8 +282,11 @@ func (utx *UnsignedExportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { return utx.DestinationChain, &atomic.Requests{PutRequests: elems}, nil } -// newExportTx returns a new ExportTx -func (vm *VM) newExportTx( +// NewExportTx returns a new ExportTx +func NewExportTx( + ctx *snow.Context, + rules extras.Rules, + state StateDB, assetID ids.ID, // AssetID of the tokens to export amount uint64, // Amount of tokens to export chainID ids.ID, // Chain to send the UTXOs to @@ -307,8 +314,8 @@ func (vm *VM) newExportTx( ) // consume non-AVAX - if assetID != vm.ctx.AVAXAssetID { - ins, signers, err = vm.GetSpendableFunds(keys, assetID, amount) + if assetID != ctx.AVAXAssetID { + ins, signers, err = GetSpendableFunds(ctx, state, keys, assetID, amount) if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/signers: %w", err) } @@ -316,18 +323,17 @@ func (vm *VM) newExportTx( avaxNeeded = amount } - rules := vm.currentRules() switch { case rules.IsApricotPhase3: utx := &UnsignedExportTx{ - NetworkID: vm.ctx.NetworkID, - BlockchainID: vm.ctx.ChainID, + NetworkID: ctx.NetworkID, + BlockchainID: ctx.ChainID, DestinationChain: chainID, Ins: ins, ExportedOutputs: outs, } tx := &Tx{UnsignedAtomicTx: utx} - if err := tx.Sign(vm.codec, nil); err != nil { + if err := tx.Sign(Codec, nil); err != nil { return nil, err } @@ -337,14 +343,14 @@ func (vm *VM) newExportTx( return nil, err } - avaxIns, avaxSigners, err = vm.GetSpendableAVAXWithFee(keys, avaxNeeded, cost, baseFee) + avaxIns, avaxSigners, err = GetSpendableAVAXWithFee(ctx, state, keys, avaxNeeded, cost, baseFee) default: var newAvaxNeeded uint64 - newAvaxNeeded, err = math.Add64(avaxNeeded, params.AvalancheAtomicTxFee) + newAvaxNeeded, err = math.Add64(avaxNeeded, AvalancheAtomicTxFee) if err != nil { return 
nil, errOverflowExport } - avaxIns, avaxSigners, err = vm.GetSpendableFunds(keys, vm.ctx.AVAXAssetID, newAvaxNeeded) + avaxIns, avaxSigners, err = GetSpendableFunds(ctx, state, keys, ctx.AVAXAssetID, newAvaxNeeded) } if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/signers: %w", err) @@ -352,26 +358,26 @@ func (vm *VM) newExportTx( ins = append(ins, avaxIns...) signers = append(signers, avaxSigners...) - avax.SortTransferableOutputs(outs, vm.codec) + avax.SortTransferableOutputs(outs, Codec) SortEVMInputsAndSigners(ins, signers) // Create the transaction utx := &UnsignedExportTx{ - NetworkID: vm.ctx.NetworkID, - BlockchainID: vm.ctx.ChainID, + NetworkID: ctx.NetworkID, + BlockchainID: ctx.ChainID, DestinationChain: chainID, Ins: ins, ExportedOutputs: outs, } tx := &Tx{UnsignedAtomicTx: utx} - if err := tx.Sign(vm.codec, signers); err != nil { + if err := tx.Sign(Codec, signers); err != nil { return nil, err } - return tx, utx.Verify(vm.ctx, vm.currentRules()) + return tx, utx.Verify(ctx, rules) } // EVMStateTransfer executes the state update from the atomic export transaction -func (utx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state *state.StateDB) error { +func (utx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state StateDB) error { addrs := map[[20]byte]uint64{} for _, from := range utx.Ins { if from.AssetID == ctx.AVAXAssetID { @@ -380,7 +386,7 @@ func (utx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state *state.St // denomination before export. amount := new(uint256.Int).Mul( uint256.NewInt(from.Amount), - uint256.NewInt(x2cRate.Uint64()), + uint256.NewInt(X2CRate.Uint64()), ) if state.GetBalance(from.Address).Cmp(amount) < 0 { return errInsufficientFunds @@ -404,3 +410,151 @@ func (utx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state *state.St } return nil } + +// GetSpendableFunds returns a list of EVMInputs and keys (in corresponding +// order) to total [amount] of [assetID] owned by [keys]. +// Note: we return [][]*secp256k1.PrivateKey even though each input +// corresponds to a single key, so that the signers can be passed in to +// [tx.Sign] which supports multiple keys on a single input. +func GetSpendableFunds( + ctx *snow.Context, + state StateDB, + keys []*secp256k1.PrivateKey, + assetID ids.ID, + amount uint64, +) ([]EVMInput, [][]*secp256k1.PrivateKey, error) { + inputs := []EVMInput{} + signers := [][]*secp256k1.PrivateKey{} + // Note: we assume that each key in [keys] is unique, so that iterating over + // the keys will not produce duplicated nonces in the returned EVMInput slice. + for _, key := range keys { + if amount == 0 { + break + } + addr := key.EthAddress() + var balance uint64 + if assetID == ctx.AVAXAssetID { + // If the asset is AVAX, we divide by the x2cRate to convert back to the correct + // denomination of AVAX that can be exported. 
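+ // For example: a C-Chain balance of 1 AVAX is 10^18 wei; dividing by
+ // X2CRate (10^9) yields 10^9 nAVAX, the 9-decimal denomination used on
+ // the X and P chains.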
+ balance = new(uint256.Int).Div(state.GetBalance(addr), X2CRate).Uint64() + } else { + balance = state.GetBalanceMultiCoin(addr, common.Hash(assetID)).Uint64() + } + if balance == 0 { + continue + } + if amount < balance { + balance = amount + } + nonce := state.GetNonce(addr) + + inputs = append(inputs, EVMInput{ + Address: addr, + Amount: balance, + AssetID: assetID, + Nonce: nonce, + }) + signers = append(signers, []*secp256k1.PrivateKey{key}) + amount -= balance + } + + if amount > 0 { + return nil, nil, errInsufficientFunds + } + + return inputs, signers, nil +} + +// GetSpendableAVAXWithFee returns a list of EVMInputs and keys (in corresponding +// order) to total [amount] + [fee] of [AVAX] owned by [keys]. +// This function accounts for the added cost of the additional inputs needed to +// create the transaction and makes sure to skip any keys with a balance that is +// insufficient to cover the additional fee. +// Note: we return [][]*secp256k1.PrivateKey even though each input +// corresponds to a single key, so that the signers can be passed in to +// [tx.Sign] which supports multiple keys on a single input. +func GetSpendableAVAXWithFee( + ctx *snow.Context, + state StateDB, + keys []*secp256k1.PrivateKey, + amount uint64, + cost uint64, + baseFee *big.Int, +) ([]EVMInput, [][]*secp256k1.PrivateKey, error) { + initialFee, err := CalculateDynamicFee(cost, baseFee) + if err != nil { + return nil, nil, err + } + + newAmount, err := math.Add64(amount, initialFee) + if err != nil { + return nil, nil, err + } + amount = newAmount + + inputs := []EVMInput{} + signers := [][]*secp256k1.PrivateKey{} + // Note: we assume that each key in [keys] is unique, so that iterating over + // the keys will not produce duplicated nonces in the returned EVMInput slice. + for _, key := range keys { + if amount == 0 { + break + } + + prevFee, err := CalculateDynamicFee(cost, baseFee) + if err != nil { + return nil, nil, err + } + + newCost := cost + EVMInputGas + newFee, err := CalculateDynamicFee(newCost, baseFee) + if err != nil { + return nil, nil, err + } + + additionalFee := newFee - prevFee + + addr := key.EthAddress() + // Since the asset is AVAX, we divide by the x2cRate to convert back to + // the correct denomination of AVAX that can be exported. + balance := new(uint256.Int).Div(state.GetBalance(addr), X2CRate).Uint64() + // If the balance for [addr] is insufficient to cover the additional cost + // of adding an input to the transaction, skip adding the input altogether + if balance <= additionalFee { + continue + } + + // Update the cost for the next iteration + cost = newCost + + newAmount, err := math.Add64(amount, additionalFee) + if err != nil { + return nil, nil, err + } + amount = newAmount + + // Use the entire [balance] as an input, but if the required [amount] + // is less than the balance, update the [inputAmount] to spend the + // minimum amount to finish the transaction. 
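+ // Worked example: with balance=10 and amount=4 still required, inputAmount
+ // below is capped at 4 and amount drops to 0; with balance=3 and amount=4,
+ // all 3 are consumed and amount becomes 1 for the next key.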
+ inputAmount := balance + if amount < balance { + inputAmount = amount + } + nonce := state.GetNonce(addr) + + inputs = append(inputs, EVMInput{ + Address: addr, + Amount: inputAmount, + AssetID: ctx.AVAXAssetID, + Nonce: nonce, + }) + signers = append(signers, []*secp256k1.PrivateKey{key}) + amount -= inputAmount + } + + if amount > 0 { + return nil, nil, errInsufficientFunds + } + + return inputs, signers, nil +} diff --git a/plugin/evm/atomic/gossip.go b/plugin/evm/atomic/gossip.go new file mode 100644 index 0000000000..2c6cb35da2 --- /dev/null +++ b/plugin/evm/atomic/gossip.go @@ -0,0 +1,35 @@ +// (c) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package atomic + +import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/gossip" +) + +var ( + _ gossip.Gossipable = (*GossipAtomicTx)(nil) + _ gossip.Marshaller[*GossipAtomicTx] = (*GossipAtomicTxMarshaller)(nil) +) + +type GossipAtomicTxMarshaller struct{} + +func (g GossipAtomicTxMarshaller) MarshalGossip(tx *GossipAtomicTx) ([]byte, error) { + return tx.Tx.SignedBytes(), nil +} + +func (g GossipAtomicTxMarshaller) UnmarshalGossip(bytes []byte) (*GossipAtomicTx, error) { + tx, err := ExtractAtomicTx(bytes, Codec) + return &GossipAtomicTx{ + Tx: tx, + }, err +} + +type GossipAtomicTx struct { + Tx *Tx +} + +func (tx *GossipAtomicTx) GossipID() ids.ID { + return tx.Tx.ID() +} diff --git a/plugin/evm/atomic/gossip_test.go b/plugin/evm/atomic/gossip_test.go new file mode 100644 index 0000000000..5140f6bc7e --- /dev/null +++ b/plugin/evm/atomic/gossip_test.go @@ -0,0 +1,117 @@ +// (c) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package atomic + +import ( + "testing" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" +) + +func TestGossipAtomicTxMarshaller(t *testing.T) { + require := require.New(t) + + want := &GossipAtomicTx{ + Tx: &Tx{ + UnsignedAtomicTx: &UnsignedImportTx{}, + Creds: []verify.Verifiable{}, + }, + } + marshaller := GossipAtomicTxMarshaller{} + + key0, err := secp256k1.NewPrivateKey() + require.NoError(err) + err = want.Tx.Sign(Codec, [][]*secp256k1.PrivateKey{{key0}}) + require.NoError(err) + + bytes, err := marshaller.MarshalGossip(want) + require.NoError(err) + + got, err := marshaller.UnmarshalGossip(bytes) + require.NoError(err) + require.Equal(want.GossipID(), got.GossipID()) +} + +func TestAtomicMempoolIterate(t *testing.T) { + txs := []*GossipAtomicTx{ + { + Tx: &Tx{ + UnsignedAtomicTx: &TestUnsignedTx{ + IDV: ids.GenerateTestID(), + }, + }, + }, + { + Tx: &Tx{ + UnsignedAtomicTx: &TestUnsignedTx{ + IDV: ids.GenerateTestID(), + }, + }, + }, + } + + tests := []struct { + name string + add []*GossipAtomicTx + f func(tx *GossipAtomicTx) bool + expectedTxs []*GossipAtomicTx + }{ + { + name: "func matches nothing", + add: txs, + f: func(*GossipAtomicTx) bool { + return false + }, + expectedTxs: []*GossipAtomicTx{}, + }, + { + name: "func matches all", + add: txs, + f: func(*GossipAtomicTx) bool { + return true + }, + expectedTxs: txs, + }, + { + name: "func matches subset", + add: txs, + f: func(tx *GossipAtomicTx) bool { + return tx.Tx == txs[0].Tx + }, + expectedTxs: []*GossipAtomicTx{txs[0]}, + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 10, nil) + require.NoError(err) + + for _, add := range tt.add { + require.NoError(m.Add(add)) + } + + matches := make([]*GossipAtomicTx, 0) + f := func(tx *GossipAtomicTx) bool { + match := tt.f(tx) + + if match { + matches = append(matches, tx) + } + + return match + } + + m.Iterate(f) + + require.ElementsMatch(tt.expectedTxs, matches) + }) + } +} diff --git a/plugin/evm/import_tx.go b/plugin/evm/atomic/import_tx.go similarity index 69% rename from plugin/evm/import_tx.go rename to plugin/evm/atomic/import_tx.go index c896795706..2d98e53fbb 100644 --- a/plugin/evm/import_tx.go +++ b/plugin/evm/atomic/import_tx.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "context" @@ -10,8 +10,6 @@ import ( "math/big" "slices" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/params/extras" "github.com/holiman/uint256" @@ -32,8 +30,19 @@ import ( var ( _ UnsignedAtomicTx = &UnsignedImportTx{} _ secp256k1fx.UnsignedTx = &UnsignedImportTx{} - errImportNonAVAXInputBanff = errors.New("import input cannot contain non-AVAX in Banff") - errImportNonAVAXOutputBanff = errors.New("import output cannot contain non-AVAX in Banff") + ErrImportNonAVAXInputBanff = errors.New("import input cannot contain non-AVAX in Banff") + ErrImportNonAVAXOutputBanff = errors.New("import output cannot contain non-AVAX in Banff") + ErrNoImportInputs = errors.New("tx has no imported inputs") + ErrConflictingAtomicInputs = errors.New("invalid block due to conflicting atomic inputs") + ErrWrongChainID = errors.New("tx has wrong chain ID") + ErrNoEVMOutputs = errors.New("tx has no EVM outputs") + ErrInputsNotSortedUnique = errors.New("inputs not sorted and unique") + ErrOutputsNotSortedUnique = errors.New("outputs not sorted and unique") + ErrOutputsNotSorted = errors.New("tx outputs not sorted") + ErrAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") + errNilBaseFeeApricotPhase3 = errors.New("nil base fee is invalid after apricotPhase3") + errInsufficientFundsForFee = errors.New("insufficient AVAX funds to pay transaction fee") + errRejectedParent = errors.New("rejected parent") ) // UnsignedImportTx is an unsigned ImportTx @@ -67,15 +76,15 @@ func (utx *UnsignedImportTx) Verify( ) error { switch { case utx == nil: - return errNilTx + return ErrNilTx case len(utx.ImportedInputs) == 0: - return errNoImportInputs + return ErrNoImportInputs case utx.NetworkID != ctx.NetworkID: - return errWrongNetworkID + return ErrWrongNetworkID case ctx.ChainID != utx.BlockchainID: - return errWrongBlockchainID + return ErrWrongChainID case rules.IsApricotPhase3 && len(utx.Outs) == 0: - return errNoEVMOutputs + return ErrNoEVMOutputs } // Make sure that the tx has a valid peer chain ID @@ -83,11 +92,11 @@ func (utx *UnsignedImportTx) Verify( // Note that SameSubnet verifies that [tx.SourceChain] isn't this // chain's ID if err := verify.SameSubnet(context.TODO(), ctx, utx.SourceChain); err != nil { - return errWrongChainID + return ErrWrongChainID } } else { if utx.SourceChain != ctx.XChainID { - return errWrongChainID + return ErrWrongChainID } } @@ -96,7 +105,7 @@ func (utx *UnsignedImportTx) Verify( return fmt.Errorf("EVM Output failed verification: %w", err) } if rules.IsBanff && out.AssetID != ctx.AVAXAssetID { - return 
errImportNonAVAXOutputBanff + return ErrImportNonAVAXOutputBanff } } @@ -105,20 +114,20 @@ func (utx *UnsignedImportTx) Verify( return fmt.Errorf("atomic input failed verification: %w", err) } if rules.IsBanff && in.AssetID() != ctx.AVAXAssetID { - return errImportNonAVAXInputBanff + return ErrImportNonAVAXInputBanff } } if !utils.IsSortedAndUnique(utx.ImportedInputs) { - return errInputsNotSortedUnique + return ErrInputsNotSortedUnique } if rules.IsApricotPhase2 { if !utils.IsSortedAndUnique(utx.Outs) { - return errOutputsNotSortedUnique + return ErrOutputsNotSortedUnique } } else if rules.IsApricotPhase1 { if !slices.IsSortedFunc(utx.Outs, EVMOutput.Compare) { - return errOutputsNotSorted + return ErrOutputsNotSorted } } @@ -141,7 +150,7 @@ func (utx *UnsignedImportTx) GasUsed(fixedFee bool) (uint64, error) { } } if fixedFee { - cost, err = math.Add64(cost, params.AtomicTxBaseCost) + cost, err = math.Add64(cost, AtomicTxBaseCost) if err != nil { return 0, err } @@ -178,13 +187,14 @@ func (utx *UnsignedImportTx) Burned(assetID ids.ID) (uint64, error) { // SemanticVerify this transaction is valid. func (utx *UnsignedImportTx) SemanticVerify( - vm *VM, + backend *Backend, stx *Tx, - parent *Block, + parent AtomicBlockContext, baseFee *big.Int, - rules extras.Rules, ) error { - if err := utx.Verify(vm.ctx, rules); err != nil { + ctx := backend.Ctx + rules := backend.Rules + if err := utx.Verify(ctx, rules); err != nil { return err } @@ -201,11 +211,11 @@ func (utx *UnsignedImportTx) SemanticVerify( if err != nil { return err } - fc.Produce(vm.ctx.AVAXAssetID, txFee) + fc.Produce(ctx.AVAXAssetID, txFee) // Apply fees to import transactions as of Apricot Phase 2 case rules.IsApricotPhase2: - fc.Produce(vm.ctx.AVAXAssetID, params.AvalancheAtomicTxFee) + fc.Produce(ctx.AVAXAssetID, AvalancheAtomicTxFee) } for _, out := range utx.Outs { fc.Produce(out.AssetID, out.Amount) @@ -222,7 +232,7 @@ func (utx *UnsignedImportTx) SemanticVerify( return fmt.Errorf("import tx contained mismatched number of inputs/credentials (%d vs. 
%d)", len(utx.ImportedInputs), len(stx.Creds)) } - if !vm.bootstrapped { + if !backend.Bootstrapped { // Allow for force committing during bootstrapping return nil } @@ -233,7 +243,7 @@ func (utx *UnsignedImportTx) SemanticVerify( utxoIDs[i] = inputID[:] } // allUTXOBytes is guaranteed to be the same length as utxoIDs - allUTXOBytes, err := vm.ctx.SharedMemory.Get(utx.SourceChain, utxoIDs) + allUTXOBytes, err := ctx.SharedMemory.Get(utx.SourceChain, utxoIDs) if err != nil { return fmt.Errorf("failed to fetch import UTXOs from %s due to: %w", utx.SourceChain, err) } @@ -242,7 +252,7 @@ func (utx *UnsignedImportTx) SemanticVerify( utxoBytes := allUTXOBytes[i] utxo := &avax.UTXO{} - if _, err := vm.codec.Unmarshal(utxoBytes, utxo); err != nil { + if _, err := Codec.Unmarshal(utxoBytes, utxo); err != nil { return fmt.Errorf("failed to unmarshal UTXO: %w", err) } @@ -251,15 +261,15 @@ func (utx *UnsignedImportTx) SemanticVerify( utxoAssetID := utxo.AssetID() inAssetID := in.AssetID() if utxoAssetID != inAssetID { - return errAssetIDMismatch + return ErrAssetIDMismatch } - if err := vm.fx.VerifyTransfer(utx, in.In, cred, utxo.Out); err != nil { + if err := backend.Fx.VerifyTransfer(utx, in.In, cred, utxo.Out); err != nil { return fmt.Errorf("import tx transfer failed verification: %w", err) } } - return vm.conflicts(utx.InputUTXOs(), parent) + return conflicts(backend, utx.InputUTXOs(), parent) } // AtomicOps returns imported inputs spent on this transaction @@ -276,28 +286,11 @@ func (utx *UnsignedImportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { return utx.SourceChain, &atomic.Requests{RemoveRequests: utxoIDs}, nil } -// newImportTx returns a new ImportTx -func (vm *VM) newImportTx( - chainID ids.ID, // chain to import from - to common.Address, // Address of recipient - baseFee *big.Int, // fee to use post-AP3 - keys []*secp256k1.PrivateKey, // Keys to import the funds -) (*Tx, error) { - kc := secp256k1fx.NewKeychain() - for _, key := range keys { - kc.Add(key) - } - - atomicUTXOs, _, _, err := vm.GetAtomicUTXOs(chainID, kc.Addresses(), ids.ShortEmpty, ids.Empty, -1) - if err != nil { - return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) - } - - return vm.newImportTxWithUTXOs(chainID, to, baseFee, kc, atomicUTXOs) -} - -// newImportTx returns a new ImportTx -func (vm *VM) newImportTxWithUTXOs( +// NewImportTx returns a new ImportTx +func NewImportTx( + ctx *snow.Context, + rules extras.Rules, + time uint64, chainID ids.ID, // chain to import from to common.Address, // Address of recipient baseFee *big.Int, // fee to use post-AP3 @@ -308,9 +301,8 @@ func (vm *VM) newImportTxWithUTXOs( signers := [][]*secp256k1.PrivateKey{} importedAmount := make(map[ids.ID]uint64) - now := vm.clock.Unix() for _, utxo := range atomicUTXOs { - inputIntf, utxoSigners, err := kc.Spend(utxo.Out, now) + inputIntf, utxoSigners, err := kc.Spend(utxo.Out, time) if err != nil { continue } @@ -331,7 +323,7 @@ func (vm *VM) newImportTxWithUTXOs( signers = append(signers, utxoSigners) } avax.SortTransferableInputsWithSigners(importedInputs, signers) - importedAVAXAmount := importedAmount[vm.ctx.AVAXAssetID] + importedAVAXAmount := importedAmount[ctx.AVAXAssetID] outs := make([]EVMOutput, 0, len(importedAmount)) // This will create unique outputs (in the context of sorting) @@ -339,7 +331,7 @@ func (vm *VM) newImportTxWithUTXOs( for assetID, amount := range importedAmount { // Skip the AVAX amount since it is included separately to account for // the fee - if assetID == vm.ctx.AVAXAssetID || amount == 
0 { + if assetID == ctx.AVAXAssetID || amount == 0 { continue } outs = append(outs, EVMOutput{ @@ -349,8 +341,6 @@ func (vm *VM) newImportTxWithUTXOs( }) } - rules := vm.currentRules() - var ( txFeeWithoutChange uint64 txFeeWithChange uint64 @@ -361,14 +351,14 @@ func (vm *VM) newImportTxWithUTXOs( return nil, errNilBaseFeeApricotPhase3 } utx := &UnsignedImportTx{ - NetworkID: vm.ctx.NetworkID, - BlockchainID: vm.ctx.ChainID, + NetworkID: ctx.NetworkID, + BlockchainID: ctx.ChainID, Outs: outs, ImportedInputs: importedInputs, SourceChain: chainID, } tx := &Tx{UnsignedAtomicTx: utx} - if err := tx.Sign(vm.codec, nil); err != nil { + if err := tx.Sign(Codec, nil); err != nil { return nil, err } @@ -387,8 +377,8 @@ func (vm *VM) newImportTxWithUTXOs( return nil, err } case rules.IsApricotPhase2: - txFeeWithoutChange = params.AvalancheAtomicTxFee - txFeeWithChange = params.AvalancheAtomicTxFee + txFeeWithoutChange = AvalancheAtomicTxFee + txFeeWithChange = AvalancheAtomicTxFee } // AVAX output @@ -400,7 +390,7 @@ func (vm *VM) newImportTxWithUTXOs( outs = append(outs, EVMOutput{ Address: to, Amount: importedAVAXAmount - txFeeWithChange, - AssetID: vm.ctx.AVAXAssetID, + AssetID: ctx.AVAXAssetID, }) } @@ -408,35 +398,35 @@ func (vm *VM) newImportTxWithUTXOs( // Note: this can happen if there is exactly enough AVAX to pay the // transaction fee, but no other funds to be imported. if len(outs) == 0 { - return nil, errNoEVMOutputs + return nil, ErrNoEVMOutputs } utils.Sort(outs) // Create the transaction utx := &UnsignedImportTx{ - NetworkID: vm.ctx.NetworkID, - BlockchainID: vm.ctx.ChainID, + NetworkID: ctx.NetworkID, + BlockchainID: ctx.ChainID, Outs: outs, ImportedInputs: importedInputs, SourceChain: chainID, } tx := &Tx{UnsignedAtomicTx: utx} - if err := tx.Sign(vm.codec, signers); err != nil { + if err := tx.Sign(Codec, signers); err != nil { return nil, err } - return tx, utx.Verify(vm.ctx, vm.currentRules()) + return tx, utx.Verify(ctx, rules) } // EVMStateTransfer performs the state transfer to increase the balances of // accounts accordingly with the imported EVMOutputs -func (utx *UnsignedImportTx) EVMStateTransfer(ctx *snow.Context, state *state.StateDB) error { +func (utx *UnsignedImportTx) EVMStateTransfer(ctx *snow.Context, state StateDB) error { for _, to := range utx.Outs { if to.AssetID == ctx.AVAXAssetID { log.Debug("import_tx", "src", utx.SourceChain, "addr", to.Address, "amount", to.Amount, "assetID", "AVAX") // If the asset is AVAX, convert the input amount in nAVAX to gWei by // multiplying by the x2c rate. - amount := new(uint256.Int).Mul(uint256.NewInt(to.Amount), x2cRate) + amount := new(uint256.Int).Mul(uint256.NewInt(to.Amount), X2CRate) state.AddBalance(to.Address, amount) } else { log.Debug("import_tx", "src", utx.SourceChain, "addr", to.Address, "amount", to.Amount, "assetID", to.AssetID) @@ -446,3 +436,43 @@ func (utx *UnsignedImportTx) EVMStateTransfer(ctx *snow.Context, state *state.St } return nil } + +// conflicts returns an error if [inputs] conflicts with any of the atomic inputs contained in [ancestor] +// or any of its ancestor blocks going back to the last accepted block in its ancestry. If [ancestor] is +// accepted, then nil will be returned immediately. +// If the ancestry of [ancestor] cannot be fetched, then [errRejectedParent] may be returned. 
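+// For example, if [ancestor] is at height H and the last accepted block is at
+// height L, the loop below inspects the atomic transactions of each ancestor
+// at heights H, H-1, ..., L+1 before concluding that [inputs] is conflict-free.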
+func conflicts(backend *Backend, inputs set.Set[ids.ID], ancestor AtomicBlockContext) error { + fetcher := backend.BlockFetcher + lastAcceptedBlock := fetcher.LastAcceptedBlockInternal() + lastAcceptedHeight := lastAcceptedBlock.Height() + for ancestor.Height() > lastAcceptedHeight { + // If any of the atomic transactions in the ancestor conflict with [inputs] + // return an error. + for _, atomicTx := range ancestor.AtomicTxs() { + if inputs.Overlaps(atomicTx.InputUTXOs()) { + return ErrConflictingAtomicInputs + } + } + + // Move up the chain. + nextAncestorID := ancestor.Parent() + // If the ancestor is unknown, then the parent failed + // verification when it was called. + // If the ancestor is rejected, then this block shouldn't be + // inserted into the canonical chain because the parent + // will be missing. + // If the ancestor is processing, then the block may have + // been verified. + nextAncestorIntf, err := fetcher.GetBlockInternal(context.TODO(), nextAncestorID) + if err != nil { + return errRejectedParent + } + nextAncestor, ok := nextAncestorIntf.(AtomicBlockContext) + if !ok { + return fmt.Errorf("ancestor block %s had unexpected type %T", nextAncestorID, nextAncestorIntf) + } + ancestor = nextAncestor + } + + return nil +} diff --git a/plugin/evm/mempool.go b/plugin/evm/atomic/mempool.go similarity index 92% rename from plugin/evm/mempool.go rename to plugin/evm/atomic/mempool.go index 92a95fa3e0..30b5f511c6 100644 --- a/plugin/evm/mempool.go +++ b/plugin/evm/atomic/mempool.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "errors" @@ -15,6 +15,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/libevm/log" ) @@ -23,8 +24,11 @@ const ( ) var ( - errTxAlreadyKnown = errors.New("tx already known") - errNoGasUsed = errors.New("no gas used") + errTxAlreadyKnown = errors.New("tx already known") + errNoGasUsed = errors.New("no gas used") + ErrConflictingAtomicTx = errors.New("conflicting atomic tx present") + ErrInsufficientAtomicTxFee = errors.New("atomic tx fee too low for atomic mempool") + ErrTooManyAtomicTx = errors.New("too many atomic tx") _ gossip.Set[*GossipAtomicTx] = (*Mempool)(nil) ) @@ -82,7 +86,11 @@ type Mempool struct { // NewMempool returns a Mempool with [maxSize] func NewMempool(ctx *snow.Context, registerer prometheus.Registerer, maxSize int, verify func(tx *Tx) error) (*Mempool, error) { - bloom, err := gossip.NewBloomFilter(registerer, "atomic_mempool_bloom_filter", txGossipBloomMinTargetElements, txGossipBloomTargetFalsePositiveRate, txGossipBloomResetFalsePositiveRate) + bloom, err := gossip.NewBloomFilter(registerer, "atomic_mempool_bloom_filter", + config.TxGossipBloomMinTargetElements, + config.TxGossipBloomTargetFalsePositiveRate, + config.TxGossipBloomResetFalsePositiveRate, + ) if err != nil { return nil, fmt.Errorf("failed to initialize bloom filter: %w", err) } @@ -136,35 +144,19 @@ func (m *Mempool) Add(tx *GossipAtomicTx) error { m.ctx.Lock.RLock() defer m.ctx.Lock.RUnlock() - m.lock.Lock() - defer m.lock.Unlock() - - err := m.addTx(tx.Tx, false) - if errors.Is(err, errTxAlreadyKnown) { - return err - } - - if err != nil { - txID := tx.Tx.ID() - m.discardedTxs.Put(txID, tx.Tx) - log.Debug("failed to issue remote tx to mempool", - "txID", txID, - "err", err, - ) - } - - return err + return m.AddRemoteTx(tx.Tx) } -// 
AddTx attempts to add [tx] to the mempool and returns an error if +// AddRemoteTx attempts to add [tx] to the mempool and returns an error if // it could not be added to the mempool. -func (m *Mempool) AddTx(tx *Tx) error { +func (m *Mempool) AddRemoteTx(tx *Tx) error { m.lock.Lock() defer m.lock.Unlock() - err := m.addTx(tx, false) + err := m.addTx(tx, false, false) + // Do not attempt to discard the tx if it was already known if errors.Is(err, errTxAlreadyKnown) { - return nil + return err } if err != nil { @@ -184,7 +176,7 @@ func (m *Mempool) AddLocalTx(tx *Tx) error { m.lock.Lock() defer m.lock.Unlock() - err := m.addTx(tx, false) + err := m.addTx(tx, true, false) if errors.Is(err, errTxAlreadyKnown) { return nil } @@ -192,17 +184,12 @@ func (m *Mempool) AddLocalTx(tx *Tx) error { return err } -// forceAddTx forcibly adds a *Tx to the mempool and bypasses all verification. +// ForceAddTx forcibly adds a *Tx to the mempool and bypasses all verification. func (m *Mempool) ForceAddTx(tx *Tx) error { m.lock.Lock() defer m.lock.Unlock() - err := m.addTx(tx, true) - if errors.Is(err, errTxAlreadyKnown) { - return nil - } - - return nil + return m.addTx(tx, true, true) } // checkConflictTx checks for any transactions in the mempool that spend the same input UTXOs as [tx]. @@ -239,7 +226,7 @@ func (m *Mempool) checkConflictTx(tx *Tx) (uint64, ids.ID, []*Tx, error) { // addTx adds [tx] to the mempool. Assumes [m.lock] is held. // If [force], skips conflict checks within the mempool. -func (m *Mempool) addTx(tx *Tx, force bool) error { +func (m *Mempool) addTx(tx *Tx, local bool, force bool) error { txID := tx.ID() // If [txID] has already been issued or is in the currentTxs map // there's no need to add it. @@ -252,6 +239,11 @@ func (m *Mempool) addTx(tx *Tx, force bool) error { if _, exists := m.txHeap.Get(txID); exists { return fmt.Errorf("%w: tx %s is pending", errTxAlreadyKnown, tx.ID()) } + if !local { + if _, exists := m.discardedTxs.Get(txID); exists { + return fmt.Errorf("%w: tx %s was discarded", errTxAlreadyKnown, tx.ID()) + } + } if !force && m.verify != nil { if err := m.verify(tx); err != nil { return err @@ -270,7 +262,7 @@ func (m *Mempool) addTx(tx *Tx, force bool) error { if highestGasPrice >= gasPrice { return fmt.Errorf( "%w: issued tx (%s) gas price %d <= conflict tx (%s) gas price %d (%d total conflicts in mempool)", - errConflictingAtomicTx, + ErrConflictingAtomicTx, txID, gasPrice, highestGasPriceConflictTxID, @@ -295,7 +287,7 @@ func (m *Mempool) addTx(tx *Tx, force bool) error { if minGasPrice >= gasPrice { return fmt.Errorf( "%w currentMin=%d provided=%d", - errInsufficientAtomicTxFee, + ErrInsufficientAtomicTxFee, minGasPrice, gasPrice, ) @@ -305,7 +297,7 @@ func (m *Mempool) addTx(tx *Tx, force bool) error { } else { // This could occur if we have used our entire size allowance on // transactions that are currently processing. 
- return errTooManyAtomicTx + return ErrTooManyAtomicTx } } @@ -329,7 +321,7 @@ func (m *Mempool) addTx(tx *Tx, force bool) error { } m.bloom.Add(&GossipAtomicTx{Tx: tx}) - reset, err := gossip.ResetBloomFilterIfNeeded(m.bloom, m.length()*txGossipBloomChurnMultiplier) + reset, err := gossip.ResetBloomFilterIfNeeded(m.bloom, m.length()*config.TxGossipBloomChurnMultiplier) if err != nil { return err } diff --git a/plugin/evm/mempool_test.go b/plugin/evm/atomic/mempool_test.go similarity index 98% rename from plugin/evm/mempool_test.go rename to plugin/evm/atomic/mempool_test.go index a56c43bbee..9334853a5e 100644 --- a/plugin/evm/mempool_test.go +++ b/plugin/evm/atomic/mempool_test.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "testing" diff --git a/plugin/evm/metadata.go b/plugin/evm/atomic/metadata.go similarity index 98% rename from plugin/evm/metadata.go rename to plugin/evm/atomic/metadata.go index 2665d329bc..7cd570f7ec 100644 --- a/plugin/evm/metadata.go +++ b/plugin/evm/atomic/metadata.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "github.com/ava-labs/avalanchego/ids" diff --git a/plugin/evm/atomic/params.go b/plugin/evm/atomic/params.go new file mode 100644 index 0000000000..b154d0bfc0 --- /dev/null +++ b/plugin/evm/atomic/params.go @@ -0,0 +1,13 @@ +// (c) 2025 Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package atomic + +import "github.com/ava-labs/avalanchego/utils/units" + +const ( + AvalancheAtomicTxFee = units.MilliAvax + + // The base cost to charge per atomic transaction. Added in Apricot Phase 5. + AtomicTxBaseCost uint64 = 10_000 +) diff --git a/plugin/evm/status.go b/plugin/evm/atomic/status.go similarity index 95% rename from plugin/evm/status.go rename to plugin/evm/atomic/status.go index 14d1b009a7..c7c72d0987 100644 --- a/plugin/evm/status.go +++ b/plugin/evm/atomic/status.go @@ -1,16 +1,14 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "errors" "fmt" ) -var ( - errUnknownStatus = errors.New("unknown status") -) +var errUnknownStatus = errors.New("unknown status") // Status ... type Status uint32 diff --git a/plugin/evm/test_tx.go b/plugin/evm/atomic/test_tx.go similarity index 66% rename from plugin/evm/test_tx.go rename to plugin/evm/atomic/test_tx.go index 134a843431..de23a8b09e 100644 --- a/plugin/evm/test_tx.go +++ b/plugin/evm/atomic/test_tx.go @@ -1,29 +1,45 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "math/big" "math/rand" - "github.com/ava-labs/avalanchego/utils" - - "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils" + + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/params/extras" ) +var TestTxCodec codec.Manager + +func init() { + TestTxCodec = codec.NewDefaultManager() + c := linearcodec.NewDefault() + + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(&TestUnsignedTx{}), + TestTxCodec.RegisterCodec(CodecVersion, c), + ) + + if errs.Errored() { + panic(errs.Err) + } +} + type TestUnsignedTx struct { - GasUsedV uint64 `serialize:"true"` - AcceptRequestsBlockchainIDV ids.ID `serialize:"true"` - AcceptRequestsV *atomic.Requests `serialize:"true"` + GasUsedV uint64 `serialize:"true"` + AcceptRequestsBlockchainIDV ids.ID `serialize:"true"` + AcceptRequestsV *avalancheatomic.Requests `serialize:"true"` VerifyV error IDV ids.ID `serialize:"true" json:"id"` BurnedV uint64 `serialize:"true"` @@ -43,7 +59,7 @@ func (t *TestUnsignedTx) GasUsed(fixedFee bool) (uint64, error) { return t.GasUs func (t *TestUnsignedTx) Verify(ctx *snow.Context, rules extras.Rules) error { return t.VerifyV } // AtomicOps implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) AtomicOps() (ids.ID, *atomic.Requests, error) { +func (t *TestUnsignedTx) AtomicOps() (ids.ID, *avalancheatomic.Requests, error) { return t.AcceptRequestsBlockchainIDV, t.AcceptRequestsV, nil } @@ -66,41 +82,40 @@ func (t *TestUnsignedTx) SignedBytes() []byte { return t.SignedBytesV } func (t *TestUnsignedTx) InputUTXOs() set.Set[ids.ID] { return t.InputUTXOsV } // SemanticVerify implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) SemanticVerify(vm *VM, stx *Tx, parent *Block, baseFee *big.Int, rules extras.Rules) error { +func (t *TestUnsignedTx) SemanticVerify(backend *Backend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error { return t.SemanticVerifyV } // EVMStateTransfer implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) EVMStateTransfer(ctx *snow.Context, state *state.StateDB) error { +func (t *TestUnsignedTx) EVMStateTransfer(ctx *snow.Context, state StateDB) error { return t.EVMStateTransferV } -func testTxCodec() codec.Manager { - codec := codec.NewDefaultManager() - c := linearcodec.NewDefault() +var TestBlockchainID = ids.GenerateTestID() - errs := wrappers.Errs{} - errs.Add( - c.RegisterType(&TestUnsignedTx{}), - c.RegisterType(&atomic.Element{}), - c.RegisterType(&atomic.Requests{}), - codec.RegisterCodec(codecVersion, c), - ) - - if errs.Errored() { - panic(errs.Err) +func GenerateTestImportTxWithGas(gasUsed uint64, burned uint64) *Tx { + return &Tx{ + UnsignedAtomicTx: &TestUnsignedTx{ + IDV: ids.GenerateTestID(), + GasUsedV: gasUsed, + BurnedV: burned, + AcceptRequestsBlockchainIDV: TestBlockchainID, + AcceptRequestsV: &avalancheatomic.Requests{ + RemoveRequests: [][]byte{ + utils.RandomBytes(32), + utils.RandomBytes(32), + }, + }, + }, } - return codec } -var blockChainID = ids.GenerateTestID() - -func testDataImportTx() *Tx { +func GenerateTestImportTx() *Tx { return &Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), - 
AcceptRequestsBlockchainIDV: blockChainID, - AcceptRequestsV: &atomic.Requests{ + AcceptRequestsBlockchainIDV: TestBlockchainID, + AcceptRequestsV: &avalancheatomic.Requests{ RemoveRequests: [][]byte{ utils.RandomBytes(32), utils.RandomBytes(32), @@ -110,13 +125,13 @@ func testDataImportTx() *Tx { } } -func testDataExportTx() *Tx { +func GenerateTestExportTx() *Tx { return &Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), - AcceptRequestsBlockchainIDV: blockChainID, - AcceptRequestsV: &atomic.Requests{ - PutRequests: []*atomic.Element{ + AcceptRequestsBlockchainIDV: TestBlockchainID, + AcceptRequestsV: &avalancheatomic.Requests{ + PutRequests: []*avalancheatomic.Element{ { Key: utils.RandomBytes(16), Value: utils.RandomBytes(24), @@ -131,22 +146,22 @@ func testDataExportTx() *Tx { } } -func newTestTx() *Tx { +func NewTestTx() *Tx { txType := rand.Intn(2) switch txType { case 0: - return testDataImportTx() + return GenerateTestImportTx() case 1: - return testDataExportTx() + return GenerateTestExportTx() default: panic("rng generated unexpected value for tx type") } } -func newTestTxs(numTxs int) []*Tx { +func NewTestTxs(numTxs int) []*Tx { txs := make([]*Tx, 0, numTxs) for i := 0; i < numTxs; i++ { - txs = append(txs, newTestTx()) + txs = append(txs, NewTestTx()) } return txs diff --git a/plugin/evm/tx.go b/plugin/evm/atomic/tx.go similarity index 75% rename from plugin/evm/tx.go rename to plugin/evm/atomic/tx.go index 54f917da63..72337a4829 100644 --- a/plugin/evm/tx.go +++ b/plugin/evm/atomic/tx.go @@ -1,44 +1,50 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "bytes" + "context" "errors" "fmt" "math/big" "sort" "github.com/ava-labs/libevm/common" + "github.com/holiman/uint256" - "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/params/extras" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +const ( + X2CRateUint64 uint64 = 1_000_000_000 + x2cRateMinus1Uint64 uint64 = X2CRateUint64 - 1 +) + var ( - errWrongBlockchainID = errors.New("wrong blockchain ID provided") - errWrongNetworkID = errors.New("tx was issued with a different network ID") - errNilTx = errors.New("tx is nil") - errNoValueOutput = errors.New("output has no value") - errNoValueInput = errors.New("input has no value") - errNilOutput = errors.New("nil output") - errNilInput = errors.New("nil input") - errEmptyAssetID = errors.New("empty asset ID is not valid") - errNilBaseFee = errors.New("cannot calculate dynamic fee with nil baseFee") - errFeeOverflow = errors.New("overflow occurred while calculating the fee") + ErrWrongNetworkID = errors.New("tx was issued with a different network ID") + ErrNilTx = errors.New("tx is nil") + errNoValueOutput = errors.New("output has no value") + ErrNoValueInput = errors.New("input has no value") + errNilOutput = errors.New("nil output") + errNilInput = errors.New("nil input") + errEmptyAssetID = 
errors.New("empty asset ID is not valid") + errNilBaseFee = errors.New("cannot calculate dynamic fee with nil baseFee") + errFeeOverflow = errors.New("overflow occurred while calculating the fee") ) // Constants for calculating the gas consumed by atomic transactions @@ -46,6 +52,12 @@ var ( TxBytesGas uint64 = 1 EVMOutputGas uint64 = (common.AddressLength + wrappers.LongLen + hashing.HashLen) * TxBytesGas EVMInputGas uint64 = (common.AddressLength+wrappers.LongLen+hashing.HashLen+wrappers.LongLen)*TxBytesGas + secp256k1fx.CostPerSignature + // X2CRate is the conversion rate between the smallest denomination on the X-Chain + // 1 nAVAX and the smallest denomination on the C-Chain 1 wei. Where 1 nAVAX = 1 gWei. + // This is only required for AVAX because the denomination of 1 AVAX is 9 decimal + // places on the X and P chains, but is 18 decimal places within the EVM. + X2CRate = uint256.NewInt(X2CRateUint64) + x2cRateMinus1 = uint256.NewInt(x2cRateMinus1Uint64) ) // EVMOutput defines an output that is added to the EVM state created by import transactions @@ -98,7 +110,7 @@ func (in *EVMInput) Verify() error { case in == nil: return errNilInput case in.Amount == 0: - return errNoValueInput + return ErrNoValueInput case in.AssetID == ids.Empty: return errEmptyAssetID } @@ -115,6 +127,39 @@ type UnsignedTx interface { SignedBytes() []byte } +type Backend struct { + Ctx *snow.Context + Fx fx.Fx + Rules extras.Rules + Bootstrapped bool + BlockFetcher BlockFetcher + SecpCache *secp256k1.RecoverCache +} + +type BlockFetcher interface { + LastAcceptedBlockInternal() snowman.Block + GetBlockInternal(context.Context, ids.ID) (snowman.Block, error) +} + +type AtomicBlockContext interface { + AtomicTxs() []*Tx + snowman.Block +} + +type StateDB interface { + AddBalance(common.Address, *uint256.Int) + AddBalanceMultiCoin(common.Address, common.Hash, *big.Int) + + SubBalance(common.Address, *uint256.Int) + SubBalanceMultiCoin(common.Address, common.Hash, *big.Int) + + GetBalance(common.Address) *uint256.Int + GetBalanceMultiCoin(common.Address, common.Hash) *big.Int + + GetNonce(common.Address) uint64 + SetNonce(common.Address, uint64) +} + // UnsignedAtomicTx is an unsigned operation that can be atomically accepted type UnsignedAtomicTx interface { UnsignedTx @@ -124,13 +169,14 @@ type UnsignedAtomicTx interface { // Verify attempts to verify that the transaction is well formed Verify(ctx *snow.Context, rules extras.Rules) error // Attempts to verify this transaction with the provided state. - SemanticVerify(vm *VM, stx *Tx, parent *Block, baseFee *big.Int, rules extras.Rules) error + // SemanticVerify this transaction is valid. + SemanticVerify(backend *Backend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error // AtomicOps returns the blockchainID and set of atomic requests that // must be applied to shared memory for this transaction to be accepted. // The set of atomic requests must be returned in a consistent order. 
AtomicOps() (ids.ID, *atomic.Requests, error) - EVMStateTransfer(ctx *snow.Context, state *state.StateDB) error + EVMStateTransfer(ctx *snow.Context, state StateDB) error } // Tx is a signed transaction @@ -157,7 +203,7 @@ func (tx *Tx) Compare(other *Tx) int { // Sign this transaction with the provided signers func (tx *Tx) Sign(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { - unsignedBytes, err := c.Marshal(codecVersion, &tx.UnsignedAtomicTx) + unsignedBytes, err := c.Marshal(CodecVersion, &tx.UnsignedAtomicTx) if err != nil { return fmt.Errorf("couldn't marshal UnsignedAtomicTx: %w", err) } @@ -178,7 +224,7 @@ func (tx *Tx) Sign(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { tx.Creds = append(tx.Creds, cred) // Attach credential } - signedBytes, err := c.Marshal(codecVersion, tx) + signedBytes, err := c.Marshal(CodecVersion, tx) if err != nil { return fmt.Errorf("couldn't marshal Tx: %w", err) } @@ -216,7 +262,7 @@ func (tx *Tx) BlockFeeContribution(fixedFee bool, avaxAssetID ids.ID, baseFee *b // Calculate the amount of AVAX that has been burned above the required fee denominated // in C-Chain native 18 decimal places - blockFeeContribution := new(big.Int).Mul(new(big.Int).SetUint64(excessBurned), x2cRate.ToBig()) + blockFeeContribution := new(big.Int).Mul(new(big.Int).SetUint64(excessBurned), X2CRate.ToBig()) return blockFeeContribution, new(big.Int).SetUint64(gasUsed), nil } @@ -255,7 +301,7 @@ func CalculateDynamicFee(cost uint64, baseFee *big.Int) (uint64, error) { bigCost := new(big.Int).SetUint64(cost) fee := new(big.Int).Mul(bigCost, baseFee) feeToRoundUp := new(big.Int).Add(fee, x2cRateMinus1.ToBig()) - feeInNAVAX := new(big.Int).Div(feeToRoundUp, x2cRate.ToBig()) + feeInNAVAX := new(big.Int).Div(feeToRoundUp, X2CRate.ToBig()) if !feeInNAVAX.IsUint64() { // the fee is more than can fit in a uint64 return 0, errFeeOverflow @@ -266,25 +312,3 @@ func CalculateDynamicFee(cost uint64, baseFee *big.Int) (uint64, error) { func calcBytesCost(len int) uint64 { return uint64(len) * TxBytesGas } - -// mergeAtomicOps merges atomic requests represented by [txs] -// to the [output] map, depending on whether [chainID] is present in the map. -func mergeAtomicOps(txs []*Tx) (map[ids.ID]*atomic.Requests, error) { - if len(txs) > 1 { - // txs should be stored in order of txID to ensure consistency - // with txs initialized from the txID index. - copyTxs := make([]*Tx, len(txs)) - copy(copyTxs, txs) - utils.Sort(copyTxs) - txs = copyTxs - } - output := make(map[ids.ID]*atomic.Requests) - for _, tx := range txs { - chainID, txRequests, err := tx.UnsignedAtomicTx.AtomicOps() - if err != nil { - return nil, err - } - mergeAtomicOpsToMap(output, chainID, txRequests) - } - return output, nil -} diff --git a/plugin/evm/atomic/tx_heap.go b/plugin/evm/atomic/tx_heap.go new file mode 100644 index 0000000000..bcec314cd7 --- /dev/null +++ b/plugin/evm/atomic/tx_heap.go @@ -0,0 +1,163 @@ +// (c) 2020-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package atomic + +import ( + "container/heap" + + "github.com/ava-labs/avalanchego/ids" +) + +// txEntry is used to track the [gasPrice] transactions pay to be included in +// the mempool. 
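+// Design note, as read from the code below: each transaction is entered into
+// both a max-heap and a min-heap keyed by gas price, so a block builder can
+// pop the most profitable transaction while the mempool can inspect and evict
+// the cheapest pending one once it reaches capacity.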
+type txEntry struct { + id ids.ID + gasPrice uint64 + tx *Tx + index int +} + +// internalTxHeap is used to track pending atomic transactions by [gasPrice] +type internalTxHeap struct { + isMinHeap bool + items []*txEntry + lookup map[ids.ID]*txEntry +} + +func newInternalTxHeap(items int, isMinHeap bool) *internalTxHeap { + return &internalTxHeap{ + isMinHeap: isMinHeap, + items: make([]*txEntry, 0, items), + lookup: map[ids.ID]*txEntry{}, + } +} + +func (th internalTxHeap) Len() int { return len(th.items) } + +func (th internalTxHeap) Less(i, j int) bool { + if th.isMinHeap { + return th.items[i].gasPrice < th.items[j].gasPrice + } + return th.items[i].gasPrice > th.items[j].gasPrice +} + +func (th internalTxHeap) Swap(i, j int) { + th.items[i], th.items[j] = th.items[j], th.items[i] + th.items[i].index = i + th.items[j].index = j +} + +func (th *internalTxHeap) Push(x interface{}) { + entry := x.(*txEntry) + if th.Has(entry.id) { + return + } + th.items = append(th.items, entry) + th.lookup[entry.id] = entry +} + +func (th *internalTxHeap) Pop() interface{} { + n := len(th.items) + item := th.items[n-1] + th.items[n-1] = nil // avoid memory leak + th.items = th.items[0 : n-1] + delete(th.lookup, item.id) + return item +} + +func (th *internalTxHeap) Get(id ids.ID) (*txEntry, bool) { + entry, ok := th.lookup[id] + if !ok { + return nil, false + } + return entry, true +} + +func (th *internalTxHeap) Has(id ids.ID) bool { + _, has := th.Get(id) + return has +} + +type txHeap struct { + maxHeap *internalTxHeap + minHeap *internalTxHeap +} + +func newTxHeap(maxSize int) *txHeap { + return &txHeap{ + maxHeap: newInternalTxHeap(maxSize, false), + minHeap: newInternalTxHeap(maxSize, true), + } +} + +func (th *txHeap) Push(tx *Tx, gasPrice uint64) { + txID := tx.ID() + oldLen := th.Len() + heap.Push(th.maxHeap, &txEntry{ + id: txID, + gasPrice: gasPrice, + tx: tx, + index: oldLen, + }) + heap.Push(th.minHeap, &txEntry{ + id: txID, + gasPrice: gasPrice, + tx: tx, + index: oldLen, + }) +} + +// Assumes there are non-zero items in [txHeap] +func (th *txHeap) PeekMax() (*Tx, uint64) { + txEntry := th.maxHeap.items[0] + return txEntry.tx, txEntry.gasPrice +} + +// Assumes there are non-zero items in [txHeap] +func (th *txHeap) PeekMin() (*Tx, uint64) { + txEntry := th.minHeap.items[0] + return txEntry.tx, txEntry.gasPrice +} + +// Assumes there are non-zero items in [txHeap] +func (th *txHeap) PopMax() *Tx { + return th.Remove(th.maxHeap.items[0].id) +} + +// Assumes there are non-zero items in [txHeap] +func (th *txHeap) PopMin() *Tx { + return th.Remove(th.minHeap.items[0].id) +} + +func (th *txHeap) Remove(id ids.ID) *Tx { + maxEntry, ok := th.maxHeap.Get(id) + if !ok { + return nil + } + heap.Remove(th.maxHeap, maxEntry.index) + + minEntry, ok := th.minHeap.Get(id) + if !ok { + // This should never happen, as that would mean the heaps are out of + // sync. + return nil + } + return heap.Remove(th.minHeap, minEntry.index).(*txEntry).tx +} + +func (th *txHeap) Len() int { + return th.maxHeap.Len() +} + +func (th *txHeap) Get(id ids.ID) (*Tx, bool) { + txEntry, ok := th.maxHeap.Get(id) + if !ok { + return nil, false + } + return txEntry.tx, true +} + +func (th *txHeap) Has(id ids.ID) bool { + return th.maxHeap.Has(id) +} diff --git a/plugin/evm/atomic/tx_heap_test.go b/plugin/evm/atomic/tx_heap_test.go new file mode 100644 index 0000000000..c9f602ccea --- /dev/null +++ b/plugin/evm/atomic/tx_heap_test.go @@ -0,0 +1,142 @@ +// (c) 2019-2021, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package atomic + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTxHeap(t *testing.T) { + var ( + tx0 = &Tx{ + UnsignedAtomicTx: &UnsignedImportTx{ + NetworkID: 0, + }, + } + tx0Bytes = []byte{0} + + tx1 = &Tx{ + UnsignedAtomicTx: &UnsignedImportTx{ + NetworkID: 1, + }, + } + tx1Bytes = []byte{1} + + tx2 = &Tx{ + UnsignedAtomicTx: &UnsignedImportTx{ + NetworkID: 2, + }, + } + tx2Bytes = []byte{2} + ) + tx0.Initialize(tx0Bytes, tx0Bytes) + tx1.Initialize(tx1Bytes, tx1Bytes) + tx2.Initialize(tx2Bytes, tx2Bytes) + + id0 := tx0.ID() + id1 := tx1.ID() + id2 := tx2.ID() + + t.Run("add/remove single entry", func(t *testing.T) { + h := newTxHeap(3) + assert.Zero(t, h.Len()) + + assert := assert.New(t) + h.Push(tx0, 5) + assert.True(h.Has(id0)) + gTx0, gHas0 := h.Get(id0) + assert.Equal(tx0, gTx0) + assert.True(gHas0) + h.Remove(id0) + assert.False(h.Has(id0)) + assert.Zero(h.Len()) + h.Push(tx0, 5) + assert.True(h.Has(id0)) + assert.Equal(1, h.Len()) + }) + + t.Run("add other items", func(t *testing.T) { + h := newTxHeap(3) + assert.Zero(t, h.Len()) + + assert := assert.New(t) + h.Push(tx1, 10) + assert.True(h.Has(id1)) + gTx1, gHas1 := h.Get(id1) + assert.Equal(tx1, gTx1) + assert.True(gHas1) + + h.Push(tx2, 2) + assert.True(h.Has(id2)) + gTx2, gHas2 := h.Get(id2) + assert.Equal(tx2, gTx2) + assert.True(gHas2) + + assert.Equal(id1, h.PopMax().ID()) + assert.Equal(id2, h.PopMax().ID()) + + assert.False(h.Has(id0)) + gTx0, gHas0 := h.Get(id0) + assert.Nil(gTx0) + assert.False(gHas0) + + assert.False(h.Has(id1)) + gTx1, gHas1 = h.Get(id1) + assert.Nil(gTx1) + assert.False(gHas1) + + assert.False(h.Has(id2)) + gTx2, gHas2 = h.Get(id2) + assert.Nil(gTx2) + assert.False(gHas2) + }) + + verifyRemovalOrder := func(t *testing.T, h *txHeap) { + t.Helper() + + assert := assert.New(t) + assert.Equal(id2, h.PopMin().ID()) + assert.True(h.Has(id0)) + assert.True(h.Has(id1)) + assert.False(h.Has(id2)) + assert.Equal(id0, h.PopMin().ID()) + assert.False(h.Has(id0)) + assert.True(h.Has(id1)) + assert.False(h.Has(id2)) + assert.Equal(id1, h.PopMin().ID()) + assert.False(h.Has(id0)) + assert.False(h.Has(id1)) + assert.False(h.Has(id2)) + } + + t.Run("drop", func(t *testing.T) { + h := newTxHeap(3) + assert.Zero(t, h.Len()) + + h.Push(tx0, 5) + h.Push(tx1, 10) + h.Push(tx2, 2) + verifyRemovalOrder(t, h) + }) + t.Run("drop (alt order)", func(t *testing.T) { + h := newTxHeap(3) + assert.Zero(t, h.Len()) + + h.Push(tx0, 5) + h.Push(tx2, 2) + h.Push(tx1, 10) + verifyRemovalOrder(t, h) + }) + t.Run("drop (alt order 2)", func(t *testing.T) { + h := newTxHeap(3) + assert.Zero(t, h.Len()) + + h.Push(tx2, 2) + h.Push(tx0, 5) + h.Push(tx1, 10) + verifyRemovalOrder(t, h) + }) +} diff --git a/plugin/evm/atomic_backend.go b/plugin/evm/atomic_backend.go index bbdec521a3..ecbfb6c2b6 100644 --- a/plugin/evm/atomic_backend.go +++ b/plugin/evm/atomic_backend.go @@ -8,13 +8,15 @@ import ( "fmt" "time" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/coreth/plugin/evm/atomic" syncclient "github.com/ava-labs/coreth/sync/client" 
"github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/log" @@ -32,7 +34,7 @@ type AtomicBackend interface { // and it's the caller's responsibility to call either Accept or Reject on // the AtomicState which can be retreived from GetVerifiedAtomicState to commit the // changes or abort them and free memory. - InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*Tx) (common.Hash, error) + InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) // Returns an AtomicState corresponding to a block hash that has been inserted // but not Accepted or Rejected yet. @@ -73,7 +75,7 @@ type atomicBackend struct { bonusBlocks map[uint64]ids.ID // Map of height to blockID for blocks to skip indexing db *versiondb.Database // Underlying database metadataDB database.Database // Underlying database containing the atomic trie metadata - sharedMemory atomic.SharedMemory + sharedMemory avalancheatomic.SharedMemory repo AtomicTxRepository atomicTrie AtomicTrie @@ -84,7 +86,7 @@ type atomicBackend struct { // NewAtomicBackend creates an AtomicBackend from the specified dependencies func NewAtomicBackend( - db *versiondb.Database, sharedMemory atomic.SharedMemory, + db *versiondb.Database, sharedMemory avalancheatomic.SharedMemory, bonusBlocks map[uint64]ids.ID, repo AtomicTxRepository, lastAcceptedHeight uint64, lastAcceptedHash common.Hash, commitInterval uint64, ) (AtomicBackend, error) { @@ -150,7 +152,7 @@ func (a *atomicBackend) initialize(lastAcceptedHeight uint64) error { // iterate over the transactions, indexing them if the height is < commit height // otherwise, add the atomic operations from the transaction to the uncommittedOpsMap height = binary.BigEndian.Uint64(iter.Key()) - txs, err := ExtractAtomicTxs(iter.Value(), true, a.codec) + txs, err := atomic.ExtractAtomicTxs(iter.Value(), true, a.codec) if err != nil { return err } @@ -266,7 +268,7 @@ func (a *atomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { it.Next() } - batchOps := make(map[ids.ID]*atomic.Requests) + batchOps := make(map[ids.ID]*avalancheatomic.Requests) for it.Next() { height := it.BlockNumber() if height > lastAcceptedBlock { @@ -318,7 +320,7 @@ func (a *atomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { lastHeight = height lastBlockchainID = blockchainID putRequests, removeRequests = 0, 0 - batchOps = make(map[ids.ID]*atomic.Requests) + batchOps = make(map[ids.ID]*avalancheatomic.Requests) } } if err := it.Error(); err != nil { @@ -395,7 +397,7 @@ func (a *atomicBackend) SetLastAccepted(lastAcceptedHash common.Hash) { // and it's the caller's responsibility to call either Accept or Reject on // the AtomicState which can be retreived from GetVerifiedAtomicState to commit the // changes or abort them and free memory. 
-func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*Tx) (common.Hash, error) {
+func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) {
	// access the atomic trie at the parent block
	parentRoot, err := a.getAtomicRootAt(parentHash)
	if err != nil {
@@ -455,3 +457,36 @@ func (a *atomicBackend) IsBonus(blockHeight uint64, blockHash common.Hash) bool
func (a *atomicBackend) AtomicTrie() AtomicTrie {
	return a.atomicTrie
}
+
+// mergeAtomicOps merges the atomic requests of [txs] into a single map of
+// chainID to combined Requests, sorting [txs] by txID first for determinism.
+func mergeAtomicOps(txs []*atomic.Tx) (map[ids.ID]*avalancheatomic.Requests, error) {
+	if len(txs) > 1 {
+		// txs should be stored in order of txID to ensure consistency
+		// with txs initialized from the txID index.
+		copyTxs := make([]*atomic.Tx, len(txs))
+		copy(copyTxs, txs)
+		utils.Sort(copyTxs)
+		txs = copyTxs
+	}
+	output := make(map[ids.ID]*avalancheatomic.Requests)
+	for _, tx := range txs {
+		chainID, txRequests, err := tx.UnsignedAtomicTx.AtomicOps()
+		if err != nil {
+			return nil, err
+		}
+		mergeAtomicOpsToMap(output, chainID, txRequests)
+	}
+	return output, nil
+}
+
+// mergeAtomicOpsToMap merges the atomic ops for [chainID] represented by [requests]
+// into the [output] map provided.
+func mergeAtomicOpsToMap(output map[ids.ID]*avalancheatomic.Requests, chainID ids.ID, requests *avalancheatomic.Requests) {
+	if request, exists := output[chainID]; exists {
+		request.PutRequests = append(request.PutRequests, requests.PutRequests...)
+		request.RemoveRequests = append(request.RemoveRequests, requests.RemoveRequests...)
+	} else {
+		output[chainID] = requests
+	}
+}
diff --git a/plugin/evm/atomic_state.go b/plugin/evm/atomic_state.go
index d7099f95f2..7647a4cbab 100644
--- a/plugin/evm/atomic_state.go
+++ b/plugin/evm/atomic_state.go
@@ -6,9 +6,10 @@ package evm
import (
	"fmt"

-	"github.com/ava-labs/avalanchego/chains/atomic"
+	avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic"
	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/coreth/plugin/evm/atomic"
	"github.com/ava-labs/libevm/common"
	"github.com/ava-labs/libevm/log"
)
@@ -25,7 +26,7 @@ type AtomicState interface {
	Root() common.Hash
	// Accept applies the state change to VM's persistent storage
	// Changes are persisted atomically along with the provided [commitBatch].
-	Accept(commitBatch database.Batch, requests map[ids.ID]*atomic.Requests) error
+	Accept(commitBatch database.Batch, requests map[ids.ID]*avalancheatomic.Requests) error
	// Reject frees memory associated with the state change.
	Reject() error
}
@@ -36,8 +37,8 @@ type atomicState struct {
	backend     *atomicBackend
	blockHash   common.Hash
	blockHeight uint64
-	txs         []*Tx
-	atomicOps   map[ids.ID]*atomic.Requests
+	txs         []*atomic.Tx
+	atomicOps   map[ids.ID]*avalancheatomic.Requests
	atomicRoot  common.Hash
}
@@ -46,7 +47,7 @@ func (a *atomicState) Root() common.Hash {
	return a.atomicRoot
}

// Accept applies the state change to VM's persistent storage.
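
The two helpers added above are the merge path for atomic requests: `mergeAtomicOps` sorts the txs by txID and folds each tx's ops into one map keyed by chainID via `mergeAtomicOpsToMap`. A minimal runnable sketch of that fold using only avalanchego's `Requests` type (`mergeToMap` is an illustrative stand-in name, not the repository's function):

```go
package main

import (
	"fmt"

	avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic"
	"github.com/ava-labs/avalanchego/ids"
)

// mergeToMap mirrors mergeAtomicOpsToMap above: requests for a chainID that is
// already present are appended; a new chainID takes the requests wholesale.
func mergeToMap(output map[ids.ID]*avalancheatomic.Requests, chainID ids.ID, reqs *avalancheatomic.Requests) {
	if existing, ok := output[chainID]; ok {
		existing.PutRequests = append(existing.PutRequests, reqs.PutRequests...)
		existing.RemoveRequests = append(existing.RemoveRequests, reqs.RemoveRequests...)
	} else {
		output[chainID] = reqs
	}
}

func main() {
	chainID := ids.GenerateTestID()
	output := make(map[ids.ID]*avalancheatomic.Requests)
	mergeToMap(output, chainID, &avalancheatomic.Requests{RemoveRequests: [][]byte{{0x01}}})
	mergeToMap(output, chainID, &avalancheatomic.Requests{RemoveRequests: [][]byte{{0x02}}})
	fmt.Println(len(output[chainID].RemoveRequests)) // 2: both remove requests merged
}
```
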
-func (a *atomicState) Accept(commitBatch database.Batch, requests map[ids.ID]*atomic.Requests) error {
+func (a *atomicState) Accept(commitBatch database.Batch, requests map[ids.ID]*avalancheatomic.Requests) error {
	// Add the new requests to the batch to be accepted
	for chainID, requests := range requests {
		mergeAtomicOpsToMap(a.atomicOps, chainID, requests)
@@ -83,7 +84,7 @@ func (a *atomicState) Accept(commitBatch database.Batch, requests map[ids.ID]*at
	// to shared memory.
	if a.backend.IsBonus(a.blockHeight, a.blockHash) {
		log.Info("skipping atomic tx acceptance on bonus block", "block", a.blockHash)
-		return atomic.WriteAll(commitBatch, atomicChangesBatch)
+		return avalancheatomic.WriteAll(commitBatch, atomicChangesBatch)
	}

	// Otherwise, atomically commit pending changes in the version db with
diff --git a/plugin/evm/atomic_syncer_test.go b/plugin/evm/atomic_syncer_test.go
index ed6ba46466..a3562c91ab 100644
--- a/plugin/evm/atomic_syncer_test.go
+++ b/plugin/evm/atomic_syncer_test.go
@@ -17,6 +17,7 @@ import (
	"github.com/ava-labs/avalanchego/database/versiondb"

	"github.com/ava-labs/coreth/core/rawdb"
+	"github.com/ava-labs/coreth/plugin/evm/config"
	"github.com/ava-labs/coreth/plugin/evm/message"
	syncclient "github.com/ava-labs/coreth/sync/client"
	"github.com/ava-labs/coreth/sync/handlers"
@@ -64,7 +65,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight
	// next trie.
	for i, checkpoint := range checkpoints {
		// Create syncer targeting the current [syncTrie].
-		syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, defaultStateSyncRequestSize)
+		syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, config.DefaultStateSyncRequestSize)
		if err != nil {
			t.Fatal(err)
		}
@@ -91,7 +92,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight
	}

	// Create syncer targeting the current [targetRoot].
-	syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, defaultStateSyncRequestSize)
+	syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, config.DefaultStateSyncRequestSize)
	if err != nil {
		t.Fatal(err)
	}
diff --git a/plugin/evm/atomic_trie.go b/plugin/evm/atomic_trie.go
index ec86498904..7966c79b72 100644
--- a/plugin/evm/atomic_trie.go
+++ b/plugin/evm/atomic_trie.go
@@ -7,9 +7,9 @@ import (
	"fmt"
	"time"

-	"github.com/ava-labs/avalanchego/chains/atomic"
+	avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic"
	"github.com/ava-labs/avalanchego/codec"
-	"github.com/ava-labs/avalanchego/database"
+	avalanchedatabase "github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/utils/units"
	"github.com/ava-labs/avalanchego/utils/wrappers"
@@ -17,6 +17,8 @@ import (
	"github.com/ava-labs/coreth/core"
	"github.com/ava-labs/coreth/core/rawdb"
	"github.com/ava-labs/coreth/core/types"
+	"github.com/ava-labs/coreth/plugin/evm/atomic"
+	"github.com/ava-labs/coreth/plugin/evm/database"
	"github.com/ava-labs/coreth/triedb/hashdb"
	"github.com/ava-labs/libevm/common"
	"github.com/ava-labs/libevm/ethdb"
@@ -52,7 +54,7 @@ type AtomicTrie interface {
	OpenTrie(hash common.Hash) (*trie.Trie, error)

	// UpdateTrie updates [tr] to include atomicOps for height.
-	UpdateTrie(tr *trie.Trie, height uint64, atomicOps map[ids.ID]*atomic.Requests) error
+	UpdateTrie(tr *trie.Trie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error

	// Iterator returns an AtomicTrieIterator to iterate the trie at the given
	// root hash starting at [cursor].
@@ -108,7 +110,7 @@ type AtomicTrieIterator interface {

	// AtomicOps returns a map of blockchainIDs to the set of atomic requests
	// for that blockchainID at the current block number
-	AtomicOps() *atomic.Requests
+	AtomicOps() *avalancheatomic.Requests

	// Error returns error, if any encountered during this iteration
	Error() error
@@ -116,12 +118,12 @@ type AtomicTrieIterator interface {

// atomicTrie implements the AtomicTrie interface
type atomicTrie struct {
-	commitInterval      uint64            // commit interval, same as commitHeightInterval by default
-	metadataDB          database.Database // Underlying database containing the atomic trie metadata
-	trieDB              *triedb.Database  // Trie database
-	lastCommittedRoot   common.Hash       // trie root of the most recent commit
-	lastCommittedHeight uint64            // index height of the most recent commit
-	lastAcceptedRoot    common.Hash       // most recent trie root passed to accept trie or the root of the atomic trie on intialization.
+	commitInterval      uint64                     // commit interval, same as commitHeightInterval by default
+	metadataDB          avalanchedatabase.Database // Underlying database containing the atomic trie metadata
+	trieDB              *triedb.Database           // Trie database
+	lastCommittedRoot   common.Hash                // trie root of the most recent commit
+	lastCommittedHeight uint64                     // index height of the most recent commit
+	lastAcceptedRoot    common.Hash                // most recent trie root passed to accept trie or the root of the atomic trie on initialization.
	codec               codec.Manager
	memoryCap           common.StorageSize
	tipBuffer           *core.BoundedBuffer[common.Hash]
@@ -130,7 +132,7 @@ type atomicTrie struct {

// newAtomicTrie returns a new instance of an atomicTrie with a configurable commitHeightInterval, used in testing.
// Initializes the trie before returning it.
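
For context on the `UpdateTrie` change carried through here: each `Requests` value is serialized with the atomic codec before being written into the trie (see `a.codec.Marshal(atomic.CodecVersion, requests)` below). A sketch of that marshaling step, with a locally registered codec standing in for the repository's `atomic.Codec`:

```go
package main

import (
	"fmt"

	avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic"
	"github.com/ava-labs/avalanchego/codec"
	"github.com/ava-labs/avalanchego/codec/linearcodec"
)

func main() {
	// Stand-in codec manager; the VM registers its own versioned codec.
	manager := codec.NewDefaultManager()
	if err := manager.RegisterCodec(0, linearcodec.NewDefault()); err != nil {
		panic(err)
	}

	requests := &avalancheatomic.Requests{
		RemoveRequests: [][]byte{{0x01}},
	}
	// This is the serialization UpdateTrie stores as the trie value.
	valueBytes, err := manager.Marshal(0, requests)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d value bytes\n", len(valueBytes))
}
```
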
func newAtomicTrie( - atomicTrieDB database.Database, metadataDB database.Database, + atomicTrieDB avalanchedatabase.Database, metadataDB avalanchedatabase.Database, codec codec.Manager, lastAcceptedHeight uint64, commitHeightInterval uint64, ) (*atomicTrie, error) { root, height, err := lastCommittedRootIfExists(metadataDB) @@ -152,7 +154,7 @@ func newAtomicTrie( } trieDB := triedb.NewDatabase( - rawdb.NewDatabase(Database{atomicTrieDB}), + rawdb.NewDatabase(database.WrapDatabase(atomicTrieDB)), &triedb.Config{ DBOverride: hashdb.Config{ CleanCacheSize: 64 * units.MiB, // Allocate 64MB of memory for clean cache @@ -181,17 +183,17 @@ func newAtomicTrie( // else returns empty common.Hash{} and 0 // returns error only if there are issues with the underlying data store // or if values present in the database are not as expected -func lastCommittedRootIfExists(db database.Database) (common.Hash, uint64, error) { +func lastCommittedRootIfExists(db avalanchedatabase.Database) (common.Hash, uint64, error) { // read the last committed entry if it exists and set the root hash lastCommittedHeightBytes, err := db.Get(lastCommittedKey) switch { - case err == database.ErrNotFound: + case err == avalanchedatabase.ErrNotFound: return common.Hash{}, 0, nil case err != nil: return common.Hash{}, 0, err } - height, err := database.ParseUInt64(lastCommittedHeightBytes) + height, err := avalanchedatabase.ParseUInt64(lastCommittedHeightBytes) if err != nil { return common.Hash{}, 0, fmt.Errorf("expected value at lastCommittedKey to be a valid uint64: %w", err) } @@ -221,9 +223,9 @@ func (a *atomicTrie) commit(height uint64, root common.Hash) error { return a.updateLastCommitted(root, height) } -func (a *atomicTrie) UpdateTrie(trie *trie.Trie, height uint64, atomicOps map[ids.ID]*atomic.Requests) error { +func (a *atomicTrie) UpdateTrie(trie *trie.Trie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error { for blockchainID, requests := range atomicOps { - valueBytes, err := a.codec.Marshal(codecVersion, requests) + valueBytes, err := a.codec.Marshal(atomic.CodecVersion, requests) if err != nil { // highly unlikely but possible if atomic.Element // has a change that is unsupported by the codec @@ -250,7 +252,7 @@ func (a *atomicTrie) LastCommitted() (common.Hash, uint64) { // updateLastCommitted adds [height] -> [root] to the index and marks it as the last committed // root/height pair. func (a *atomicTrie) updateLastCommitted(root common.Hash, height uint64) error { - heightBytes := database.PackUInt64(height) + heightBytes := avalanchedatabase.PackUInt64(height) // now save the trie hash against the height it was committed at if err := a.metadataDB.Put(heightBytes, root[:]); err != nil { @@ -296,7 +298,7 @@ func (a *atomicTrie) Root(height uint64) (common.Hash, error) { // getRoot is a helper function to return the committed atomic trie root hash at [height] // from [metadataDB]. 
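
`updateLastCommitted` above and `getRoot` below maintain a height-to-trie-root index in the metadata database, packing heights big-endian so keys sort in numeric order. A self-contained round-trip sketch of that index over an in-memory database:

```go
package main

import (
	"fmt"

	avalanchedatabase "github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/database/memdb"
	"github.com/ava-labs/libevm/common"
)

func main() {
	db := memdb.New()
	root := common.HexToHash("0x01")

	// Store height -> root the way updateLastCommitted does.
	heightBytes := avalanchedatabase.PackUInt64(4096)
	if err := db.Put(heightBytes, root[:]); err != nil {
		panic(err)
	}

	// Read it back the way getRoot does.
	got, err := db.Get(avalanchedatabase.PackUInt64(4096))
	if err != nil {
		panic(err)
	}
	fmt.Println(common.BytesToHash(got) == root) // true
}
```
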
-func getRoot(metadataDB database.Database, height uint64) (common.Hash, error) { +func getRoot(metadataDB avalanchedatabase.Database, height uint64) (common.Hash, error) { if height == 0 { // if root is queried at height == 0, return the empty root hash // this may occur if peers ask for the most recent state summary @@ -304,10 +306,10 @@ func getRoot(metadataDB database.Database, height uint64) (common.Hash, error) { return types.EmptyRootHash, nil } - heightBytes := database.PackUInt64(height) + heightBytes := avalanchedatabase.PackUInt64(height) hash, err := metadataDB.Get(heightBytes) switch { - case err == database.ErrNotFound: + case err == avalanchedatabase.ErrNotFound: return common.Hash{}, nil case err != nil: return common.Hash{}, err diff --git a/plugin/evm/atomic_trie_iterator_test.go b/plugin/evm/atomic_trie_iterator_test.go index 6c6b9778b9..8da2e03127 100644 --- a/plugin/evm/atomic_trie_iterator_test.go +++ b/plugin/evm/atomic_trie_iterator_test.go @@ -6,32 +6,33 @@ package evm import ( "testing" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/libevm/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func testSharedMemory() atomic.SharedMemory { - m := atomic.NewMemory(memdb.New()) +func testSharedMemory() avalancheatomic.SharedMemory { + m := avalancheatomic.NewMemory(memdb.New()) return m.NewSharedMemory(testCChainID) } func TestIteratorCanIterate(t *testing.T) { lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) assert.NoError(t, err) // create state with multiple transactions // since each test transaction generates random ID for blockchainID we should get // multiple blockchain IDs per block in the overall combined atomic operation map - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) // create an atomic trie @@ -64,14 +65,14 @@ func TestIteratorHandlesInvalidData(t *testing.T) { require := require.New(t) lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) require.NoError(err) // create state with multiple transactions // since each test transaction generates random ID for blockchainID we should get // multiple blockchain IDs per block in the overall combined atomic operation map - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) // create an atomic trie diff --git a/plugin/evm/atomic_trie_test.go b/plugin/evm/atomic_trie_test.go index b5705f65b9..071cbfd5c8 100644 --- a/plugin/evm/atomic_trie_test.go +++ b/plugin/evm/atomic_trie_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" 
"github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/leveldb" "github.com/ava-labs/avalanchego/database/memdb" @@ -19,24 +19,25 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/libevm/common" ) const testCommitInterval = 100 -func (tx *Tx) mustAtomicOps() map[ids.ID]*atomic.Requests { +func mustAtomicOps(tx *atomic.Tx) map[ids.ID]*avalancheatomic.Requests { id, reqs, err := tx.AtomicOps() if err != nil { panic(err) } - return map[ids.ID]*atomic.Requests{id: reqs} + return map[ids.ID]*avalancheatomic.Requests{id: reqs} } // indexAtomicTxs updates [tr] with entries in [atomicOps] at height by creating // a new snapshot, calculating a new root, and calling InsertTrie followed // by AcceptTrie on the new root. -func indexAtomicTxs(tr AtomicTrie, height uint64, atomicOps map[ids.ID]*atomic.Requests) error { +func indexAtomicTxs(tr AtomicTrie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error { snapshot, err := tr.OpenTrie(tr.LastAcceptedRoot()) if err != nil { return err @@ -138,12 +139,12 @@ func TestAtomicTrieInitialize(t *testing.T) { } { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight) if err != nil { t.Fatal(err) } - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, test.lastAcceptedHeight+1, test.numTxsPerBlock, nil, operationsMap) // Construct the atomic trie for the first time @@ -187,7 +188,7 @@ func TestAtomicTrieInitialize(t *testing.T) { // during the initialization phase will cause an invalid root when indexing continues. nextCommitHeight := nearestCommitHeight(test.lastAcceptedHeight+test.commitInterval, test.commitInterval) for i := test.lastAcceptedHeight + 1; i <= nextCommitHeight; i++ { - txs := newTestTxs(test.numTxsPerBlock(i)) + txs := atomic.NewTestTxs(test.numTxsPerBlock(i)) if err := repo.Write(i, txs); err != nil { t.Fatal(err) } @@ -227,10 +228,10 @@ func TestAtomicTrieInitialize(t *testing.T) { func TestIndexerInitializesOnlyOnce(t *testing.T) { lastAcceptedHeight := uint64(25) db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) assert.NoError(t, err) - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight+1, constTxsPerHeight(2), nil, operationsMap) // Initialize atomic repository @@ -246,7 +247,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { // re-initialize the atomic trie since initialize is not supposed to run again the height // at the trie should still be the old height with the old commit hash without any changes. // This scenario is not realistic, but is used to test potential double initialization behavior. 
- err = repo.Write(15, []*Tx{testDataExportTx()}) + err = repo.Write(15, []*atomic.Tx{atomic.GenerateTestExportTx()}) assert.NoError(t, err) // Re-initialize the atomic trie @@ -261,7 +262,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { func newTestAtomicTrie(t *testing.T) AtomicTrie { db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, testTxCodec(), 0) + repo, err := NewAtomicTxRepository(db, atomic.TestTxCodec, 0) if err != nil { t.Fatal(err) } @@ -281,7 +282,7 @@ func TestIndexerWriteAndRead(t *testing.T) { // process 305 blocks so that we get three commits (100, 200, 300) for height := uint64(1); height <= testCommitInterval*3+5; /*=305*/ height++ { - atomicRequests := testDataImportTx().mustAtomicOps() + atomicRequests := mustAtomicOps(atomic.GenerateTestImportTx()) err := indexAtomicTxs(atomicTrie, height, atomicRequests) assert.NoError(t, err) if height%testCommitInterval == 0 { @@ -312,11 +313,11 @@ func TestAtomicOpsAreNotTxOrderDependent(t *testing.T) { atomicTrie2 := newTestAtomicTrie(t) for height := uint64(0); height <= testCommitInterval; /*=205*/ height++ { - tx1 := testDataImportTx() - tx2 := testDataImportTx() - atomicRequests1, err := mergeAtomicOps([]*Tx{tx1, tx2}) + tx1 := atomic.GenerateTestImportTx() + tx2 := atomic.GenerateTestImportTx() + atomicRequests1, err := mergeAtomicOps([]*atomic.Tx{tx1, tx2}) assert.NoError(t, err) - atomicRequests2, err := mergeAtomicOps([]*Tx{tx2, tx1}) + atomicRequests2, err := mergeAtomicOps([]*atomic.Tx{tx2, tx1}) assert.NoError(t, err) err = indexAtomicTxs(atomicTrie1, height, atomicRequests1) @@ -338,12 +339,12 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { commitInterval := uint64(10) expectedCommitHeight := uint64(100) db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) if err != nil { t.Fatal(err) } - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight, constTxsPerHeight(numTxsPerBlock), nil, operationsMap) bonusBlocks := map[uint64]ids.ID{ @@ -368,9 +369,9 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { func TestIndexingNilShouldNotImpactTrie(t *testing.T) { // operations to index - ops := make([]map[ids.ID]*atomic.Requests, 0) + ops := make([]map[ids.ID]*avalancheatomic.Requests, 0) for i := 0; i <= testCommitInterval; i++ { - ops = append(ops, testDataImportTx().mustAtomicOps()) + ops = append(ops, mustAtomicOps(atomic.GenerateTestImportTx())) } // without nils @@ -411,19 +412,19 @@ func TestIndexingNilShouldNotImpactTrie(t *testing.T) { } type sharedMemories struct { - thisChain atomic.SharedMemory - peerChain atomic.SharedMemory + thisChain avalancheatomic.SharedMemory + peerChain avalancheatomic.SharedMemory thisChainID ids.ID peerChainID ids.ID } -func (s *sharedMemories) addItemsToBeRemovedToPeerChain(ops map[ids.ID]*atomic.Requests) error { +func (s *sharedMemories) addItemsToBeRemovedToPeerChain(ops map[ids.ID]*avalancheatomic.Requests) error { for _, reqs := range ops { - puts := make(map[ids.ID]*atomic.Requests) - puts[s.thisChainID] = &atomic.Requests{} + puts := make(map[ids.ID]*avalancheatomic.Requests) + puts[s.thisChainID] = &avalancheatomic.Requests{} for _, key := range reqs.RemoveRequests { val := []byte{0x1} - puts[s.thisChainID].PutRequests = append(puts[s.thisChainID].PutRequests, &atomic.Element{Key: key, Value: val}) + 
puts[s.thisChainID].PutRequests = append(puts[s.thisChainID].PutRequests, &avalancheatomic.Element{Key: key, Value: val}) } if err := s.peerChain.Apply(puts); err != nil { return err @@ -432,7 +433,7 @@ func (s *sharedMemories) addItemsToBeRemovedToPeerChain(ops map[ids.ID]*atomic.R return nil } -func (s *sharedMemories) assertOpsApplied(t *testing.T, ops map[ids.ID]*atomic.Requests) { +func (s *sharedMemories) assertOpsApplied(t *testing.T, ops map[ids.ID]*avalancheatomic.Requests) { t.Helper() for _, reqs := range ops { // should be able to get put requests @@ -452,7 +453,7 @@ func (s *sharedMemories) assertOpsApplied(t *testing.T, ops map[ids.ID]*atomic.R } } -func (s *sharedMemories) assertOpsNotApplied(t *testing.T, ops map[ids.ID]*atomic.Requests) { +func (s *sharedMemories) assertOpsNotApplied(t *testing.T, ops map[ids.ID]*avalancheatomic.Requests) { t.Helper() for _, reqs := range ops { // should not be able to get put requests @@ -470,7 +471,7 @@ func (s *sharedMemories) assertOpsNotApplied(t *testing.T, ops map[ids.ID]*atomi } } -func newSharedMemories(atomicMemory *atomic.Memory, thisChainID, peerChainID ids.ID) *sharedMemories { +func newSharedMemories(atomicMemory *avalancheatomic.Memory, thisChainID, peerChainID ids.ID) *sharedMemories { return &sharedMemories{ thisChain: atomicMemory.NewSharedMemory(thisChainID), peerChain: atomicMemory.NewSharedMemory(peerChainID), @@ -510,9 +511,9 @@ func TestApplyToSharedMemory(t *testing.T) { commitInterval: 10, lastAcceptedHeight: 25, setMarker: func(a *atomicBackend) error { - cursor := make([]byte, wrappers.LongLen+len(blockChainID[:])) + cursor := make([]byte, wrappers.LongLen+len(atomic.TestBlockchainID[:])) binary.BigEndian.PutUint64(cursor, 10) - copy(cursor[wrappers.LongLen:], blockChainID[:]) + copy(cursor[wrappers.LongLen:], atomic.TestBlockchainID[:]) return a.metadataDB.Put(appliedSharedMemoryCursorKey, cursor) }, expectOpsApplied: func(height uint64) bool { return height > 10 && height <= 20 }, @@ -526,15 +527,15 @@ func TestApplyToSharedMemory(t *testing.T) { } { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight) assert.NoError(t, err) - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, test.lastAcceptedHeight+1, constTxsPerHeight(2), nil, operationsMap) // Initialize atomic repository - m := atomic.NewMemory(db) - sharedMemories := newSharedMemories(m, testCChainID, blockChainID) + m := avalancheatomic.NewMemory(db) + sharedMemories := newSharedMemories(m, testCChainID, atomic.TestBlockchainID) backend, err := NewAtomicBackend(db, sharedMemories.thisChain, test.bonusBlockHeights, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) assert.NoError(t, err) atomicTrie := backend.AtomicTrie().(*atomicTrie) @@ -592,9 +593,9 @@ func TestApplyToSharedMemory(t *testing.T) { func BenchmarkAtomicTrieInit(b *testing.B) { db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) lastAcceptedHeight := uint64(25000) // add 25000 * 3 = 75000 transactions @@ -627,9 +628,9 @@ func BenchmarkAtomicTrieInit(b *testing.B) { func BenchmarkAtomicTrieIterate(b *testing.B) { db := versiondb.New(memdb.New()) - codec := 
testTxCodec() + codec := atomic.TestTxCodec - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) lastAcceptedHeight := uint64(25_000) // add 25000 * 3 = 75000 transactions @@ -706,7 +707,7 @@ func BenchmarkApplyToSharedMemory(b *testing.B) { func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks uint64) { db := versiondb.New(disk) - codec := testTxCodec() + codec := atomic.TestTxCodec sharedMemory := testSharedMemory() lastAcceptedHeight := blocks @@ -719,7 +720,7 @@ func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u } trie := backend.AtomicTrie() for height := uint64(1); height <= lastAcceptedHeight; height++ { - txs := newTestTxs(constTxsPerHeight(3)(height)) + txs := atomic.NewTestTxs(constTxsPerHeight(3)(height)) ops, err := mergeAtomicOps(txs) assert.NoError(b, err) assert.NoError(b, indexAtomicTxs(trie, height, ops)) diff --git a/plugin/evm/atomic_tx_repository.go b/plugin/evm/atomic_tx_repository.go index 588b9adcc1..08c4c4d5c4 100644 --- a/plugin/evm/atomic_tx_repository.go +++ b/plugin/evm/atomic_tx_repository.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/coreth/plugin/evm/atomic" ) const ( @@ -39,10 +40,10 @@ var ( // atomic transactions type AtomicTxRepository interface { GetIndexHeight() (uint64, error) - GetByTxID(txID ids.ID) (*Tx, uint64, error) - GetByHeight(height uint64) ([]*Tx, error) - Write(height uint64, txs []*Tx) error - WriteBonus(height uint64, txs []*Tx) error + GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) + GetByHeight(height uint64) ([]*atomic.Tx, error) + Write(height uint64, txs []*atomic.Tx) error + WriteBonus(height uint64, txs []*atomic.Tx) error IterateByHeight(start uint64) database.Iterator Codec() codec.Manager @@ -136,7 +137,7 @@ func (a *atomicTxRepository) initializeHeightIndex(lastAcceptedHeight uint64) er // Get the tx iter is pointing to, len(txs) == 1 is expected here. txBytes := iterValue[wrappers.LongLen+wrappers.IntLen:] - tx, err := ExtractAtomicTx(txBytes, a.codec) + tx, err := atomic.ExtractAtomicTx(txBytes, a.codec) if err != nil { return err } @@ -198,10 +199,10 @@ func (a *atomicTxRepository) GetIndexHeight() (uint64, error) { return indexHeight, nil } -// GetByTxID queries [acceptedAtomicTxDB] for the [txID], parses a [*Tx] object +// GetByTxID queries [acceptedAtomicTxDB] for the [txID], parses a [*atomic.Tx] object // if an entry is found, and returns it with the block height the atomic tx it // represents was accepted on, along with an optional error. -func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*Tx, uint64, error) { +func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) { indexedTxBytes, err := a.acceptedAtomicTxDB.Get(txID[:]) if err != nil { return nil, 0, err @@ -215,7 +216,7 @@ func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*Tx, uint64, error) { packer := wrappers.Packer{Bytes: indexedTxBytes} height := packer.UnpackLong() txBytes := packer.UnpackBytes() - tx, err := ExtractAtomicTx(txBytes, a.codec) + tx, err := atomic.ExtractAtomicTx(txBytes, a.codec) if err != nil { return nil, 0, err } @@ -229,40 +230,40 @@ func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*Tx, uint64, error) { // no atomic transactions in the block accepted at [height]. 
// If [height] is greater than the last accepted height, then this will always return // [database.ErrNotFound] -func (a *atomicTxRepository) GetByHeight(height uint64) ([]*Tx, error) { +func (a *atomicTxRepository) GetByHeight(height uint64) ([]*atomic.Tx, error) { heightBytes := make([]byte, wrappers.LongLen) binary.BigEndian.PutUint64(heightBytes, height) return a.getByHeightBytes(heightBytes) } -func (a *atomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*Tx, error) { +func (a *atomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*atomic.Tx, error) { txsBytes, err := a.acceptedAtomicTxByHeightDB.Get(heightBytes) if err != nil { return nil, err } - return ExtractAtomicTxsBatch(txsBytes, a.codec) + return atomic.ExtractAtomicTxsBatch(txsBytes, a.codec) } // Write updates indexes maintained on atomic txs, so they can be queried // by txID or height. This method must be called only once per height, // and [txs] must include all atomic txs for the block accepted at the // corresponding height. -func (a *atomicTxRepository) Write(height uint64, txs []*Tx) error { +func (a *atomicTxRepository) Write(height uint64, txs []*atomic.Tx) error { return a.write(height, txs, false) } // WriteBonus is similar to Write, except the [txID] => [height] is not // overwritten if already exists. -func (a *atomicTxRepository) WriteBonus(height uint64, txs []*Tx) error { +func (a *atomicTxRepository) WriteBonus(height uint64, txs []*atomic.Tx) error { return a.write(height, txs, true) } -func (a *atomicTxRepository) write(height uint64, txs []*Tx, bonus bool) error { +func (a *atomicTxRepository) write(height uint64, txs []*atomic.Tx, bonus bool) error { if len(txs) > 1 { // txs should be stored in order of txID to ensure consistency // with txs initialized from the txID index. - copyTxs := make([]*Tx, len(txs)) + copyTxs := make([]*atomic.Tx, len(txs)) copy(copyTxs, txs) utils.Sort(copyTxs) txs = copyTxs @@ -300,8 +301,8 @@ func (a *atomicTxRepository) write(height uint64, txs []*Tx, bonus bool) error { // indexTxByID writes [tx] into the [acceptedAtomicTxDB] stored as // [height] + [tx bytes] -func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *Tx) error { - txBytes, err := a.codec.Marshal(codecVersion, tx) +func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *atomic.Tx) error { + txBytes, err := a.codec.Marshal(atomic.CodecVersion, tx) if err != nil { return err } @@ -320,8 +321,8 @@ func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *Tx) error { } // indexTxsAtHeight adds [height] -> [txs] to the [acceptedAtomicTxByHeightDB] -func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*Tx) error { - txsBytes, err := a.codec.Marshal(codecVersion, txs) +func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*atomic.Tx) error { + txsBytes, err := a.codec.Marshal(atomic.CodecVersion, txs) if err != nil { return err } @@ -335,7 +336,7 @@ func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*Tx) err // [tx] to the slice of transactions stored there. // This function is used while initializing the atomic repository to re-index the atomic transactions // by txID into the height -> txs index. 
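
`indexTxByID` above stores each entry as `[height] + [tx bytes]`, and `GetByTxID` unpacks it with the same `wrappers.Packer` layout. A round-trip sketch of that packing (the placeholder bytes stand in for a codec-serialized tx):

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/utils/wrappers"
)

func main() {
	// Pack [height] + [tx bytes] the way indexTxByID stores entries.
	p := wrappers.Packer{Bytes: make([]byte, 0, 128), MaxSize: 128}
	p.PackLong(42)
	p.PackBytes([]byte{0xde, 0xad})
	if p.Err != nil {
		panic(p.Err)
	}

	// Unpack the way GetByTxID reads entries back.
	u := wrappers.Packer{Bytes: p.Bytes}
	height := u.UnpackLong()
	txBytes := u.UnpackBytes()
	fmt.Println(height, txBytes, u.Err) // 42 [222 173] <nil>
}
```
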
-func (a *atomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *Tx) error { +func (a *atomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *atomic.Tx) error { txs, err := a.getByHeightBytes(heightBytes) if err != nil && err != database.ErrNotFound { return err diff --git a/plugin/evm/atomic_tx_repository_test.go b/plugin/evm/atomic_tx_repository_test.go index 272ce50141..9c5616004b 100644 --- a/plugin/evm/atomic_tx_repository_test.go +++ b/plugin/evm/atomic_tx_repository_test.go @@ -7,11 +7,12 @@ import ( "encoding/binary" "testing" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/libevm/common" "github.com/ava-labs/avalanchego/codec" @@ -27,13 +28,13 @@ import ( // addTxs writes [txsPerHeight] txs for heights ranging in [fromHeight, toHeight) directly to [acceptedAtomicTxDB], // storing the resulting transactions in [txMap] if non-nil and the resulting atomic operations in [operationsMap] // if non-nil. -func addTxs(t testing.TB, codec codec.Manager, acceptedAtomicTxDB database.Database, fromHeight uint64, toHeight uint64, txsPerHeight int, txMap map[uint64][]*Tx, operationsMap map[uint64]map[ids.ID]*atomic.Requests) { +func addTxs(t testing.TB, codec codec.Manager, acceptedAtomicTxDB database.Database, fromHeight uint64, toHeight uint64, txsPerHeight int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { for height := fromHeight; height < toHeight; height++ { - txs := make([]*Tx, 0, txsPerHeight) + txs := make([]*atomic.Tx, 0, txsPerHeight) for i := 0; i < txsPerHeight; i++ { - tx := newTestTx() + tx := atomic.NewTestTx() txs = append(txs, tx) - txBytes, err := codec.Marshal(codecVersion, tx) + txBytes, err := codec.Marshal(atomic.CodecVersion, tx) assert.NoError(t, err) // Write atomic transactions to the [acceptedAtomicTxDB] @@ -70,10 +71,10 @@ func constTxsPerHeight(txCount int) func(uint64) int { // storing the resulting transactions in [txMap] if non-nil and the resulting atomic operations in [operationsMap] // if non-nil. 
func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight uint64,
-	txsPerHeight func(height uint64) int, txMap map[uint64][]*Tx, operationsMap map[uint64]map[ids.ID]*atomic.Requests,
+	txsPerHeight func(height uint64) int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests,
) {
	for height := fromHeight; height < toHeight; height++ {
-		txs := newTestTxs(txsPerHeight(height))
+		txs := atomic.NewTestTxs(txsPerHeight(height))
		if err := repo.Write(height, txs); err != nil {
			t.Fatal(err)
		}
@@ -95,7 +96,7 @@ func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight
}

// verifyTxs asserts [repo] can find all txs in [txMap] by height and txID
-func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*Tx) {
+func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*atomic.Tx) {
	// We should be able to fetch indexed txs by height:
	for height, expectedTxs := range txMap {
		txs, err := repo.GetByHeight(height)
@@ -115,7 +116,7 @@ func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*Tx) {

// verifyOperations creates an iterator over the atomicTrie at [rootHash] and verifies that all of the operations in the trie in the interval [from, to] are identical to
// the atomic operations contained in [operationsMap] on the same interval.
-func verifyOperations(t testing.TB, atomicTrie AtomicTrie, codec codec.Manager, rootHash common.Hash, from, to uint64, operationsMap map[uint64]map[ids.ID]*atomic.Requests) {
+func verifyOperations(t testing.TB, atomicTrie AtomicTrie, codec codec.Manager, rootHash common.Hash, from, to uint64, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) {
	t.Helper()

	// Start the iterator at [from]
@@ -182,12 +183,12 @@ func verifyOperations(t testing.TB, atomicTrie AtomicTrie, codec codec.Manager,

func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) {
	db := versiondb.New(memdb.New())
-	codec := testTxCodec()
+	codec := atomic.TestTxCodec
	repo, err := NewAtomicTxRepository(db, codec, 0)
	if err != nil {
		t.Fatal(err)
	}
-	txMap := make(map[uint64][]*Tx)
+	txMap := make(map[uint64][]*atomic.Tx)

	writeTxs(t, repo, 1, 100, constTxsPerHeight(1), txMap, nil)
	verifyTxs(t, repo, txMap)
@@ -195,12 +196,12 @@ func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) {

func TestAtomicRepositoryReadWriteMultipleTxs(t *testing.T) {
	db := versiondb.New(memdb.New())
-	codec := testTxCodec()
+	codec := atomic.TestTxCodec
	repo, err := NewAtomicTxRepository(db, codec, 0)
	if err != nil {
		t.Fatal(err)
	}
-	txMap := make(map[uint64][]*Tx)
+	txMap := make(map[uint64][]*atomic.Tx)

	writeTxs(t, repo, 1, 100, constTxsPerHeight(10), txMap, nil)
	verifyTxs(t, repo, txMap)
@@ -208,10 +209,10 @@ func TestAtomicRepositoryReadWriteMultipleTxs(t *testing.T) {

func TestAtomicRepositoryPreAP5Migration(t *testing.T) {
	db := versiondb.New(memdb.New())
-	codec := testTxCodec()
+	codec := atomic.TestTxCodec

	acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db)
-	txMap := make(map[uint64][]*Tx)
+	txMap := make(map[uint64][]*atomic.Tx)
	addTxs(t, codec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil)
	if err := db.Commit(); err != nil {
		t.Fatal(err)
	}
@@ -233,10 +234,10 @@ func TestAtomicRepositoryPreAP5Migration(t *testing.T) {

func TestAtomicRepositoryPostAP5Migration(t *testing.T) {
	db := versiondb.New(memdb.New())
-	codec := testTxCodec()
+	codec := atomic.TestTxCodec

	acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db)
-	txMap := make(map[uint64][]*Tx)
+	txMap := 
make(map[uint64][]*atomic.Tx) addTxs(t, codec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil) addTxs(t, codec, acceptedAtomicTxDB, 100, 200, 10, txMap, nil) if err := db.Commit(); err != nil { @@ -258,10 +259,10 @@ func TestAtomicRepositoryPostAP5Migration(t *testing.T) { func benchAtomicRepositoryIndex10_000(b *testing.B, maxHeight uint64, txsPerHeight int) { db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) - txMap := make(map[uint64][]*Tx) + txMap := make(map[uint64][]*atomic.Tx) addTxs(b, codec, acceptedAtomicTxDB, 0, maxHeight, txsPerHeight, txMap, nil) if err := db.Commit(); err != nil { diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 9003b7b7f3..bcc7e7e995 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/params/extras" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/precompile/precompileconfig" "github.com/ava-labs/coreth/predicate" @@ -32,9 +33,7 @@ var ( _ block.WithVerifyContext = (*Block)(nil) ) -var ( - errMissingUTXOs = errors.New("missing UTXOs") -) +var errMissingUTXOs = errors.New("missing UTXOs") // readMainnetBonusBlocks returns maps of bonus block numbers to block IDs. // Note bonus blocks are indexed in the atomic trie. @@ -115,13 +114,13 @@ type Block struct { id ids.ID ethBlock *types.Block vm *VM - atomicTxs []*Tx + atomicTxs []*atomic.Tx } // newBlock returns a new Block wrapping the ethBlock type and implementing the snowman.Block interface func (vm *VM) newBlock(ethBlock *types.Block) (*Block, error) { isApricotPhase5 := vm.chainConfigExtra().IsApricotPhase5(ethBlock.Time()) - atomicTxs, err := ExtractAtomicTxs(ethBlock.ExtData(), isApricotPhase5, vm.codec) + atomicTxs, err := atomic.ExtractAtomicTxs(ethBlock.ExtData(), isApricotPhase5, atomic.Codec) if err != nil { return nil, err } @@ -137,24 +136,23 @@ func (vm *VM) newBlock(ethBlock *types.Block) (*Block, error) { // ID implements the snowman.Block interface func (b *Block) ID() ids.ID { return b.id } +func (b *Block) AtomicTxs() []*atomic.Tx { return b.atomicTxs } + // Accept implements the snowman.Block interface func (b *Block) Accept(context.Context) error { vm := b.vm // Although returning an error from Accept is considered fatal, it is good // practice to cleanup the batch we were modifying in the case of an error. - defer vm.db.Abort() + defer vm.versiondb.Abort() log.Debug(fmt.Sprintf("Accepting block %s (%s) at height %d", b.ID().Hex(), b.ID(), b.Height())) // Call Accept for relevant precompile logs. Note we do this prior to // calling Accept on the blockChain so any side effects (eg warp signatures) - // take place before the accepted log is emitted to subscribers. Use of the - // sharedMemoryWriter ensures shared memory requests generated by - // precompiles are committed atomically with the vm's lastAcceptedKey. + // take place before the accepted log is emitted to subscribers. 
rules := b.vm.chainConfig.Rules(b.ethBlock.Number(), params.IsMergeTODO, b.ethBlock.Timestamp()) - sharedMemoryWriter := NewSharedMemoryWriter() - if err := b.handlePrecompileAccept(*params.GetRulesExtra(rules), sharedMemoryWriter); err != nil { + if err := b.handlePrecompileAccept(*params.GetRulesExtra(rules)); err != nil { return err } if err := vm.blockChain.Accept(b.ethBlock); err != nil { @@ -178,23 +176,20 @@ func (b *Block) Accept(context.Context) error { return err } // Get pending operations on the vm's versionDB so we can apply them atomically - // with the shared memory requests. - vdbBatch, err := b.vm.db.CommitBatch() + // with the shared memory changes. + vdbBatch, err := b.vm.versiondb.CommitBatch() if err != nil { return fmt.Errorf("could not create commit batch processing block[%s]: %w", b.ID(), err) } - // Apply any shared memory requests that accumulated from processing the logs - // of the accepted block (generated by precompiles) atomically with other pending - // changes to the vm's versionDB. - return atomicState.Accept(vdbBatch, sharedMemoryWriter.requests) + // Apply any shared memory changes atomically with other pending changes to + // the vm's versionDB. + return atomicState.Accept(vdbBatch, nil) } // handlePrecompileAccept calls Accept on any logs generated with an active precompile address that implements // contract.Accepter -// This function assumes that the Accept function will ONLY operate on state maintained in the VM's versiondb. -// This ensures that any DB operations are performed atomically with marking the block as accepted. -func (b *Block) handlePrecompileAccept(rules extras.Rules, sharedMemoryWriter *sharedMemoryWriter) error { +func (b *Block) handlePrecompileAccept(rules extras.Rules) error { // Short circuit early if there are no precompile accepters to execute if len(rules.AccepterPrecompiles) == 0 { return nil @@ -208,9 +203,8 @@ func (b *Block) handlePrecompileAccept(rules extras.Rules, sharedMemoryWriter *s return fmt.Errorf("failed to fetch receipts for accepted block with non-empty root hash (%s) (Block: %s, Height: %d)", b.ethBlock.ReceiptHash(), b.ethBlock.Hash(), b.ethBlock.NumberU64()) } acceptCtx := &precompileconfig.AcceptContext{ - SnowCtx: b.vm.ctx, - SharedMemory: sharedMemoryWriter, - Warp: b.vm.warpBackend, + SnowCtx: b.vm.ctx, + Warp: b.vm.warpBackend, } for _, receipt := range receipts { for logIdx, log := range receipt.Logs { @@ -232,8 +226,9 @@ func (b *Block) handlePrecompileAccept(rules extras.Rules, sharedMemoryWriter *s func (b *Block) Reject(context.Context) error { log.Debug(fmt.Sprintf("Rejecting block %s (%s) at height %d", b.ID().Hex(), b.ID(), b.Height())) for _, tx := range b.atomicTxs { + // Re-issue the transaction in the mempool, continue even if it fails b.vm.mempool.RemoveTx(tx) - if err := b.vm.mempool.AddTx(tx); err != nil { + if err := b.vm.mempool.AddRemoteTx(tx); err != nil { log.Debug("Failed to re-issue transaction in rejected block", "txID", tx.ID(), "err", err) } } @@ -336,7 +331,7 @@ func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writ // If the chain is still bootstrapping, we can assume that all blocks we are verifying have // been accepted by the network (so the predicate was validated by the network when the // block was originally verified). 
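
The `bootstrapped` field read below was switched from a plain bool to avalanchego's `utils.Atomic[bool]`, so the engine can flip it while verification goroutines read it without a data race. A minimal sketch of the type's Get/Set surface:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/utils"
)

func main() {
	var bootstrapped utils.Atomic[bool]
	fmt.Println(bootstrapped.Get()) // false: zero value until bootstrapping completes
	bootstrapped.Set(true)
	fmt.Println(bootstrapped.Get()) // true; safe to call from concurrent goroutines
}
```
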
- if b.vm.bootstrapped { + if b.vm.bootstrapped.Get() { if err := b.verifyPredicates(predicateContext); err != nil { return fmt.Errorf("failed to verify predicates: %w", err) } @@ -408,7 +403,7 @@ func (b *Block) verifyUTXOsPresent() error { return nil } - if !b.vm.bootstrapped { + if !b.vm.bootstrapped.Get() { return nil } diff --git a/plugin/evm/block_builder.go b/plugin/evm/block_builder.go index ff8565b782..c621c7ff1b 100644 --- a/plugin/evm/block_builder.go +++ b/plugin/evm/block_builder.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/holiman/uint256" "github.com/ava-labs/avalanchego/snow" @@ -29,7 +30,7 @@ type blockBuilder struct { chainConfig *params.ChainConfig txPool *txpool.TxPool - mempool *Mempool + mempool *atomic.Mempool shutdownChan <-chan struct{} shutdownWg *sync.WaitGroup diff --git a/plugin/evm/client.go b/plugin/evm/client/client.go similarity index 79% rename from plugin/evm/client.go rename to plugin/evm/client/client.go index f64a0686b6..cb7b9194db 100644 --- a/plugin/evm/client.go +++ b/plugin/evm/client/client.go @@ -1,13 +1,15 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package client import ( "context" + "errors" "fmt" "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/common/hexutil" "golang.org/x/exp/slog" "github.com/ava-labs/avalanchego/api" @@ -17,15 +19,19 @@ import ( "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/rpc" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/config" ) // Interface compliance var _ Client = (*client)(nil) +var errInvalidAddr = errors.New("invalid hex address") + // Client interface for interacting with EVM [chain] type Client interface { IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Option) (ids.ID, error) - GetAtomicTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (Status, error) + GetAtomicTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (atomic.Status, error) GetAtomicTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) GetAtomicUTXOs(ctx context.Context, addrs []ids.ShortID, sourceChain string, limit uint32, startAddress ids.ShortID, startUTXOID ids.ID, options ...rpc.Option) ([][]byte, ids.ShortID, ids.ID, error) ExportKey(ctx context.Context, userPass api.UserPass, addr common.Address, options ...rpc.Option) (*secp256k1.PrivateKey, string, error) @@ -38,7 +44,7 @@ type Client interface { MemoryProfile(ctx context.Context, options ...rpc.Option) error LockProfile(ctx context.Context, options ...rpc.Option) error SetLogLevel(ctx context.Context, level slog.Level, options ...rpc.Option) error - GetVMConfig(ctx context.Context, options ...rpc.Option) (*Config, error) + GetVMConfig(ctx context.Context, options ...rpc.Option) (*config.Config, error) } // Client implementation for interacting with EVM [chain] @@ -74,8 +80,14 @@ func (c *client) IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Opt return res.TxID, err } +// GetAtomicTxStatusReply defines the GetAtomicTxStatus replies returned from the API +type GetAtomicTxStatusReply struct { + Status atomic.Status `json:"status"` + BlockHeight *json.Uint64 `json:"blockHeight,omitempty"` +} + // GetAtomicTxStatus returns the status 
of [txID] -func (c *client) GetAtomicTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (Status, error) { +func (c *client) GetAtomicTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (atomic.Status, error) { res := &GetAtomicTxStatusReply{} err := c.requester.SendRequest(ctx, "avax.getAtomicTxStatus", &api.JSONTxID{ TxID: txID, @@ -131,6 +143,19 @@ func (c *client) GetAtomicUTXOs(ctx context.Context, addrs []ids.ShortID, source return utxos, endAddr, endUTXOID, err } +// ExportKeyArgs are arguments for ExportKey +type ExportKeyArgs struct { + api.UserPass + Address string `json:"address"` +} + +// ExportKeyReply is the response for ExportKey +type ExportKeyReply struct { + // The decrypted PrivateKey for the Address provided in the arguments + PrivateKey *secp256k1.PrivateKey `json:"privateKey"` + PrivateKeyHex string `json:"privateKeyHex"` +} + // ExportKey returns the private key corresponding to [addr] controlled by [user] // in both Avalanche standard format and hex format func (c *client) ExportKey(ctx context.Context, user api.UserPass, addr common.Address, options ...rpc.Option) (*secp256k1.PrivateKey, string, error) { @@ -142,6 +167,12 @@ func (c *client) ExportKey(ctx context.Context, user api.UserPass, addr common.A return res.PrivateKey, res.PrivateKeyHex, err } +// ImportKeyArgs are arguments for ImportKey +type ImportKeyArgs struct { + api.UserPass + PrivateKey *secp256k1.PrivateKey `json:"privateKey"` +} + // ImportKey imports [privateKey] to [user] func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *secp256k1.PrivateKey, options ...rpc.Option) (common.Address, error) { res := &api.JSONAddress{} @@ -155,6 +186,20 @@ func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *s return ParseEthAddress(res.Address) } +// ImportArgs are arguments for passing into Import requests +type ImportArgs struct { + api.UserPass + + // Fee that should be used when creating the tx + BaseFee *hexutil.Big `json:"baseFee"` + + // Chain the funds are coming from + SourceChain string `json:"sourceChain"` + + // The address that will receive the imported funds + To common.Address `json:"to"` +} + // Import sends an import transaction to import funds from [sourceChain] and // returns the ID of the newly created transaction func (c *client) Import(ctx context.Context, user api.UserPass, to common.Address, sourceChain string, options ...rpc.Option) (ids.ID, error) { @@ -180,6 +225,32 @@ func (c *client) ExportAVAX( return c.Export(ctx, user, amount, to, targetChain, "AVAX", options...) } +// ExportAVAXArgs are the arguments to ExportAVAX +type ExportAVAXArgs struct { + api.UserPass + + // Fee that should be used when creating the tx + BaseFee *hexutil.Big `json:"baseFee"` + + // Amount of asset to send + Amount json.Uint64 `json:"amount"` + + // Chain the funds are going to. Optional. Used if To address does not + // include the chainID. + TargetChain string `json:"targetChain"` + + // ID of the address that will receive the AVAX. This address may include + // the chainID, which is used to determine what the destination chain is. + To string `json:"to"` +} + +// ExportArgs are the arguments to Export +type ExportArgs struct { + ExportAVAXArgs + // AssetID of the tokens + AssetID string `json:"assetID"` +} + // Export sends an asset from this chain to the P/C-Chain. // After this tx is accepted, the AVAX must be imported to the P/C-chain with an importTx. 
// Returns the ID of the newly created atomic transaction @@ -221,6 +292,10 @@ func (c *client) LockProfile(ctx context.Context, options ...rpc.Option) error { return c.adminRequester.SendRequest(ctx, "admin.lockProfile", struct{}{}, &api.EmptyReply{}, options...) } +type SetLogLevelArgs struct { + Level string `json:"level"` +} + // SetLogLevel dynamically sets the log level for the C Chain func (c *client) SetLogLevel(ctx context.Context, level slog.Level, options ...rpc.Option) error { return c.adminRequester.SendRequest(ctx, "admin.setLogLevel", &SetLogLevelArgs{ @@ -228,8 +303,12 @@ func (c *client) SetLogLevel(ctx context.Context, level slog.Level, options ...r }, &api.EmptyReply{}, options...) } +type ConfigReply struct { + Config *config.Config `json:"config"` +} + // GetVMConfig returns the current config of the VM -func (c *client) GetVMConfig(ctx context.Context, options ...rpc.Option) (*Config, error) { +func (c *client) GetVMConfig(ctx context.Context, options ...rpc.Option) (*config.Config, error) { res := &ConfigReply{} err := c.adminRequester.SendRequest(ctx, "admin.getVMConfig", struct{}{}, res, options...) return res.Config, err diff --git a/plugin/evm/client_interface_test.go b/plugin/evm/client/client_interface_test.go similarity index 97% rename from plugin/evm/client_interface_test.go rename to plugin/evm/client/client_interface_test.go index d88c4926b4..332bb8bcf4 100644 --- a/plugin/evm/client_interface_test.go +++ b/plugin/evm/client/client_interface_test.go @@ -1,4 +1,4 @@ -package evm +package client import ( "reflect" diff --git a/plugin/evm/client/utils.go b/plugin/evm/client/utils.go new file mode 100644 index 0000000000..7d1e35e4ae --- /dev/null +++ b/plugin/evm/client/utils.go @@ -0,0 +1,13 @@ +// (c) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package client + +import "github.com/ava-labs/libevm/common" + +func ParseEthAddress(addrStr string) (common.Address, error) { + if !common.IsHexAddress(addrStr) { + return common.Address{}, errInvalidAddr + } + return common.HexToAddress(addrStr), nil +} diff --git a/plugin/evm/config.go b/plugin/evm/config.go index 02034ffc8d..5ad18aa2f2 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -1,348 +1,21 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// (c) 2025, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package evm import ( - "encoding/json" - "fmt" - "time" - "github.com/ava-labs/coreth/core/txpool/legacypool" - "github.com/ava-labs/coreth/eth" - "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/common/hexutil" - "github.com/spf13/cast" -) - -const ( - defaultAcceptorQueueLimit = 64 // Provides 2 minutes of buffer (2s block target) for a commit delay - defaultPruningEnabled = true - defaultCommitInterval = 4096 - defaultTrieCleanCache = 512 - defaultTrieDirtyCache = 512 - defaultTrieDirtyCommitTarget = 20 - defaultTriePrefetcherParallelism = 16 - defaultSnapshotCache = 256 - defaultSyncableCommitInterval = defaultCommitInterval * 4 - defaultSnapshotWait = false - defaultRpcGasCap = 50_000_000 // Default to 50M Gas Limit - defaultRpcTxFeeCap = 100 // 100 AVAX - defaultMetricsExpensiveEnabled = true - defaultApiMaxDuration = 0 // Default to no maximum API call duration - defaultWsCpuRefillRate = 0 // Default to no maximum WS CPU usage - defaultWsCpuMaxStored = 0 // Default to no maximum WS CPU usage - defaultMaxBlocksPerRequest = 0 // Default to no maximum on the number of blocks per getLogs request - defaultContinuousProfilerFrequency = 15 * time.Minute - defaultContinuousProfilerMaxFiles = 5 - defaultPushGossipPercentStake = .9 - defaultPushGossipNumValidators = 100 - defaultPushGossipNumPeers = 0 - defaultPushRegossipNumValidators = 10 - defaultPushRegossipNumPeers = 0 - defaultPushGossipFrequency = 100 * time.Millisecond - defaultPullGossipFrequency = 1 * time.Second - defaultTxRegossipFrequency = 30 * time.Second - defaultOfflinePruningBloomFilterSize uint64 = 512 // Default size (MB) for the offline pruner to use - defaultLogLevel = "info" - defaultLogJSONFormat = false - defaultMaxOutboundActiveRequests = 16 - defaultPopulateMissingTriesParallelism = 1024 - defaultStateSyncServerTrieCache = 64 // MB - defaultAcceptedCacheSize = 32 // blocks - - // defaultStateSyncMinBlocks is the minimum number of blocks the blockchain - // should be ahead of local last accepted to perform state sync. - // This constant is chosen so normal bootstrapping is preferred when it would - // be faster than state sync. - // time assumptions: - // - normal bootstrap processing time: ~14 blocks / second - // - state sync time: ~6 hrs. - defaultStateSyncMinBlocks = 300_000 - defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request + "github.com/ava-labs/coreth/plugin/evm/config" ) -var ( - defaultEnabledAPIs = []string{ - "eth", - "eth-filter", - "net", - "web3", - "internal-eth", - "internal-blockchain", - "internal-transaction", - } - defaultAllowUnprotectedTxHashes = []common.Hash{ - common.HexToHash("0xfefb2da535e927b85fe68eb81cb2e4a5827c905f78381a01ef2322aa9b0aee8e"), // EIP-1820: https://eips.ethereum.org/EIPS/eip-1820 - } -) - -type Duration struct { - time.Duration -} - -// Config ... 
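
With this rewrite the VM's configuration lives in the new `plugin/evm/config` package imported above, and this file shrinks to a shim. A usage sketch, assuming `Config` and its `SetDefaults` method (visible in the removed code below) moved over unchanged:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/coreth/plugin/evm/config"
)

func main() {
	// Assumes config.Config keeps the SetDefaults method shown in the removed
	// code below, with the same default constants.
	var c config.Config
	c.SetDefaults()
	fmt.Println(c.CommitInterval) // expected: 4096 (the former defaultCommitInterval)
}
```
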
-type Config struct { - // Coreth APIs - SnowmanAPIEnabled bool `json:"snowman-api-enabled"` - AdminAPIEnabled bool `json:"admin-api-enabled"` - AdminAPIDir string `json:"admin-api-dir"` - CorethAdminAPIEnabled bool `json:"coreth-admin-api-enabled"` // Deprecated: use AdminAPIEnabled instead - CorethAdminAPIDir string `json:"coreth-admin-api-dir"` // Deprecated: use AdminAPIDir instead - WarpAPIEnabled bool `json:"warp-api-enabled"` - - // EnabledEthAPIs is a list of Ethereum services that should be enabled - // If none is specified, then we use the default list [defaultEnabledAPIs] - EnabledEthAPIs []string `json:"eth-apis"` - - // Continuous Profiler - ContinuousProfilerDir string `json:"continuous-profiler-dir"` // If set to non-empty string creates a continuous profiler - ContinuousProfilerFrequency Duration `json:"continuous-profiler-frequency"` // Frequency to run continuous profiler if enabled - ContinuousProfilerMaxFiles int `json:"continuous-profiler-max-files"` // Maximum number of files to maintain - - // API Gas/Price Caps - RPCGasCap uint64 `json:"rpc-gas-cap"` - RPCTxFeeCap float64 `json:"rpc-tx-fee-cap"` - - // Cache settings - TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB) - TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB) - TrieDirtyCommitTarget int `json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB) - TriePrefetcherParallelism int `json:"trie-prefetcher-parallelism"` // Max concurrent disk reads trie prefetcher should perform at once - SnapshotCache int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB) - - // Eth Settings - Preimages bool `json:"preimages-enabled"` - SnapshotWait bool `json:"snapshot-wait"` - SnapshotVerify bool `json:"snapshot-verification-enabled"` - - // Pruning Settings - Pruning bool `json:"pruning-enabled"` // If enabled, trie roots are only persisted every 4096 blocks - AcceptorQueueLimit int `json:"accepted-queue-limit"` // Maximum blocks to queue before blocking during acceptance - CommitInterval uint64 `json:"commit-interval"` // Specifies the commit interval at which to persist EVM and atomic tries. - AllowMissingTries bool `json:"allow-missing-tries"` // If enabled, warnings preventing an incomplete trie index are suppressed - PopulateMissingTries *uint64 `json:"populate-missing-tries,omitempty"` // Sets the starting point for re-populating missing tries. Disables re-generation if nil. - PopulateMissingTriesParallelism int `json:"populate-missing-tries-parallelism"` // Number of concurrent readers to use when re-populating missing tries on startup. 
- PruneWarpDB bool `json:"prune-warp-db-enabled"` // Determines if the warpDB should be cleared on startup - - // Metric Settings - MetricsExpensiveEnabled bool `json:"metrics-expensive-enabled"` // Debug-level metrics that might impact runtime performance - - // API Settings - LocalTxsEnabled bool `json:"local-txs-enabled"` - - TxPoolPriceLimit uint64 `json:"tx-pool-price-limit"` - TxPoolPriceBump uint64 `json:"tx-pool-price-bump"` - TxPoolAccountSlots uint64 `json:"tx-pool-account-slots"` - TxPoolGlobalSlots uint64 `json:"tx-pool-global-slots"` - TxPoolAccountQueue uint64 `json:"tx-pool-account-queue"` - TxPoolGlobalQueue uint64 `json:"tx-pool-global-queue"` - TxPoolLifetime Duration `json:"tx-pool-lifetime"` - - APIMaxDuration Duration `json:"api-max-duration"` - WSCPURefillRate Duration `json:"ws-cpu-refill-rate"` - WSCPUMaxStored Duration `json:"ws-cpu-max-stored"` - MaxBlocksPerRequest int64 `json:"api-max-blocks-per-request"` - AllowUnfinalizedQueries bool `json:"allow-unfinalized-queries"` - AllowUnprotectedTxs bool `json:"allow-unprotected-txs"` - AllowUnprotectedTxHashes []common.Hash `json:"allow-unprotected-tx-hashes"` - - // Keystore Settings - KeystoreDirectory string `json:"keystore-directory"` // both absolute and relative supported - KeystoreExternalSigner string `json:"keystore-external-signer"` - KeystoreInsecureUnlockAllowed bool `json:"keystore-insecure-unlock-allowed"` - - // Gossip Settings - PushGossipPercentStake float64 `json:"push-gossip-percent-stake"` - PushGossipNumValidators int `json:"push-gossip-num-validators"` - PushGossipNumPeers int `json:"push-gossip-num-peers"` - PushRegossipNumValidators int `json:"push-regossip-num-validators"` - PushRegossipNumPeers int `json:"push-regossip-num-peers"` - PushGossipFrequency Duration `json:"push-gossip-frequency"` - PullGossipFrequency Duration `json:"pull-gossip-frequency"` - RegossipFrequency Duration `json:"regossip-frequency"` - TxRegossipFrequency Duration `json:"tx-regossip-frequency"` // Deprecated: use RegossipFrequency instead - - // Log - LogLevel string `json:"log-level"` - LogJSONFormat bool `json:"log-json-format"` - - // Offline Pruning Settings - OfflinePruning bool `json:"offline-pruning-enabled"` - OfflinePruningBloomFilterSize uint64 `json:"offline-pruning-bloom-filter-size"` - OfflinePruningDataDirectory string `json:"offline-pruning-data-directory"` - - // VM2VM network - MaxOutboundActiveRequests int64 `json:"max-outbound-active-requests"` - - // Sync settings - StateSyncEnabled *bool `json:"state-sync-enabled"` // Pointer distinguishes false (no state sync) and not set (state sync only at genesis). - StateSyncSkipResume bool `json:"state-sync-skip-resume"` // Forces state sync to use the highest available summary block - StateSyncServerTrieCache int `json:"state-sync-server-trie-cache"` - StateSyncIDs string `json:"state-sync-ids"` - StateSyncCommitInterval uint64 `json:"state-sync-commit-interval"` - StateSyncMinBlocks uint64 `json:"state-sync-min-blocks"` - StateSyncRequestSize uint16 `json:"state-sync-request-size"` - - // Database Settings - InspectDatabase bool `json:"inspect-database"` // Inspects the database on startup if enabled. - - // SkipUpgradeCheck disables checking that upgrades must take place before the last - // accepted block. Skipping this check is useful when a node operator does not update - // their node before the network upgrade and their node accepts blocks that have - // identical state with the pre-upgrade ruleset. 
- SkipUpgradeCheck bool `json:"skip-upgrade-check"` - - // AcceptedCacheSize is the depth to keep in the accepted headers cache and the - // accepted logs cache at the accepted tip. - // - // This is particularly useful for improving the performance of eth_getLogs - // on RPC nodes. - AcceptedCacheSize int `json:"accepted-cache-size"` - - // TransactionHistory is the maximum number of blocks from head whose tx indices - // are reserved: - // * 0: means no limit - // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes - TransactionHistory uint64 `json:"transaction-history"` - // Deprecated, use 'TransactionHistory' instead. - TxLookupLimit uint64 `json:"tx-lookup-limit"` - - // SkipTxIndexing skips indexing transactions. - // This is useful for validators that don't need to index transactions. - // TxLookupLimit can be still used to control unindexing old transactions. - SkipTxIndexing bool `json:"skip-tx-indexing"` - - // WarpOffChainMessages encodes off-chain messages (unrelated to any on-chain event ie. block or AddressedCall) - // that the node should be willing to sign. - // Note: only supports AddressedCall payloads as defined here: - // https://github.com/ava-labs/avalanchego/tree/7623ffd4be915a5185c9ed5e11fa9be15a6e1f00/vms/platformvm/warp/payload#addressedcall - WarpOffChainMessages []hexutil.Bytes `json:"warp-off-chain-messages"` - - // RPC settings - HttpBodyLimit uint64 `json:"http-body-limit"` -} - -// EthAPIs returns an array of strings representing the Eth APIs that should be enabled -func (c Config) EthAPIs() []string { - return c.EnabledEthAPIs -} - -func (c Config) EthBackendSettings() eth.Settings { - return eth.Settings{MaxBlocksPerRequest: c.MaxBlocksPerRequest} -} - -func (c *Config) SetDefaults() { - c.EnabledEthAPIs = defaultEnabledAPIs - c.RPCGasCap = defaultRpcGasCap - c.RPCTxFeeCap = defaultRpcTxFeeCap - c.MetricsExpensiveEnabled = defaultMetricsExpensiveEnabled - - c.TxPoolPriceLimit = legacypool.DefaultConfig.PriceLimit - c.TxPoolPriceBump = legacypool.DefaultConfig.PriceBump - c.TxPoolAccountSlots = legacypool.DefaultConfig.AccountSlots - c.TxPoolGlobalSlots = legacypool.DefaultConfig.GlobalSlots - c.TxPoolAccountQueue = legacypool.DefaultConfig.AccountQueue - c.TxPoolGlobalQueue = legacypool.DefaultConfig.GlobalQueue - c.TxPoolLifetime.Duration = legacypool.DefaultConfig.Lifetime - - c.APIMaxDuration.Duration = defaultApiMaxDuration - c.WSCPURefillRate.Duration = defaultWsCpuRefillRate - c.WSCPUMaxStored.Duration = defaultWsCpuMaxStored - c.MaxBlocksPerRequest = defaultMaxBlocksPerRequest - c.ContinuousProfilerFrequency.Duration = defaultContinuousProfilerFrequency - c.ContinuousProfilerMaxFiles = defaultContinuousProfilerMaxFiles - c.Pruning = defaultPruningEnabled - c.TrieCleanCache = defaultTrieCleanCache - c.TrieDirtyCache = defaultTrieDirtyCache - c.TrieDirtyCommitTarget = defaultTrieDirtyCommitTarget - c.TriePrefetcherParallelism = defaultTriePrefetcherParallelism - c.SnapshotCache = defaultSnapshotCache - c.AcceptorQueueLimit = defaultAcceptorQueueLimit - c.CommitInterval = defaultCommitInterval - c.SnapshotWait = defaultSnapshotWait - c.PushGossipPercentStake = defaultPushGossipPercentStake - c.PushGossipNumValidators = defaultPushGossipNumValidators - c.PushGossipNumPeers = defaultPushGossipNumPeers - c.PushRegossipNumValidators = defaultPushRegossipNumValidators - c.PushRegossipNumPeers = defaultPushRegossipNumPeers - c.PushGossipFrequency.Duration = defaultPushGossipFrequency - c.PullGossipFrequency.Duration = 
defaultPullGossipFrequency - c.RegossipFrequency.Duration = defaultTxRegossipFrequency - c.OfflinePruningBloomFilterSize = defaultOfflinePruningBloomFilterSize - c.LogLevel = defaultLogLevel - c.LogJSONFormat = defaultLogJSONFormat - c.MaxOutboundActiveRequests = defaultMaxOutboundActiveRequests - c.PopulateMissingTriesParallelism = defaultPopulateMissingTriesParallelism - c.StateSyncServerTrieCache = defaultStateSyncServerTrieCache - c.StateSyncCommitInterval = defaultSyncableCommitInterval - c.StateSyncMinBlocks = defaultStateSyncMinBlocks - c.StateSyncRequestSize = defaultStateSyncRequestSize - c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes - c.AcceptedCacheSize = defaultAcceptedCacheSize -} - -func (d *Duration) UnmarshalJSON(data []byte) (err error) { - var v interface{} - if err := json.Unmarshal(data, &v); err != nil { - return err - } - d.Duration, err = cast.ToDurationE(v) - return err -} - -// String implements the stringer interface. -func (d Duration) String() string { - return d.Duration.String() -} - -// String implements the stringer interface. -func (d Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(d.Duration.String()) -} - -// Validate returns an error if this is an invalid config. -func (c *Config) Validate() error { - if c.PopulateMissingTries != nil && (c.OfflinePruning || c.Pruning) { - return fmt.Errorf("cannot enable populate missing tries while offline pruning (enabled: %t)/pruning (enabled: %t) are enabled", c.OfflinePruning, c.Pruning) - } - if c.PopulateMissingTries != nil && c.PopulateMissingTriesParallelism < 1 { - return fmt.Errorf("cannot enable populate missing tries without at least one reader (parallelism: %d)", c.PopulateMissingTriesParallelism) - } - - if !c.Pruning && c.OfflinePruning { - return fmt.Errorf("cannot run offline pruning while pruning is disabled") - } - // If pruning is enabled, the commit interval must be non-zero so the node commits state tries every CommitInterval blocks. - if c.Pruning && c.CommitInterval == 0 { - return fmt.Errorf("cannot use commit interval of 0 with pruning enabled") - } - - if c.PushGossipPercentStake < 0 || c.PushGossipPercentStake > 1 { - return fmt.Errorf("push-gossip-percent-stake is %f but must be in the range [0, 1]", c.PushGossipPercentStake) - } - return nil -} - -func (c *Config) Deprecate() string { - msg := "" - // Deprecate the old config options and set the new ones. - if c.CorethAdminAPIEnabled { - msg += "coreth-admin-api-enabled is deprecated, use admin-api-enabled instead. " - c.AdminAPIEnabled = c.CorethAdminAPIEnabled - } - if c.CorethAdminAPIDir != "" { - msg += "coreth-admin-api-dir is deprecated, use admin-api-dir instead. " - c.AdminAPIDir = c.CorethAdminAPIDir - } - if c.TxRegossipFrequency != (Duration{}) { - msg += "tx-regossip-frequency is deprecated, use regossip-frequency instead. " - c.RegossipFrequency = c.TxRegossipFrequency - } - if c.TxLookupLimit != 0 { - msg += "tx-lookup-limit is deprecated, use transaction-history instead. " - c.TransactionHistory = c.TxLookupLimit - } - - return msg +// defaultTxPoolConfig uses [legacypool.DefaultConfig] to make a [config.TxPoolConfig] +// that can be passed to [config.Config.SetDefaults]. 
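The `Deprecate` hook shown above survives the move into the new package unchanged: it maps legacy keys onto their replacements instead of rejecting them, and returns a warning string suitable for logging. A small sketch of the observable behavior, assuming the relocated `config` package (the `main` scaffolding is illustrative only):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ava-labs/coreth/plugin/evm/config"
)

func main() {
	var c config.Config
	// A node operator still using the deprecated key:
	if err := json.Unmarshal([]byte(`{"tx-lookup-limit": 128}`), &c); err != nil {
		panic(err)
	}
	// Deprecate copies the legacy field onto its replacement and
	// reports what was rewritten.
	fmt.Println(c.Deprecate())        // "tx-lookup-limit is deprecated, use transaction-history instead. "
	fmt.Println(c.TransactionHistory) // 128
}
```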
+var defaultTxPoolConfig = config.TxPoolConfig{
+	PriceLimit:   legacypool.DefaultConfig.PriceLimit,
+	PriceBump:    legacypool.DefaultConfig.PriceBump,
+	AccountSlots: legacypool.DefaultConfig.AccountSlots,
+	GlobalSlots:  legacypool.DefaultConfig.GlobalSlots,
+	AccountQueue: legacypool.DefaultConfig.AccountQueue,
+	GlobalQueue:  legacypool.DefaultConfig.GlobalQueue,
+	Lifetime:     legacypool.DefaultConfig.Lifetime,
 }
diff --git a/plugin/evm/config/config.go b/plugin/evm/config/config.go
new file mode 100644
index 0000000000..a3c28f083d
--- /dev/null
+++ b/plugin/evm/config/config.go
@@ -0,0 +1,369 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package config
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/ava-labs/avalanchego/utils/constants"
+	"github.com/ava-labs/libevm/common"
+	"github.com/ava-labs/libevm/common/hexutil"
+	"github.com/spf13/cast"
+)
+
+const (
+	defaultAcceptorQueueLimit              = 64 // Provides 2 minutes of buffer (2s block target) for a commit delay
+	defaultPruningEnabled                  = true
+	defaultCommitInterval                  = 4096
+	defaultTrieCleanCache                  = 512
+	defaultTrieDirtyCache                  = 512
+	defaultTrieDirtyCommitTarget           = 20
+	defaultTriePrefetcherParallelism       = 16
+	defaultSnapshotCache                   = 256
+	defaultSyncableCommitInterval          = defaultCommitInterval * 4
+	defaultSnapshotWait                    = false
+	defaultRpcGasCap                       = 50_000_000 // Default to 50M Gas Limit
+	defaultRpcTxFeeCap                     = 100        // 100 AVAX
+	defaultMetricsExpensiveEnabled         = true
+	defaultApiMaxDuration                  = 0 // Default to no maximum API call duration
+	defaultWsCpuRefillRate                 = 0 // Default to no maximum WS CPU usage
+	defaultWsCpuMaxStored                  = 0 // Default to no maximum WS CPU usage
+	defaultMaxBlocksPerRequest             = 0 // Default to no maximum on the number of blocks per getLogs request
+	defaultContinuousProfilerFrequency     = 15 * time.Minute
+	defaultContinuousProfilerMaxFiles      = 5
+	defaultPushGossipPercentStake          = .9
+	defaultPushGossipNumValidators         = 100
+	defaultPushGossipNumPeers              = 0
+	defaultPushRegossipNumValidators       = 10
+	defaultPushRegossipNumPeers            = 0
+	defaultPushGossipFrequency             = 100 * time.Millisecond
+	defaultPullGossipFrequency             = 1 * time.Second
+	defaultTxRegossipFrequency             = 30 * time.Second
+	defaultOfflinePruningBloomFilterSize   uint64 = 512 // Default size (MB) for the offline pruner to use
+	defaultLogLevel                        = "info"
+	defaultLogJSONFormat                   = false
+	defaultMaxOutboundActiveRequests       = 16
+	defaultPopulateMissingTriesParallelism = 1024
+	defaultStateSyncServerTrieCache        = 64 // MB
+	defaultAcceptedCacheSize               = 32 // blocks
+
+	// defaultStateSyncMinBlocks is the minimum number of blocks the blockchain
+	// should be ahead of local last accepted to perform state sync.
+	// This constant is chosen so normal bootstrapping is preferred when it would
+	// be faster than state sync.
+	// time assumptions:
+	// - normal bootstrap processing time: ~14 blocks / second
+	// - state sync time: ~6 hrs.
+	defaultStateSyncMinBlocks   = 300_000
+	DefaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request
+)
+
+var (
+	defaultEnabledAPIs = []string{
+		"eth",
+		"eth-filter",
+		"net",
+		"web3",
+		"internal-eth",
+		"internal-blockchain",
+		"internal-transaction",
+	}
+	defaultAllowUnprotectedTxHashes = []common.Hash{
+		common.HexToHash("0xfefb2da535e927b85fe68eb81cb2e4a5827c905f78381a01ef2322aa9b0aee8e"), // EIP-1820: https://eips.ethereum.org/EIPS/eip-1820
+	}
+)
+
+type Duration struct {
+	time.Duration
+}
+
+// Config is the user-facing configuration of the EVM plugin, decoded from
+// the JSON chain config supplied by the node.
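Because `Duration` (declared above; its `UnmarshalJSON` appears further down in this file) delegates to `cast.ToDurationE`, duration-valued settings can be written either as strings accepted by `time.ParseDuration` or as bare numbers, which cast treats as nanoseconds. A self-contained sketch of that round-trip, mirroring the type here rather than importing it:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/spf13/cast"
)

// duration mirrors the Duration wrapper defined above; it is redeclared
// only to keep this sketch self-contained.
type duration struct{ time.Duration }

func (d *duration) UnmarshalJSON(data []byte) (err error) {
	var v interface{}
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	d.Duration, err = cast.ToDurationE(v)
	return err
}

func main() {
	var cfg struct {
		PushGossipFrequency duration `json:"push-gossip-frequency"`
		TxPoolLifetime      duration `json:"tx-pool-lifetime"`
	}
	raw := []byte(`{"push-gossip-frequency": "150ms", "tx-pool-lifetime": 600000000000}`)
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// Strings go through time.ParseDuration; bare numbers are nanoseconds.
	fmt.Println(cfg.PushGossipFrequency, cfg.TxPoolLifetime) // 150ms 10m0s
}
```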
+type Config struct { + // Coreth APIs + SnowmanAPIEnabled bool `json:"snowman-api-enabled"` + AdminAPIEnabled bool `json:"admin-api-enabled"` + AdminAPIDir string `json:"admin-api-dir"` + CorethAdminAPIEnabled bool `json:"coreth-admin-api-enabled"` // Deprecated: use AdminAPIEnabled instead + CorethAdminAPIDir string `json:"coreth-admin-api-dir"` // Deprecated: use AdminAPIDir instead + WarpAPIEnabled bool `json:"warp-api-enabled"` + + // EnabledEthAPIs is a list of Ethereum services that should be enabled + // If none is specified, then we use the default list [defaultEnabledAPIs] + EnabledEthAPIs []string `json:"eth-apis"` + + // Continuous Profiler + ContinuousProfilerDir string `json:"continuous-profiler-dir"` // If set to non-empty string creates a continuous profiler + ContinuousProfilerFrequency Duration `json:"continuous-profiler-frequency"` // Frequency to run continuous profiler if enabled + ContinuousProfilerMaxFiles int `json:"continuous-profiler-max-files"` // Maximum number of files to maintain + + // API Gas/Price Caps + RPCGasCap uint64 `json:"rpc-gas-cap"` + RPCTxFeeCap float64 `json:"rpc-tx-fee-cap"` + + // Cache settings + TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB) + TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB) + TrieDirtyCommitTarget int `json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB) + TriePrefetcherParallelism int `json:"trie-prefetcher-parallelism"` // Max concurrent disk reads trie prefetcher should perform at once + SnapshotCache int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB) + + // Eth Settings + Preimages bool `json:"preimages-enabled"` + SnapshotWait bool `json:"snapshot-wait"` + SnapshotVerify bool `json:"snapshot-verification-enabled"` + + // Pruning Settings + Pruning bool `json:"pruning-enabled"` // If enabled, trie roots are only persisted every 4096 blocks + AcceptorQueueLimit int `json:"accepted-queue-limit"` // Maximum blocks to queue before blocking during acceptance + CommitInterval uint64 `json:"commit-interval"` // Specifies the commit interval at which to persist EVM and atomic tries. + AllowMissingTries bool `json:"allow-missing-tries"` // If enabled, warnings preventing an incomplete trie index are suppressed + PopulateMissingTries *uint64 `json:"populate-missing-tries,omitempty"` // Sets the starting point for re-populating missing tries. Disables re-generation if nil. + PopulateMissingTriesParallelism int `json:"populate-missing-tries-parallelism"` // Number of concurrent readers to use when re-populating missing tries on startup. 
+ PruneWarpDB bool `json:"prune-warp-db-enabled"` // Determines if the warpDB should be cleared on startup + + // Metric Settings + MetricsExpensiveEnabled bool `json:"metrics-expensive-enabled"` // Debug-level metrics that might impact runtime performance + + // API Settings + LocalTxsEnabled bool `json:"local-txs-enabled"` + + TxPoolPriceLimit uint64 `json:"tx-pool-price-limit"` + TxPoolPriceBump uint64 `json:"tx-pool-price-bump"` + TxPoolAccountSlots uint64 `json:"tx-pool-account-slots"` + TxPoolGlobalSlots uint64 `json:"tx-pool-global-slots"` + TxPoolAccountQueue uint64 `json:"tx-pool-account-queue"` + TxPoolGlobalQueue uint64 `json:"tx-pool-global-queue"` + TxPoolLifetime Duration `json:"tx-pool-lifetime"` + + APIMaxDuration Duration `json:"api-max-duration"` + WSCPURefillRate Duration `json:"ws-cpu-refill-rate"` + WSCPUMaxStored Duration `json:"ws-cpu-max-stored"` + MaxBlocksPerRequest int64 `json:"api-max-blocks-per-request"` + AllowUnfinalizedQueries bool `json:"allow-unfinalized-queries"` + AllowUnprotectedTxs bool `json:"allow-unprotected-txs"` + AllowUnprotectedTxHashes []common.Hash `json:"allow-unprotected-tx-hashes"` + + // Keystore Settings + KeystoreDirectory string `json:"keystore-directory"` // both absolute and relative supported + KeystoreExternalSigner string `json:"keystore-external-signer"` + KeystoreInsecureUnlockAllowed bool `json:"keystore-insecure-unlock-allowed"` + + // Gossip Settings + PushGossipPercentStake float64 `json:"push-gossip-percent-stake"` + PushGossipNumValidators int `json:"push-gossip-num-validators"` + PushGossipNumPeers int `json:"push-gossip-num-peers"` + PushRegossipNumValidators int `json:"push-regossip-num-validators"` + PushRegossipNumPeers int `json:"push-regossip-num-peers"` + PushGossipFrequency Duration `json:"push-gossip-frequency"` + PullGossipFrequency Duration `json:"pull-gossip-frequency"` + RegossipFrequency Duration `json:"regossip-frequency"` + TxRegossipFrequency Duration `json:"tx-regossip-frequency"` // Deprecated: use RegossipFrequency instead + + // Log + LogLevel string `json:"log-level"` + LogJSONFormat bool `json:"log-json-format"` + + // Offline Pruning Settings + OfflinePruning bool `json:"offline-pruning-enabled"` + OfflinePruningBloomFilterSize uint64 `json:"offline-pruning-bloom-filter-size"` + OfflinePruningDataDirectory string `json:"offline-pruning-data-directory"` + + // VM2VM network + MaxOutboundActiveRequests int64 `json:"max-outbound-active-requests"` + + // Sync settings + StateSyncEnabled *bool `json:"state-sync-enabled"` // Pointer distinguishes false (no state sync) and not set (state sync only at genesis). + StateSyncSkipResume bool `json:"state-sync-skip-resume"` // Forces state sync to use the highest available summary block + StateSyncServerTrieCache int `json:"state-sync-server-trie-cache"` + StateSyncIDs string `json:"state-sync-ids"` + StateSyncCommitInterval uint64 `json:"state-sync-commit-interval"` + StateSyncMinBlocks uint64 `json:"state-sync-min-blocks"` + StateSyncRequestSize uint16 `json:"state-sync-request-size"` + + // Database Settings + InspectDatabase bool `json:"inspect-database"` // Inspects the database on startup if enabled. + + // SkipUpgradeCheck disables checking that upgrades must take place before the last + // accepted block. Skipping this check is useful when a node operator does not update + // their node before the network upgrade and their node accepts blocks that have + // identical state with the pre-upgrade ruleset. 
+ SkipUpgradeCheck bool `json:"skip-upgrade-check"` + + // AcceptedCacheSize is the depth to keep in the accepted headers cache and the + // accepted logs cache at the accepted tip. + // + // This is particularly useful for improving the performance of eth_getLogs + // on RPC nodes. + AcceptedCacheSize int `json:"accepted-cache-size"` + + // TransactionHistory is the maximum number of blocks from head whose tx indices + // are reserved: + // * 0: means no limit + // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes + TransactionHistory uint64 `json:"transaction-history"` + // Deprecated, use 'TransactionHistory' instead. + TxLookupLimit uint64 `json:"tx-lookup-limit"` + + // SkipTxIndexing skips indexing transactions. + // This is useful for validators that don't need to index transactions. + // TxLookupLimit can be still used to control unindexing old transactions. + SkipTxIndexing bool `json:"skip-tx-indexing"` + + // WarpOffChainMessages encodes off-chain messages (unrelated to any on-chain event ie. block or AddressedCall) + // that the node should be willing to sign. + // Note: only supports AddressedCall payloads as defined here: + // https://github.com/ava-labs/avalanchego/tree/7623ffd4be915a5185c9ed5e11fa9be15a6e1f00/vms/platformvm/warp/payload#addressedcall + WarpOffChainMessages []hexutil.Bytes `json:"warp-off-chain-messages"` + + // RPC settings + HttpBodyLimit uint64 `json:"http-body-limit"` + + // Experimental + StateSyncUseUpstream bool `json:"state-sync-use-upstream"` +} + +// TxPoolConfig contains the transaction pool config to be passed +// to [Config.SetDefaults]. +type TxPoolConfig struct { + PriceLimit uint64 + PriceBump uint64 + AccountSlots uint64 + GlobalSlots uint64 + AccountQueue uint64 + GlobalQueue uint64 + Lifetime time.Duration +} + +// EthAPIs returns an array of strings representing the Eth APIs that should be enabled +func (c Config) EthAPIs() []string { + return c.EnabledEthAPIs +} + +func (c *Config) SetDefaults(txPoolConfig TxPoolConfig) { + c.EnabledEthAPIs = defaultEnabledAPIs + c.RPCGasCap = defaultRpcGasCap + c.RPCTxFeeCap = defaultRpcTxFeeCap + c.MetricsExpensiveEnabled = defaultMetricsExpensiveEnabled + + // TxPool settings + c.TxPoolPriceLimit = txPoolConfig.PriceLimit + c.TxPoolPriceBump = txPoolConfig.PriceBump + c.TxPoolAccountSlots = txPoolConfig.AccountSlots + c.TxPoolGlobalSlots = txPoolConfig.GlobalSlots + c.TxPoolAccountQueue = txPoolConfig.AccountQueue + c.TxPoolGlobalQueue = txPoolConfig.GlobalQueue + c.TxPoolLifetime.Duration = txPoolConfig.Lifetime + + c.APIMaxDuration.Duration = defaultApiMaxDuration + c.WSCPURefillRate.Duration = defaultWsCpuRefillRate + c.WSCPUMaxStored.Duration = defaultWsCpuMaxStored + c.MaxBlocksPerRequest = defaultMaxBlocksPerRequest + c.ContinuousProfilerFrequency.Duration = defaultContinuousProfilerFrequency + c.ContinuousProfilerMaxFiles = defaultContinuousProfilerMaxFiles + c.Pruning = defaultPruningEnabled + c.TrieCleanCache = defaultTrieCleanCache + c.TrieDirtyCache = defaultTrieDirtyCache + c.TrieDirtyCommitTarget = defaultTrieDirtyCommitTarget + c.TriePrefetcherParallelism = defaultTriePrefetcherParallelism + c.SnapshotCache = defaultSnapshotCache + c.AcceptorQueueLimit = defaultAcceptorQueueLimit + c.CommitInterval = defaultCommitInterval + c.SnapshotWait = defaultSnapshotWait + c.PushGossipPercentStake = defaultPushGossipPercentStake + c.PushGossipNumValidators = defaultPushGossipNumValidators + c.PushGossipNumPeers = defaultPushGossipNumPeers + c.PushRegossipNumValidators = 
defaultPushRegossipNumValidators
+	c.PushRegossipNumPeers = defaultPushRegossipNumPeers
+	c.PushGossipFrequency.Duration = defaultPushGossipFrequency
+	c.PullGossipFrequency.Duration = defaultPullGossipFrequency
+	c.RegossipFrequency.Duration = defaultTxRegossipFrequency
+	c.OfflinePruningBloomFilterSize = defaultOfflinePruningBloomFilterSize
+	c.LogLevel = defaultLogLevel
+	c.LogJSONFormat = defaultLogJSONFormat
+	c.MaxOutboundActiveRequests = defaultMaxOutboundActiveRequests
+	c.PopulateMissingTriesParallelism = defaultPopulateMissingTriesParallelism
+	c.StateSyncServerTrieCache = defaultStateSyncServerTrieCache
+	c.StateSyncCommitInterval = defaultSyncableCommitInterval
+	c.StateSyncMinBlocks = defaultStateSyncMinBlocks
+	c.StateSyncRequestSize = DefaultStateSyncRequestSize
+	c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes
+	c.AcceptedCacheSize = defaultAcceptedCacheSize
+}
+
+func (d *Duration) UnmarshalJSON(data []byte) (err error) {
+	var v interface{}
+	if err := json.Unmarshal(data, &v); err != nil {
+		return err
+	}
+	d.Duration, err = cast.ToDurationE(v)
+	return err
+}
+
+// String implements the stringer interface.
+func (d Duration) String() string {
+	return d.Duration.String()
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d Duration) MarshalJSON() ([]byte, error) {
+	return json.Marshal(d.Duration.String())
+}
+
+// Validate returns an error if this is an invalid config.
+func (c *Config) Validate(networkID uint32) error {
+	// Ensure that non-standard commit interval is not allowed for production networks
+	if constants.ProductionNetworkIDs.Contains(networkID) {
+		if c.CommitInterval != defaultCommitInterval {
+			return fmt.Errorf("cannot start non-local network with commit interval %d different than %d", c.CommitInterval, defaultCommitInterval)
+		}
+		if c.StateSyncCommitInterval != defaultSyncableCommitInterval {
+			return fmt.Errorf("cannot start non-local network with syncable interval %d different than %d", c.StateSyncCommitInterval, defaultSyncableCommitInterval)
+		}
+	}
+
+	if c.PopulateMissingTries != nil && (c.OfflinePruning || c.Pruning) {
+		return fmt.Errorf("cannot enable populate missing tries while offline pruning (enabled: %t)/pruning (enabled: %t) are enabled", c.OfflinePruning, c.Pruning)
+	}
+	if c.PopulateMissingTries != nil && c.PopulateMissingTriesParallelism < 1 {
+		return fmt.Errorf("cannot enable populate missing tries without at least one reader (parallelism: %d)", c.PopulateMissingTriesParallelism)
+	}
+
+	if !c.Pruning && c.OfflinePruning {
+		return fmt.Errorf("cannot run offline pruning while pruning is disabled")
+	}
+	// If pruning is enabled, the commit interval must be non-zero so the node commits state tries every CommitInterval blocks.
+	if c.Pruning && c.CommitInterval == 0 {
+		return fmt.Errorf("cannot use commit interval of 0 with pruning enabled")
+	}
+
+	if c.PushGossipPercentStake < 0 || c.PushGossipPercentStake > 1 {
+		return fmt.Errorf("push-gossip-percent-stake is %f but must be in the range [0, 1]", c.PushGossipPercentStake)
+	}
+	return nil
+}
+
+func (c *Config) Deprecate() string {
+	msg := ""
+	// Deprecate the old config options and set the new ones.
+	if c.CorethAdminAPIEnabled {
+		msg += "coreth-admin-api-enabled is deprecated, use admin-api-enabled instead. "
+		c.AdminAPIEnabled = c.CorethAdminAPIEnabled
+	}
+	if c.CorethAdminAPIDir != "" {
+		msg += "coreth-admin-api-dir is deprecated, use admin-api-dir instead. 
" + c.AdminAPIDir = c.CorethAdminAPIDir + } + if c.TxRegossipFrequency != (Duration{}) { + msg += "tx-regossip-frequency is deprecated, use regossip-frequency instead. " + c.RegossipFrequency = c.TxRegossipFrequency + } + if c.TxLookupLimit != 0 { + msg += "tx-lookup-limit is deprecated, use transaction-history instead. " + c.TransactionHistory = c.TxLookupLimit + } + + return msg +} diff --git a/plugin/evm/config_test.go b/plugin/evm/config/config_test.go similarity index 99% rename from plugin/evm/config_test.go rename to plugin/evm/config/config_test.go index 6e01ed1c84..7b96a11742 100644 --- a/plugin/evm/config_test.go +++ b/plugin/evm/config/config_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package config import ( "encoding/json" diff --git a/plugin/evm/config/constants.go b/plugin/evm/config/constants.go new file mode 100644 index 0000000000..2e47489f1c --- /dev/null +++ b/plugin/evm/config/constants.go @@ -0,0 +1,8 @@ +package config + +const ( + TxGossipBloomMinTargetElements = 8 * 1024 + TxGossipBloomTargetFalsePositiveRate = 0.01 + TxGossipBloomResetFalsePositiveRate = 0.05 + TxGossipBloomChurnMultiplier = 3 +) diff --git a/plugin/evm/database.go b/plugin/evm/database/wrapped_database.go similarity index 55% rename from plugin/evm/database.go rename to plugin/evm/database/wrapped_database.go index b593b47533..86e3bbd239 100644 --- a/plugin/evm/database.go +++ b/plugin/evm/database/wrapped_database.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package database import ( "errors" @@ -11,25 +11,29 @@ import ( ) var ( - _ ethdb.KeyValueStore = &Database{} + _ ethdb.KeyValueStore = ðDbWrapper{} ErrSnapshotNotSupported = errors.New("snapshot is not supported") ) -// Database implements ethdb.Database -type Database struct{ database.Database } +// ethDbWrapper implements ethdb.Database +type ethDbWrapper struct{ database.Database } + +func WrapDatabase(db database.Database) ethdb.KeyValueStore { return ethDbWrapper{db} } // Stat implements ethdb.Database -func (db Database) Stat(string) (string, error) { return "", database.ErrNotFound } +func (db ethDbWrapper) Stat(string) (string, error) { return "", database.ErrNotFound } // NewBatch implements ethdb.Database -func (db Database) NewBatch() ethdb.Batch { return Batch{db.Database.NewBatch()} } +func (db ethDbWrapper) NewBatch() ethdb.Batch { return wrappedBatch{db.Database.NewBatch()} } // NewBatchWithSize implements ethdb.Database // TODO: propagate size through avalanchego Database interface -func (db Database) NewBatchWithSize(size int) ethdb.Batch { return Batch{db.Database.NewBatch()} } +func (db ethDbWrapper) NewBatchWithSize(size int) ethdb.Batch { + return wrappedBatch{db.Database.NewBatch()} +} -func (db Database) NewSnapshot() (ethdb.Snapshot, error) { +func (db ethDbWrapper) NewSnapshot() (ethdb.Snapshot, error) { return nil, ErrSnapshotNotSupported } @@ -37,7 +41,7 @@ func (db Database) NewSnapshot() (ethdb.Snapshot, error) { // // Note: This method assumes that the prefix is NOT part of the start, so there's // no need for the caller to prepend the prefix to the start. 
-func (db Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
+func (db ethDbWrapper) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
 	// avalanchego's database implementation assumes that the prefix is part of the
 	// start, so it is added here (if it is provided).
 	if len(prefix) > 0 {
@@ -50,15 +54,15 @@ func (db Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
 }
 
 // NewIteratorWithStart implements ethdb.Database
-func (db Database) NewIteratorWithStart(start []byte) ethdb.Iterator {
+func (db ethDbWrapper) NewIteratorWithStart(start []byte) ethdb.Iterator {
 	return db.Database.NewIteratorWithStart(start)
 }
 
-// Batch implements ethdb.Batch
-type Batch struct{ database.Batch }
+// wrappedBatch implements ethdb.Batch
+type wrappedBatch struct{ database.Batch }
 
 // ValueSize implements ethdb.Batch
-func (batch Batch) ValueSize() int { return batch.Batch.Size() }
+func (batch wrappedBatch) ValueSize() int { return batch.Batch.Size() }
 
 // Replay implements ethdb.Batch
-func (batch Batch) Replay(w ethdb.KeyValueWriter) error { return batch.Batch.Replay(w) }
+func (batch wrappedBatch) Replay(w ethdb.KeyValueWriter) error { return batch.Batch.Replay(w) }
diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go
index 84f1d448bb..df6103555a 100644
--- a/plugin/evm/export_tx_test.go
+++ b/plugin/evm/export_tx_test.go
@@ -9,7 +9,7 @@ import (
 	"math/big"
 	"testing"
 
-	"github.com/ava-labs/avalanchego/chains/atomic"
+	avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic"
 	"github.com/ava-labs/avalanchego/ids"
 	engCommon "github.com/ava-labs/avalanchego/snow/engine/common"
 	"github.com/ava-labs/avalanchego/utils/constants"
@@ -19,13 +19,14 @@ import (
 	"github.com/ava-labs/avalanchego/vms/secp256k1fx"
 	"github.com/ava-labs/coreth/params"
 	"github.com/ava-labs/coreth/params/extras"
+	"github.com/ava-labs/coreth/plugin/evm/atomic"
 	"github.com/ava-labs/libevm/common"
 	"github.com/holiman/uint256"
 )
 
 // createExportTxOptions adds funds to shared memory, imports them, and returns a list of export transactions
 // that attempt to send the funds to each of the test keys (list of length 3).
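The import hunk above establishes the aliasing convention used throughout the rest of this file: avalanchego's shared-memory package becomes `avalancheatomic`, freeing the plain `atomic` identifier for coreth's newly extracted `plugin/evm/atomic` package. In outline (`describe` is a hypothetical placeholder):

```go
package evm

import (
	avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic"

	"github.com/ava-labs/coreth/plugin/evm/atomic"
)

// The alias keeps both packages usable side by side: shared-memory
// plumbing (Memory, Requests, Element) comes from avalanchego, while
// transaction types (Tx, Codec, EVMInput, ...) come from coreth.
func describe(mem *avalancheatomic.Memory, tx *atomic.Tx) {
	_, _ = mem, tx
}
```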
-func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, sharedMemory *atomic.Memory) []*Tx { +func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { // Add a UTXO to shared memory utxo := &avax.UTXO{ UTXOID: avax.UTXOID{TxID: ids.GenerateTestID()}, @@ -34,22 +35,22 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, Amt: uint64(50000000), OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + Addrs: []ids.ShortID{testKeys[0].Address()}, }, }, } - utxoBytes, err := vm.codec.Marshal(codecVersion, utxo) + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { t.Fatal(err) } xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) @@ -85,9 +86,13 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, } // Use the funds to create 3 conflicting export transactions sending the funds to each of the test addresses - exportTxs := make([]*Tx, 0, 3) + exportTxs := make([]*atomic.Tx, 0, 3) + state, err := vm.blockChain.State() + if err != nil { + t.Fatal(err) + } for _, addr := range testShortIDAddrs { - exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + exportTx, err := atomic.NewExportTx(vm.ctx, vm.currentRules(), state, vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -99,8 +104,8 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, func TestExportTxEVMStateTransfer(t *testing.T) { key := testKeys[0] - addr := key.PublicKey().Address() - ethAddr := GetEthAddress(key) + addr := key.Address() + ethAddr := key.EthAddress() avaxAmount := 50 * units.MilliAvax avaxUTXOID := avax.UTXOID{ @@ -129,7 +134,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { tests := []struct { name string - tx []EVMInput + tx []atomic.EVMInput avaxBalance *uint256.Int balances map[ids.ID]*big.Int expectedNonce uint64 @@ -138,7 +143,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { { name: "no transfers", tx: nil, - avaxBalance: uint256.NewInt(avaxAmount * x2cRateUint64), + avaxBalance: uint256.NewInt(avaxAmount * atomic.X2CRateUint64), balances: map[ids.ID]*big.Int{ customAssetID: big.NewInt(int64(customAmount)), }, @@ -147,7 +152,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend half AVAX", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: avaxAmount / 2, @@ -155,7 +160,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { Nonce: 0, }, }, - avaxBalance: uint256.NewInt(avaxAmount / 2 * x2cRateUint64), + avaxBalance: uint256.NewInt(avaxAmount / 2 * atomic.X2CRateUint64), balances: map[ids.ID]*big.Int{ customAssetID: big.NewInt(int64(customAmount)), }, @@ -164,7 +169,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend all AVAX", - tx: []EVMInput{ 
+ tx: []atomic.EVMInput{ { Address: ethAddr, Amount: avaxAmount, @@ -181,7 +186,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend too much AVAX", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: avaxAmount + 1, @@ -198,7 +203,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend half custom", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: customAmount / 2, @@ -206,7 +211,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { Nonce: 0, }, }, - avaxBalance: uint256.NewInt(avaxAmount * x2cRateUint64), + avaxBalance: uint256.NewInt(avaxAmount * atomic.X2CRateUint64), balances: map[ids.ID]*big.Int{ customAssetID: big.NewInt(int64(customAmount / 2)), }, @@ -215,7 +220,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend all custom", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: customAmount, @@ -223,7 +228,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { Nonce: 0, }, }, - avaxBalance: uint256.NewInt(avaxAmount * x2cRateUint64), + avaxBalance: uint256.NewInt(avaxAmount * atomic.X2CRateUint64), balances: map[ids.ID]*big.Int{ customAssetID: big.NewInt(0), }, @@ -232,7 +237,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend too much custom", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: customAmount + 1, @@ -240,7 +245,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { Nonce: 0, }, }, - avaxBalance: uint256.NewInt(avaxAmount * x2cRateUint64), + avaxBalance: uint256.NewInt(avaxAmount * atomic.X2CRateUint64), balances: map[ids.ID]*big.Int{ customAssetID: big.NewInt(0), }, @@ -249,7 +254,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend everything", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: customAmount, @@ -272,7 +277,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend everything wrong nonce", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: customAmount, @@ -295,7 +300,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend everything changing nonces", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: customAmount, @@ -338,18 +343,18 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, } - avaxUTXOBytes, err := vm.codec.Marshal(codecVersion, avaxUTXO) + avaxUTXOBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, avaxUTXO) if err != nil { t.Fatal(err) } - customUTXOBytes, err := vm.codec.Marshal(codecVersion, customUTXO) + customUTXOBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, customUTXO) if err != nil { t.Fatal(err) } xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{ { Key: avaxInputID[:], Value: avaxUTXOBytes, @@ -396,7 +401,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { t.Fatal(err) } - newTx := UnsignedExportTx{ + newTx := atomic.UnsignedExportTx{ Ins: test.tx, } @@ -447,7 +452,7 @@ func TestExportTxSemanticVerify(t *testing.T) { parent := vm.LastAcceptedBlockInternal().(*Block) key := testKeys[0] - addr := key.PublicKey().Address() + addr := key.Address() ethAddr := testEthAddrs[0] var ( @@ -458,11 +463,11 @@ func TestExportTxSemanticVerify(t *testing.T) { custom1AssetID = ids.ID{1, 2, 3, 4, 5, 
6} ) - validExportTx := &UnsignedExportTx{ + validExportTx := &atomic.UnsignedExportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, DestinationChain: vm.ctx.XChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: ethAddr, Amount: avaxBalance, @@ -496,11 +501,11 @@ func TestExportTxSemanticVerify(t *testing.T) { }, } - validAVAXExportTx := &UnsignedExportTx{ + validAVAXExportTx := &atomic.UnsignedExportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, DestinationChain: vm.ctx.XChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: ethAddr, Amount: avaxBalance, @@ -524,7 +529,7 @@ func TestExportTxSemanticVerify(t *testing.T) { tests := []struct { name string - tx *Tx + tx *atomic.Tx signers [][]*secp256k1.PrivateKey baseFee *big.Int rules extras.Rules @@ -532,7 +537,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }{ { name: "valid", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ {key}, {key}, @@ -544,10 +549,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "P-chain before AP5", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validAVAXExportTx validExportTx.DestinationChain = constants.PlatformChainID - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -558,10 +563,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "P-chain after AP5", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validAVAXExportTx validExportTx.DestinationChain = constants.PlatformChainID - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -572,10 +577,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "random chain after AP5", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validAVAXExportTx validExportTx.DestinationChain = ids.GenerateTestID() - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -586,10 +591,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "P-chain multi-coin before AP5", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.DestinationChain = constants.PlatformChainID - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -602,10 +607,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "P-chain multi-coin after AP5", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.DestinationChain = constants.PlatformChainID - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -618,10 +623,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "random chain multi-coin after AP5", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.DestinationChain = ids.GenerateTestID() - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -634,10 +639,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "no outputs", - tx: func() *Tx { + tx: func() *atomic.Tx { 
validExportTx := *validExportTx validExportTx.ExportedOutputs = nil - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -650,10 +655,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "wrong networkID", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.NetworkID++ - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -666,10 +671,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "wrong chainID", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.BlockchainID = ids.GenerateTestID() - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -682,11 +687,11 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "invalid input", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx - validExportTx.Ins = append([]EVMInput{}, validExportTx.Ins...) + validExportTx.Ins = append([]atomic.EVMInput{}, validExportTx.Ins...) validExportTx.Ins[2].Amount = 0 - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -699,7 +704,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "invalid output", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.ExportedOutputs = []*avax.TransferableOutput{{ Asset: avax.Asset{ID: custom0AssetID}, @@ -711,7 +716,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, }, }} - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -724,7 +729,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "unsorted outputs", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx exportOutputs := []*avax.TransferableOutput{ { @@ -749,10 +754,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, } // Sort the outputs and then swap the ordering to ensure that they are ordered incorrectly - avax.SortTransferableOutputs(exportOutputs, Codec) + avax.SortTransferableOutputs(exportOutputs, atomic.Codec) exportOutputs[0], exportOutputs[1] = exportOutputs[1], exportOutputs[0] validExportTx.ExportedOutputs = exportOutputs - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -765,11 +770,11 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "not unique inputs", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx - validExportTx.Ins = append([]EVMInput{}, validExportTx.Ins...) + validExportTx.Ins = append([]atomic.EVMInput{}, validExportTx.Ins...) 
validExportTx.Ins[2] = validExportTx.Ins[1] - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -782,7 +787,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "custom asset insufficient funds", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.ExportedOutputs = []*avax.TransferableOutput{ { @@ -796,7 +801,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, }, } - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -809,7 +814,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "avax insufficient funds", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.ExportedOutputs = []*avax.TransferableOutput{ { @@ -823,7 +828,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, }, } - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -836,7 +841,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "too many signatures", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ {key}, {key}, @@ -849,7 +854,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "too few signatures", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ {key}, {key}, @@ -860,7 +865,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "too many signatures on credential", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ {key, testKeys[1]}, {key}, @@ -872,7 +877,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "too few signatures on credential", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ {}, {key}, @@ -884,7 +889,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "wrong signature on credential", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ {testKeys[1]}, {key}, @@ -896,7 +901,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "no signatures", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{}, baseFee: initialBaseFee, rules: apricotRulesPhase3, @@ -904,15 +909,24 @@ func TestExportTxSemanticVerify(t *testing.T) { }, } for _, test := range tests { - if err := test.tx.Sign(vm.codec, test.signers); err != nil { + if err := test.tx.Sign(atomic.Codec, test.signers); err != nil { t.Fatal(err) } + backend := &atomic.Backend{ + Ctx: vm.ctx, + Fx: &vm.fx, + Rules: test.rules, + Bootstrapped: vm.bootstrapped.Get(), + BlockFetcher: vm, + SecpCache: &vm.secpCache, + } + t.Run(test.name, func(t *testing.T) { tx := test.tx exportTx := tx.UnsignedAtomicTx - err := exportTx.SemanticVerify(vm, tx, parent, test.baseFee, test.rules) + err := exportTx.SemanticVerify(backend, tx, parent, test.baseFee) if test.shouldErr && err == nil { t.Fatalf("should have errored but returned valid") } @@ -935,7 +949,7 @@ func TestExportTxAccept(t *testing.T) { }() key := testKeys[0] - addr := 
key.PublicKey().Address() + addr := key.Address() ethAddr := testEthAddrs[0] var ( @@ -944,11 +958,11 @@ func TestExportTxAccept(t *testing.T) { custom0AssetID = ids.ID{1, 2, 3, 4, 5} ) - exportTx := &UnsignedExportTx{ + exportTx := &atomic.UnsignedExportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, DestinationChain: vm.ctx.XChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: ethAddr, Amount: avaxBalance, @@ -986,7 +1000,7 @@ func TestExportTxAccept(t *testing.T) { }, } - tx := &Tx{UnsignedAtomicTx: exportTx} + tx := &atomic.Tx{UnsignedAtomicTx: exportTx} signers := [][]*secp256k1.PrivateKey{ {key}, @@ -994,11 +1008,11 @@ func TestExportTxAccept(t *testing.T) { {key}, } - if err := tx.Sign(vm.codec, signers); err != nil { + if err := tx.Sign(atomic.Codec, signers); err != nil { t.Fatal(err) } - commitBatch, err := vm.db.CommitBatch() + commitBatch, err := vm.versiondb.CommitBatch() if err != nil { t.Fatalf("Failed to create commit batch for VM due to %s", err) } @@ -1007,7 +1021,7 @@ func TestExportTxAccept(t *testing.T) { t.Fatalf("Failed to accept export transaction due to: %s", err) } - if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*atomic.Requests{chainID: {PutRequests: atomicRequests.PutRequests}}, commitBatch); err != nil { + if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{chainID: {PutRequests: atomicRequests.PutRequests}}, commitBatch); err != nil { t.Fatal(err) } indexedValues, _, _, err := xChainSharedMemory.Indexed(vm.ctx.ChainID, [][]byte{addr.Bytes()}, nil, nil, 3) @@ -1046,7 +1060,7 @@ func TestExportTxAccept(t *testing.T) { t.Fatalf("inconsistent values returned fetched %x indexed %x", fetchedValues[1], indexedValues[1]) } - customUTXOBytes, err := Codec.Marshal(codecVersion, &avax.UTXO{ + customUTXOBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, &avax.UTXO{ UTXOID: customUTXOID, Asset: avax.Asset{ID: custom0AssetID}, Out: exportTx.ExportedOutputs[1].Out, @@ -1055,7 +1069,7 @@ func TestExportTxAccept(t *testing.T) { t.Fatal(err) } - avaxUTXOBytes, err := Codec.Marshal(codecVersion, &avax.UTXO{ + avaxUTXOBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, &avax.UTXO{ UTXOID: avaxUTXOID, Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, Out: exportTx.ExportedOutputs[0].Out, @@ -1074,11 +1088,11 @@ func TestExportTxAccept(t *testing.T) { func TestExportTxVerify(t *testing.T) { var exportAmount uint64 = 10000000 - exportTx := &UnsignedExportTx{ + exportTx := &atomic.UnsignedExportTx{ NetworkID: testNetworkID, BlockchainID: testCChainID, DestinationChain: testXChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, @@ -1119,25 +1133,25 @@ func TestExportTxVerify(t *testing.T) { } // Sort the inputs and outputs to ensure the transaction is canonical - avax.SortTransferableOutputs(exportTx.ExportedOutputs, Codec) + avax.SortTransferableOutputs(exportTx.ExportedOutputs, atomic.Codec) // Pass in a list of signers here with the appropriate length // to avoid causing a nil-pointer error in the helper method emptySigners := make([][]*secp256k1.PrivateKey, 2) - SortEVMInputsAndSigners(exportTx.Ins, emptySigners) + atomic.SortEVMInputsAndSigners(exportTx.Ins, emptySigners) ctx := NewContext() tests := map[string]atomicTxVerifyTest{ "nil tx": { - generate: func(t *testing.T) UnsignedAtomicTx { - return (*UnsignedExportTx)(nil) + generate: func(t *testing.T) atomic.UnsignedAtomicTx { + return (*atomic.UnsignedExportTx)(nil) }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: 
errNilTx.Error(), + expectedErr: atomic.ErrNilTx.Error(), }, "valid export tx": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { return exportTx }, ctx: ctx, @@ -1145,7 +1159,7 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "", }, "valid export tx banff": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { return exportTx }, ctx: ctx, @@ -1153,47 +1167,47 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "", }, "incorrect networkID": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.NetworkID++ return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errWrongNetworkID.Error(), + expectedErr: atomic.ErrWrongNetworkID.Error(), }, "incorrect blockchainID": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.BlockchainID = nonExistentID return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errWrongBlockchainID.Error(), + expectedErr: atomic.ErrWrongChainID.Error(), }, "incorrect destination chain": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.DestinationChain = nonExistentID return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errWrongChainID.Error(), // TODO make this error more specific to destination not just chainID + expectedErr: atomic.ErrWrongChainID.Error(), // TODO make this error more specific to destination not just chainID }, "no exported outputs": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.ExportedOutputs = nil return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errNoExportOutputs.Error(), + expectedErr: atomic.ErrNoExportOutputs.Error(), }, "unsorted outputs": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.ExportedOutputs = []*avax.TransferableOutput{ tx.ExportedOutputs[1], @@ -1203,10 +1217,10 @@ func TestExportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errOutputsNotSorted.Error(), + expectedErr: atomic.ErrOutputsNotSorted.Error(), }, "invalid exported output": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.ExportedOutputs = []*avax.TransferableOutput{tx.ExportedOutputs[0], nil} return &tx @@ -1216,9 +1230,9 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "nil transferable output is not valid", }, "unsorted EVM inputs before AP1": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = []EVMInput{ + tx.Ins = []atomic.EVMInput{ tx.Ins[1], tx.Ins[0], } @@ -1229,9 +1243,9 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "", }, "unsorted EVM inputs after AP1": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = []EVMInput{ + tx.Ins = []atomic.EVMInput{ tx.Ins[1], tx.Ins[0], } @@ -1239,12 +1253,12 @@ func TestExportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase1, - expectedErr: errInputsNotSortedUnique.Error(), + expectedErr: atomic.ErrInputsNotSortedUnique.Error(), }, "EVM input with amount 0": { - 
generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = []EVMInput{ + tx.Ins = []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: 0, @@ -1256,12 +1270,12 @@ func TestExportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errNoValueInput.Error(), + expectedErr: atomic.ErrNoValueInput.Error(), }, "non-unique EVM input before AP1": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = []EVMInput{tx.Ins[0], tx.Ins[0]} + tx.Ins = []atomic.EVMInput{tx.Ins[0], tx.Ins[0]} return &tx }, ctx: ctx, @@ -1269,19 +1283,19 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "", }, "non-unique EVM input after AP1": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = []EVMInput{tx.Ins[0], tx.Ins[0]} + tx.Ins = []atomic.EVMInput{tx.Ins[0], tx.Ins[0]} return &tx }, ctx: ctx, rules: apricotRulesPhase1, - expectedErr: errInputsNotSortedUnique.Error(), + expectedErr: atomic.ErrInputsNotSortedUnique.Error(), }, "non-AVAX input Apricot Phase 6": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = []EVMInput{ + tx.Ins = []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: 1, @@ -1296,7 +1310,7 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "", }, "non-AVAX output Apricot Phase 6": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.ExportedOutputs = []*avax.TransferableOutput{ { @@ -1318,9 +1332,9 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "", }, "non-AVAX input Banff": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = []EVMInput{ + tx.Ins = []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: 1, @@ -1332,10 +1346,10 @@ func TestExportTxVerify(t *testing.T) { }, ctx: ctx, rules: banffRules, - expectedErr: errExportNonAVAXInputBanff.Error(), + expectedErr: atomic.ErrExportNonAVAXInputBanff.Error(), }, "non-AVAX output Banff": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.ExportedOutputs = []*avax.TransferableOutput{ { @@ -1354,7 +1368,7 @@ func TestExportTxVerify(t *testing.T) { }, ctx: ctx, rules: banffRules, - expectedErr: errExportNonAVAXOutputBanff.Error(), + expectedErr: atomic.ErrExportNonAVAXOutputBanff.Error(), }, } @@ -1375,7 +1389,7 @@ func TestExportTxGasCost(t *testing.T) { exportAmount := uint64(5000000) tests := map[string]struct { - UnsignedExportTx *UnsignedExportTx + UnsignedExportTx *atomic.UnsignedExportTx Keys [][]*secp256k1.PrivateKey BaseFee *big.Int @@ -1384,11 +1398,11 @@ func TestExportTxGasCost(t *testing.T) { FixedFee bool }{ "simple export 1wei BaseFee": { - UnsignedExportTx: &UnsignedExportTx{ + UnsignedExportTx: &atomic.UnsignedExportTx{ NetworkID: networkID, BlockchainID: chainID, DestinationChain: xChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, @@ -1416,11 +1430,11 @@ func TestExportTxGasCost(t *testing.T) { BaseFee: big.NewInt(1), }, "simple export 1wei BaseFee + fixed fee": { - UnsignedExportTx: &UnsignedExportTx{ + UnsignedExportTx: &atomic.UnsignedExportTx{ NetworkID: networkID, BlockchainID: chainID, 
DestinationChain: xChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, @@ -1449,11 +1463,11 @@ func TestExportTxGasCost(t *testing.T) { FixedFee: true, }, "simple export 25Gwei BaseFee": { - UnsignedExportTx: &UnsignedExportTx{ + UnsignedExportTx: &atomic.UnsignedExportTx{ NetworkID: networkID, BlockchainID: chainID, DestinationChain: xChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, @@ -1481,11 +1495,11 @@ func TestExportTxGasCost(t *testing.T) { BaseFee: big.NewInt(25 * params.GWei), }, "simple export 225Gwei BaseFee": { - UnsignedExportTx: &UnsignedExportTx{ + UnsignedExportTx: &atomic.UnsignedExportTx{ NetworkID: networkID, BlockchainID: chainID, DestinationChain: xChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, @@ -1513,11 +1527,11 @@ func TestExportTxGasCost(t *testing.T) { BaseFee: big.NewInt(225 * params.GWei), }, "complex export 25Gwei BaseFee": { - UnsignedExportTx: &UnsignedExportTx{ + UnsignedExportTx: &atomic.UnsignedExportTx{ NetworkID: networkID, BlockchainID: chainID, DestinationChain: xChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, @@ -1557,11 +1571,11 @@ func TestExportTxGasCost(t *testing.T) { BaseFee: big.NewInt(25 * params.GWei), }, "complex export 225Gwei BaseFee": { - UnsignedExportTx: &UnsignedExportTx{ + UnsignedExportTx: &atomic.UnsignedExportTx{ NetworkID: networkID, BlockchainID: chainID, DestinationChain: xChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, @@ -1604,10 +1618,10 @@ func TestExportTxGasCost(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { - tx := &Tx{UnsignedAtomicTx: test.UnsignedExportTx} + tx := &atomic.Tx{UnsignedAtomicTx: test.UnsignedExportTx} // Sign with the correct key - if err := tx.Sign(Codec, test.Keys); err != nil { + if err := tx.Sign(atomic.Codec, test.Keys); err != nil { t.Fatal(err) } @@ -1619,7 +1633,7 @@ func TestExportTxGasCost(t *testing.T) { t.Fatalf("Expected gasUsed to be %d, but found %d", test.ExpectedGasUsed, gasUsed) } - fee, err := CalculateDynamicFee(gasUsed, test.BaseFee) + fee, err := atomic.CalculateDynamicFee(gasUsed, test.BaseFee) if err != nil { t.Fatal(err) } @@ -1702,22 +1716,22 @@ func TestNewExportTx(t *testing.T) { Amt: importAmount, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + Addrs: []ids.ShortID{testKeys[0].Address()}, }, }, } - utxoBytes, err := vm.codec.Marshal(codecVersion, utxo) + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { t.Fatal(err) } xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) @@ -1754,14 +1768,28 @@ func TestNewExportTx(t *testing.T) { parent = vm.LastAcceptedBlockInternal().(*Block) exportAmount := uint64(5000000) - tx, err = vm.newExportTx(vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, 
[]*secp256k1.PrivateKey{testKeys[0]}) + state, err := vm.blockChain.State() + if err != nil { + t.Fatal(err) + } + + tx, err = atomic.NewExportTx(vm.ctx, test.rules, state, vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } exportTx := tx.UnsignedAtomicTx - if err := exportTx.SemanticVerify(vm, tx, parent, parent.ethBlock.BaseFee(), test.rules); err != nil { + backend := &atomic.Backend{ + Ctx: vm.ctx, + Fx: &vm.fx, + Rules: vm.currentRules(), + Bootstrapped: vm.bootstrapped.Get(), + BlockFetcher: vm, + SecpCache: &vm.secpCache, + } + + if err := exportTx.SemanticVerify(backend, tx, parent, parent.ethBlock.BaseFee()); err != nil { t.Fatal("newExportTx created an invalid transaction", err) } @@ -1773,7 +1801,7 @@ func TestNewExportTx(t *testing.T) { t.Fatalf("burned wrong amount of AVAX - expected %d burned %d", test.expectedBurnedAVAX, burnedAVAX) } - commitBatch, err := vm.db.CommitBatch() + commitBatch, err := vm.versiondb.CommitBatch() if err != nil { t.Fatalf("Failed to create commit batch for VM due to %s", err) } @@ -1782,7 +1810,7 @@ func TestNewExportTx(t *testing.T) { t.Fatalf("Failed to accept export transaction due to: %s", err) } - if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*atomic.Requests{chainID: {PutRequests: atomicRequests.PutRequests}}, commitBatch); err != nil { + if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{chainID: {PutRequests: atomicRequests.PutRequests}}, commitBatch); err != nil { t.Fatal(err) } @@ -1795,7 +1823,7 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - addr := GetEthAddress(testKeys[0]) + addr := testKeys[0].EthAddress() if sdb.GetBalance(addr).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { t.Fatalf("address balance %s equal %s not %s", addr.String(), sdb.GetBalance(addr), new(big.Int).SetUint64(test.bal*units.Avax)) } @@ -1861,11 +1889,11 @@ func TestNewExportTxMulticoin(t *testing.T) { Amt: importAmount, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + Addrs: []ids.ShortID{testKeys[0].Address()}, }, }, } - utxoBytes, err := vm.codec.Marshal(codecVersion, utxo) + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { t.Fatal(err) } @@ -1882,30 +1910,30 @@ func TestNewExportTxMulticoin(t *testing.T) { Amt: importAmount2, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + Addrs: []ids.ShortID{testKeys[0].Address()}, }, }, } - utxoBytes2, err := vm.codec.Marshal(codecVersion, utxo2) + utxoBytes2, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo2) if err != nil { t.Fatal(err) } xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID2 := utxo2.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{ { Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }, { Key: inputID2[:], Value: utxoBytes2, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }, }}}); err != nil { @@ -1917,7 +1945,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - if err := vm.mempool.AddTx(tx); err != nil { + if err := 
vm.mempool.AddRemoteTx(tx); err != nil { t.Fatal(err) } @@ -1943,24 +1971,37 @@ func TestNewExportTxMulticoin(t *testing.T) { parent = vm.LastAcceptedBlockInternal().(*Block) exportAmount := uint64(5000000) - testKeys0Addr := GetEthAddress(testKeys[0]) + testKeys0Addr := testKeys[0].EthAddress() exportId, err := ids.ToShortID(testKeys0Addr[:]) if err != nil { t.Fatal(err) } - tx, err = vm.newExportTx(tid, exportAmount, vm.ctx.XChainID, exportId, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + state, err := vm.blockChain.State() + if err != nil { + t.Fatal(err) + } + + tx, err = atomic.NewExportTx(vm.ctx, vm.currentRules(), state, tid, exportAmount, vm.ctx.XChainID, exportId, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } exportTx := tx.UnsignedAtomicTx + backend := &atomic.Backend{ + Ctx: vm.ctx, + Fx: &vm.fx, + Rules: vm.currentRules(), + Bootstrapped: vm.bootstrapped.Get(), + BlockFetcher: vm, + SecpCache: &vm.secpCache, + } - if err := exportTx.SemanticVerify(vm, tx, parent, parent.ethBlock.BaseFee(), test.rules); err != nil { + if err := exportTx.SemanticVerify(backend, tx, parent, parent.ethBlock.BaseFee()); err != nil { t.Fatal("newExportTx created an invalid transaction", err) } - commitBatch, err := vm.db.CommitBatch() + commitBatch, err := vm.versiondb.CommitBatch() if err != nil { t.Fatalf("Failed to create commit batch for VM due to %s", err) } @@ -1969,7 +2010,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatalf("Failed to accept export transaction due to: %s", err) } - if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*atomic.Requests{chainID: {PutRequests: atomicRequests.PutRequests}}, commitBatch); err != nil { + if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{chainID: {PutRequests: atomicRequests.PutRequests}}, commitBatch); err != nil { t.Fatal(err) } @@ -1982,7 +2023,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - addr := GetEthAddress(testKeys[0]) + addr := testKeys[0].EthAddress() if stdb.GetBalance(addr).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { t.Fatalf("address balance %s equal %s not %s", addr.String(), stdb.GetBalance(addr), new(big.Int).SetUint64(test.bal*units.Avax)) } diff --git a/plugin/evm/formatting.go b/plugin/evm/formatting.go index 0b89dda135..feeab134b7 100644 --- a/plugin/evm/formatting.go +++ b/plugin/evm/formatting.go @@ -8,10 +8,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting/address" - "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/crypto" ) // ParseServiceAddress get address ID from address string, being it either localized (using address manager, @@ -53,21 +50,3 @@ func (vm *VM) FormatAddress(chainID ids.ID, addr ids.ShortID) (string, error) { hrp := constants.GetHRP(vm.ctx.NetworkID) return address.Format(chainIDAlias, hrp, addr.Bytes()) } - -// ParseEthAddress parses [addrStr] and returns an Ethereum address -func ParseEthAddress(addrStr string) (common.Address, error) { - if !common.IsHexAddress(addrStr) { - return common.Address{}, errInvalidAddr - } - return common.HexToAddress(addrStr), nil -} - -// GetEthAddress returns the ethereum address derived from [privKey] -func GetEthAddress(privKey *secp256k1.PrivateKey) common.Address { - return PublicKeyToEthAddress(privKey.PublicKey()) -} - -// PublicKeyToEthAddress returns the ethereum address derived from [pubKey] 
-func PublicKeyToEthAddress(pubKey *secp256k1.PublicKey) common.Address { - return crypto.PubkeyToAddress(*(pubKey.ToECDSA())) -} diff --git a/plugin/evm/gossip.go b/plugin/evm/gossip.go index a6218ff33c..1197562865 100644 --- a/plugin/evm/gossip.go +++ b/plugin/evm/gossip.go @@ -1,6 +1,8 @@ // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +// TODO: move to network + package evm import ( @@ -24,6 +26,7 @@ import ( "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" + "github.com/ava-labs/coreth/plugin/evm/config" ) const pendingTxsBuffer = 10 @@ -31,11 +34,9 @@ const pendingTxsBuffer = 10 var ( _ p2p.Handler = (*txGossipHandler)(nil) - _ gossip.Gossipable = (*GossipEthTx)(nil) - _ gossip.Gossipable = (*GossipAtomicTx)(nil) - _ gossip.Marshaller[*GossipAtomicTx] = (*GossipAtomicTxMarshaller)(nil) - _ gossip.Marshaller[*GossipEthTx] = (*GossipEthTxMarshaller)(nil) - _ gossip.Set[*GossipEthTx] = (*GossipEthTxPool)(nil) + _ gossip.Gossipable = (*GossipEthTx)(nil) + _ gossip.Marshaller[*GossipEthTx] = (*GossipEthTxMarshaller)(nil) + _ gossip.Set[*GossipEthTx] = (*GossipEthTxPool)(nil) _ eth.PushGossiper = (*EthPushGossiper)(nil) ) @@ -90,29 +91,14 @@ func (t txGossipHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, dead return t.appRequestHandler.AppRequest(ctx, nodeID, deadline, requestBytes) } -type GossipAtomicTxMarshaller struct{} - -func (g GossipAtomicTxMarshaller) MarshalGossip(tx *GossipAtomicTx) ([]byte, error) { - return tx.Tx.SignedBytes(), nil -} - -func (g GossipAtomicTxMarshaller) UnmarshalGossip(bytes []byte) (*GossipAtomicTx, error) { - tx, err := ExtractAtomicTx(bytes, Codec) - return &GossipAtomicTx{ - Tx: tx, - }, err -} - -type GossipAtomicTx struct { - Tx *Tx -} - -func (tx *GossipAtomicTx) GossipID() ids.ID { - return tx.Tx.ID() -} - func NewGossipEthTxPool(mempool *txpool.TxPool, registerer prometheus.Registerer) (*GossipEthTxPool, error) { - bloom, err := gossip.NewBloomFilter(registerer, "eth_tx_bloom_filter", txGossipBloomMinTargetElements, txGossipBloomTargetFalsePositiveRate, txGossipBloomResetFalsePositiveRate) + bloom, err := gossip.NewBloomFilter( + registerer, + "eth_tx_bloom_filter", + config.TxGossipBloomMinTargetElements, + config.TxGossipBloomTargetFalsePositiveRate, + config.TxGossipBloomResetFalsePositiveRate, + ) if err != nil { return nil, fmt.Errorf("failed to initialize bloom filter: %w", err) } @@ -160,7 +146,7 @@ func (g *GossipEthTxPool) Subscribe(ctx context.Context) { return case pendingTxs := <-g.pendingTxs: g.lock.Lock() - optimalElements := (g.mempool.PendingSize(txpool.PendingFilter{}) + len(pendingTxs.Txs)) * txGossipBloomChurnMultiplier + optimalElements := (g.mempool.PendingSize(txpool.PendingFilter{}) + len(pendingTxs.Txs)) * config.TxGossipBloomChurnMultiplier for _, pendingTx := range pendingTxs.Txs { tx := &GossipEthTx{Tx: pendingTx} g.bloom.Add(tx) diff --git a/plugin/evm/gossip_stats.go b/plugin/evm/gossip_stats.go deleted file mode 100644 index 9805c7f1ff..0000000000 --- a/plugin/evm/gossip_stats.go +++ /dev/null @@ -1,67 +0,0 @@ -// (c) 2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import "github.com/ava-labs/coreth/metrics" - -var _ GossipStats = &gossipStats{} - -// GossipStats contains methods for updating incoming and outgoing gossip stats. -type GossipStats interface { - IncAtomicGossipReceived() - IncEthTxsGossipReceived() - - // new vs. 
known txs received - IncAtomicGossipReceivedDropped() - IncAtomicGossipReceivedError() - IncAtomicGossipReceivedKnown() - IncAtomicGossipReceivedNew() - IncEthTxsGossipReceivedError() - IncEthTxsGossipReceivedKnown() - IncEthTxsGossipReceivedNew() -} - -// gossipStats implements stats for incoming and outgoing gossip stats. -type gossipStats struct { - // messages - atomicGossipReceived metrics.Counter - ethTxsGossipReceived metrics.Counter - - // new vs. known txs received - atomicGossipReceivedDropped metrics.Counter - atomicGossipReceivedError metrics.Counter - atomicGossipReceivedKnown metrics.Counter - atomicGossipReceivedNew metrics.Counter - ethTxsGossipReceivedError metrics.Counter - ethTxsGossipReceivedKnown metrics.Counter - ethTxsGossipReceivedNew metrics.Counter -} - -func NewGossipStats() GossipStats { - return &gossipStats{ - atomicGossipReceived: metrics.GetOrRegisterCounter("gossip_atomic_received", nil), - ethTxsGossipReceived: metrics.GetOrRegisterCounter("gossip_eth_txs_received", nil), - - atomicGossipReceivedDropped: metrics.GetOrRegisterCounter("gossip_atomic_received_dropped", nil), - atomicGossipReceivedError: metrics.GetOrRegisterCounter("gossip_atomic_received_error", nil), - atomicGossipReceivedKnown: metrics.GetOrRegisterCounter("gossip_atomic_received_known", nil), - atomicGossipReceivedNew: metrics.GetOrRegisterCounter("gossip_atomic_received_new", nil), - ethTxsGossipReceivedError: metrics.GetOrRegisterCounter("gossip_eth_txs_received_error", nil), - ethTxsGossipReceivedKnown: metrics.GetOrRegisterCounter("gossip_eth_txs_received_known", nil), - ethTxsGossipReceivedNew: metrics.GetOrRegisterCounter("gossip_eth_txs_received_new", nil), - } -} - -// incoming messages -func (g *gossipStats) IncAtomicGossipReceived() { g.atomicGossipReceived.Inc(1) } -func (g *gossipStats) IncEthTxsGossipReceived() { g.ethTxsGossipReceived.Inc(1) } - -// new vs. 
known txs received -func (g *gossipStats) IncAtomicGossipReceivedDropped() { g.atomicGossipReceivedDropped.Inc(1) } -func (g *gossipStats) IncAtomicGossipReceivedError() { g.atomicGossipReceivedError.Inc(1) } -func (g *gossipStats) IncAtomicGossipReceivedKnown() { g.atomicGossipReceivedKnown.Inc(1) } -func (g *gossipStats) IncAtomicGossipReceivedNew() { g.atomicGossipReceivedNew.Inc(1) } -func (g *gossipStats) IncEthTxsGossipReceivedError() { g.ethTxsGossipReceivedError.Inc(1) } -func (g *gossipStats) IncEthTxsGossipReceivedKnown() { g.ethTxsGossipReceivedKnown.Inc(1) } -func (g *gossipStats) IncEthTxsGossipReceivedNew() { g.ethTxsGossipReceivedNew.Inc(1) } diff --git a/plugin/evm/gossip_test.go b/plugin/evm/gossip_test.go index 3fa4f53888..03bb31431a 100644 --- a/plugin/evm/gossip_test.go +++ b/plugin/evm/gossip_test.go @@ -9,11 +9,7 @@ import ( "testing" "time" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p/gossip" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" @@ -29,110 +25,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestGossipAtomicTxMarshaller(t *testing.T) { - require := require.New(t) - - want := &GossipAtomicTx{ - Tx: &Tx{ - UnsignedAtomicTx: &UnsignedImportTx{}, - Creds: []verify.Verifiable{}, - }, - } - marshaller := GossipAtomicTxMarshaller{} - - key0 := testKeys[0] - require.NoError(want.Tx.Sign(Codec, [][]*secp256k1.PrivateKey{{key0}})) - - bytes, err := marshaller.MarshalGossip(want) - require.NoError(err) - - got, err := marshaller.UnmarshalGossip(bytes) - require.NoError(err) - require.Equal(want.GossipID(), got.GossipID()) -} - -func TestAtomicMempoolIterate(t *testing.T) { - txs := []*GossipAtomicTx{ - { - Tx: &Tx{ - UnsignedAtomicTx: &TestUnsignedTx{ - IDV: ids.GenerateTestID(), - }, - }, - }, - { - Tx: &Tx{ - UnsignedAtomicTx: &TestUnsignedTx{ - IDV: ids.GenerateTestID(), - }, - }, - }, - } - - tests := []struct { - name string - add []*GossipAtomicTx - f func(tx *GossipAtomicTx) bool - possibleValues []*GossipAtomicTx - expectedLen int - }{ - { - name: "func matches nothing", - add: txs, - f: func(*GossipAtomicTx) bool { - return false - }, - possibleValues: nil, - }, - { - name: "func matches all", - add: txs, - f: func(*GossipAtomicTx) bool { - return true - }, - possibleValues: txs, - expectedLen: 2, - }, - { - name: "func matches subset", - add: txs, - f: func(tx *GossipAtomicTx) bool { - return tx.Tx == txs[0].Tx - }, - possibleValues: txs, - expectedLen: 1, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 10, nil) - require.NoError(err) - - for _, add := range tt.add { - require.NoError(m.Add(add)) - } - - matches := make([]*GossipAtomicTx, 0) - f := func(tx *GossipAtomicTx) bool { - match := tt.f(tx) - - if match { - matches = append(matches, tx) - } - - return match - } - - m.Iterate(f) - - require.Len(matches, tt.expectedLen) - require.Subset(tt.possibleValues, matches) - }) - } -} - func TestGossipEthTxMarshaller(t *testing.T) { require := require.New(t) @@ -189,7 +81,7 @@ func TestGossipSubscribe(t *testing.T) { defer gossipTxPool.lock.RUnlock() for i, tx := range ethTxs { - require.Truef(gossipTxPool.bloom.Has(&GossipEthTx{Tx: tx}), "expected tx[%d] to be 
in bloom filter", i) + assert.Truef(c, gossipTxPool.bloom.Has(&GossipEthTx{Tx: tx}), "expected tx[%d] to be in bloom filter", i) } }, 30*time.Second, diff --git a/plugin/evm/gossiper_atomic_gossiping_test.go b/plugin/evm/gossiper_atomic_gossiping_test.go index 0974b50638..e2c97167f8 100644 --- a/plugin/evm/gossiper_atomic_gossiping_test.go +++ b/plugin/evm/gossiper_atomic_gossiping_test.go @@ -5,17 +5,21 @@ package evm import ( "context" + "encoding/binary" "sync" "testing" "time" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/utils/set" "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/proto" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/plugin/evm/atomic" ) // show that a txID discovered from gossip is requested to the same node only if @@ -53,14 +57,17 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { tx, conflictingTx := importTxs[0], importTxs[1] // gossip tx and check it is accepted and gossiped - msg := message.AtomicTxGossip{ - Tx: tx.SignedBytes(), + msg := atomic.GossipAtomicTx{ + Tx: tx, } - msgBytes, err := message.BuildGossipMessage(vm.networkCodec, msg) + marshaller := atomic.GossipAtomicTxMarshaller{} + txBytes, err := marshaller.MarshalGossip(&msg) assert.NoError(err) - vm.ctx.Lock.Unlock() + msgBytes, err := buildAtomicPushGossip(txBytes) + assert.NoError(err) + // show that no txID is requested assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) time.Sleep(500 * time.Millisecond) @@ -85,14 +92,17 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { txGossipedLock.Unlock() // show that conflicting tx is not added to mempool - msg = message.AtomicTxGossip{ - Tx: conflictingTx.SignedBytes(), + msg = atomic.GossipAtomicTx{ + Tx: conflictingTx, } - msgBytes, err = message.BuildGossipMessage(vm.networkCodec, msg) + marshaller = atomic.GossipAtomicTxMarshaller{} + txBytes, err = marshaller.MarshalGossip(&msg) assert.NoError(err) vm.ctx.Lock.Unlock() + msgBytes, err = buildAtomicPushGossip(txBytes) + assert.NoError(err) assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) vm.ctx.Lock.Lock() @@ -137,7 +147,7 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { tx, conflictingTx := importTxs[0], importTxs[1] txID := tx.ID() - mempool.AddTx(tx) + mempool.AddRemoteTx(tx) mempool.NextTx() mempool.DiscardCurrentTx(txID) @@ -147,14 +157,17 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { // Gossip the transaction to the VM and ensure that it is not added to the mempool // and is not re-gossipped. nodeID := ids.GenerateTestNodeID() - msg := message.AtomicTxGossip{ - Tx: tx.SignedBytes(), + msg := atomic.GossipAtomicTx{ + Tx: tx, } - msgBytes, err := message.BuildGossipMessage(vm.networkCodec, msg) + marshaller := atomic.GossipAtomicTxMarshaller{} + txBytes, err := marshaller.MarshalGossip(&msg) assert.NoError(err) vm.ctx.Lock.Unlock() + msgBytes, err := buildAtomicPushGossip(txBytes) + assert.NoError(err) assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) vm.ctx.Lock.Lock() @@ -171,8 +184,8 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { // Conflicting tx must be submitted over the API to be included in push gossip. 
// (i.e., txs received via p2p are not included in push gossip) // This test adds it directly to the mempool + gossiper to simulate that. - vm.mempool.AddTx(conflictingTx) - vm.atomicTxPushGossiper.Add(&GossipAtomicTx{conflictingTx}) + vm.mempool.AddRemoteTx(conflictingTx) + vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: conflictingTx}) time.Sleep(500 * time.Millisecond) vm.ctx.Lock.Lock() @@ -185,3 +198,16 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { assert.False(mempool.Has(txID)) assert.True(mempool.Has(conflictingTx.ID())) } + +func buildAtomicPushGossip(txBytes []byte) ([]byte, error) { + inboundGossip := &sdk.PushGossip{ + Gossip: [][]byte{txBytes}, + } + inboundGossipBytes, err := proto.Marshal(inboundGossip) + if err != nil { + return nil, err + } + + inboundGossipMsg := append(binary.AppendUvarint(nil, p2p.AtomicTxGossipHandlerID), inboundGossipBytes...) + return inboundGossipMsg, nil +} diff --git a/plugin/evm/handler.go b/plugin/evm/handler.go deleted file mode 100644 index 74930eb963..0000000000 --- a/plugin/evm/handler.go +++ /dev/null @@ -1,139 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "github.com/ava-labs/avalanchego/ids" - - "github.com/ava-labs/libevm/log" - "github.com/ava-labs/libevm/rlp" - - "github.com/ava-labs/coreth/core/txpool" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/plugin/evm/message" -) - -// GossipHandler handles incoming gossip messages -type GossipHandler struct { - vm *VM - atomicMempool *Mempool - txPool *txpool.TxPool - stats GossipStats -} - -func NewGossipHandler(vm *VM, stats GossipStats) *GossipHandler { - return &GossipHandler{ - vm: vm, - atomicMempool: vm.mempool, - txPool: vm.txPool, - stats: stats, - } -} - -func (h *GossipHandler) HandleAtomicTx(nodeID ids.NodeID, msg message.AtomicTxGossip) error { - log.Trace( - "AppGossip called with AtomicTxGossip", - "peerID", nodeID, - ) - - if len(msg.Tx) == 0 { - log.Trace( - "AppGossip received empty AtomicTxGossip Message", - "peerID", nodeID, - ) - return nil - } - - // In the case that the gossip message contains a transaction, - // attempt to parse it and add it as a remote. 
- tx := Tx{} - if _, err := Codec.Unmarshal(msg.Tx, &tx); err != nil { - log.Trace( - "AppGossip provided invalid tx", - "err", err, - ) - return nil - } - unsignedBytes, err := Codec.Marshal(codecVersion, &tx.UnsignedAtomicTx) - if err != nil { - log.Trace( - "AppGossip failed to marshal unsigned tx", - "err", err, - ) - return nil - } - tx.Initialize(unsignedBytes, msg.Tx) - - txID := tx.ID() - h.stats.IncAtomicGossipReceived() - if _, dropped, found := h.atomicMempool.GetTx(txID); found { - h.stats.IncAtomicGossipReceivedKnown() - return nil - } else if dropped { - h.stats.IncAtomicGossipReceivedDropped() - return nil - } - - h.stats.IncAtomicGossipReceivedNew() - - h.vm.ctx.Lock.RLock() - defer h.vm.ctx.Lock.RUnlock() - - if err := h.vm.mempool.AddTx(&tx); err != nil { - log.Trace( - "AppGossip provided invalid transaction", - "peerID", nodeID, - "err", err, - ) - h.stats.IncAtomicGossipReceivedError() - } - - return nil -} - -func (h *GossipHandler) HandleEthTxs(nodeID ids.NodeID, msg message.EthTxsGossip) error { - log.Trace( - "AppGossip called with EthTxsGossip", - "peerID", nodeID, - "size(txs)", len(msg.Txs), - ) - - if len(msg.Txs) == 0 { - log.Trace( - "AppGossip received empty EthTxsGossip Message", - "peerID", nodeID, - ) - return nil - } - - // The maximum size of this encoded object is enforced by the codec. - txs := make([]*types.Transaction, 0) - if err := rlp.DecodeBytes(msg.Txs, &txs); err != nil { - log.Trace( - "AppGossip provided invalid txs", - "peerID", nodeID, - "err", err, - ) - return nil - } - h.stats.IncEthTxsGossipReceived() - errs := h.txPool.Add(txs, false, false) - for i, err := range errs { - if err != nil { - log.Trace( - "AppGossip failed to add to mempool", - "err", err, - "tx", txs[i].Hash(), - ) - if err == txpool.ErrAlreadyKnown { - h.stats.IncEthTxsGossipReceivedKnown() - } else { - h.stats.IncEthTxsGossipReceivedError() - } - continue - } - h.stats.IncEthTxsGossipReceivedNew() - } - return nil -} diff --git a/plugin/evm/import_tx_test.go b/plugin/evm/import_tx_test.go index a5f5513e00..bc57aec0d3 100644 --- a/plugin/evm/import_tx_test.go +++ b/plugin/evm/import_tx_test.go @@ -8,12 +8,13 @@ import ( "testing" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/libevm/common" "github.com/holiman/uint256" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" + avalancheutils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/set" @@ -23,7 +24,7 @@ import ( // createImportTxOptions adds a UTXO to shared memory and generates a list of import transactions sending this UTXO // to each of the three test keys (conflicting transactions) -func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *atomic.Memory) []*Tx { +func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { utxo := &avax.UTXO{ UTXOID: avax.UTXOID{TxID: ids.GenerateTestID()}, Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, @@ -31,28 +32,28 @@ func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *atomic.Memory) [] Amt: uint64(50000000), OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + Addrs: []ids.ShortID{testKeys[0].Address()}, }, }, } - utxoBytes, 
err := vm.codec.Marshal(codecVersion, utxo) + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { t.Fatal(err) } xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) } - importTxs := make([]*Tx, 0, 3) + importTxs := make([]*atomic.Tx, 0, 3) for _, ethAddr := range testEthAddrs { importTx, err := vm.newImportTx(vm.ctx.XChainID, ethAddr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { @@ -69,7 +70,7 @@ func TestImportTxVerify(t *testing.T) { var importAmount uint64 = 10000000 txID := ids.GenerateTestID() - importTx := &UnsignedImportTx{ + importTx := &atomic.UnsignedImportTx{ NetworkID: ctx.NetworkID, BlockchainID: ctx.ChainID, SourceChain: ctx.XChainID, @@ -101,10 +102,10 @@ func TestImportTxVerify(t *testing.T) { }, }, }, - Outs: []EVMOutput{ + Outs: []atomic.EVMOutput{ { Address: testEthAddrs[0], - Amount: importAmount - params.AvalancheAtomicTxFee, + Amount: importAmount - atomic.AvalancheAtomicTxFee, AssetID: ctx.AVAXAssetID, }, { @@ -116,21 +117,21 @@ func TestImportTxVerify(t *testing.T) { } // Sort the inputs and outputs to ensure the transaction is canonical - utils.Sort(importTx.ImportedInputs) - utils.Sort(importTx.Outs) + avalancheutils.Sort(importTx.ImportedInputs) + avalancheutils.Sort(importTx.Outs) tests := map[string]atomicTxVerifyTest{ "nil tx": { - generate: func(t *testing.T) UnsignedAtomicTx { - var importTx *UnsignedImportTx + generate: func(t *testing.T) atomic.UnsignedAtomicTx { + var importTx *atomic.UnsignedImportTx return importTx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errNilTx.Error(), + expectedErr: atomic.ErrNilTx.Error(), }, "valid import tx": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { return importTx }, ctx: ctx, @@ -138,7 +139,7 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "", // Expect this transaction to be valid in Apricot Phase 0 }, "valid import tx banff": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { return importTx }, ctx: ctx, @@ -146,37 +147,37 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "", // Expect this transaction to be valid in Banff }, "invalid network ID": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.NetworkID++ return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errWrongNetworkID.Error(), + expectedErr: atomic.ErrWrongNetworkID.Error(), }, "invalid blockchain ID": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.BlockchainID = ids.GenerateTestID() return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errWrongBlockchainID.Error(), + expectedErr: atomic.ErrWrongChainID.Error(), }, "P-chain source before AP5": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.SourceChain = constants.PlatformChainID return &tx }, ctx: ctx, 
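+ // The P-chain becomes a valid import source chain only in AP5 (see the "P-chain source after AP5" case below), so Phase 0 rules must reject it with ErrWrongChainID.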
rules: apricotRulesPhase0, - expectedErr: errWrongChainID.Error(), + expectedErr: atomic.ErrWrongChainID.Error(), }, "P-chain source after AP5": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.SourceChain = constants.PlatformChainID return &tx @@ -185,27 +186,27 @@ func TestImportTxVerify(t *testing.T) { rules: apricotRulesPhase5, }, "invalid source chain ID": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.SourceChain = ids.GenerateTestID() return &tx }, ctx: ctx, rules: apricotRulesPhase5, - expectedErr: errWrongChainID.Error(), + expectedErr: atomic.ErrWrongChainID.Error(), }, "no inputs": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.ImportedInputs = nil return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errNoImportInputs.Error(), + expectedErr: atomic.ErrNoImportInputs.Error(), }, "inputs sorted incorrectly": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.ImportedInputs = []*avax.TransferableInput{ tx.ImportedInputs[1], @@ -215,10 +216,10 @@ func TestImportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errInputsNotSortedUnique.Error(), + expectedErr: atomic.ErrInputsNotSortedUnique.Error(), }, "invalid input": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.ImportedInputs = []*avax.TransferableInput{ tx.ImportedInputs[0], @@ -231,9 +232,9 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "atomic input failed verification", }, "unsorted outputs phase 0 passes verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ tx.Outs[1], tx.Outs[0], } @@ -244,9 +245,9 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "", }, "non-unique outputs phase 0 passes verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ tx.Outs[0], tx.Outs[0], } @@ -257,9 +258,9 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "", }, "unsorted outputs phase 1 fails verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ tx.Outs[1], tx.Outs[0], } @@ -267,12 +268,12 @@ func TestImportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase1, - expectedErr: errOutputsNotSorted.Error(), + expectedErr: atomic.ErrOutputsNotSorted.Error(), }, "non-unique outputs phase 1 passes verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ tx.Outs[0], tx.Outs[0], } @@ -283,9 +284,9 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "", }, "outputs not sorted and unique phase 2 fails verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ tx.Outs[0], tx.Outs[0], } @@ 
-293,12 +294,12 @@ func TestImportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase2, - expectedErr: errOutputsNotSortedUnique.Error(), + expectedErr: atomic.ErrOutputsNotSortedUnique.Error(), }, "outputs not sorted phase 2 fails verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ tx.Outs[1], tx.Outs[0], } @@ -306,12 +307,12 @@ func TestImportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase2, - expectedErr: errOutputsNotSortedUnique.Error(), + expectedErr: atomic.ErrOutputsNotSortedUnique.Error(), }, "invalid EVMOutput fails verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ { Address: testEthAddrs[0], Amount: 0, @@ -325,17 +326,17 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "EVM Output failed verification", }, "no outputs apricot phase 3": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.Outs = nil return &tx }, ctx: ctx, rules: apricotRulesPhase3, - expectedErr: errNoEVMOutputs.Error(), + expectedErr: atomic.ErrNoEVMOutputs.Error(), }, "non-AVAX input Apricot Phase 6": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.ImportedInputs = []*avax.TransferableInput{ { @@ -359,9 +360,9 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "", }, "non-AVAX output Apricot Phase 6": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ { Address: importTx.Outs[0].Address, Amount: importTx.Outs[0].Amount, @@ -375,7 +376,7 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "", }, "non-AVAX input Banff": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.ImportedInputs = []*avax.TransferableInput{ { @@ -396,12 +397,12 @@ func TestImportTxVerify(t *testing.T) { }, ctx: ctx, rules: banffRules, - expectedErr: errImportNonAVAXInputBanff.Error(), + expectedErr: atomic.ErrImportNonAVAXInputBanff.Error(), }, "non-AVAX output Banff": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ { Address: importTx.Outs[0].Address, Amount: importTx.Outs[0].Amount, @@ -412,7 +413,7 @@ func TestImportTxVerify(t *testing.T) { }, ctx: ctx, rules: banffRules, - expectedErr: errImportNonAVAXOutputBanff.Error(), + expectedErr: atomic.ErrImportNonAVAXOutputBanff.Error(), }, } for name, test := range tests { @@ -426,7 +427,7 @@ func TestNewImportTx(t *testing.T) { importAmount := uint64(5000000) // createNewImportAVAXTx adds a UTXO to shared memory and then constructs a new import transaction // and checks that it has the correct fee for the base fee that has been used - createNewImportAVAXTx := func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + createNewImportAVAXTx := func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() _, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, importAmount, testShortIDAddrs[0]) if err != nil { @@ 
-450,7 +451,7 @@ func TestNewImportTx(t *testing.T) { if err != nil { t.Fatal(err) } - actualFee, err = CalculateDynamicFee(actualCost, initialBaseFee) + actualFee, err = atomic.CalculateDynamicFee(actualCost, initialBaseFee) if err != nil { t.Fatal(err) } @@ -496,8 +497,8 @@ func TestNewImportTx(t *testing.T) { } expectedRemainingBalance := new(uint256.Int).Mul( - uint256.NewInt(importAmount-actualAVAXBurned), x2cRate) - addr := GetEthAddress(testKeys[0]) + uint256.NewInt(importAmount-actualAVAXBurned), atomic.X2CRate) + addr := testKeys[0].EthAddress() if actualBalance := sdb.GetBalance(addr); actualBalance.Cmp(expectedRemainingBalance) != 0 { t.Fatalf("address remaining balance %s equal %s not %s", addr.String(), actualBalance, expectedRemainingBalance) } @@ -543,7 +544,7 @@ func TestImportTxGasCost(t *testing.T) { importAmount := uint64(5000000) tests := map[string]struct { - UnsignedImportTx *UnsignedImportTx + UnsignedImportTx *atomic.UnsignedImportTx Keys [][]*secp256k1.PrivateKey ExpectedGasUsed uint64 @@ -552,7 +553,7 @@ func TestImportTxGasCost(t *testing.T) { FixedFee bool }{ "simple import": { - UnsignedImportTx: &UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -564,7 +565,7 @@ func TestImportTxGasCost(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: importAmount, AssetID: avaxAssetID, @@ -576,7 +577,7 @@ func TestImportTxGasCost(t *testing.T) { BaseFee: big.NewInt(25 * params.GWei), }, "simple import 1wei": { - UnsignedImportTx: &UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -588,7 +589,7 @@ func TestImportTxGasCost(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: importAmount, AssetID: avaxAssetID, @@ -600,7 +601,7 @@ func TestImportTxGasCost(t *testing.T) { BaseFee: big.NewInt(1), }, "simple import 1wei + fixed fee": { - UnsignedImportTx: &UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -612,7 +613,7 @@ func TestImportTxGasCost(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: importAmount, AssetID: avaxAssetID, @@ -625,7 +626,7 @@ func TestImportTxGasCost(t *testing.T) { FixedFee: true, }, "simple ANT import": { - UnsignedImportTx: &UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -647,7 +648,7 @@ func TestImportTxGasCost(t *testing.T) { }, }, }, - Outs: []EVMOutput{ + Outs: []atomic.EVMOutput{ { Address: testEthAddrs[0], Amount: importAmount, @@ -661,7 +662,7 @@ func TestImportTxGasCost(t *testing.T) { BaseFee: big.NewInt(25 * params.GWei), }, "complex ANT import": { - UnsignedImportTx: &UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -683,7 +684,7 @@ func TestImportTxGasCost(t *testing.T) { }, }, }, - Outs: []EVMOutput{ + Outs: []atomic.EVMOutput{ { Address: testEthAddrs[0], Amount: importAmount, @@ -702,7 +703,7 @@ func TestImportTxGasCost(t *testing.T) { BaseFee: big.NewInt(25 * params.GWei), }, "multisig import": { - 
UnsignedImportTx: &UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -714,7 +715,7 @@ func TestImportTxGasCost(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0, 1}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: importAmount, AssetID: avaxAssetID, @@ -726,7 +727,7 @@ func TestImportTxGasCost(t *testing.T) { BaseFee: big.NewInt(25 * params.GWei), }, "large import": { - UnsignedImportTx: &UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -812,7 +813,7 @@ func TestImportTxGasCost(t *testing.T) { }, }, }, - Outs: []EVMOutput{ + Outs: []atomic.EVMOutput{ { Address: testEthAddrs[0], Amount: importAmount * 10, @@ -840,10 +841,10 @@ func TestImportTxGasCost(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { - tx := &Tx{UnsignedAtomicTx: test.UnsignedImportTx} + tx := &atomic.Tx{UnsignedAtomicTx: test.UnsignedImportTx} // Sign with the correct key - if err := tx.Sign(Codec, test.Keys); err != nil { + if err := tx.Sign(atomic.Codec, test.Keys); err != nil { t.Fatal(err) } @@ -855,7 +856,7 @@ func TestImportTxGasCost(t *testing.T) { t.Fatalf("Expected gasUsed to be %d, but found %d", test.ExpectedGasUsed, gasUsed) } - fee, err := CalculateDynamicFee(gasUsed, test.BaseFee) + fee, err := atomic.CalculateDynamicFee(gasUsed, test.BaseFee) if err != nil { t.Fatal(err) } @@ -869,8 +870,8 @@ func TestImportTxGasCost(t *testing.T) { func TestImportTxSemanticVerify(t *testing.T) { tests := map[string]atomicTxTest{ "UTXO not present during bootstrapping": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -884,13 +885,13 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -898,8 +899,8 @@ func TestImportTxSemanticVerify(t *testing.T) { bootstrapping: true, }, "UTXO not present": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -913,13 +914,13 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -927,11 +928,11 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: 
"failed to fetch import UTXOs from", }, "garbage UTXO": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { utxoID := avax.UTXOID{TxID: ids.GenerateTestID()} xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxoID.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ Key: inputID[:], Value: []byte("hey there"), Traits: [][]byte{ @@ -941,7 +942,7 @@ func TestImportTxSemanticVerify(t *testing.T) { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -953,13 +954,13 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -967,7 +968,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "failed to unmarshal UTXO", }, "UTXO AssetID mismatch": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() expectedAssetID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, expectedAssetID, 1, testShortIDAddrs[0]) @@ -975,7 +976,7 @@ func TestImportTxSemanticVerify(t *testing.T) { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -987,28 +988,28 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx }, - semanticVerifyErr: errAssetIDMismatch.Error(), + semanticVerifyErr: atomic.ErrAssetIDMismatch.Error(), }, "insufficient AVAX funds": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -1020,13 +1021,13 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 2, // Produce more output than is consumed by the transaction AssetID: 
vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -1034,7 +1035,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx flow check failed due to", }, "insufficient non-AVAX funds": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() assetID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, testShortIDAddrs[0]) @@ -1042,7 +1043,7 @@ func TestImportTxSemanticVerify(t *testing.T) { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -1054,13 +1055,13 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 2, // Produce more output than is consumed by the transaction AssetID: assetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -1068,14 +1069,14 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx flow check failed due to", }, "no signatures": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -1087,13 +1088,13 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, nil); err != nil { + if err := tx.Sign(atomic.Codec, nil); err != nil { t.Fatal(err) } return tx @@ -1101,14 +1102,14 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx contained mismatched number of inputs/credentials", }, "incorrect signature": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -1120,14 +1121,14 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} // Sign the transaction with the incorrect key - if err := 
tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[1]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[1]}}); err != nil { t.Fatal(err) } return tx @@ -1135,14 +1136,14 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx transfer failed verification", }, "non-unique EVM Outputs": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 2, testShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -1154,7 +1155,7 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{ + Outs: []atomic.EVMOutput{ { Address: testEthAddrs[0], Amount: 1, @@ -1167,13 +1168,13 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx }, genesisJSON: genesisJSONApricotPhase3, - semanticVerifyErr: errOutputsNotSortedUnique.Error(), + semanticVerifyErr: atomic.ErrOutputsNotSortedUnique.Error(), }, } @@ -1188,14 +1189,14 @@ func TestImportTxEVMStateTransfer(t *testing.T) { assetID := ids.GenerateTestID() tests := map[string]atomicTxTest{ "AVAX UTXO": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -1207,13 +1208,13 @@ func TestImportTxEVMStateTransfer(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -1227,20 +1228,20 @@ func TestImportTxEVMStateTransfer(t *testing.T) { } avaxBalance := sdb.GetBalance(testEthAddrs[0]) - if avaxBalance.Cmp(x2cRate) != 0 { - t.Fatalf("Expected AVAX balance to be %d, found balance: %d", x2cRate, avaxBalance) + if avaxBalance.Cmp(atomic.X2CRate) != 0 { + t.Fatalf("Expected AVAX balance to be %d, found balance: %d", *atomic.X2CRate, avaxBalance) } }, }, "non-AVAX UTXO": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, testShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, 
SourceChain: vm.ctx.XChainID, @@ -1252,13 +1253,13 @@ func TestImportTxEVMStateTransfer(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: assetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx diff --git a/plugin/evm/imports_test.go b/plugin/evm/imports_test.go new file mode 100644 index 0000000000..5e8021d3e5 --- /dev/null +++ b/plugin/evm/imports_test.go @@ -0,0 +1,72 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/tools/go/packages" +) + +// getDependencies takes a fully qualified package name and returns a map of all +// its recursive package imports (including itself) in the same format. +func getDependencies(packageName string) (map[string]struct{}, error) { + // Configure the load mode to include dependencies + cfg := &packages.Config{Mode: packages.NeedDeps | packages.NeedImports | packages.NeedName | packages.NeedModule} + pkgs, err := packages.Load(cfg, packageName) + if err != nil { + return nil, fmt.Errorf("failed to load package: %v", err) + } + + if len(pkgs) == 0 || pkgs[0].Errors != nil { + return nil, fmt.Errorf("failed to load package %s", packageName) + } + + deps := make(map[string]struct{}) + var collectDeps func(pkg *packages.Package) + collectDeps = func(pkg *packages.Package) { + if _, ok := deps[pkg.PkgPath]; ok { + return // Avoid re-processing the same dependency + } + deps[pkg.PkgPath] = struct{}{} + for _, dep := range pkg.Imports { + collectDeps(dep) + } + } + + // Start collecting dependencies + collectDeps(pkgs[0]) + return deps, nil +} + +func TestMustNotImport(t *testing.T) { + withRepo := func(pkg string) string { + const repo = "github.com/ava-labs/coreth" + return fmt.Sprintf("%s/%s", repo, pkg) + } + mustNotImport := map[string][]string{ + // The following sub-packages of plugin/evm must not import core, core/vm + // so clients (e.g., wallets, e2e tests) can import them without pulling in + // the entire VM logic. + // Importing these packages configures libevm globally and it is not + // possible to do so for both coreth and subnet-evm, where the client may + // wish to connect to multiple chains. 
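+ // Keys and values below are package paths relative to the repository
+ // root; each value lists the imports that key package must not pull in,
+ // even transitively.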
+ "plugin/evm/atomic": {"core", "core/vm", "params"}, + "plugin/evm/client": {"core", "core/vm", "params"}, + "plugin/evm/config": {"core", "core/vm", "params"}, + } + + for packageName, forbiddenImports := range mustNotImport { + imports, err := getDependencies(withRepo(packageName)) + require.NoError(t, err) + + for _, forbiddenImport := range forbiddenImports { + fullForbiddenImport := withRepo(forbiddenImport) + _, found := imports[fullForbiddenImport] + require.False(t, found, "package %s must not import %s, check output of go list -f '{{ .Deps }}' \"%s\" ", packageName, fullForbiddenImport, withRepo(packageName)) + } + } +} diff --git a/plugin/evm/mempool_atomic_gossiping_test.go b/plugin/evm/mempool_atomic_gossiping_test.go index b44f2097b4..9f2cc89535 100644 --- a/plugin/evm/mempool_atomic_gossiping_test.go +++ b/plugin/evm/mempool_atomic_gossiping_test.go @@ -9,8 +9,11 @@ import ( "testing" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/chain" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" ) @@ -32,7 +35,7 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { // generate a valid and conflicting tx var ( - tx, conflictingTx *Tx + tx, conflictingTx *atomic.Tx ) if name == "import" { importTxs := createImportTxOptions(t, vm, sharedMemory) @@ -52,7 +55,7 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { // try to add a conflicting tx err = vm.mempool.AddLocalTx(conflictingTx) - assert.ErrorIs(err, errConflictingAtomicTx) + assert.ErrorIs(err, atomic.ErrConflictingAtomicTx) has = mempool.Has(conflictingTxID) assert.False(has, "conflicting tx in mempool") @@ -91,27 +94,22 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { func TestMempoolMaxMempoolSizeHandling(t *testing.T) { assert := assert.New(t) - _, vm, _, sharedMemory, _ := GenesisVM(t, true, "", "", "") - defer func() { - err := vm.Shutdown(context.Background()) - assert.NoError(err) - }() - mempool := vm.mempool - + mempool, err := atomic.NewMempool(&snow.Context{}, prometheus.NewRegistry(), 1, nil) + assert.NoError(err) // create candidate tx (we will drop before validation) - tx := createImportTxOptions(t, vm, sharedMemory)[0] + tx := atomic.GenerateTestImportTx() - // shortcut to simulated almost filled mempool - mempool.maxSize = 0 - - assert.ErrorIs(mempool.AddTx(tx), errTooManyAtomicTx) - assert.False(mempool.Has(tx.ID())) - - // shortcut to simulated empty mempool - mempool.maxSize = defaultMempoolSize - - assert.NoError(mempool.AddTx(tx)) + assert.NoError(mempool.AddRemoteTx(tx)) assert.True(mempool.Has(tx.ID())) + // promote tx to be issued + _, ok := mempool.NextTx() + assert.True(ok) + mempool.IssueCurrentTxs() + + // try to add one more tx + tx2 := atomic.GenerateTestImportTx() + assert.ErrorIs(mempool.AddRemoteTx(tx2), atomic.ErrTooManyAtomicTx) + assert.False(mempool.Has(tx2.ID())) } // mempool will drop transaction with the lowest fee @@ -128,21 +126,21 @@ func TestMempoolPriorityDrop(t *testing.T) { err := vm.Shutdown(context.Background()) assert.NoError(err) }() - mempool := vm.mempool - mempool.maxSize = 1 + mempool, err := atomic.NewMempool(vm.ctx, prometheus.NewRegistry(), 1, vm.verifyTxAtTip) + assert.NoError(err) tx1, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } - 
assert.NoError(mempool.AddTx(tx1)) + assert.NoError(mempool.AddRemoteTx(tx1)) assert.True(mempool.Has(tx1.ID())) tx2, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[1], initialBaseFee, []*secp256k1.PrivateKey{testKeys[1]}) if err != nil { t.Fatal(err) } - assert.ErrorIs(mempool.AddTx(tx2), errInsufficientAtomicTxFee) + assert.ErrorIs(mempool.AddRemoteTx(tx2), atomic.ErrInsufficientAtomicTxFee) assert.True(mempool.Has(tx1.ID())) assert.False(mempool.Has(tx2.ID())) @@ -150,7 +148,7 @@ func TestMempoolPriorityDrop(t *testing.T) { if err != nil { t.Fatal(err) } - assert.NoError(mempool.AddTx(tx3)) + assert.NoError(mempool.AddRemoteTx(tx3)) assert.False(mempool.Has(tx1.ID())) assert.False(mempool.Has(tx2.ID())) assert.True(mempool.Has(tx3.ID())) diff --git a/plugin/evm/message/codec.go b/plugin/evm/message/codec.go index de3603b9c2..664c9252bb 100644 --- a/plugin/evm/message/codec.go +++ b/plugin/evm/message/codec.go @@ -15,20 +15,16 @@ const ( maxMessageSize = 2*units.MiB - 64*units.KiB // Subtract 64 KiB from p2p network cap to leave room for encoding overhead from AvalancheGo ) -var ( - Codec codec.Manager -) +var Codec codec.Manager func init() { Codec = codec.NewManager(maxMessageSize) c := linearcodec.NewDefault() errs := wrappers.Errs{} + // Gossip types removed from codec + c.SkipRegistrations(2) errs.Add( - // Gossip types - c.RegisterType(AtomicTxGossip{}), - c.RegisterType(EthTxsGossip{}), - // Types for state sync frontier consensus c.RegisterType(SyncSummary{}), diff --git a/plugin/evm/message/handler.go b/plugin/evm/message/handler.go index a11f970259..1b910e3826 100644 --- a/plugin/evm/message/handler.go +++ b/plugin/evm/message/handler.go @@ -6,33 +6,10 @@ package message import ( "context" - "github.com/ava-labs/libevm/log" - "github.com/ava-labs/avalanchego/ids" ) -var ( - _ GossipHandler = NoopMempoolGossipHandler{} - _ RequestHandler = NoopRequestHandler{} -) - -// GossipHandler handles incoming gossip messages -type GossipHandler interface { - HandleAtomicTx(nodeID ids.NodeID, msg AtomicTxGossip) error - HandleEthTxs(nodeID ids.NodeID, msg EthTxsGossip) error -} - -type NoopMempoolGossipHandler struct{} - -func (NoopMempoolGossipHandler) HandleAtomicTx(nodeID ids.NodeID, msg AtomicTxGossip) error { - log.Debug("dropping unexpected AtomicTxGossip message", "peerID", nodeID) - return nil -} - -func (NoopMempoolGossipHandler) HandleEthTxs(nodeID ids.NodeID, msg EthTxsGossip) error { - log.Debug("dropping unexpected EthTxsGossip message", "peerID", nodeID) - return nil -} +var _ RequestHandler = NoopRequestHandler{} // RequestHandler interface handles incoming requests from peers // Must have methods in format of handleType(context.Context, ids.NodeID, uint32, request Type) error diff --git a/plugin/evm/message/handler_test.go b/plugin/evm/message/handler_test.go deleted file mode 100644 index a27b1f9d4f..0000000000 --- a/plugin/evm/message/handler_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package message - -import ( - "testing" - - "github.com/ava-labs/avalanchego/ids" - - "github.com/stretchr/testify/assert" -) - -type CounterHandler struct { - AtomicTx, EthTxs int -} - -func (h *CounterHandler) HandleAtomicTx(ids.NodeID, AtomicTxGossip) error { - h.AtomicTx++ - return nil -} - -func (h *CounterHandler) HandleEthTxs(ids.NodeID, EthTxsGossip) error { - h.EthTxs++ - return nil -} - -func TestHandleAtomicTx(t *testing.T) { - assert := assert.New(t) - - handler := CounterHandler{} - msg := AtomicTxGossip{} - - err := msg.Handle(&handler, ids.EmptyNodeID) - assert.NoError(err) - assert.Equal(1, handler.AtomicTx) - assert.Zero(handler.EthTxs) -} - -func TestHandleEthTxs(t *testing.T) { - assert := assert.New(t) - - handler := CounterHandler{} - msg := EthTxsGossip{} - - err := msg.Handle(&handler, ids.EmptyNodeID) - assert.NoError(err) - assert.Zero(handler.AtomicTx) - assert.Equal(1, handler.EthTxs) -} - -func TestNoopHandler(t *testing.T) { - assert := assert.New(t) - - handler := NoopMempoolGossipHandler{} - - err := handler.HandleEthTxs(ids.EmptyNodeID, EthTxsGossip{}) - assert.NoError(err) - - err = handler.HandleAtomicTx(ids.EmptyNodeID, AtomicTxGossip{}) - assert.NoError(err) -} diff --git a/plugin/evm/message/message.go b/plugin/evm/message/message.go deleted file mode 100644 index c8c80a0343..0000000000 --- a/plugin/evm/message/message.go +++ /dev/null @@ -1,78 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "errors" - "fmt" - - "github.com/ava-labs/avalanchego/codec" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/units" -) - -const ( - // EthMsgSoftCapSize is the ideal size of encoded transaction bytes we send in - // any [EthTxsGossip] or [AtomicTxGossip] message. We do not limit inbound messages to - // this size, however. Max inbound message size is enforced by the codec - // (512KB). - EthMsgSoftCapSize = 64 * units.KiB -) - -var ( - _ GossipMessage = AtomicTxGossip{} - _ GossipMessage = EthTxsGossip{} - - errUnexpectedCodecVersion = errors.New("unexpected codec version") -) - -type GossipMessage interface { - // types implementing GossipMessage should also implement fmt.Stringer for logging purposes. - fmt.Stringer - - // Handle this gossip message with the gossip handler. 
- Handle(handler GossipHandler, nodeID ids.NodeID) error -} - -type AtomicTxGossip struct { - Tx []byte `serialize:"true"` -} - -func (msg AtomicTxGossip) Handle(handler GossipHandler, nodeID ids.NodeID) error { - return handler.HandleAtomicTx(nodeID, msg) -} - -func (msg AtomicTxGossip) String() string { - return fmt.Sprintf("AtomicTxGossip(Len=%d)", len(msg.Tx)) -} - -type EthTxsGossip struct { - Txs []byte `serialize:"true"` -} - -func (msg EthTxsGossip) Handle(handler GossipHandler, nodeID ids.NodeID) error { - return handler.HandleEthTxs(nodeID, msg) -} - -func (msg EthTxsGossip) String() string { - return fmt.Sprintf("EthTxsGossip(Len=%d)", len(msg.Txs)) -} - -func ParseGossipMessage(codec codec.Manager, bytes []byte) (GossipMessage, error) { - var msg GossipMessage - version, err := codec.Unmarshal(bytes, &msg) - if err != nil { - return nil, err - } - if version != Version { - return nil, errUnexpectedCodecVersion - } - return msg, nil -} - -func BuildGossipMessage(codec codec.Manager, msg GossipMessage) ([]byte, error) { - bytes, err := codec.Marshal(Version, &msg) - return bytes, err -} diff --git a/plugin/evm/message/message_test.go b/plugin/evm/message/message_test.go deleted file mode 100644 index dbcdea2d75..0000000000 --- a/plugin/evm/message/message_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "encoding/base64" - "testing" - - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/units" - - "github.com/stretchr/testify/assert" -) - -// TestMarshalAtomicTx asserts that the structure or serialization logic hasn't changed, primarily to -// ensure compatibility with the network. -func TestMarshalAtomicTx(t *testing.T) { - assert := assert.New(t) - - base64AtomicTxGossip := "AAAAAAAAAAAABGJsYWg=" - msg := []byte("blah") - builtMsg := AtomicTxGossip{ - Tx: msg, - } - builtMsgBytes, err := BuildGossipMessage(Codec, builtMsg) - assert.NoError(err) - assert.Equal(base64AtomicTxGossip, base64.StdEncoding.EncodeToString(builtMsgBytes)) - - parsedMsgIntf, err := ParseGossipMessage(Codec, builtMsgBytes) - assert.NoError(err) - - parsedMsg, ok := parsedMsgIntf.(AtomicTxGossip) - assert.True(ok) - - assert.Equal(msg, parsedMsg.Tx) -} - -// TestMarshalEthTxs asserts that the structure or serialization logic hasn't changed, primarily to -// ensure compatibility with the network. 
-func TestMarshalEthTxs(t *testing.T) { - assert := assert.New(t) - - base64EthTxGossip := "AAAAAAABAAAABGJsYWg=" - msg := []byte("blah") - builtMsg := EthTxsGossip{ - Txs: msg, - } - builtMsgBytes, err := BuildGossipMessage(Codec, builtMsg) - assert.NoError(err) - assert.Equal(base64EthTxGossip, base64.StdEncoding.EncodeToString(builtMsgBytes)) - - parsedMsgIntf, err := ParseGossipMessage(Codec, builtMsgBytes) - assert.NoError(err) - - parsedMsg, ok := parsedMsgIntf.(EthTxsGossip) - assert.True(ok) - - assert.Equal(msg, parsedMsg.Txs) -} - -func TestEthTxsTooLarge(t *testing.T) { - assert := assert.New(t) - - builtMsg := EthTxsGossip{ - Txs: utils.RandomBytes(maxMessageSize), - } - _, err := BuildGossipMessage(Codec, builtMsg) - assert.Error(err) -} - -func TestParseGibberish(t *testing.T) { - assert := assert.New(t) - - randomBytes := utils.RandomBytes(256 * units.KiB) - _, err := ParseGossipMessage(Codec, randomBytes) - assert.Error(err) -} diff --git a/plugin/evm/shared_memory_writer.go b/plugin/evm/shared_memory_writer.go deleted file mode 100644 index 7e6de6f862..0000000000 --- a/plugin/evm/shared_memory_writer.go +++ /dev/null @@ -1,37 +0,0 @@ -// (c) 2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/precompile/precompileconfig" -) - -var _ precompileconfig.SharedMemoryWriter = &sharedMemoryWriter{} - -type sharedMemoryWriter struct { - requests map[ids.ID]*atomic.Requests -} - -func NewSharedMemoryWriter() *sharedMemoryWriter { - return &sharedMemoryWriter{ - requests: make(map[ids.ID]*atomic.Requests), - } -} - -func (s *sharedMemoryWriter) AddSharedMemoryRequests(chainID ids.ID, requests *atomic.Requests) { - mergeAtomicOpsToMap(s.requests, chainID, requests) -} - -// mergeAtomicOps merges atomic ops for [chainID] represented by [requests] -// to the [output] map provided. -func mergeAtomicOpsToMap(output map[ids.ID]*atomic.Requests, chainID ids.ID, requests *atomic.Requests) { - if request, exists := output[chainID]; exists { - request.PutRequests = append(request.PutRequests, requests.PutRequests...) - request.RemoveRequests = append(request.RemoveRequests, requests.RemoveRequests...) - } else { - output[chainID] = requests - } -} diff --git a/plugin/evm/statesync/connector.go b/plugin/evm/statesync/connector.go new file mode 100644 index 0000000000..813f1f87ea --- /dev/null +++ b/plugin/evm/statesync/connector.go @@ -0,0 +1,172 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
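+// This file bridges avalanchego's AppRequest transport and geth's snap sync
+// protocol: peers that connect are registered with the snap.Syncer as fake
+// snap peers, outbound snap messages are sent as AppRequests, and timed-out
+// requests are retried a bounded number of times.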
+ +package statesync + +import ( + "context" + "fmt" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/eth/protocols/snap" + "github.com/ava-labs/libevm/log" + ethp2p "github.com/ava-labs/libevm/p2p" + "github.com/ava-labs/libevm/p2p/enode" +) + +const ( + maxRetries = 5 + failedRequestSleepInterval = 10 * time.Millisecond +) + +var ( + _ validators.Connector = (*Connector)(nil) + _ ethp2p.MsgReadWriter = (*outbound)(nil) +) + +type Connector struct { + sync *snap.Syncer + sender *p2p.Client +} + +func NewConnector(sync *snap.Syncer, sender *p2p.Client) *Connector { + return &Connector{sync: sync, sender: sender} +} + +func (c *Connector) Connected(ctx context.Context, nodeID ids.NodeID, version *version.Application) error { + return c.sync.Register(NewOutboundPeer(nodeID, c.sync, c.sender)) +} + +func (c *Connector) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + return c.sync.Unregister(nodeID.String()) +} + +type outbound struct { + peerID ids.NodeID + sync *snap.Syncer + sender *p2p.Client +} + +func NewOutboundPeer(nodeID ids.NodeID, sync *snap.Syncer, sender *p2p.Client) *snap.Peer { + return snap.NewFakePeer(protocolVersion, nodeID.String(), &outbound{ + peerID: nodeID, + sync: sync, + sender: sender, + }) +} + +// ReadMsg implements the ethp2p.MsgReadWriter interface. +// It is not expected to be called in the used code path. +func (o *outbound) ReadMsg() (ethp2p.Msg, error) { panic("not expected to be called") } + +func (o *outbound) WriteMsg(msg ethp2p.Msg) error { + bytes, err := toBytes(msg) + if err != nil { + return fmt.Errorf("failed to convert message to bytes: %w, expected: %d", err, msg.Size) + } + + message := &retryableMessage{outbound: o, outBytes: bytes} + return message.send() +} + +type retryableMessage struct { + outbound *outbound + outBytes []byte + retries int +} + +func (r *retryableMessage) send() error { + nodeIDs := set.NewSet[ids.NodeID](1) + nodeIDs.Add(r.outbound.peerID) + + return r.outbound.sender.AppRequest(context.Background(), nodeIDs, r.outBytes, r.handleResponse) +} + +func (r *retryableMessage) handleResponse( + ctx context.Context, + nodeID ids.NodeID, + responseBytes []byte, + err error, +) { + if err == nil { // Handle successful response + log.Debug("statesync AppRequest response", "nodeID", nodeID, "responseBytes", len(responseBytes)) + p := snap.NewFakePeer(protocolVersion, nodeID.String(), &rw{readBytes: responseBytes}) + if err := snap.HandleMessage(r.outbound, p); err != nil { + log.Warn("failed to handle response", "peer", nodeID, "err", err) + } + return + } + + // Handle retry + + log.Warn("got error response from peer", "peer", nodeID, "err", err) + // TODO: Is this the right way to check for AppError? + // Notably errors.As expects a ptr to a type that implements error interface, + // but *AppError implements error, not AppError. 
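+ // For reference, a sketch of the errors.As form this TODO asks about:
+ //
+ //	var appErr *common.AppError
+ //	if !errors.As(err, &appErr) {
+ //		log.Warn("unexpected error type", "err", err)
+ //		return
+ //	}
+ //
+ // errors.As takes a pointer to any type implementing error, and
+ // *common.AppError does, so passing &appErr (a **common.AppError) works;
+ // the "errors" package would also need to be imported.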
+ appErr, ok := err.(*common.AppError) + if !ok { + log.Warn("unexpected error type", "err", err) + return + } + if appErr.Code != common.ErrTimeout.Code { + log.Debug("dropping non-timeout error", "peer", nodeID, "err", err) + return // only retry on timeout + } + if r.retries >= maxRetries { + log.Warn("reached max retries", "peer", nodeID) + return + } + r.retries++ + log.Debug("retrying request", "peer", nodeID, "retries", r.retries) + time.Sleep(failedRequestSleepInterval) + if err := r.send(); err != nil { + log.Warn("failed to retry request, dropping", "peer", nodeID, "err", err) + } +} + +func (o *outbound) Chain() *core.BlockChain { panic("not expected to be called") } +func (o *outbound) RunPeer(*snap.Peer, snap.Handler) error { panic("not expected to be called") } +func (o *outbound) PeerInfo(id enode.ID) interface{} { panic("not expected to be called") } + +func (o *outbound) Handle(peer *snap.Peer, packet snap.Packet) error { + d := &Downloader{SnapSyncer: o.sync} + return d.DeliverSnapPacket(peer, packet) +} + +// Downloader is copied from eth/downloader/downloader.go +type Downloader struct { + SnapSyncer *snap.Syncer +} + +// DeliverSnapPacket is invoked from a peer's message handler when it transmits a +// data packet for the local node to consume. +func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error { + switch packet := packet.(type) { + case *snap.AccountRangePacket: + hashes, accounts, err := packet.Unpack() + if err != nil { + return err + } + return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof) + + case *snap.StorageRangesPacket: + hashset, slotset := packet.Unpack() + return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof) + + case *snap.ByteCodesPacket: + return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes) + + case *snap.TrieNodesPacket: + return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes) + + default: + return fmt.Errorf("unexpected snap packet type: %T", packet) + } +} diff --git a/plugin/evm/statesync/handler.go b/plugin/evm/statesync/handler.go new file mode 100644 index 0000000000..cffe5242f2 --- /dev/null +++ b/plugin/evm/statesync/handler.go @@ -0,0 +1,128 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
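+// This file serves the inbound side of the exchange: each AppRequest is
+// decoded into a snap protocol message, dispatched via snap.HandleMessage,
+// and the buffered response bytes are returned to the requesting peer.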
+ +package statesync + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/eth/protocols/snap" + "github.com/ava-labs/libevm/log" + ethp2p "github.com/ava-labs/libevm/p2p" + "github.com/ava-labs/libevm/p2p/enode" +) + +var ( + _ p2p.Handler = (*Handler)(nil) + _ snap.Backend = (*Handler)(nil) + _ ethp2p.MsgReadWriter = (*rw)(nil) +) + +const ( + ProtocolID = 128 // ID for the state sync handler, leaving earlier IDs reserved + ErrCodeSnapHandlerFailed = 1 + + protocolVersion = 0 +) + +type Handler struct { + chain *core.BlockChain +} + +func NewHandler(chain *core.BlockChain) *Handler { + return &Handler{chain: chain} +} + +func (h *Handler) AppRequest( + ctx context.Context, + nodeID ids.NodeID, + deadline time.Time, + requestBytes []byte, +) ([]byte, *common.AppError) { + start := time.Now() + rw := &rw{readBytes: requestBytes} + p := snap.NewFakePeer(protocolVersion, nodeID.String(), rw) + err := snap.HandleMessage(h, p) + if err != nil { + log.Debug("statesync AppRequest err", "nodeID", nodeID, "err", err) + return nil, &common.AppError{ + Code: ErrCodeSnapHandlerFailed, + Message: err.Error(), + } + } + log.Debug("statesync AppRequest response", "nodeID", nodeID, "responseBytes", len(rw.writeBytes), "duration", time.Since(start)) + return rw.writeBytes, nil +} + +// AppGossip implements p2p.Handler. +// It is implemented as a no-op because gossip is not used in the state sync protocol. +func (h *Handler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) {} + +// Chain implements snap.Backend. +func (h *Handler) Chain() *core.BlockChain { return h.chain } + +// Handle implements snap.Backend. +// It is implemented as a no-op because the main handling is done in AppRequest. +func (h *Handler) Handle(*snap.Peer, snap.Packet) error { return nil } + +// RunPeer implements snap.Backend. +// It is not expected to be called in the used code path. +func (h *Handler) RunPeer(*snap.Peer, snap.Handler) error { panic("calling not expected") } + +// PeerInfo implements snap.Backend. +// It is not expected to be called in the used code path. +func (h *Handler) PeerInfo(id enode.ID) interface{} { panic("calling not expected") } + +// rw is a helper struct that implements ethp2p.MsgReadWriter. +type rw struct { + readBytes []byte + writeBytes []byte +} + +// ReadMsg implements ethp2p.MsgReadWriter. +// It is expected to be called exactly once, immediately after the request is received. +func (rw *rw) ReadMsg() (ethp2p.Msg, error) { + return fromBytes(rw.readBytes) +} + +// WriteMsg implements ethp2p.MsgReadWriter. +// It is expected to be called exactly once, immediately after the response is prepared.
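+// The wire format, shared by fromBytes and toBytes below, is an 8-byte
+// big-endian message code followed by the raw payload bytes.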
+func (rw *rw) WriteMsg(msg ethp2p.Msg) error { + var err error + rw.writeBytes, err = toBytes(msg) + return err +} + +func fromBytes(msgBytes []byte) (ethp2p.Msg, error) { + if len(msgBytes) < wrappers.LongLen { + return ethp2p.Msg{}, fmt.Errorf("bytes too short: %d", len(msgBytes)) + } + code := binary.BigEndian.Uint64(msgBytes) + return ethp2p.Msg{ + Code: code, + Size: uint32(len(msgBytes) - wrappers.LongLen), + Payload: bytes.NewReader(msgBytes[wrappers.LongLen:]), + ReceivedAt: time.Now(), + }, nil +} + +func toBytes(msg ethp2p.Msg) ([]byte, error) { + bytes := make([]byte, msg.Size+wrappers.LongLen) + binary.BigEndian.PutUint64(bytes, msg.Code) + n, err := msg.Payload.Read(bytes[wrappers.LongLen:]) + if n == int(msg.Size) && errors.Is(err, io.EOF) { + err = nil + } + return bytes, err +} diff --git a/plugin/evm/syncervm_client.go b/plugin/evm/syncervm_client.go index 2807b6b0da..8a3438108e 100644 --- a/plugin/evm/syncervm_client.go +++ b/plugin/evm/syncervm_client.go @@ -17,8 +17,11 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/eth" + "github.com/ava-labs/coreth/eth/protocols/snap" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/peer" "github.com/ava-labs/coreth/plugin/evm/message" + ethstatesync "github.com/ava-labs/coreth/plugin/evm/statesync" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/statesync" "github.com/ava-labs/libevm/common" @@ -57,6 +60,12 @@ type stateSyncClientConfig struct { client syncclient.Client toEngine chan<- commonEng.Message + + /// + useUpstream bool + network peer.Network + appSender commonEng.AppSender + stateSyncNodes []ids.NodeID } type stateSyncerClient struct { @@ -151,13 +160,43 @@ func (client *stateSyncerClient) stateSync(ctx context.Context) error { // Sync the EVM trie and then the atomic trie. These steps could be done // in parallel or in the opposite order. Keeping them serial for simplicity for now. - if err := client.syncStateTrie(ctx); err != nil { - return err + if client.useUpstream { + log.Warn("Using upstream state syncer (untested)") + syncer := snap.NewSyncer(client.chaindb, rawdb.HashScheme) + p2pClient := client.network.NewClient(ethstatesync.ProtocolID) + if len(client.stateSyncNodes) > 0 { + for _, nodeID := range client.stateSyncNodes { + syncer.Register(ethstatesync.NewOutboundPeer(nodeID, syncer, p2pClient)) + } + } else { + client.network.AddConnector(ethstatesync.NewConnector(syncer, p2pClient)) + } + if err := syncer.Sync(client.syncSummary.BlockRoot, convertReadOnlyToBidirectional(ctx.Done())); err != nil { + return err + } + log.Info("Upstream state syncer completed") + } else { + if err := client.syncStateTrie(ctx); err != nil { + return err + } } return client.syncAtomicTrie(ctx) } +func convertReadOnlyToBidirectional[T any](readOnly <-chan T) chan T { + bidirectional := make(chan T) + + go func() { + defer close(bidirectional) + for value := range readOnly { + bidirectional <- value + } + }() + + return bidirectional +} + // acceptSyncSummary returns true if sync will be performed and launches the state sync process // in a goroutine. 
func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncSummary) (block.StateSyncMode, error) { diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 3abd73275b..65aff9b080 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -15,8 +15,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/database" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" + avalanchedatabase "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -34,6 +34,8 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/predicate" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/statesync" @@ -291,7 +293,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s require.NoError(serverVM.Shutdown(context.Background())) }) var ( - importTx, exportTx *Tx + importTx, exportTx *atomic.Tx err error ) generateAndAcceptBlocks(t, serverVM, numBlocks, func(i int, gen *core.BlockGen) { @@ -333,11 +335,11 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s serverAtomicTrie := serverVM.atomicTrie.(*atomicTrie) serverAtomicTrie.commitInterval = test.syncableInterval require.NoError(serverAtomicTrie.commit(test.syncableInterval, serverAtomicTrie.LastAcceptedRoot())) - require.NoError(serverVM.db.Commit()) + require.NoError(serverVM.versiondb.Commit()) serverSharedMemories := newSharedMemories(serverAtomicMemory, serverVM.ctx.ChainID, serverVM.ctx.XChainID) - serverSharedMemories.assertOpsApplied(t, importTx.mustAtomicOps()) - serverSharedMemories.assertOpsApplied(t, exportTx.mustAtomicOps()) + serverSharedMemories.assertOpsApplied(t, mustAtomicOps(importTx)) + serverSharedMemories.assertOpsApplied(t, mustAtomicOps(exportTx)) // make some accounts trieDB := triedb.NewDatabase(serverVM.chaindb, nil) @@ -406,7 +408,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s return &syncVMSetup{ serverVM: serverVM, serverAppSender: serverAppSender, - includedAtomicTxs: []*Tx{ + includedAtomicTxs: []*atomic.Tx{ importTx, exportTx, }, @@ -425,13 +427,13 @@ type syncVMSetup struct { serverVM *VM serverAppSender *enginetest.Sender - includedAtomicTxs []*Tx + includedAtomicTxs []*atomic.Tx fundedAccounts map[*utils.Key]*types.StateAccount syncerVM *VM - syncerDB database.Database + syncerDB avalanchedatabase.Database syncerEngineChan <-chan commonEng.Message - syncerAtomicMemory *atomic.Memory + syncerAtomicMemory *avalancheatomic.Memory shutdownOnceSyncerVM *shutdownOnceVM } @@ -490,7 +492,7 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { if test.expectedErr != nil { require.ErrorIs(err, test.expectedErr) // Note we re-open the database here to avoid a closed error when the test is for a shutdown VM. 
- chaindb := Database{prefixdb.NewNested(ethDBPrefix, syncerVM.db)} + chaindb := database.WrapDatabase(prefixdb.NewNested(ethDBPrefix, syncerVM.db)) assertSyncPerformedHeights(t, chaindb, map[uint64]struct{}{}) return } @@ -554,13 +556,13 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { // check we can transition to [NormalOp] state and continue to process blocks. require.NoError(syncerVM.SetState(context.Background(), snow.NormalOp)) - require.True(syncerVM.bootstrapped) + require.True(syncerVM.bootstrapped.Get()) // check atomic memory was synced properly syncerSharedMemories := newSharedMemories(syncerAtomicMemory, syncerVM.ctx.ChainID, syncerVM.ctx.XChainID) for _, tx := range includedAtomicTxs { - syncerSharedMemories.assertOpsApplied(t, tx.mustAtomicOps()) + syncerSharedMemories.assertOpsApplied(t, mustAtomicOps(tx)) } // Generate blocks after we have entered normal consensus as well diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index 6024747180..35527a51f7 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" @@ -21,7 +21,6 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/snow/validators/validatorstest" agoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/logging" @@ -36,6 +35,8 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/utils" ) @@ -43,12 +44,12 @@ func TestEthTxGossip(t *testing.T) { require := require.New(t) ctx := context.Background() snowCtx := utils.TestSnowContext() - validatorState := &validatorstest.State{} + validatorState := utils.NewTestValidatorState() snowCtx.ValidatorState = validatorState pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := GetEthAddress(pk) + address := pk.EthAddress() genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -104,7 +105,13 @@ func TestEthTxGossip(t *testing.T) { } // Ask the VM for any new transactions. We should get nothing at first. 
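 // The request carries a bloom filter of the txs the requester has already
 // seen; an empty filter asks the VM for anything it is willing to gossip.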
- emptyBloomFilter, err := gossip.NewBloomFilter(prometheus.NewRegistry(), "", txGossipBloomMinTargetElements, txGossipBloomTargetFalsePositiveRate, txGossipBloomResetFalsePositiveRate) + emptyBloomFilter, err := gossip.NewBloomFilter( + prometheus.NewRegistry(), + "", + config.TxGossipBloomMinTargetElements, + config.TxGossipBloomTargetFalsePositiveRate, + config.TxGossipBloomResetFalsePositiveRate, + ) require.NoError(err) emptyBloomFilterBytes, _ := emptyBloomFilter.Marshal() request := &sdk.PullGossipRequest{ @@ -169,19 +176,14 @@ func TestAtomicTxGossip(t *testing.T) { ctx := context.Background() snowCtx := utils.TestSnowContext() snowCtx.AVAXAssetID = ids.GenerateTestID() - snowCtx.XChainID = ids.GenerateTestID() - validatorState := &validatorstest.State{ - GetSubnetIDF: func(context.Context, ids.ID) (ids.ID, error) { - return ids.Empty, nil - }, - } + validatorState := utils.NewTestValidatorState() snowCtx.ValidatorState = validatorState - memory := atomic.NewMemory(memdb.New()) - snowCtx.SharedMemory = memory.NewSharedMemory(ids.Empty) + memory := avalancheatomic.NewMemory(memdb.New()) + snowCtx.SharedMemory = memory.NewSharedMemory(snowCtx.ChainID) pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := GetEthAddress(pk) + address := pk.EthAddress() genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -236,7 +238,13 @@ func TestAtomicTxGossip(t *testing.T) { } // Ask the VM for any new transactions. We should get nothing at first. - emptyBloomFilter, err := gossip.NewBloomFilter(prometheus.NewRegistry(), "", txGossipBloomMinTargetElements, txGossipBloomTargetFalsePositiveRate, txGossipBloomResetFalsePositiveRate) + emptyBloomFilter, err := gossip.NewBloomFilter( + prometheus.NewRegistry(), + "", + config.TxGossipBloomMinTargetElements, + config.TxGossipBloomTargetFalsePositiveRate, + config.TxGossipBloomResetFalsePositiveRate, + ) require.NoError(err) emptyBloomFilterBytes, _ := emptyBloomFilter.Marshal() request := &sdk.PullGossipRequest{ @@ -270,10 +278,10 @@ func TestAtomicTxGossip(t *testing.T) { 0, snowCtx.AVAXAssetID, 100_000_000_000, - pk.PublicKey().Address(), + pk.Address(), ) require.NoError(err) - tx, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) @@ -283,7 +291,7 @@ func TestAtomicTxGossip(t *testing.T) { // Ask the VM for new transactions. We should get the newly issued tx. 
wg.Add(1) - marshaller := GossipAtomicTxMarshaller{} + marshaller := atomic.GossipAtomicTxMarshaller{} onResponse = func(_ context.Context, nodeID ids.NodeID, responseBytes []byte, err error) { require.NoError(err) @@ -308,14 +316,6 @@ func TestEthTxPushGossipOutbound(t *testing.T) { require := require.New(t) ctx := context.Background() snowCtx := utils.TestSnowContext() - snowCtx.ValidatorState = &validatorstest.State{ - GetCurrentHeightF: func(context.Context) (uint64, error) { - return 0, nil - }, - GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return nil, nil - }, - } sender := &enginetest.SenderStub{ SentAppGossip: make(chan []byte, 1), } @@ -328,7 +328,7 @@ func TestEthTxPushGossipOutbound(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := GetEthAddress(pk) + address := pk.EthAddress() genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -388,7 +388,7 @@ func TestEthTxPushGossipInbound(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := GetEthAddress(pk) + address := pk.EthAddress() genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -440,19 +440,14 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { ctx := context.Background() snowCtx := utils.TestSnowContext() snowCtx.AVAXAssetID = ids.GenerateTestID() - snowCtx.XChainID = ids.GenerateTestID() - validatorState := &validatorstest.State{ - GetSubnetIDF: func(context.Context, ids.ID) (ids.ID, error) { - return ids.Empty, nil - }, - } + validatorState := utils.NewTestValidatorState() snowCtx.ValidatorState = validatorState - memory := atomic.NewMemory(memdb.New()) - snowCtx.SharedMemory = memory.NewSharedMemory(ids.Empty) + memory := avalancheatomic.NewMemory(memdb.New()) + snowCtx.SharedMemory = memory.NewSharedMemory(snowCtx.ChainID) pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := GetEthAddress(pk) + address := pk.EthAddress() genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -491,13 +486,13 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { 0, snowCtx.AVAXAssetID, 100_000_000_000, - pk.PublicKey().Address(), + pk.Address(), ) require.NoError(err) - tx, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) - vm.atomicTxPushGossiper.Add(&GossipAtomicTx{tx}) + vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) gossipedBytes := <-sender.SentAppGossip require.Equal(byte(p2p.AtomicTxGossipHandlerID), gossipedBytes[0]) @@ -506,7 +501,7 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { require.NoError(proto.Unmarshal(gossipedBytes[1:], outboundGossipMsg)) require.Len(outboundGossipMsg.Gossip, 1) - marshaller := GossipAtomicTxMarshaller{} + marshaller := atomic.GossipAtomicTxMarshaller{} gossipedTx, err := marshaller.UnmarshalGossip(outboundGossipMsg.Gossip[0]) require.NoError(err) require.Equal(tx.ID(), gossipedTx.Tx.ID()) @@ -518,19 +513,14 @@ func TestAtomicTxPushGossipInbound(t *testing.T) { ctx := context.Background() snowCtx 
:= utils.TestSnowContext() snowCtx.AVAXAssetID = ids.GenerateTestID() - snowCtx.XChainID = ids.GenerateTestID() - validatorState := &validatorstest.State{ - GetSubnetIDF: func(context.Context, ids.ID) (ids.ID, error) { - return ids.Empty, nil - }, - } + validatorState := utils.NewTestValidatorState() snowCtx.ValidatorState = validatorState - memory := atomic.NewMemory(memdb.New()) - snowCtx.SharedMemory = memory.NewSharedMemory(ids.Empty) + memory := avalancheatomic.NewMemory(memdb.New()) + snowCtx.SharedMemory = memory.NewSharedMemory(snowCtx.ChainID) pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := GetEthAddress(pk) + address := pk.EthAddress() genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -567,15 +557,15 @@ func TestAtomicTxPushGossipInbound(t *testing.T) { 0, snowCtx.AVAXAssetID, 100_000_000_000, - pk.PublicKey().Address(), + pk.Address(), ) require.NoError(err) - tx, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) - marshaller := GossipAtomicTxMarshaller{} - gossipedTx := &GossipAtomicTx{ + marshaller := atomic.GossipAtomicTxMarshaller{} + gossipedTx := &atomic.GossipAtomicTx{ Tx: tx, } gossipBytes, err := marshaller.MarshalGossip(gossipedTx) diff --git a/plugin/evm/tx_heap.go b/plugin/evm/tx_heap.go index d44020039e..c6562fd9b0 100644 --- a/plugin/evm/tx_heap.go +++ b/plugin/evm/tx_heap.go @@ -7,6 +7,7 @@ import ( "container/heap" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/plugin/evm/atomic" ) // txEntry is used to track the [gasPrice] transactions pay to be included in @@ -14,7 +15,7 @@ import ( type txEntry struct { id ids.ID gasPrice uint64 - tx *Tx + tx *atomic.Tx index int } @@ -91,7 +92,7 @@ func newTxHeap(maxSize int) *txHeap { } } -func (th *txHeap) Push(tx *Tx, gasPrice uint64) { +func (th *txHeap) Push(tx *atomic.Tx, gasPrice uint64) { txID := tx.ID() oldLen := th.Len() heap.Push(th.maxHeap, &txEntry{ @@ -109,28 +110,28 @@ func (th *txHeap) Push(tx *Tx, gasPrice uint64) { } // Assumes there is non-zero items in [txHeap] -func (th *txHeap) PeekMax() (*Tx, uint64) { +func (th *txHeap) PeekMax() (*atomic.Tx, uint64) { txEntry := th.maxHeap.items[0] return txEntry.tx, txEntry.gasPrice } // Assumes there is non-zero items in [txHeap] -func (th *txHeap) PeekMin() (*Tx, uint64) { +func (th *txHeap) PeekMin() (*atomic.Tx, uint64) { txEntry := th.minHeap.items[0] return txEntry.tx, txEntry.gasPrice } // Assumes there is non-zero items in [txHeap] -func (th *txHeap) PopMax() *Tx { +func (th *txHeap) PopMax() *atomic.Tx { return th.Remove(th.maxHeap.items[0].id) } // Assumes there is non-zero items in [txHeap] -func (th *txHeap) PopMin() *Tx { +func (th *txHeap) PopMin() *atomic.Tx { return th.Remove(th.minHeap.items[0].id) } -func (th *txHeap) Remove(id ids.ID) *Tx { +func (th *txHeap) Remove(id ids.ID) *atomic.Tx { maxEntry, ok := th.maxHeap.Get(id) if !ok { return nil @@ -150,7 +151,7 @@ func (th *txHeap) Len() int { return th.maxHeap.Len() } -func (th *txHeap) Get(id ids.ID) (*Tx, bool) { +func (th *txHeap) Get(id ids.ID) (*atomic.Tx, bool) { txEntry, ok := th.maxHeap.Get(id) if !ok { return nil, false diff --git a/plugin/evm/tx_heap_test.go 
b/plugin/evm/tx_heap_test.go index 206b87bbdb..a054b7362e 100644 --- a/plugin/evm/tx_heap_test.go +++ b/plugin/evm/tx_heap_test.go @@ -6,27 +6,28 @@ package evm import ( "testing" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/stretchr/testify/assert" ) func TestTxHeap(t *testing.T) { var ( - tx0 = &Tx{ - UnsignedAtomicTx: &UnsignedImportTx{ + tx0 = &atomic.Tx{ + UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: 0, }, } tx0Bytes = []byte{0} - tx1 = &Tx{ - UnsignedAtomicTx: &UnsignedImportTx{ + tx1 = &atomic.Tx{ + UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: 1, }, } tx1Bytes = []byte{1} - tx2 = &Tx{ - UnsignedAtomicTx: &UnsignedImportTx{ + tx2 = &atomic.Tx{ + UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: 2, }, } diff --git a/plugin/evm/tx_test.go b/plugin/evm/tx_test.go index ef22bfa829..5e63eeb090 100644 --- a/plugin/evm/tx_test.go +++ b/plugin/evm/tx_test.go @@ -15,8 +15,9 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/params/extras" + "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" ) @@ -31,7 +32,7 @@ func TestCalculateDynamicFee(t *testing.T) { var tests []test = []test{ { gas: 1, - baseFee: new(big.Int).Set(x2cRate.ToBig()), + baseFee: new(big.Int).Set(atomic.X2CRate.ToBig()), expectedValue: 1, }, { @@ -42,7 +43,7 @@ func TestCalculateDynamicFee(t *testing.T) { } for _, test := range tests { - cost, err := CalculateDynamicFee(test.gas, test.baseFee) + cost, err := atomic.CalculateDynamicFee(test.gas, test.baseFee) if test.expectedErr == nil { if err != nil { t.Fatalf("Unexpectedly failed to calculate dynamic fee: %s", err) @@ -60,7 +61,7 @@ func TestCalculateDynamicFee(t *testing.T) { type atomicTxVerifyTest struct { ctx *snow.Context - generate func(t *testing.T) UnsignedAtomicTx + generate func(t *testing.T) atomic.UnsignedAtomicTx rules extras.Rules expectedErr string } @@ -79,7 +80,7 @@ func executeTxVerifyTest(t *testing.T, test atomicTxVerifyTest) { type atomicTxTest struct { // setup returns the atomic transaction for the test - setup func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx + setup func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx // define a string that should be contained in the error message if the tx fails verification // at some point. If the strings are empty, then the tx should pass verification at the // respective step. 
@@ -116,7 +117,15 @@ func executeTxTest(t *testing.T, test atomicTxTest) { } lastAcceptedBlock := vm.LastAcceptedBlockInternal().(*Block) - if err := tx.UnsignedAtomicTx.SemanticVerify(vm, tx, lastAcceptedBlock, baseFee, rules); len(test.semanticVerifyErr) == 0 && err != nil { + backend := &atomic.Backend{ + Ctx: vm.ctx, + Fx: &vm.fx, + Rules: rules, + Bootstrapped: vm.bootstrapped.Get(), + BlockFetcher: vm, + SecpCache: &vm.secpCache, + } + if err := tx.UnsignedAtomicTx.SemanticVerify(backend, tx, lastAcceptedBlock, baseFee); len(test.semanticVerifyErr) == 0 && err != nil { t.Fatalf("SemanticVerify failed unexpectedly due to: %s", err) } else if len(test.semanticVerifyErr) != 0 { if err == nil { @@ -192,18 +201,18 @@ func executeTxTest(t *testing.T, test atomicTxTest) { func TestEVMOutputCompare(t *testing.T) { type test struct { name string - a, b EVMOutput + a, b atomic.EVMOutput expected int } tests := []test{ { name: "address less", - a: EVMOutput{ + a: atomic.EVMOutput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{1}, }, - b: EVMOutput{ + b: atomic.EVMOutput{ Address: common.BytesToAddress([]byte{0x02}), AssetID: ids.ID{0}, }, @@ -211,11 +220,11 @@ func TestEVMOutputCompare(t *testing.T) { }, { name: "address greater; assetIDs equal", - a: EVMOutput{ + a: atomic.EVMOutput{ Address: common.BytesToAddress([]byte{0x02}), AssetID: ids.ID{}, }, - b: EVMOutput{ + b: atomic.EVMOutput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{}, }, @@ -223,11 +232,11 @@ func TestEVMOutputCompare(t *testing.T) { }, { name: "addresses equal; assetID less", - a: EVMOutput{ + a: atomic.EVMOutput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{0}, }, - b: EVMOutput{ + b: atomic.EVMOutput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{1}, }, @@ -235,8 +244,8 @@ func TestEVMOutputCompare(t *testing.T) { }, { name: "equal", - a: EVMOutput{}, - b: EVMOutput{}, + a: atomic.EVMOutput{}, + b: atomic.EVMOutput{}, expected: 0, }, } @@ -254,18 +263,18 @@ func TestEVMOutputCompare(t *testing.T) { func TestEVMInputCompare(t *testing.T) { type test struct { name string - a, b EVMInput + a, b atomic.EVMInput expected int } tests := []test{ { name: "address less", - a: EVMInput{ + a: atomic.EVMInput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{1}, }, - b: EVMInput{ + b: atomic.EVMInput{ Address: common.BytesToAddress([]byte{0x02}), AssetID: ids.ID{0}, }, @@ -273,11 +282,11 @@ func TestEVMInputCompare(t *testing.T) { }, { name: "address greater; assetIDs equal", - a: EVMInput{ + a: atomic.EVMInput{ Address: common.BytesToAddress([]byte{0x02}), AssetID: ids.ID{}, }, - b: EVMInput{ + b: atomic.EVMInput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{}, }, @@ -285,11 +294,11 @@ func TestEVMInputCompare(t *testing.T) { }, { name: "addresses equal; assetID less", - a: EVMInput{ + a: atomic.EVMInput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{0}, }, - b: EVMInput{ + b: atomic.EVMInput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{1}, }, @@ -297,8 +306,8 @@ func TestEVMInputCompare(t *testing.T) { }, { name: "equal", - a: EVMInput{}, - b: EVMInput{}, + a: atomic.EVMInput{}, + b: atomic.EVMInput{}, expected: 0, }, } diff --git a/plugin/evm/user.go b/plugin/evm/user.go index d1e7ba00fb..4f004db867 100644 --- a/plugin/evm/user.go +++ b/plugin/evm/user.go @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/database/encdb" "github.com/ava-labs/avalanchego/ids" 
"github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/libevm/common" ) @@ -47,7 +48,7 @@ func (u *user) getAddresses() ([]common.Address, error) { return nil, err } addresses := []common.Address{} - if _, err := Codec.Unmarshal(bytes, &addresses); err != nil { + if _, err := atomic.Codec.Unmarshal(bytes, &addresses); err != nil { return nil, err } return addresses, nil @@ -69,7 +70,7 @@ func (u *user) putAddress(privKey *secp256k1.PrivateKey) error { return errKeyNil } - address := GetEthAddress(privKey) // address the privKey controls + address := privKey.EthAddress() // address the privKey controls controlsAddress, err := u.controlsAddress(address) if err != nil { return err @@ -93,7 +94,7 @@ func (u *user) putAddress(privKey *secp256k1.PrivateKey) error { } } addresses = append(addresses, address) - bytes, err := Codec.Marshal(codecVersion, addresses) + bytes, err := atomic.Codec.Marshal(atomic.CodecVersion, addresses) if err != nil { return err } diff --git a/plugin/evm/version.go b/plugin/evm/version.go index 6151bbbe8e..e4b93c457e 100644 --- a/plugin/evm/version.go +++ b/plugin/evm/version.go @@ -11,7 +11,7 @@ var ( // GitCommit is set by the build script GitCommit string // Version is the version of Coreth - Version string = "v0.13.9" + Version string = "v0.14.1" ) func init() { diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index efd2617414..cadafd80a1 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -17,11 +17,12 @@ import ( "sync" "time" + "github.com/ava-labs/avalanchego/cache/metercacher" "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/upgrade" avalanchegoConstants "github.com/ava-labs/avalanchego/utils/constants" - "github.com/holiman/uint256" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/coreth/consensus/dummy" @@ -40,7 +41,10 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/params/extras" "github.com/ava-labs/coreth/peer" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/plugin/evm/statesync" "github.com/ava-labs/coreth/triedb/hashdb" "github.com/ava-labs/coreth/utils" "github.com/ava-labs/libevm/triedb" @@ -50,7 +54,6 @@ import ( statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/client/stats" "github.com/ava-labs/coreth/warp" - "github.com/ava-labs/coreth/warp/handlers" // Force-load tracer engine to trigger registration // @@ -75,7 +78,6 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -84,7 +86,6 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/set" @@ -108,27 +109,12 @@ var ( _ secp256k1fx.VM = &VM{} ) -const ( - x2cRateUint64 uint64 = 1_000_000_000 - 
x2cRateMinus1Uint64 uint64 = x2cRateUint64 - 1 -) - -var ( - // x2cRate is the conversion rate between the smallest denomination on the X-Chain - // 1 nAVAX and the smallest denomination on the C-Chain 1 wei. Where 1 nAVAX = 1 gWei. - // This is only required for AVAX because the denomination of 1 AVAX is 9 decimal - // places on the X and P chains, but is 18 decimal places within the EVM. - x2cRate = uint256.NewInt(x2cRateUint64) - x2cRateMinus1 = uint256.NewInt(x2cRateMinus1Uint64) -) - const ( // Max time from current time allowed for blocks, before they're considered future blocks // and fail verification maxFutureBlockTime = 10 * time.Second maxUTXOsToFetch = 1024 defaultMempoolSize = 4096 - codecVersion = uint16(0) secpCacheSize = 1024 decidedCacheSize = 10 * units.MiB @@ -145,16 +131,12 @@ const ( targetAtomicTxsSize = 40 * units.KiB // gossip constants - pushGossipDiscardedElements = 16_384 - txGossipBloomMinTargetElements = 8 * 1024 - txGossipBloomTargetFalsePositiveRate = 0.01 - txGossipBloomResetFalsePositiveRate = 0.05 - txGossipBloomChurnMultiplier = 3 - txGossipTargetMessageSize = 20 * units.KiB - maxValidatorSetStaleness = time.Minute - txGossipThrottlingPeriod = 10 * time.Second - txGossipThrottlingLimit = 2 - txGossipPollSize = 1 + pushGossipDiscardedElements = 16_384 + txGossipTargetMessageSize = 20 * units.KiB + maxValidatorSetStaleness = time.Minute + txGossipThrottlingPeriod = 10 * time.Second + txGossipThrottlingLimit = 2 + txGossipPollSize = 1 ) // Define the API endpoints for the VM @@ -184,30 +166,12 @@ var ( errEmptyBlock = errors.New("empty block") errUnsupportedFXs = errors.New("unsupported feature extensions") errInvalidBlock = errors.New("invalid block") - errInvalidAddr = errors.New("invalid hex address") - errInsufficientAtomicTxFee = errors.New("atomic tx fee too low for atomic mempool") - errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") - errNoImportInputs = errors.New("tx has no imported inputs") - errInputsNotSortedUnique = errors.New("inputs not sorted and unique") - errPublicKeySignatureMismatch = errors.New("signature doesn't match public key") - errWrongChainID = errors.New("tx has wrong chain ID") - errInsufficientFunds = errors.New("insufficient funds") - errNoExportOutputs = errors.New("tx has no export outputs") - errOutputsNotSorted = errors.New("tx outputs not sorted") - errOutputsNotSortedUnique = errors.New("outputs not sorted and unique") - errOverflowExport = errors.New("overflow when computing export amount + txFee") errInvalidNonce = errors.New("invalid nonce") - errConflictingAtomicInputs = errors.New("invalid block due to conflicting atomic inputs") errUnclesUnsupported = errors.New("uncles unsupported") errRejectedParent = errors.New("rejected parent") - errInsufficientFundsForFee = errors.New("insufficient AVAX funds to pay transaction fee") - errNoEVMOutputs = errors.New("tx has no EVM outputs") errNilBaseFeeApricotPhase3 = errors.New("nil base fee is invalid after apricotPhase3") errNilExtDataGasUsedApricotPhase4 = errors.New("nil extDataGasUsed is invalid after apricotPhase4") errNilBlockGasCostApricotPhase4 = errors.New("nil blockGasCost is invalid after apricotPhase4") - errConflictingAtomicTx = errors.New("conflicting atomic tx present") - errTooManyAtomicTx = errors.New("too many atomic tx") - errMissingAtomicTxs = errors.New("cannot build a block with non-empty extra data and zero atomic transactions") errInvalidHeaderPredicateResults = errors.New("invalid header predicate results") ) @@ -248,7 
@@ -248,7 +212,7 @@ type VM struct {
 	// with an efficient caching layer.
 	*chain.State
 
-	config Config
+	config config.Config
 
 	chainID   *big.Int
 	networkID uint64
@@ -262,8 +226,11 @@ type VM struct {
 	blockChain *core.BlockChain
 	miner      *miner.Miner
 
-	// [db] is the VM's current database managed by ChainState
-	db *versiondb.Database
+	// [versiondb] is the VM's current versioned database
+	versiondb *versiondb.Database
+
+	// [db] is the VM's current database
+	db database.Database
 
 	// metadataDB is used to store one off keys.
 	metadataDB database.Database
@@ -295,9 +262,8 @@ type VM struct {
 	builder *blockBuilder
 
 	baseCodec codec.Registry
-	codec     codec.Manager
 	clock     mockable.Clock
-	mempool   *Mempool
+	mempool   *atomic.Mempool
 
 	shutdownChan chan struct{}
 	shutdownWg   sync.WaitGroup
@@ -312,12 +278,12 @@ type VM struct {
 	client       peer.NetworkClient
 	networkCodec codec.Manager
 
-	validators *p2p.Validators
+	p2pValidators *p2p.Validators
 
 	// Metrics
 	sdkMetrics *prometheus.Registry
 
-	bootstrapped bool
+	bootstrapped avalancheUtils.Atomic[bool]
 	IsPlugin     bool
 
 	logger CorethLogger
@@ -335,8 +301,12 @@ type VM struct {
 	ethTxPushGossiper     avalancheUtils.Atomic[*gossip.PushGossiper[*GossipEthTx]]
 	ethTxPullGossiper     gossip.Gossiper
 	atomicTxGossipHandler p2p.Handler
-	atomicTxPushGossiper  *gossip.PushGossiper[*GossipAtomicTx]
+	atomicTxPushGossiper  *gossip.PushGossiper[*atomic.GossipAtomicTx]
 	atomicTxPullGossiper  gossip.Gossiper
+
+	chainAlias string
+	// RPC handlers (should be stopped before closing chaindb)
+	rpcHandlers []interface{ Stop() }
 }
 
 // CodecRegistry implements the secp256k1fx interface
@@ -371,13 +341,15 @@ func (vm *VM) Initialize(
 	fxs []*commonEng.Fx,
 	appSender commonEng.AppSender,
 ) error {
-	vm.config.SetDefaults()
+	vm.config.SetDefaults(defaultTxPoolConfig)
 	if len(configBytes) > 0 {
 		if err := json.Unmarshal(configBytes, &vm.config); err != nil {
 			return fmt.Errorf("failed to unmarshal config %s: %w", string(configBytes), err)
 		}
 	}
-	if err := vm.config.Validate(); err != nil {
+	vm.ctx = chainCtx
+
+	if err := vm.config.Validate(vm.ctx.NetworkID); err != nil {
 		return err
 	}
 	// We should deprecate config flags as the first thing, before we do anything else
@@ -385,21 +357,20 @@ func (vm *VM) Initialize(
 	// initialized the logger.
 	deprecateMsg := vm.config.Deprecate()
 
-	vm.ctx = chainCtx
-
 	// Create logger
 	alias, err := vm.ctx.BCLookup.PrimaryAlias(vm.ctx.ChainID)
 	if err != nil {
 		// fallback to ChainID string instead of erroring
 		alias = vm.ctx.ChainID.String()
 	}
+	vm.chainAlias = alias
 
 	var writer io.Writer = vm.ctx.Log
 	if vm.IsPlugin {
 		writer = originalStderr
 	}
 
-	corethLogger, err := InitLogger(alias, vm.config.LogLevel, vm.config.LogJSONFormat, writer)
+	corethLogger, err := InitLogger(vm.chainAlias, vm.config.LogLevel, vm.config.LogJSONFormat, writer)
 	if err != nil {
 		return fmt.Errorf("failed to initialize logger due to: %w ", err)
 	}
@@ -420,24 +391,19 @@ func (vm *VM) Initialize(
 	vm.toEngine = toEngine
 	vm.shutdownChan = make(chan struct{}, 1)
 
-	// Use NewNested rather than New so that the structure of the database
-	// remains the same regardless of the provided baseDB type.
-	vm.chaindb = rawdb.NewDatabase(Database{prefixdb.NewNested(ethDBPrefix, db)})
-	vm.db = versiondb.New(db)
-	vm.acceptedBlockDB = prefixdb.New(acceptedPrefix, vm.db)
-	vm.metadataDB = prefixdb.New(metadataPrefix, vm.db)
-	// Note warpDB is not part of versiondb because it is not necessary
-	// that warp signatures are committed to the database atomically with
-	// the last accepted block.
-	vm.warpDB = prefixdb.New(warpPrefix, db)
+	if err := vm.initializeMetrics(); err != nil {
+		return fmt.Errorf("failed to initialize metrics: %w", err)
+	}
+
+	// Initialize the database
+	if err := vm.initializeDBs(db); err != nil {
+		return fmt.Errorf("failed to initialize databases: %w", err)
+	}
 
 	if vm.config.InspectDatabase {
-		start := time.Now()
-		log.Info("Starting database inspection")
-		if err := rawdb.InspectDatabase(vm.chaindb, nil, nil); err != nil {
+		if err := vm.inspectDatabases(); err != nil {
 			return err
 		}
-		log.Info("Completed database inspection", "elapsed", time.Since(start))
 	}
 
 	g := new(core.Genesis)
@@ -481,16 +447,6 @@ func (vm *VM) Initialize(
 	}
 	vm.syntacticBlockValidator = NewBlockValidator(extDataHashes)
 
-	// Ensure that non-standard commit interval is not allowed for production networks
-	if avalanchegoConstants.ProductionNetworkIDs.Contains(chainCtx.NetworkID) {
-		if vm.config.CommitInterval != defaultCommitInterval {
-			return fmt.Errorf("cannot start non-local network with commit interval %d", vm.config.CommitInterval)
-		}
-		if vm.config.StateSyncCommitInterval != defaultSyncableCommitInterval {
-			return fmt.Errorf("cannot start non-local network with syncable interval %d", vm.config.StateSyncCommitInterval)
-		}
-	}
-
 	// Free the memory of the extDataHash map that is not used (i.e. if mainnet
 	// config, free fuji)
 	fujiExtDataHashes = nil
@@ -571,14 +527,8 @@ func (vm *VM) Initialize(
 		return fmt.Errorf("failed to verify chain config: %w", err)
 	}
 
-	vm.codec = Codec
-
-	if err := vm.initializeMetrics(); err != nil {
-		return err
-	}
-
 	// TODO: read size from settings
-	vm.mempool, err = NewMempool(chainCtx, vm.sdkMetrics, defaultMempoolSize, vm.verifyTxAtTip)
+	vm.mempool, err = atomic.NewMempool(chainCtx, vm.sdkMetrics, defaultMempoolSize, vm.verifyTxAtTip)
 	if err != nil {
 		return fmt.Errorf("failed to initialize mempool: %w", err)
 	}
@@ -588,11 +538,12 @@ func (vm *VM) Initialize(
 		vm.p2pSender = appSender
 	}
 
+	// TODO: move all network stuff to peer.NewNetwork
 	p2pNetwork, err := p2p.NewNetwork(vm.ctx.Log, vm.p2pSender, vm.sdkMetrics, "p2p")
 	if err != nil {
 		return fmt.Errorf("failed to initialize p2p network: %w", err)
 	}
-	vm.validators = p2p.NewValidators(p2pNetwork.Peers, vm.ctx.Log, vm.ctx.SubnetID, vm.ctx.ValidatorState, maxValidatorSetStaleness)
+	vm.p2pValidators = p2p.NewValidators(p2pNetwork.Peers, vm.ctx.Log, vm.ctx.SubnetID, vm.ctx.ValidatorState, maxValidatorSetStaleness)
 	vm.networkCodec = message.Codec
 	vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.networkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests)
 	vm.client = peer.NewNetworkClient(vm.Network)
@@ -602,26 +553,31 @@ func (vm *VM) Initialize(
 	for i, hexMsg := range vm.config.WarpOffChainMessages {
 		offchainWarpMessages[i] = []byte(hexMsg)
 	}
+	warpSignatureCache := &cache.LRU[ids.ID, []byte]{Size: warpSignatureCacheSize}
+	meteredCache, err := metercacher.New("warp_signature_cache", vm.sdkMetrics, warpSignatureCache)
+	if err != nil {
+		return fmt.Errorf("failed to create warp signature cache: %w", err)
+	}
+
+	// clear warpdb on initialization if config enabled
+	if vm.config.PruneWarpDB {
+		if err := database.Clear(vm.warpDB, ethdb.IdealBatchSize); err != nil {
+			return fmt.Errorf("failed to prune warpDB: %w", err)
+		}
+	}
+
 	vm.warpBackend, err = warp.NewBackend(
 		vm.ctx.NetworkID,
 		vm.ctx.ChainID,
 		vm.ctx.WarpSigner,
 		vm,
 		vm.warpDB,
-		warpSignatureCacheSize,
+		meteredCache,
 		offchainWarpMessages,
 	)
 	if err != nil {
 		return err
 	}
-
-	// clear warpdb on initialization if config enabled
-	if vm.config.PruneWarpDB {
-		if err := vm.warpBackend.Clear(); err != nil {
-			return fmt.Errorf("failed to prune warpDB: %w", err)
-		}
-	}
-
 	if err := vm.initializeChain(lastAcceptedHash); err != nil {
 		return err
 	}
@@ -637,12 +593,12 @@ func (vm *VM) Initialize(
 	}
 
 	// initialize atomic repository
-	vm.atomicTxRepository, err = NewAtomicTxRepository(vm.db, vm.codec, lastAcceptedHeight)
+	vm.atomicTxRepository, err = NewAtomicTxRepository(vm.versiondb, atomic.Codec, lastAcceptedHeight)
 	if err != nil {
 		return fmt.Errorf("failed to create atomic repository: %w", err)
 	}
 	vm.atomicBackend, err = NewAtomicBackend(
-		vm.db, vm.ctx.SharedMemory, bonusBlockHeights,
+		vm.versiondb, vm.ctx.SharedMemory, bonusBlockHeights,
 		vm.atomicTxRepository, lastAcceptedHeight, lastAcceptedHash,
 		vm.config.CommitInterval,
 	)
@@ -662,7 +618,20 @@ func (vm *VM) Initialize(
 		return err
 	}
 
-	vm.initializeHandlers()
+	// Add the p2p warp message handler
+	warpHandler := acp118.NewCachedHandler(meteredCache, vm.warpBackend, vm.ctx.WarpSigner)
+	vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler)
+
+	// Add the state sync handler
+	vm.Network.AddHandler(statesync.ProtocolID, statesync.NewHandler(vm.blockChain))
+
+	vm.setAppRequestHandlers()
+
+	vm.StateSyncServer = NewStateSyncServer(&stateSyncServerConfig{
+		Chain:            vm.blockChain,
+		AtomicTrie:       vm.atomicTrie,
+		SyncableInterval: vm.config.StateSyncCommitInterval,
+	})
 
 	return vm.initializeStateSyncClient(lastAcceptedHeight)
 }
@@ -697,7 +666,7 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash) error {
 		&vm.ethConfig,
 		&EthPushGossiper{vm: vm},
 		vm.chaindb,
-		vm.config.EthBackendSettings(),
+		eth.Settings{MaxBlocksPerRequest: vm.config.MaxBlocksPerRequest},
 		lastAcceptedHash,
 		dummy.NewFakerWithClock(callbacks, &vm.clock),
 		&vm.clock,
@@ -713,41 +682,12 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash) error {
 	// Set the gas parameters for the tx pool to the minimum gas price for the
 	// latest upgrade.
 	vm.txPool.SetGasTip(big.NewInt(0))
-	vm.setMinFeeAtEtna()
+	vm.txPool.SetMinFee(big.NewInt(params.EtnaMinBaseFee))
 
 	vm.eth.Start()
 	return vm.initChainState(vm.blockChain.LastAcceptedBlock())
 }
 
-// TODO: remove this after Etna is activated
-func (vm *VM) setMinFeeAtEtna() {
-	now := vm.clock.Time()
-	if vm.chainConfigExtra().EtnaTimestamp == nil {
-		// If Etna is not set, set the min fee according to the latest upgrade
-		vm.txPool.SetMinFee(big.NewInt(params.ApricotPhase4MinBaseFee))
-		return
-	} else if vm.chainConfigExtra().IsEtna(uint64(now.Unix())) {
-		// If Etna is activated, set the min fee to the Etna min fee
-		vm.txPool.SetMinFee(big.NewInt(params.EtnaMinBaseFee))
-		return
-	}
-
-	vm.txPool.SetMinFee(big.NewInt(params.ApricotPhase4MinBaseFee))
-	vm.shutdownWg.Add(1)
-	go func() {
-		defer vm.shutdownWg.Done()
-
-		wait := utils.Uint64ToTime(vm.chainConfigExtra().EtnaTimestamp).Sub(now)
-		t := time.NewTimer(wait)
-		select {
-		case <-t.C: // Wait for Etna to be activated
-			vm.txPool.SetMinFee(big.NewInt(params.EtnaMinBaseFee))
-		case <-vm.shutdownChan:
-		}
-		t.Stop()
-	}()
-}
-
 // initializeStateSyncClient initializes the client for performing state sync.
 // If state sync is disabled, this function will wipe any ongoing summary from
 // disk to ensure that we do not continue syncing from an invalid snapshot.
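The hunks above replace the raw `warpSignatureCacheSize` argument to `warp.NewBackend` with a pre-built LRU wrapped in `metercacher`, so hit/miss counts for warp signatures land in the VM's `sdkMetrics` registry, and the same metered cache is shared with the new ACP-118 handler. A minimal sketch of that wrapping pattern, using the avalanchego packages imported by this change (the cache size, registry, and key here are illustrative, not taken from the diff):

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/cache"
	"github.com/ava-labs/avalanchego/cache/metercacher"
	"github.com/ava-labs/avalanchego/ids"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Plain fixed-size LRU, as used for warp signatures in the diff.
	lru := &cache.LRU[ids.ID, []byte]{Size: 500}

	// Wrap it so gets/puts are counted under the given metric namespace.
	registry := prometheus.NewRegistry()
	metered, err := metercacher.New("warp_signature_cache", registry, lru)
	if err != nil {
		panic(err)
	}

	id := ids.GenerateTestID()
	metered.Put(id, []byte("signature-bytes"))
	if sig, ok := metered.Get(id); ok {
		fmt.Printf("cached %d bytes\n", len(sig))
	}
}
```

Sharing one metered cache between the backend and the handler also means the ACP-118 path and the signature-request path observe the same entries, rather than each keeping a private copy.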
@@ -787,9 +727,13 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error {
 		chaindb:         vm.chaindb,
 		metadataDB:      vm.metadataDB,
 		acceptedBlockDB: vm.acceptedBlockDB,
-		db:              vm.db,
+		db:              vm.versiondb,
 		atomicBackend:   vm.atomicBackend,
 		toEngine:        vm.toEngine,
+		network:         vm.Network,
+		appSender:       vm.p2pSender,
+		stateSyncNodes:  stateSyncIDs,
+		useUpstream:     vm.config.StateSyncUseUpstream,
 	})
 
 	// If StateSync is disabled, clear any ongoing summary so that we will not attempt to resume
@@ -801,21 +745,6 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error {
 	return nil
 }
 
-// initializeHandlers should be called after [vm.chain] is initialized.
-func (vm *VM) initializeHandlers() {
-	vm.StateSyncServer = NewStateSyncServer(&stateSyncServerConfig{
-		Chain:            vm.blockChain,
-		AtomicTrie:       vm.atomicTrie,
-		SyncableInterval: vm.config.StateSyncCommitInterval,
-	})
-
-	// Add p2p warp message warpHandler
-	warpHandler := handlers.NewSignatureRequestHandlerP2P(vm.warpBackend, vm.networkCodec)
-	vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler)
-
-	vm.setAppRequestHandlers()
-}
-
 func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error {
 	block, err := vm.newBlock(lastAcceptedBlock)
 	if err != nil {
@@ -877,7 +806,7 @@ func (vm *VM) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.S
 			continue
 		}
 
-		atomicTxBytes, err := vm.codec.Marshal(codecVersion, tx)
+		atomicTxBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, tx)
 		if err != nil {
 			// Discard the transaction from the mempool and error if the transaction
 			// cannot be marshalled. This should never happen.
@@ -906,7 +835,7 @@ func (vm *VM) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.S
 // assumes that we are in at least Apricot Phase 5.
 func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) {
 	var (
-		batchAtomicTxs    []*Tx
+		batchAtomicTxs    []*atomic.Tx
 		batchAtomicUTXOs  set.Set[ids.ID]
 		batchContribution *big.Int = new(big.Int).Set(common.Big0)
 		batchGasUsed      *big.Int = new(big.Int).Set(common.Big0)
@@ -982,7 +911,7 @@ func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state.
 	// If there is a non-zero number of transactions, marshal them and return the byte slice
 	// for the block's extra data along with the contribution and gas used.
 	if len(batchAtomicTxs) > 0 {
-		atomicTxBytes, err := vm.codec.Marshal(codecVersion, batchAtomicTxs)
+		atomicTxBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, batchAtomicTxs)
 		if err != nil {
 			// If we fail to marshal the batch of atomic transactions for any reason,
 			// discard the entire set of current transactions.
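Both block-building paths above now marshal atomic transactions through the exported `atomic.Codec` at `atomic.CodecVersion` instead of the codec field that used to hang off the VM. The read side, `atomic.ExtractAtomicTxs` in the next hunk, uses the Apricot Phase 5 flag to pick between a single tx and a batch. A hedged sketch of that round trip, assuming only the names this diff introduces:

```go
package evm

import "github.com/ava-labs/coreth/plugin/evm/atomic"

// roundTripExtData is a sketch (not part of the diff) showing how extra-data
// bytes produced at block-building time decode back into atomic txs.
func roundTripExtData(txs []*atomic.Tx, isApricotPhase5 bool) ([]*atomic.Tx, error) {
	if len(txs) == 0 {
		return nil, nil
	}
	var (
		extData []byte
		err     error
	)
	if isApricotPhase5 {
		// Post-AP5 blocks carry a batch of atomic txs in their extra data.
		extData, err = atomic.Codec.Marshal(atomic.CodecVersion, txs)
	} else {
		// Pre-AP5 blocks carry at most one atomic tx, marshalled directly.
		extData, err = atomic.Codec.Marshal(atomic.CodecVersion, txs[0])
	}
	if err != nil {
		return nil, err
	}
	// The same flag tells ExtractAtomicTxs which layout to expect.
	return atomic.ExtractAtomicTxs(extData, isApricotPhase5, atomic.Codec)
}
```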
@@ -1021,7 +950,7 @@ func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big
 		rulesExtra = *params.GetRulesExtra(rules)
 	)
 
-	txs, err := ExtractAtomicTxs(block.ExtData(), rulesExtra.IsApricotPhase5, vm.codec)
+	txs, err := atomic.ExtractAtomicTxs(block.ExtData(), rulesExtra.IsApricotPhase5, atomic.Codec)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -1081,47 +1010,62 @@ func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big
 func (vm *VM) SetState(_ context.Context, state snow.State) error {
 	switch state {
 	case snow.StateSyncing:
-		vm.bootstrapped = false
+		vm.bootstrapped.Set(false)
 		return nil
 	case snow.Bootstrapping:
-		vm.bootstrapped = false
-		if err := vm.StateSyncClient.Error(); err != nil {
-			return err
-		}
-		// After starting bootstrapping, do not attempt to resume a previous state sync.
-		if err := vm.StateSyncClient.ClearOngoingSummary(); err != nil {
-			return err
-		}
-		// Ensure snapshots are initialized before bootstrapping (i.e., if state sync is skipped).
-		// Note calling this function has no effect if snapshots are already initialized.
-		vm.blockChain.InitializeSnapshots()
-		return vm.fx.Bootstrapping()
+		return vm.onBootstrapStarted()
 	case snow.NormalOp:
-		// Initialize goroutines related to block building once we enter normal operation as there is no need to handle mempool gossip before this point.
-		if err := vm.initBlockBuilding(); err != nil {
-			return fmt.Errorf("failed to initialize block building: %w", err)
-		}
-		vm.bootstrapped = true
-		return vm.fx.Bootstrapped()
+		return vm.onNormalOperationsStarted()
 	default:
 		return snow.ErrUnknownState
 	}
 }
 
+// onBootstrapStarted marks this VM as bootstrapping
+func (vm *VM) onBootstrapStarted() error {
+	vm.bootstrapped.Set(false)
+	if err := vm.StateSyncClient.Error(); err != nil {
+		return err
+	}
+	// After starting bootstrapping, do not attempt to resume a previous state sync.
+	if err := vm.StateSyncClient.ClearOngoingSummary(); err != nil {
+		return err
+	}
+	// Ensure snapshots are initialized before bootstrapping (i.e., if state sync is skipped).
+	// Note calling this function has no effect if snapshots are already initialized.
+	vm.blockChain.InitializeSnapshots()
+
+	return vm.fx.Bootstrapping()
+}
+
+// onNormalOperationsStarted marks this VM as bootstrapped
+func (vm *VM) onNormalOperationsStarted() error {
+	if vm.bootstrapped.Get() {
+		return nil
+	}
+	vm.bootstrapped.Set(true)
+	if err := vm.fx.Bootstrapped(); err != nil {
+		return err
+	}
+	// Initialize goroutines related to block building
+	// once we enter normal operation as there is no need to handle mempool gossip before this point.
+	return vm.initBlockBuilding()
+}
+
 // initBlockBuilding starts goroutines to manage block building
 func (vm *VM) initBlockBuilding() error {
 	ctx, cancel := context.WithCancel(context.TODO())
 	vm.cancel = cancel
 
 	ethTxGossipMarshaller := GossipEthTxMarshaller{}
-	ethTxGossipClient := vm.Network.NewClient(p2p.TxGossipHandlerID, p2p.WithValidatorSampling(vm.validators))
+	ethTxGossipClient := vm.Network.NewClient(p2p.TxGossipHandlerID, p2p.WithValidatorSampling(vm.p2pValidators))
 	ethTxGossipMetrics, err := gossip.NewMetrics(vm.sdkMetrics, ethTxGossipNamespace)
 	if err != nil {
 		return fmt.Errorf("failed to initialize eth tx gossip metrics: %w", err)
 	}
 
 	ethTxPool, err := NewGossipEthTxPool(vm.txPool, vm.sdkMetrics)
 	if err != nil {
-		return err
+		return fmt.Errorf("failed to initialize gossip eth tx pool: %w", err)
 	}
 	vm.shutdownWg.Add(1)
 	go func() {
@@ -1129,8 +1073,8 @@ func (vm *VM) initBlockBuilding() error {
 		vm.shutdownWg.Done()
 	}()
 
-	atomicTxGossipMarshaller := GossipAtomicTxMarshaller{}
-	atomicTxGossipClient := vm.Network.NewClient(p2p.AtomicTxGossipHandlerID, p2p.WithValidatorSampling(vm.validators))
+	atomicTxGossipMarshaller := atomic.GossipAtomicTxMarshaller{}
+	atomicTxGossipClient := vm.Network.NewClient(p2p.AtomicTxGossipHandlerID, p2p.WithValidatorSampling(vm.p2pValidators))
 	atomicTxGossipMetrics, err := gossip.NewMetrics(vm.sdkMetrics, atomicTxGossipNamespace)
 	if err != nil {
 		return fmt.Errorf("failed to initialize atomic tx gossip metrics: %w", err)
 	}
@@ -1151,7 +1095,7 @@ func (vm *VM) initBlockBuilding() error {
 		ethTxPushGossiper, err = gossip.NewPushGossiper[*GossipEthTx](
 			ethTxGossipMarshaller,
 			ethTxPool,
-			vm.validators,
+			vm.p2pValidators,
 			ethTxGossipClient,
 			ethTxGossipMetrics,
 			pushGossipParams,
@@ -1167,10 +1111,10 @@ func (vm *VM) initBlockBuilding() error {
 	}
 
 	if vm.atomicTxPushGossiper == nil {
-		vm.atomicTxPushGossiper, err = gossip.NewPushGossiper[*GossipAtomicTx](
+		vm.atomicTxPushGossiper, err = gossip.NewPushGossiper[*atomic.GossipAtomicTx](
 			atomicTxGossipMarshaller,
 			vm.mempool,
-			vm.validators,
+			vm.p2pValidators,
 			atomicTxGossipClient,
 			atomicTxGossipMetrics,
 			pushGossipParams,
@@ -1185,10 +1129,8 @@ func (vm *VM) initBlockBuilding() error {
 	}
 
 	// NOTE: gossip network must be initialized first otherwise ETH tx gossip will not work.
-	gossipStats := NewGossipStats()
 	vm.builder = vm.NewBlockBuilder(vm.toEngine)
 	vm.builder.awaitSubmittedTxs()
-	vm.Network.SetGossipHandler(NewGossipHandler(vm, gossipStats))
 
 	if vm.ethTxGossipHandler == nil {
 		vm.ethTxGossipHandler = newTxGossipHandler[*GossipEthTx](
@@ -1199,16 +1141,16 @@ func (vm *VM) initBlockBuilding() error {
 			txGossipTargetMessageSize,
 			txGossipThrottlingPeriod,
 			txGossipThrottlingLimit,
-			vm.validators,
+			vm.p2pValidators,
 		)
 	}
 
 	if err := vm.Network.AddHandler(p2p.TxGossipHandlerID, vm.ethTxGossipHandler); err != nil {
-		return err
+		return fmt.Errorf("failed to add eth tx gossip handler: %w", err)
 	}
 
 	if vm.atomicTxGossipHandler == nil {
-		vm.atomicTxGossipHandler = newTxGossipHandler[*GossipAtomicTx](
+		vm.atomicTxGossipHandler = newTxGossipHandler[*atomic.GossipAtomicTx](
 			vm.ctx.Log,
 			atomicTxGossipMarshaller,
 			vm.mempool,
@@ -1216,12 +1158,12 @@ func (vm *VM) initBlockBuilding() error {
 			txGossipTargetMessageSize,
 			txGossipThrottlingPeriod,
 			txGossipThrottlingLimit,
-			vm.validators,
+			vm.p2pValidators,
 		)
 	}
 
 	if err := vm.Network.AddHandler(p2p.AtomicTxGossipHandlerID, vm.atomicTxGossipHandler); err != nil {
-		return err
+		return fmt.Errorf("failed to add atomic tx gossip handler: %w", err)
 	}
 
 	if vm.ethTxPullGossiper == nil {
@@ -1237,7 +1179,7 @@ func (vm *VM) initBlockBuilding() error {
 		vm.ethTxPullGossiper = gossip.ValidatorGossiper{
 			Gossiper:   ethTxPullGossiper,
 			NodeID:     vm.ctx.NodeID,
-			Validators: vm.validators,
+			Validators: vm.p2pValidators,
 		}
 	}
 
@@ -1252,7 +1194,7 @@ func (vm *VM) initBlockBuilding() error {
 	}()
 
 	if vm.atomicTxPullGossiper == nil {
-		atomicTxPullGossiper := gossip.NewPullGossiper[*GossipAtomicTx](
+		atomicTxPullGossiper := gossip.NewPullGossiper[*atomic.GossipAtomicTx](
 			vm.ctx.Log,
 			atomicTxGossipMarshaller,
 			vm.mempool,
@@ -1264,7 +1206,7 @@ func (vm *VM) initBlockBuilding() error {
 		vm.atomicTxPullGossiper = &gossip.ValidatorGossiper{
 			Gossiper:   atomicTxPullGossiper,
 			NodeID:     vm.ctx.NodeID,
-			Validators: vm.validators,
+			Validators: vm.p2pValidators,
 		}
 	}
 
@@ -1284,8 +1226,8 @@ func (vm *VM) initBlockBuilding() error {
 // setAppRequestHandlers sets the request handlers for the VM to serve state sync
 // requests.
 func (vm *VM) setAppRequestHandlers() {
-	// Create separate EVM TrieDB (read only) for serving leafs requests.
-	// We create a separate TrieDB here, so that it has a separate cache from the one
+	// Create standalone EVM TrieDB (read only) for serving leafs requests.
+	// We create a standalone TrieDB here, so that it has a cache independent of the one
 	// used by the node when processing blocks.
 	evmTrieDB := triedb.NewDatabase(
 		vm.chaindb,
@@ -1319,6 +1261,10 @@ func (vm *VM) Shutdown(context.Context) error {
 		log.Error("error stopping state syncer", "err", err)
 	}
 	close(vm.shutdownChan)
+	// Stop RPC handlers before eth.Stop which will close the database
+	for _, handler := range vm.rpcHandlers {
+		handler.Stop()
+	}
 	vm.eth.Stop()
 	vm.shutdownWg.Wait()
 	return nil
@@ -1505,10 +1451,6 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) {
 		return nil, err
 	}
 
-	primaryAlias, err := vm.ctx.BCLookup.PrimaryAlias(vm.ctx.ChainID)
-	if err != nil {
-		return nil, fmt.Errorf("failed to get primary alias for chain due to %w", err)
-	}
 	apis := make(map[string]http.Handler)
 	avaxAPI, err := newHandler("avax", &AvaxAPI{vm})
 	if err != nil {
@@ -1518,7 +1460,7 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) {
 	apis[avaxEndpoint] = avaxAPI
 
 	if vm.config.AdminAPIEnabled {
-		adminAPI, err := newHandler("admin", NewAdminService(vm, os.ExpandEnv(fmt.Sprintf("%s_coreth_performance_%s", vm.config.AdminAPIDir, primaryAlias))))
+		adminAPI, err := newHandler("admin", NewAdminService(vm, os.ExpandEnv(fmt.Sprintf("%s_coreth_performance_%s", vm.config.AdminAPIDir, vm.chainAlias))))
 		if err != nil {
 			return nil, fmt.Errorf("failed to register service for admin API due to %w", err)
 		}
@@ -1526,6 +1468,7 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) {
 		enabledAPIs = append(enabledAPIs, "coreth-admin")
 	}
 
+	// RPC APIs
 	if vm.config.SnowmanAPIEnabled {
 		if err := handler.RegisterName("snowman", &SnowmanAPI{vm}); err != nil {
 			return nil, err
@@ -1549,6 +1492,7 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) {
 		vm.config.WSCPUMaxStored.Duration,
 	)
 
+	vm.rpcHandlers = append(vm.rpcHandlers, handler)
 	return apis, nil
 }
 
@@ -1562,6 +1506,7 @@ func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, er
 		return nil, err
 	}
 
+	vm.rpcHandlers = append(vm.rpcHandlers, handler)
 	return map[string]http.Handler{
 		"/rpc": handler,
 	}, nil
@@ -1573,61 +1518,22 @@ func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, er
 ******************************************************************************
 */
 
-// conflicts returns an error if [inputs] conflicts with any of the atomic inputs contained in [ancestor]
-// or any of its ancestor blocks going back to the last accepted block in its ancestry. If [ancestor] is
-// accepted, then nil will be returned immediately.
-// If the ancestry of [ancestor] cannot be fetched, then [errRejectedParent] may be returned.
-func (vm *VM) conflicts(inputs set.Set[ids.ID], ancestor *Block) error {
-	lastAcceptedBlock := vm.LastAcceptedBlock()
-	lastAcceptedHeight := lastAcceptedBlock.Height()
-	for ancestor.Height() > lastAcceptedHeight {
-		// If any of the atomic transactions in the ancestor conflict with [inputs]
-		// return an error.
-		for _, atomicTx := range ancestor.atomicTxs {
-			if inputs.Overlaps(atomicTx.InputUTXOs()) {
-				return errConflictingAtomicInputs
-			}
-		}
-
-		// Move up the chain.
-		nextAncestorID := ancestor.Parent()
-		// If the ancestor is unknown, then the parent failed
-		// verification when it was called.
-		// If the ancestor is rejected, then this block shouldn't be
-		// inserted into the canonical chain because the parent is
-		// will be missing.
-		// If the ancestor is processing, then the block may have
-		// been verified.
-		nextAncestorIntf, err := vm.GetBlockInternal(context.TODO(), nextAncestorID)
-		if err != nil {
-			return errRejectedParent
-		}
-		nextAncestor, ok := nextAncestorIntf.(*Block)
-		if !ok {
-			return fmt.Errorf("ancestor block %s had unexpected type %T", nextAncestor.ID(), nextAncestorIntf)
-		}
-		ancestor = nextAncestor
-	}
-
-	return nil
-}
-
 // getAtomicTx returns the requested transaction, status, and height.
 // If the status is Unknown, then the returned transaction will be nil.
-func (vm *VM) getAtomicTx(txID ids.ID) (*Tx, Status, uint64, error) {
+func (vm *VM) getAtomicTx(txID ids.ID) (*atomic.Tx, atomic.Status, uint64, error) {
 	if tx, height, err := vm.atomicTxRepository.GetByTxID(txID); err == nil {
-		return tx, Accepted, height, nil
+		return tx, atomic.Accepted, height, nil
 	} else if err != database.ErrNotFound {
-		return nil, Unknown, 0, err
+		return nil, atomic.Unknown, 0, err
 	}
 	tx, dropped, found := vm.mempool.GetTx(txID)
 	switch {
 	case found && dropped:
-		return tx, Dropped, 0, nil
+		return tx, atomic.Dropped, 0, nil
 	case found:
-		return tx, Processing, 0, nil
+		return tx, atomic.Processing, 0, nil
 	default:
-		return nil, Unknown, 0, nil
+		return nil, atomic.Unknown, 0, nil
 	}
 }
 
@@ -1658,7 +1564,7 @@ func (vm *VM) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) {
 }
 
 // verifyTxAtTip verifies that [tx] is valid to be issued on top of the currently preferred block
-func (vm *VM) verifyTxAtTip(tx *Tx) error {
+func (vm *VM) verifyTxAtTip(tx *atomic.Tx) error {
 	if txByteLen := len(tx.SignedBytes()); txByteLen > targetAtomicTxsSize {
 		return fmt.Errorf("tx size (%d) exceeds total atomic txs size target (%d)", txByteLen, targetAtomicTxsSize)
 	}
@@ -1699,7 +1605,7 @@ func (vm *VM) verifyTxAtTip(tx *Tx) error {
 // Note: verifyTx may modify [state]. If [state] needs to be properly maintained, the caller is responsible
 // for reverting to the correct snapshot after calling this function. If this function is called with a
 // throwaway state, then this is not necessary.
-func (vm *VM) verifyTx(tx *Tx, parentHash common.Hash, baseFee *big.Int, state *state.StateDB, rules extras.Rules) error {
+func (vm *VM) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, state *state.StateDB, rules extras.Rules) error {
 	parentIntf, err := vm.GetBlockInternal(context.TODO(), ids.ID(parentHash))
 	if err != nil {
 		return fmt.Errorf("failed to get parent block: %w", err)
 	}
@@ -1708,7 +1614,15 @@ func (vm *VM) verifyTx(tx *Tx, parentHash common.Hash, baseFee *big.Int, state *
 	if !ok {
 		return fmt.Errorf("parent block %s had unexpected type %T", parentIntf.ID(), parentIntf)
 	}
-	if err := tx.UnsignedAtomicTx.SemanticVerify(vm, tx, parent, baseFee, rules); err != nil {
+	atomicBackend := &atomic.Backend{
+		Ctx:          vm.ctx,
+		Fx:           &vm.fx,
+		Rules:        rules,
+		Bootstrapped: vm.bootstrapped.Get(),
+		BlockFetcher: vm,
+		SecpCache:    &vm.secpCache,
+	}
+	if err := tx.UnsignedAtomicTx.SemanticVerify(atomicBackend, tx, parent, baseFee); err != nil {
 		return err
 	}
 	return tx.UnsignedAtomicTx.EVMStateTransfer(vm.ctx, state)
@@ -1716,7 +1630,7 @@ func (vm *VM) verifyTx(tx *Tx, parentHash common.Hash, baseFee *big.Int, state *
 
 // verifyTxs verifies that [txs] are valid to be issued into a block with parent block [parentHash]
 // using [rules] as the current rule set.
-func (vm *VM) verifyTxs(txs []*Tx, parentHash common.Hash, baseFee *big.Int, height uint64, rules extras.Rules) error {
+func (vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.Int, height uint64, rules extras.Rules) error {
 	// Ensure that the parent was verified and inserted correctly.
 	if !vm.blockChain.HasBlock(parentHash, height-1) {
 		return errRejectedParent
 	}
@@ -1739,14 +1653,22 @@ func (vm *VM) verifyTxs(txs []*Tx, parentHash common.Hash, baseFee *big.Int, hei
 	// Ensure each tx in [txs] doesn't conflict with any other atomic tx in
 	// a processing ancestor block.
 	inputs := set.Set[ids.ID]{}
+	atomicBackend := &atomic.Backend{
+		Ctx:          vm.ctx,
+		Fx:           &vm.fx,
+		Rules:        rules,
+		Bootstrapped: vm.bootstrapped.Get(),
+		BlockFetcher: vm,
+		SecpCache:    &vm.secpCache,
+	}
 	for _, atomicTx := range txs {
 		utx := atomicTx.UnsignedAtomicTx
-		if err := utx.SemanticVerify(vm, atomicTx, ancestor, baseFee, rules); err != nil {
+		if err := utx.SemanticVerify(atomicBackend, atomicTx, ancestor, baseFee); err != nil {
 			return fmt.Errorf("invalid block due to failed semantic verify: %w at height %d", err, height)
 		}
 		txInputs := utx.InputUTXOs()
 		if inputs.Overlaps(txInputs) {
-			return errConflictingAtomicInputs
+			return atomic.ErrConflictingAtomicInputs
 		}
 		inputs.Union(txInputs)
 	}
@@ -1768,7 +1690,7 @@ func (vm *VM) GetAtomicUTXOs(
 	return avax.GetAtomicUTXOs(
 		vm.ctx.SharedMemory,
-		vm.codec,
+		atomic.Codec,
 		chainID,
 		addrs,
 		startAddr,
@@ -1777,176 +1699,6 @@ func (vm *VM) GetAtomicUTXOs(
 		startUTXOID,
 		limit,
 	)
 }
 
-// GetSpendableFunds returns a list of EVMInputs and keys (in corresponding
-// order) to total [amount] of [assetID] owned by [keys].
-// Note: we return [][]*secp256k1.PrivateKey even though each input
-// corresponds to a single key, so that the signers can be passed in to
-// [tx.Sign] which supports multiple keys on a single input.
-func (vm *VM) GetSpendableFunds(
-	keys []*secp256k1.PrivateKey,
-	assetID ids.ID,
-	amount uint64,
-) ([]EVMInput, [][]*secp256k1.PrivateKey, error) {
-	// Note: current state uses the state of the preferred block.
-	state, err := vm.blockChain.State()
-	if err != nil {
-		return nil, nil, err
-	}
-	inputs := []EVMInput{}
-	signers := [][]*secp256k1.PrivateKey{}
-	// Note: we assume that each key in [keys] is unique, so that iterating over
-	// the keys will not produce duplicated nonces in the returned EVMInput slice.
-	for _, key := range keys {
-		if amount == 0 {
-			break
-		}
-		addr := GetEthAddress(key)
-		var balance uint64
-		if assetID == vm.ctx.AVAXAssetID {
-			// If the asset is AVAX, we divide by the x2cRate to convert back to the correct
-			// denomination of AVAX that can be exported.
-			balance = new(uint256.Int).Div(state.GetBalance(addr), x2cRate).Uint64()
-		} else {
-			balance = state.GetBalanceMultiCoin(addr, common.Hash(assetID)).Uint64()
-		}
-		if balance == 0 {
-			continue
-		}
-		if amount < balance {
-			balance = amount
-		}
-		nonce, err := vm.GetCurrentNonce(addr)
-		if err != nil {
-			return nil, nil, err
-		}
-		inputs = append(inputs, EVMInput{
-			Address: addr,
-			Amount:  balance,
-			AssetID: assetID,
-			Nonce:   nonce,
-		})
-		signers = append(signers, []*secp256k1.PrivateKey{key})
-		amount -= balance
-	}
-
-	if amount > 0 {
-		return nil, nil, errInsufficientFunds
-	}
-
-	return inputs, signers, nil
-}
-
-// GetSpendableAVAXWithFee returns a list of EVMInputs and keys (in corresponding
-// order) to total [amount] + [fee] of [AVAX] owned by [keys].
-// This function accounts for the added cost of the additional inputs needed to
-// create the transaction and makes sure to skip any keys with a balance that is
-// insufficient to cover the additional fee.
-// Note: we return [][]*secp256k1.PrivateKey even though each input
-// corresponds to a single key, so that the signers can be passed in to
-// [tx.Sign] which supports multiple keys on a single input.
-func (vm *VM) GetSpendableAVAXWithFee(
-	keys []*secp256k1.PrivateKey,
-	amount uint64,
-	cost uint64,
-	baseFee *big.Int,
-) ([]EVMInput, [][]*secp256k1.PrivateKey, error) {
-	// Note: current state uses the state of the preferred block.
-	state, err := vm.blockChain.State()
-	if err != nil {
-		return nil, nil, err
-	}
-
-	initialFee, err := CalculateDynamicFee(cost, baseFee)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	newAmount, err := math.Add64(amount, initialFee)
-	if err != nil {
-		return nil, nil, err
-	}
-	amount = newAmount
-
-	inputs := []EVMInput{}
-	signers := [][]*secp256k1.PrivateKey{}
-	// Note: we assume that each key in [keys] is unique, so that iterating over
-	// the keys will not produce duplicated nonces in the returned EVMInput slice.
-	for _, key := range keys {
-		if amount == 0 {
-			break
-		}
-
-		prevFee, err := CalculateDynamicFee(cost, baseFee)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		newCost := cost + EVMInputGas
-		newFee, err := CalculateDynamicFee(newCost, baseFee)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		additionalFee := newFee - prevFee
-
-		addr := GetEthAddress(key)
-		// Since the asset is AVAX, we divide by the x2cRate to convert back to
-		// the correct denomination of AVAX that can be exported.
-		balance := new(uint256.Int).Div(state.GetBalance(addr), x2cRate).Uint64()
-		// If the balance for [addr] is insufficient to cover the additional cost
-		// of adding an input to the transaction, skip adding the input altogether
-		if balance <= additionalFee {
-			continue
-		}
-
-		// Update the cost for the next iteration
-		cost = newCost
-
-		newAmount, err := math.Add64(amount, additionalFee)
-		if err != nil {
-			return nil, nil, err
-		}
-		amount = newAmount
-
-		// Use the entire [balance] as an input, but if the required [amount]
-		// is less than the balance, update the [inputAmount] to spend the
-		// minimum amount to finish the transaction.
-		inputAmount := balance
-		if amount < balance {
-			inputAmount = amount
-		}
-		nonce, err := vm.GetCurrentNonce(addr)
-		if err != nil {
-			return nil, nil, err
-		}
-		inputs = append(inputs, EVMInput{
-			Address: addr,
-			Amount:  inputAmount,
-			AssetID: vm.ctx.AVAXAssetID,
-			Nonce:   nonce,
-		})
-		signers = append(signers, []*secp256k1.PrivateKey{key})
-		amount -= inputAmount
-	}
-
-	if amount > 0 {
-		return nil, nil, errInsufficientFunds
-	}
-
-	return inputs, signers, nil
-}
-
-// GetCurrentNonce returns the nonce associated with the address at the
-// preferred block
-func (vm *VM) GetCurrentNonce(address common.Address) (uint64, error) {
-	// Note: current state uses the state of the preferred block.
-	state, err := vm.blockChain.State()
-	if err != nil {
-		return 0, err
-	}
-	return state.GetNonce(address), nil
-}
-
 func (vm *VM) chainConfigExtra() *extras.ChainConfig {
 	return params.GetExtra(vm.chainConfig)
 }
@@ -2085,3 +1837,55 @@ func (vm *VM) stateSyncEnabled(lastAcceptedHeight uint64) bool {
 	// enable state sync by default if the chain is empty.
 	return lastAcceptedHeight == 0
 }
+
+func (vm *VM) newImportTx(
+	chainID ids.ID, // chain to import from
+	to common.Address, // Address of recipient
+	baseFee *big.Int, // fee to use post-AP3
+	keys []*secp256k1.PrivateKey, // Keys to import the funds
+) (*atomic.Tx, error) {
+	kc := secp256k1fx.NewKeychain()
+	for _, key := range keys {
+		kc.Add(key)
+	}
+
+	atomicUTXOs, _, _, err := vm.GetAtomicUTXOs(chainID, kc.Addresses(), ids.ShortEmpty, ids.Empty, -1)
+	if err != nil {
+		return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err)
+	}
+
+	return atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), chainID, to, baseFee, kc, atomicUTXOs)
+}
+
+// newExportTx returns a new ExportTx
+func (vm *VM) newExportTx(
+	assetID ids.ID, // AssetID of the tokens to export
+	amount uint64, // Amount of tokens to export
+	chainID ids.ID, // Chain to send the UTXOs to
+	to ids.ShortID, // Address of chain recipient
+	baseFee *big.Int, // fee to use post-AP3
+	keys []*secp256k1.PrivateKey, // Pay the fee and provide the tokens
+) (*atomic.Tx, error) {
+	state, err := vm.blockChain.State()
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the transaction
+	tx, err := atomic.NewExportTx(
+		vm.ctx,            // Context
+		vm.currentRules(), // VM rules
+		state,
+		assetID, // AssetID
+		amount,  // Amount
+		chainID, // ID of the chain to send the funds to
+		to,      // Address
+		baseFee,
+		keys, // Private keys
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return tx, nil
+}
diff --git a/plugin/evm/vm_database.go b/plugin/evm/vm_database.go
new file mode 100644
index 0000000000..958f22db57
--- /dev/null
+++ b/plugin/evm/vm_database.go
@@ -0,0 +1,82 @@
+// (c) 2019-2021, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package evm
+
+import (
+	"time"
+
+	avalanchedatabase "github.com/ava-labs/avalanchego/database"
+	"github.com/ava-labs/avalanchego/database/prefixdb"
+	"github.com/ava-labs/avalanchego/database/versiondb"
+	"github.com/ava-labs/coreth/core/rawdb"
+	"github.com/ava-labs/coreth/plugin/evm/database"
+	"github.com/ava-labs/libevm/common"
+	"github.com/ava-labs/libevm/log"
+)
+
+// initializeDBs initializes the databases used by the VM.
+// coreth always uses the avalanchego provided database.
+func (vm *VM) initializeDBs(db avalanchedatabase.Database) error {
+	// Use NewNested rather than New so that the structure of the database
+	// remains the same regardless of the provided baseDB type.
+	vm.chaindb = rawdb.NewDatabase(database.WrapDatabase(prefixdb.NewNested(ethDBPrefix, db)))
+	vm.versiondb = versiondb.New(db)
+	vm.acceptedBlockDB = prefixdb.New(acceptedPrefix, vm.versiondb)
+	vm.metadataDB = prefixdb.New(metadataPrefix, vm.versiondb)
+	vm.db = db
+	// Note warpDB is not part of versiondb because it is not necessary
+	// that warp signatures are committed to the database atomically with
+	// the last accepted block.
+	vm.warpDB = prefixdb.New(warpPrefix, db)
+	return nil
+}
+
+func (vm *VM) inspectDatabases() error {
+	start := time.Now()
+	log.Info("Starting database inspection")
+	if err := rawdb.InspectDatabase(vm.chaindb, nil, nil); err != nil {
+		return err
+	}
+	if err := inspectDB(vm.acceptedBlockDB, "acceptedBlockDB"); err != nil {
+		return err
+	}
+	if err := inspectDB(vm.metadataDB, "metadataDB"); err != nil {
+		return err
+	}
+	if err := inspectDB(vm.warpDB, "warpDB"); err != nil {
+		return err
+	}
+	log.Info("Completed database inspection", "elapsed", time.Since(start))
+	return nil
+}
+
+func inspectDB(db avalanchedatabase.Database, label string) error {
+	it := db.NewIterator()
+	defer it.Release()
+
+	var (
+		count  int64
+		start  = time.Now()
+		logged = time.Now()
+
+		// Totals
+		total common.StorageSize
+	)
+	// Inspect key-value database first.
+	for it.Next() {
+		var (
+			key  = it.Key()
+			size = common.StorageSize(len(key) + len(it.Value()))
+		)
+		total += size
+		count++
+		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
+			log.Info("Inspecting database", "label", label, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+			logged = time.Now()
+		}
+	}
+	// Display the database statistics.
+	log.Info("Database statistics", "label", label, "total", total.String(), "count", count)
+	return nil
+}
diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go
index 7d322eb209..b1acd489b6 100644
--- a/plugin/evm/vm_test.go
+++ b/plugin/evm/vm_test.go
@@ -24,6 +24,8 @@ import (
 	"github.com/ava-labs/coreth/constants"
 	"github.com/ava-labs/coreth/eth/filters"
 	"github.com/ava-labs/coreth/metrics"
+	"github.com/ava-labs/coreth/plugin/evm/atomic"
+	"github.com/ava-labs/coreth/plugin/evm/config"
 	"github.com/ava-labs/coreth/utils"
 
 	"github.com/ava-labs/libevm/trie"
@@ -31,7 +33,7 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/ava-labs/avalanchego/api/keystore"
-	"github.com/ava-labs/avalanchego/chains/atomic"
+	avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic"
 	"github.com/ava-labs/avalanchego/database"
 	"github.com/ava-labs/avalanchego/database/memdb"
 	"github.com/ava-labs/avalanchego/database/prefixdb"
@@ -114,12 +116,6 @@ var (
 		return &cpy
 	}
 
-	activateEtna = func(cfg *params.ChainConfig, etnaTime uint64) *params.ChainConfig {
-		cpy := *cfg
-		params.GetExtra(&cpy).EtnaTimestamp = &etnaTime
-		return &cpy
-	}
-
 	genesisJSONApricotPhase0 = genesisJSON(params.TestLaunchConfig)
 	genesisJSONApricotPhase1 = genesisJSON(params.TestApricotPhase1Config)
 	genesisJSONApricotPhase2 = genesisJSON(params.TestApricotPhase2Config)
@@ -158,8 +154,8 @@ func init() {
 		b, _ = cb58.Decode(key)
 		pk, _ := secp256k1.ToPrivateKey(b)
 		testKeys = append(testKeys, pk)
-		testEthAddrs = append(testEthAddrs, GetEthAddress(pk))
-		testShortIDAddrs = append(testShortIDAddrs, pk.PublicKey().Address())
+		testEthAddrs = append(testEthAddrs, pk.EthAddress())
+		testShortIDAddrs = append(testShortIDAddrs, pk.Address())
 	}
 }
 
@@ -226,12 +222,12 @@ func NewContext() *snow.Context {
 			return subnetID, nil
 		},
 	}
-	blsSecretKey, err := bls.NewSecretKey()
+	blsSecretKey, err := bls.NewSigner()
 	if err != nil {
 		panic(err)
 	}
 	ctx.WarpSigner = avalancheWarp.NewSigner(blsSecretKey, ctx.NetworkID, ctx.ChainID)
-	ctx.PublicKey = bls.PublicFromSecretKey(blsSecretKey)
+	ctx.PublicKey = blsSecretKey.PublicKey()
 	return ctx
 }
 
@@ -244,7 +240,7 @@ func setupGenesis(
 	database.Database,
 	[]byte,
 	chan commonEng.Message,
-	*atomic.Memory,
+	*avalancheatomic.Memory,
 ) {
 	if len(genesisJSON) == 0 {
 		genesisJSON = genesisJSONLatest
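The new `initializeDBs` above layers the VM's logical stores over the single avalanchego-provided database: eth chain data under a nested prefix, the accepted-block and metadata stores inside a `versiondb` so they commit together with the last accepted block, and `warpDB` deliberately outside the versiondb. A toy illustration of that prefixdb/versiondb layering over an in-memory database (the prefixes and keys below are made up for the example):

```go
package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/database/memdb"
	"github.com/ava-labs/avalanchego/database/prefixdb"
	"github.com/ava-labs/avalanchego/database/versiondb"
)

func main() {
	base := memdb.New()

	// Writes to a versiondb are buffered until Commit, so everything under
	// it lands in the base database atomically.
	vdb := versiondb.New(base)
	accepted := prefixdb.New([]byte("accepted"), vdb)
	metadata := prefixdb.New([]byte("metadata"), vdb)

	// Stores outside the versiondb (like warpDB in the diff) are written
	// through immediately and are not part of the atomic commit.
	warp := prefixdb.New([]byte("warp"), base)

	_ = accepted.Put([]byte("lastAccepted"), []byte{0x01})
	_ = metadata.Put([]byte("height"), []byte{0x2a})
	_ = warp.Put([]byte("sig"), []byte{0xff})

	if err := vdb.Commit(); err != nil {
		panic(err)
	}
	fmt.Println("committed versioned writes atomically")
}
```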
@@ -255,7 +251,7 @@ func setupGenesis(
 	baseDB := memdb.New()
 
 	// initialize the atomic memory
-	atomicMemory := atomic.NewMemory(prefixdb.New([]byte{0}, baseDB))
+	atomicMemory := avalancheatomic.NewMemory(prefixdb.New([]byte{0}, baseDB))
 	ctx.SharedMemory = atomicMemory.NewSharedMemory(ctx.ChainID)
 
 	// NB: this lock is intentionally left locked when this function returns.
@@ -286,7 +282,7 @@ func GenesisVM(t *testing.T,
 	chan commonEng.Message,
 	*VM,
 	database.Database,
-	*atomic.Memory,
+	*avalancheatomic.Memory,
 	*enginetest.Sender,
 ) {
 	return GenesisVMWithClock(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON, mockable.Clock{})
@@ -305,7 +301,7 @@ func GenesisVMWithClock(
 	chan commonEng.Message,
 	*VM,
 	database.Database,
-	*atomic.Memory,
+	*avalancheatomic.Memory,
 	*enginetest.Sender,
 ) {
 	vm := &VM{clock: clock}
@@ -334,7 +330,7 @@ func GenesisVMWithClock(
 	return issuer, vm, dbManager, m, appSender
 }
 
-func addUTXO(sharedMemory *atomic.Memory, ctx *snow.Context, txID ids.ID, index uint32, assetID ids.ID, amount uint64, addr ids.ShortID) (*avax.UTXO, error) {
+func addUTXO(sharedMemory *avalancheatomic.Memory, ctx *snow.Context, txID ids.ID, index uint32, assetID ids.ID, amount uint64, addr ids.ShortID) (*avax.UTXO, error) {
 	utxo := &avax.UTXO{
 		UTXOID: avax.UTXOID{
 			TxID:        txID,
@@ -349,14 +345,14 @@ func addUTXO(sharedMemory *atomic.Memory, ctx *snow.Context, txID ids.ID, index
 			},
 		},
 	}
-	utxoBytes, err := Codec.Marshal(codecVersion, utxo)
+	utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo)
 	if err != nil {
 		return nil, err
 	}
 
 	xChainSharedMemory := sharedMemory.NewSharedMemory(ctx.XChainID)
 	inputID := utxo.InputID()
-	if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{ctx.ChainID: {PutRequests: []*atomic.Element{{
+	if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{
 		Key:   inputID[:],
 		Value: utxoBytes,
 		Traits: [][]byte{
@@ -372,7 +368,7 @@ func addUTXO(sharedMemory *atomic.Memory, ctx *snow.Context, txID ids.ID, index
 
 // GenesisVMWithUTXOs creates a GenesisVM and generates UTXOs in the X-Chain Shared Memory containing AVAX based on the [utxos] map
 // Generates UTXOIDs by using a hash of the address in the [utxos] map such that the UTXOs will be generated deterministically.
 // If [genesisJSON] is empty, defaults to using [genesisJSONLatest]
-func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan commonEng.Message, *VM, database.Database, *atomic.Memory, *enginetest.Sender) {
+func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan commonEng.Message, *VM, database.Database, *avalancheatomic.Memory, *enginetest.Sender) {
 	issuer, vm, db, sharedMemory, sender := GenesisVM(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON)
 	for addr, avaxAmount := range utxos {
 		txID, err := ids.ToID(hashing.ComputeHash256(addr.Bytes()))
@@ -403,8 +399,8 @@ func TestVMConfigDefaults(t *testing.T) {
 	configJSON := fmt.Sprintf(`{"rpc-tx-fee-cap": %g,"eth-apis": %s}`, txFeeCap, fmt.Sprintf("[%q]", enabledEthAPIs[0]))
 	_, vm, _, _, _ := GenesisVM(t, false, "", configJSON, "")
 
-	var vmConfig Config
-	vmConfig.SetDefaults()
+	var vmConfig config.Config
+	vmConfig.SetDefaults(defaultTxPoolConfig)
 	vmConfig.RPCTxFeeCap = txFeeCap
 	vmConfig.EnabledEthAPIs = enabledEthAPIs
 	require.Equal(t, vmConfig, vm.config, "VM Config should match default with overrides")
@@ -415,8 +411,8 @@ func TestVMNilConfig(t *testing.T) {
 	_, vm, _, _, _ := GenesisVM(t, false, "", "", "")
 
 	// VM Config should match defaults if no config is passed in
-	var vmConfig Config
-	vmConfig.SetDefaults()
+	var vmConfig config.Config
+	vmConfig.SetDefaults(defaultTxPoolConfig)
 	require.Equal(t, vmConfig, vm.config, "VM Config should match default config")
 	require.NoError(t, vm.Shutdown(context.Background()))
 }
@@ -648,7 +644,7 @@ func TestIssueAtomicTxs(t *testing.T) {
 		t.Fatal("Expected logs to be non-nil")
 	}
 
-	exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]})
+	exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, importAmount-(2*atomic.AvalancheAtomicTxFee), vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -681,13 +677,13 @@ func TestIssueAtomicTxs(t *testing.T) {
 	// Check that both atomic transactions were indexed as expected.
 	indexedImportTx, status, height, err := vm.getAtomicTx(importTx.ID())
 	assert.NoError(t, err)
-	assert.Equal(t, Accepted, status)
+	assert.Equal(t, atomic.Accepted, status)
 	assert.Equal(t, uint64(1), height, "expected height of indexed import tx to be 1")
 	assert.Equal(t, indexedImportTx.ID(), importTx.ID(), "expected ID of indexed import tx to match original txID")
 
 	indexedExportTx, status, height, err := vm.getAtomicTx(exportTx.ID())
 	assert.NoError(t, err)
-	assert.Equal(t, Accepted, status)
+	assert.Equal(t, atomic.Accepted, status)
 	assert.Equal(t, uint64(2), height, "expected height of indexed export tx to be 2")
 	assert.Equal(t, indexedExportTx.ID(), exportTx.ID(), "expected ID of indexed export tx to match original txID")
 }
@@ -847,8 +843,8 @@ func testConflictingImportTxs(t *testing.T, genesis string) {
 		}
 	}()
 
-	importTxs := make([]*Tx, 0, 3)
-	conflictTxs := make([]*Tx, 0, 3)
+	importTxs := make([]*atomic.Tx, 0, 3)
+	conflictTxs := make([]*atomic.Tx, 0, 3)
 	for i, key := range testKeys {
 		importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[i], initialBaseFee, []*secp256k1.PrivateKey{key})
 		if err != nil {
@@ -942,9 +938,9 @@ func testConflictingImportTxs(t *testing.T, genesis string) {
 		var extraData []byte
 		switch {
 		case rules.IsApricotPhase5:
-			extraData, err = vm.codec.Marshal(codecVersion, []*Tx{conflictTxs[1]})
+			extraData, err = atomic.Codec.Marshal(atomic.CodecVersion, []*atomic.Tx{conflictTxs[1]})
 		default:
-			extraData, err = vm.codec.Marshal(codecVersion, conflictTxs[1])
+			extraData, err = atomic.Codec.Marshal(atomic.CodecVersion, conflictTxs[1])
 		}
 		if err != nil {
 			t.Fatal(err)
@@ -970,15 +966,15 @@ func testConflictingImportTxs(t *testing.T, genesis string) {
 			t.Fatal(err)
 		}
 
-		if err := parsedBlock.Verify(context.Background()); !errors.Is(err, errConflictingAtomicInputs) {
-			t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicInputs, err)
+		if err := parsedBlock.Verify(context.Background()); !errors.Is(err, atomic.ErrConflictingAtomicInputs) {
+			t.Fatalf("Expected to fail with err: %s, but found err: %s", atomic.ErrConflictingAtomicInputs, err)
 		}
 
 		if !rules.IsApricotPhase5 {
 			return
 		}
 
-		extraData, err = vm.codec.Marshal(codecVersion, []*Tx{importTxs[2], conflictTxs[2]})
+		extraData, err = atomic.Codec.Marshal(atomic.CodecVersion, []*atomic.Tx{importTxs[2], conflictTxs[2]})
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -1006,25 +1002,25 @@ func testConflictingImportTxs(t *testing.T, genesis string) {
 			t.Fatal(err)
 		}
 
-		if err := parsedBlock.Verify(context.Background()); !errors.Is(err, errConflictingAtomicInputs) {
-			t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicInputs, err)
+		if err := parsedBlock.Verify(context.Background()); !errors.Is(err, atomic.ErrConflictingAtomicInputs) {
+			t.Fatalf("Expected to fail with err: %s, but found err: %s", atomic.ErrConflictingAtomicInputs, err)
 		}
 	}
 }
 
 func TestReissueAtomicTxHigherGasPrice(t *testing.T) {
 	kc := secp256k1fx.NewKeychain(testKeys...)
-	for name, issueTxs := range map[string]func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) (issued []*Tx, discarded []*Tx){
-		"single UTXO override": func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) (issued []*Tx, evicted []*Tx) {
+	for name, issueTxs := range map[string]func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, discarded []*atomic.Tx){
+		"single UTXO override": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) {
 			utxo, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0])
 			if err != nil {
 				t.Fatal(err)
 			}
-			tx1, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo})
+			tx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo})
 			if err != nil {
 				t.Fatal(err)
 			}
-			tx2, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo})
+			tx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo})
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -1036,9 +1032,9 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) {
 				t.Fatal(err)
 			}
 
-			return []*Tx{tx2}, []*Tx{tx1}
+			return []*atomic.Tx{tx2}, []*atomic.Tx{tx1}
 		},
-		"one of two UTXOs overrides": func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) (issued []*Tx, evicted []*Tx) {
+		"one of two UTXOs overrides": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) {
 			utxo1, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0])
 			if err != nil {
 				t.Fatal(err)
			}
@@ -1047,11 +1043,11 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) {
 			if err != nil {
 				t.Fatal(err)
 			}
-			tx1, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1, utxo2})
+			tx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1, utxo2})
 			if err != nil {
 				t.Fatal(err)
 			}
-			tx2, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo1})
+			tx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo1})
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -1063,9 +1059,9 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) {
 				t.Fatal(err)
 			}
 
-			return []*Tx{tx2}, []*Tx{tx1}
+			return []*atomic.Tx{tx2}, []*atomic.Tx{tx1}
 		},
-		"hola": func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) (issued []*Tx, evicted []*Tx) {
+		"hola": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) {
 			utxo1, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0])
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -1075,17 +1071,17 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) {
 				t.Fatal(err)
 			}
 
-			importTx1, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1})
+			importTx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1})
 			if err != nil {
 				t.Fatal(err)
 			}
-			importTx2, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(3), initialBaseFee), kc, []*avax.UTXO{utxo2})
+			importTx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(3), initialBaseFee), kc, []*avax.UTXO{utxo2})
 			if err != nil {
 				t.Fatal(err)
 			}
 
-			reissuanceTx1, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(2), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2})
+			reissuanceTx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(2), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2})
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -1097,15 +1093,15 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) {
 				t.Fatal(err)
 			}
 
-			if err := vm.mempool.AddLocalTx(reissuanceTx1); !errors.Is(err, errConflictingAtomicTx) {
-				t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicTx, err)
+			if err := vm.mempool.AddLocalTx(reissuanceTx1); !errors.Is(err, atomic.ErrConflictingAtomicTx) {
+				t.Fatalf("Expected to fail with err: %s, but found err: %s", atomic.ErrConflictingAtomicTx, err)
 			}
 
 			assert.True(t, vm.mempool.Has(importTx1.ID()))
 			assert.True(t, vm.mempool.Has(importTx2.ID()))
 			assert.False(t, vm.mempool.Has(reissuanceTx1.ID()))
 
-			reissuanceTx2, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(4), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2})
+			reissuanceTx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(4), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2})
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -1113,7 +1109,7 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) {
 				t.Fatal(err)
 			}
 
-			return []*Tx{reissuanceTx2}, []*Tx{importTx1, importTx2}
+			return []*atomic.Tx{reissuanceTx2}, []*atomic.Tx{importTx1, importTx2}
 		},
 	} {
 		t.Run(name, func(t *testing.T) {
@@ -1121,12 +1117,12 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) {
 			issuedTxs, evictedTxs := issueTxs(t, vm, sharedMemory)
 
 			for i, tx := range issuedTxs {
-				_, issued := vm.mempool.txHeap.Get(tx.ID())
+				_, issued := vm.mempool.GetPendingTx(tx.ID())
 				assert.True(t, issued, "expected issued tx at index %d to be issued", i)
 			}
 
 			for i, tx := range evictedTxs {
-				_, discarded := vm.mempool.discardedTxs.Get(tx.ID())
+				_, discarded, _ := vm.mempool.GetTx(tx.ID())
 				assert.True(t, discarded, "expected discarded tx at index %d to be discarded", i)
 			}
 		})
@@ -1387,10 +1383,10 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) {
 	}
 
 	key0 := testKeys[0]
-	addr0 := key0.PublicKey().Address()
+	addr0 := key0.Address()
 
 	key1 := testKeys[1]
-	addr1 := key1.PublicKey().Address()
+	addr1 := key1.Address()
 
 	importAmount := uint64(1000000000)
 
@@ -1530,22 +1526,22 @@ func TestBonusBlocksTxs(t *testing.T) {
 			Amt: importAmount,
 			OutputOwners: secp256k1fx.OutputOwners{
 				Threshold: 1,
-				Addrs:     []ids.ShortID{testKeys[0].PublicKey().Address()},
+				Addrs:     []ids.ShortID{testKeys[0].Address()},
 			},
 		},
 	}
-	utxoBytes, err := vm.codec.Marshal(codecVersion, utxo)
+	utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo)
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID)
 	inputID := utxo.InputID()
-	if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{
+	if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{
 		Key:   inputID[:],
 		Value: utxoBytes,
 		Traits: [][]byte{
-			testKeys[0].PublicKey().Address().Bytes(),
+			testKeys[0].Address().Bytes(),
 		},
 	}}}}); err != nil {
 		t.Fatal(err)
 	}
@@ -1571,7 +1567,7 @@ func TestBonusBlocksTxs(t *testing.T) {
 	vm.atomicBackend.(*atomicBackend).bonusBlocks = map[uint64]ids.ID{blk.Height(): blk.ID()}
 
 	// Remove the UTXOs from shared memory, so that non-bonus blocks will fail verification
-	if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.XChainID: {RemoveRequests: [][]byte{inputID[:]}}}); err != nil {
+	if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.XChainID: {RemoveRequests: [][]byte{inputID[:]}}}); err != nil {
 		t.Fatal(err)
 	}
@@ -3042,7 +3038,7 @@ func TestAtomicTxFailsEVMStateTransferBuildBlock(t *testing.T) {
 		t.Fatal("Should have failed to issue due to an invalid export tx")
 	}
 
-	if err := vm.mempool.AddTx(exportTx2); err == nil {
+	if err := vm.mempool.AddRemoteTx(exportTx2); err == nil {
 		t.Fatal("Should have failed to add because conflicting")
 	}
@@ -3068,13 +3064,13 @@ func TestBuildInvalidBlockHead(t *testing.T) {
 	}()
 
 	key0 := testKeys[0]
-	addr0 := key0.PublicKey().Address()
+	addr0 := key0.Address()
 
 	// Create the transaction
-	utx := &UnsignedImportTx{
+	utx := &atomic.UnsignedImportTx{
 		NetworkID:    vm.ctx.NetworkID,
 		BlockchainID: vm.ctx.ChainID,
-		Outs: []EVMOutput{{
+		Outs: []atomic.EVMOutput{{
 			Address: common.Address(addr0),
 			Amount:  1 * units.Avax,
 			AssetID: vm.ctx.AVAXAssetID,
@@ -3092,8 +3088,8 @@ func TestBuildInvalidBlockHead(t *testing.T) {
 		},
 		SourceChain: vm.ctx.XChainID,
 	}
-	tx := &Tx{UnsignedAtomicTx: utx}
-	if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{key0}}); err != nil {
+	tx := &atomic.Tx{UnsignedAtomicTx: utx}
+	if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key0}}); err != nil {
 		t.Fatal(err)
 	}
@@ -3172,7 +3168,7 @@ func TestConfigureLogLevel(t *testing.T) {
 		}
 	}
 
-	// If the VM was not initialized, do not attept to shut it down
+	// If the VM was not initialized, do not attempt to shut it down
 	if err == nil {
 		shutdownChan := make(chan error, 1)
 		shutdownFunc := func() {
@@ -3225,22 +3221,22 @@ func TestBuildApricotPhase4Block(t *testing.T) {
 			Amt: importAmount,
 			OutputOwners: secp256k1fx.OutputOwners{
 				Threshold: 1,
-				Addrs:     []ids.ShortID{testKeys[0].PublicKey().Address()},
+				Addrs:     []ids.ShortID{testKeys[0].Address()},
 			},
 		},
 	}
-	utxoBytes, err := vm.codec.Marshal(codecVersion, utxo)
+	utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo)
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID)
 	inputID := utxo.InputID()
-	if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{
+	if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{
 		Key:   inputID[:],
 		Value: utxoBytes,
 		Traits: [][]byte{
-			testKeys[0].PublicKey().Address().Bytes(),
+			testKeys[0].Address().Bytes(),
 		},
 	}}}}); err != nil {
 		t.Fatal(err)
 	}
@@ -3395,22 +3391,22 @@ func TestBuildApricotPhase5Block(t *testing.T) {
 			Amt: importAmount,
 			OutputOwners: secp256k1fx.OutputOwners{
 				Threshold: 1,
-				Addrs:     []ids.ShortID{testKeys[0].PublicKey().Address()},
+				Addrs:     []ids.ShortID{testKeys[0].Address()},
 			},
 		},
 	}
:= vm.codec.Marshal(codecVersion, utxo) + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { t.Fatal(err) } xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) @@ -3579,8 +3575,8 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) { // Add the two conflicting transactions directly to the mempool, so that two consecutive transactions // will fail verification when build block is called. - vm.mempool.AddTx(importTxs[1]) - vm.mempool.AddTx(importTxs[2]) + vm.mempool.AddRemoteTx(importTxs[1]) + vm.mempool.AddRemoteTx(importTxs[2]) if _, err := vm.BuildBlock(context.Background()); err == nil { t.Fatal("Expected build block to fail due to empty block") @@ -3676,7 +3672,7 @@ func TestBuildBlockDoesNotExceedAtomicGasLimit(t *testing.T) { utxo, err := addUTXO(sharedMemory, vm.ctx, txID, uint32(i), vm.ctx.AVAXAssetID, importAmount, testShortIDAddrs[0]) assert.NoError(t, err) - importTx, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo}) + importTx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo}) if err != nil { t.Fatal(err) } @@ -3754,7 +3750,7 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { validEthBlock := blk1.(*chain.BlockWrapper).Block.(*Block).ethBlock - extraData, err := vm2.codec.Marshal(codecVersion, []*Tx{importTx}) + extraData, err := atomic.Codec.Marshal(atomic.CodecVersion, []*atomic.Tx{importTx}) if err != nil { t.Fatal(err) } @@ -3985,25 +3981,3 @@ func TestNoBlobsAllowed(t *testing.T) { err = vmBlock.Verify(ctx) require.ErrorContains(err, "blobs not enabled on avalanche networks") } - -func TestMinFeeSetAtEtna(t *testing.T) { - require := require.New(t) - now := time.Now() - etnaTime := uint64(now.Add(1 * time.Second).Unix()) - - genesis := genesisJSON( - activateEtna(params.TestEtnaChainConfig, etnaTime), - ) - clock := mockable.Clock{} - clock.Set(now) - - _, vm, _, _, _ := GenesisVMWithClock(t, false, genesis, "", "", clock) - initial := vm.txPool.MinFee() - require.Equal(params.ApricotPhase4MinBaseFee, initial.Int64()) - - require.Eventually( - func() bool { return params.EtnaMinBaseFee == vm.txPool.MinFee().Int64() }, - 5*time.Second, - 1*time.Second, - ) -} diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index 49984b7f7c..d746931e06 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/ids" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/snow/validators/validatorstest" @@ -32,9 +33,10 @@ import ( "github.com/ava-labs/coreth/params/extras" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/precompile/contract" - "github.com/ava-labs/coreth/precompile/contracts/warp" + warpcontract 
"github.com/ava-labs/coreth/precompile/contracts/warp" "github.com/ava-labs/coreth/predicate" "github.com/ava-labs/coreth/utils" + "github.com/ava-labs/coreth/warp" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/crypto" "github.com/stretchr/testify/require" @@ -75,7 +77,7 @@ func TestSendWarpMessage(t *testing.T) { payloadData := avagoUtils.RandomBytes(100) - warpSendMessageInput, err := warp.PackSendWarpMessage(payloadData) + warpSendMessageInput, err := warpcontract.PackSendWarpMessage(payloadData) require.NoError(err) addressedPayload, err := payload.NewAddressedCall( testEthAddrs[0].Bytes(), @@ -90,7 +92,7 @@ func TestSendWarpMessage(t *testing.T) { require.NoError(err) // Submit a transaction to trigger sending a warp message - tx0 := types.NewTransaction(uint64(0), warp.ContractAddress, big.NewInt(1), 100_000, big.NewInt(params.LaunchMinGasPrice), warpSendMessageInput) + tx0 := types.NewTransaction(uint64(0), warpcontract.ContractAddress, big.NewInt(1), 100_000, big.NewInt(params.LaunchMinGasPrice), warpSendMessageInput) signedTx0, err := types.SignTx(tx0, types.LatestSignerForChainID(vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(err) @@ -111,20 +113,19 @@ func TestSendWarpMessage(t *testing.T) { require.Len(receipts[0].Logs, 1) expectedTopics := []common.Hash{ - warp.WarpABI.Events["SendWarpMessage"].ID, + warpcontract.WarpABI.Events["SendWarpMessage"].ID, common.BytesToHash(testEthAddrs[0].Bytes()), common.Hash(expectedUnsignedMessage.ID()), } require.Equal(expectedTopics, receipts[0].Logs[0].Topics) logData := receipts[0].Logs[0].Data - unsignedMessage, err := warp.UnpackSendWarpEventDataToMessage(logData) + unsignedMessage, err := warpcontract.UnpackSendWarpEventDataToMessage(logData) require.NoError(err) - unsignedMessageID := unsignedMessage.ID() // Verify the signature cannot be fetched before the block is accepted - _, err = vm.warpBackend.GetMessageSignature(unsignedMessageID) + _, err = vm.warpBackend.GetMessageSignature(context.TODO(), unsignedMessage) require.Error(err) - _, err = vm.warpBackend.GetBlockSignature(blk.ID()) + _, err = vm.warpBackend.GetBlockSignature(context.TODO(), blk.ID()) require.Error(err) require.NoError(vm.SetPreference(context.Background(), blk.ID())) @@ -132,7 +133,7 @@ func TestSendWarpMessage(t *testing.T) { vm.blockChain.DrainAcceptorQueue() // Verify the message signature after accepting the block. - rawSignatureBytes, err := vm.warpBackend.GetMessageSignature(unsignedMessageID) + rawSignatureBytes, err := vm.warpBackend.GetMessageSignature(context.TODO(), unsignedMessage) require.NoError(err) blsSignature, err := bls.SignatureFromBytes(rawSignatureBytes[:]) require.NoError(err) @@ -149,7 +150,7 @@ func TestSendWarpMessage(t *testing.T) { require.True(bls.Verify(vm.ctx.PublicKey, blsSignature, unsignedMessage.Bytes())) // Verify the blockID will now be signed by the backend and produces a valid signature. 
- rawSignatureBytes, err = vm.warpBackend.GetBlockSignature(blk.ID()) + rawSignatureBytes, err = vm.warpBackend.GetBlockSignature(context.TODO(), blk.ID()) require.NoError(err) blsSignature, err = bls.SignatureFromBytes(rawSignatureBytes[:]) require.NoError(err) @@ -265,16 +266,16 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned defer logsSub.Unsubscribe() nodeID1 := ids.GenerateTestNodeID() - blsSecretKey1, err := bls.NewSecretKey() + blsSecretKey1, err := bls.NewSigner() require.NoError(err) - blsPublicKey1 := bls.PublicFromSecretKey(blsSecretKey1) - blsSignature1 := bls.Sign(blsSecretKey1, unsignedMessage.Bytes()) + blsPublicKey1 := blsSecretKey1.PublicKey() + blsSignature1 := blsSecretKey1.Sign(unsignedMessage.Bytes()) nodeID2 := ids.GenerateTestNodeID() - blsSecretKey2, err := bls.NewSecretKey() + blsSecretKey2, err := bls.NewSigner() require.NoError(err) - blsPublicKey2 := bls.PublicFromSecretKey(blsSecretKey2) - blsSignature2 := bls.Sign(blsSecretKey2, unsignedMessage.Bytes()) + blsPublicKey2 := blsSecretKey2.PublicKey() + blsSignature2 := blsSecretKey2.Sign(unsignedMessage.Bytes()) blsAggregatedSignature, err := bls.AggregateSignatures([]*bls.Signature{blsSignature1, blsSignature2}) require.NoError(err) @@ -342,7 +343,7 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned common.Big0, txPayload, types.AccessList{}, - warp.ContractAddress, + warpcontract.ContractAddress, signedMessage.Bytes(), ), types.LatestSignerForChainID(vm.chainConfig.ChainID), @@ -411,15 +412,15 @@ func TestReceiveWarpMessage(t *testing.T) { // enable warp at the default genesis time enableTime := upgrade.InitiallyActiveTime - enableConfig := warp.NewDefaultConfig(utils.TimeToNewUint64(enableTime)) + enableConfig := warpcontract.NewDefaultConfig(utils.TimeToNewUint64(enableTime)) // disable warp so we can re-enable it with RequirePrimaryNetworkSigners disableTime := upgrade.InitiallyActiveTime.Add(10 * time.Second) - disableConfig := warp.NewDisableConfig(utils.TimeToNewUint64(disableTime)) + disableConfig := warpcontract.NewDisableConfig(utils.TimeToNewUint64(disableTime)) // re-enable warp with RequirePrimaryNetworkSigners reEnableTime := disableTime.Add(10 * time.Second) - reEnableConfig := warp.NewConfig( + reEnableConfig := warpcontract.NewConfig( utils.TimeToNewUint64(reEnableTime), 0, // QuorumNumerator true, // RequirePrimaryNetworkSigners @@ -459,7 +460,7 @@ func TestReceiveWarpMessage(t *testing.T) { }, { name: "C-Chain message should be signed by subnet without RequirePrimaryNetworkSigners", - sourceChainID: testCChainID, + sourceChainID: vm.ctx.CChainID, msgFrom: fromPrimary, useSigners: signersSubnet, blockTime: upgrade.InitiallyActiveTime.Add(2 * blockGap), @@ -482,7 +483,7 @@ func TestReceiveWarpMessage(t *testing.T) { }, { name: "C-Chain message should be signed by primary with RequirePrimaryNetworkSigners (impacted)", - sourceChainID: testCChainID, + sourceChainID: vm.ctx.CChainID, msgFrom: fromPrimary, useSigners: signersPrimary, blockTime: reEnableTime.Add(2 * blockGap), @@ -523,18 +524,18 @@ func testReceiveWarpMessage( type signer struct { networkID ids.ID nodeID ids.NodeID - secret *bls.SecretKey + secret bls.Signer signature *bls.Signature weight uint64 } newSigner := func(networkID ids.ID, weight uint64) signer { - secret, err := bls.NewSecretKey() + secret, err := bls.NewSigner() require.NoError(err) return signer{ networkID: networkID, nodeID: ids.GenerateTestNodeID(), secret: secret, - signature: bls.Sign(secret, 
unsignedMessage.Bytes()), + signature: secret.Sign(unsignedMessage.Bytes()), weight: weight, } } @@ -582,7 +583,7 @@ func testReceiveWarpMessage( for _, s := range signers { vdrOutput[s.nodeID] = &validators.GetValidatorOutput{ NodeID: s.nodeID, - PublicKey: bls.PublicFromSecretKey(s.secret), + PublicKey: s.secret.PublicKey(), Weight: s.weight, } } @@ -608,20 +609,20 @@ func testReceiveWarpMessage( ) require.NoError(err) - getWarpMsgInput, err := warp.PackGetVerifiedWarpMessage(0) + getWarpMsgInput, err := warpcontract.PackGetVerifiedWarpMessage(0) require.NoError(err) getVerifiedWarpMessageTx, err := types.SignTx( predicate.NewPredicateTx( vm.chainConfig.ChainID, vm.txPool.Nonce(testEthAddrs[0]), - &warp.Module.Address, + &warpcontract.Module.Address, 1_000_000, big.NewInt(225*params.GWei), big.NewInt(params.GWei), common.Big0, getWarpMsgInput, types.AccessList{}, - warp.ContractAddress, + warpcontract.ContractAddress, signedMessage.Bytes(), ), types.LatestSignerForChainID(vm.chainConfig.ChainID), @@ -654,7 +655,7 @@ func testReceiveWarpMessage( // An empty bitset indicates success. txResultsBytes := results.GetResults( getVerifiedWarpMessageTx.Hash(), - warp.ContractAddress, + warpcontract.ContractAddress, ) bitset := set.BitsFromBytes(txResultsBytes) require.Zero(bitset.Len()) // Empty bitset indicates success @@ -687,8 +688,8 @@ func testReceiveWarpMessage( verifiedMessageTxReceipt := verifiedMessageReceipts[0] require.Equal(types.ReceiptStatusSuccessful, verifiedMessageTxReceipt.Status) - expectedOutput, err := warp.PackGetVerifiedWarpMessageOutput(warp.GetVerifiedWarpMessageOutput{ - Message: warp.WarpMessage{ + expectedOutput, err := warpcontract.PackGetVerifiedWarpMessageOutput(warpcontract.GetVerifiedWarpMessageOutput{ + Message: warpcontract.WarpMessage{ SourceChainID: common.Hash(sourceChainID), OriginSenderAddress: testEthAddrs[0], Payload: payloadData, @@ -729,8 +730,10 @@ func TestMessageSignatureRequestsToVM(t *testing.T) { // Add the known message and get its signature to confirm. 
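As in the AddMessage and GetMessageSignature sequence just below, the warp backend lookups now thread a context.Context, and message signatures are requested with the full unsigned message rather than its ID. A minimal sketch of that call shape; the interface here is illustrative, not coreth's actual backend type:

```go
package example

import (
	"context"
	"fmt"

	"github.com/ava-labs/avalanchego/ids"
	avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp"
)

// signatureBackend mirrors the call shape used in the updated tests; the
// real coreth backend interface may declare more methods.
type signatureBackend interface {
	GetMessageSignature(ctx context.Context, msg *avalancheWarp.UnsignedMessage) ([]byte, error)
	GetBlockSignature(ctx context.Context, blkID ids.ID) ([]byte, error)
}

// fetchSignatures threads a single context through both lookups, mirroring
// the context.TODO() call sites in these tests.
func fetchSignatures(ctx context.Context, b signatureBackend, msg *avalancheWarp.UnsignedMessage, blkID ids.ID) ([]byte, []byte, error) {
	msgSig, err := b.GetMessageSignature(ctx, msg)
	if err != nil {
		return nil, nil, fmt.Errorf("message signature: %w", err)
	}
	blkSig, err := b.GetBlockSignature(ctx, blkID)
	if err != nil {
		return nil, nil, fmt.Errorf("block signature: %w", err)
	}
	return msgSig, blkSig, nil
}
```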
err = vm.warpBackend.AddMessage(warpMessage)
 	require.NoError(t, err)
-	signature, err := vm.warpBackend.GetMessageSignature(warpMessage.ID())
+	signature, err := vm.warpBackend.GetMessageSignature(context.TODO(), warpMessage)
 	require.NoError(t, err)
+	var knownSignature [bls.SignatureLen]byte
+	copy(knownSignature[:], signature)
 
 	tests := map[string]struct {
 		messageID        ids.ID
@@ -738,7 +741,7 @@ func TestMessageSignatureRequestsToVM(t *testing.T) {
 	}{
 		"known": {
 			messageID:        warpMessage.ID(),
-			expectedResponse: signature,
+			expectedResponse: knownSignature,
 		},
 		"unknown": {
 			messageID: ids.GenerateTestID(),
@@ -785,8 +788,10 @@ func TestBlockSignatureRequestsToVM(t *testing.T) {
 	lastAcceptedID, err := vm.LastAccepted(context.Background())
 	require.NoError(t, err)
 
-	signature, err := vm.warpBackend.GetBlockSignature(lastAcceptedID)
+	signature, err := vm.warpBackend.GetBlockSignature(context.TODO(), lastAcceptedID)
 	require.NoError(t, err)
+	var knownSignature [bls.SignatureLen]byte
+	copy(knownSignature[:], signature)
 
 	tests := map[string]struct {
 		blockID          ids.ID
@@ -794,7 +799,7 @@
 	}{
 		"known": {
 			blockID:          lastAcceptedID,
-			expectedResponse: signature,
+			expectedResponse: knownSignature,
 		},
 		"unknown": {
 			blockID: ids.GenerateTestID(),
@@ -829,3 +834,61 @@ func TestBlockSignatureRequestsToVM(t *testing.T) {
 		})
 	}
 }
+
+func TestClearWarpDB(t *testing.T) {
+	ctx, db, genesisBytes, issuer, _ := setupGenesis(t, genesisJSONLatest)
+	vm := &VM{}
+	err := vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, &enginetest.Sender{})
+	require.NoError(t, err)
+
+	// use multiple messages to test that all messages get cleared
+	payloads := [][]byte{[]byte("test1"), []byte("test2"), []byte("test3"), []byte("test4"), []byte("test5")}
+	messages := []*avalancheWarp.UnsignedMessage{}
+
+	// add all messages
+	for _, payload := range payloads {
+		unsignedMsg, err := avalancheWarp.NewUnsignedMessage(vm.ctx.NetworkID, vm.ctx.ChainID, payload)
+		require.NoError(t, err)
+		err = vm.warpBackend.AddMessage(unsignedMsg)
+		require.NoError(t, err)
+		// ensure that the message was added
+		_, err = vm.warpBackend.GetMessageSignature(context.TODO(), unsignedMsg)
+		require.NoError(t, err)
+		messages = append(messages, unsignedMsg)
+	}
+
+	require.NoError(t, vm.Shutdown(context.Background()))
+
+	// Restarting the VM with the same database and the default config should not prune the warp db
+	vm = &VM{}
+	// we need a new context since the previous one has registered metrics.
+ ctx, _, _, _, _ = setupGenesis(t, genesisJSONLatest) + err = vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, &enginetest.Sender{}) + require.NoError(t, err) + + // check messages are still present + for _, message := range messages { + bytes, err := vm.warpBackend.GetMessageSignature(context.TODO(), message) + require.NoError(t, err) + require.NotEmpty(t, bytes) + } + + require.NoError(t, vm.Shutdown(context.Background())) + + // restart the VM with pruning enabled + vm = &VM{} + config := `{"prune-warp-db-enabled": true}` + ctx, _, _, _, _ = setupGenesis(t, genesisJSONLatest) + err = vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte(config), issuer, []*commonEng.Fx{}, &enginetest.Sender{}) + require.NoError(t, err) + + it := vm.warpDB.NewIterator() + require.False(t, it.Next()) + it.Release() + + // ensure all messages have been deleted + for _, message := range messages { + _, err := vm.warpBackend.GetMessageSignature(context.TODO(), message) + require.ErrorIs(t, err, &commonEng.AppError{Code: warp.ParseErrCode}) + } +} diff --git a/precompile/contract/interfaces.go b/precompile/contract/interfaces.go index 2bcaf1f948..7de1423a02 100644 --- a/precompile/contract/interfaces.go +++ b/precompile/contract/interfaces.go @@ -61,7 +61,7 @@ type AccessibleState interface { GetBlockContext() BlockContext GetSnowContext() *snow.Context GetChainConfig() precompileconfig.ChainConfig - Call(addr common.Address, input []byte, gas uint64, value *uint256.Int, _ ...vm.CallOption) (ret []byte, gasRemaining uint64, _ error) + Call(addr common.Address, input []byte, gas uint64, value *uint256.Int, opts ...vm.CallOption) (ret []byte, gasRemaining uint64, _ error) } // ConfigurationBlockContext defines the interface required to configure a precompile. @@ -72,7 +72,7 @@ type ConfigurationBlockContext interface { type BlockContext interface { ConfigurationBlockContext - // GetPredicateResults returns a the result of verifying the predicates of the + // GetPredicateResults returns the result of verifying the predicates of the // given transaction, precompile address pair as a byte array. GetPredicateResults(txHash common.Hash, precompileAddress common.Address) []byte } diff --git a/precompile/contract/mocks.go b/precompile/contract/mocks.go index 27a4cfaa41..6a83011623 100644 --- a/precompile/contract/mocks.go +++ b/precompile/contract/mocks.go @@ -1,9 +1,9 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/coreth/precompile/contract (interfaces: BlockContext,AccessibleState,StateDB) +// Source: interfaces.go // // Generated by this command: // -// mockgen -package=contract -destination=precompile/contract/mocks.go github.com/ava-labs/coreth/precompile/contract BlockContext,AccessibleState,StateDB +// mockgen -package=contract -source=interfaces.go -destination=mocks.go -exclude_interfaces StatefulPrecompiledContract,StateReader,ConfigurationBlockContext,Configurator // // Package contract is a generated GoMock package. @@ -16,181 +16,16 @@ import ( snow "github.com/ava-labs/avalanchego/snow" precompileconfig "github.com/ava-labs/coreth/precompile/precompileconfig" common "github.com/ava-labs/libevm/common" - types "github.com/ava-labs/libevm/core/types" vm "github.com/ava-labs/libevm/core/vm" uint256 "github.com/holiman/uint256" gomock "go.uber.org/mock/gomock" ) -// MockBlockContext is a mock of BlockContext interface. 
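The regenerated header above reflects the switch from reflect mode to source mode (-source=interfaces.go with -exclude_interfaces), which is what preserves parameter names in the generated code and adds the isgomock marker field. Call sites are unaffected; a minimal sketch of driving one of these mocks in a test (the address and nonce values are placeholders):

```go
package contract_test

import (
	"testing"

	"github.com/ava-labs/coreth/precompile/contract"
	"github.com/ava-labs/libevm/common"
	"go.uber.org/mock/gomock"
)

func TestMockStateDBSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	stateDB := contract.NewMockStateDB(ctrl)

	// Expectations read exactly as before regeneration; only the generated
	// parameter names inside mocks.go differ.
	addr := common.Address{1}
	stateDB.EXPECT().GetNonce(addr).Return(uint64(7))

	if got := stateDB.GetNonce(addr); got != 7 {
		t.Fatalf("got nonce %d, want 7", got)
	}
}
```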
-type MockBlockContext struct { - ctrl *gomock.Controller - recorder *MockBlockContextMockRecorder -} - -// MockBlockContextMockRecorder is the mock recorder for MockBlockContext. -type MockBlockContextMockRecorder struct { - mock *MockBlockContext -} - -// NewMockBlockContext creates a new mock instance. -func NewMockBlockContext(ctrl *gomock.Controller) *MockBlockContext { - mock := &MockBlockContext{ctrl: ctrl} - mock.recorder = &MockBlockContextMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBlockContext) EXPECT() *MockBlockContextMockRecorder { - return m.recorder -} - -// GetPredicateResults mocks base method. -func (m *MockBlockContext) GetPredicateResults(arg0 common.Hash, arg1 common.Address) []byte { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPredicateResults", arg0, arg1) - ret0, _ := ret[0].([]byte) - return ret0 -} - -// GetPredicateResults indicates an expected call of GetPredicateResults. -func (mr *MockBlockContextMockRecorder) GetPredicateResults(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPredicateResults", reflect.TypeOf((*MockBlockContext)(nil).GetPredicateResults), arg0, arg1) -} - -// Number mocks base method. -func (m *MockBlockContext) Number() *big.Int { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Number") - ret0, _ := ret[0].(*big.Int) - return ret0 -} - -// Number indicates an expected call of Number. -func (mr *MockBlockContextMockRecorder) Number() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Number", reflect.TypeOf((*MockBlockContext)(nil).Number)) -} - -// Timestamp mocks base method. -func (m *MockBlockContext) Timestamp() uint64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Timestamp") - ret0, _ := ret[0].(uint64) - return ret0 -} - -// Timestamp indicates an expected call of Timestamp. -func (mr *MockBlockContextMockRecorder) Timestamp() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Timestamp", reflect.TypeOf((*MockBlockContext)(nil).Timestamp)) -} - -// MockAccessibleState is a mock of AccessibleState interface. -type MockAccessibleState struct { - ctrl *gomock.Controller - recorder *MockAccessibleStateMockRecorder -} - -// MockAccessibleStateMockRecorder is the mock recorder for MockAccessibleState. -type MockAccessibleStateMockRecorder struct { - mock *MockAccessibleState -} - -// NewMockAccessibleState creates a new mock instance. -func NewMockAccessibleState(ctrl *gomock.Controller) *MockAccessibleState { - mock := &MockAccessibleState{ctrl: ctrl} - mock.recorder = &MockAccessibleStateMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockAccessibleState) EXPECT() *MockAccessibleStateMockRecorder { - return m.recorder -} - -// Call mocks base method. -func (m *MockAccessibleState) Call(arg0 common.Address, arg1 []byte, arg2 uint64, arg3 *uint256.Int, arg4 ...vm.CallOption) ([]byte, uint64, error) { - m.ctrl.T.Helper() - varargs := []any{arg0, arg1, arg2, arg3} - for _, a := range arg4 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Call", varargs...) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(uint64) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// Call indicates an expected call of Call. 
-func (mr *MockAccessibleStateMockRecorder) Call(arg0, arg1, arg2, arg3 any, arg4 ...any) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]any{arg0, arg1, arg2, arg3}, arg4...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Call", reflect.TypeOf((*MockAccessibleState)(nil).Call), varargs...) -} - -// GetBlockContext mocks base method. -func (m *MockAccessibleState) GetBlockContext() BlockContext { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBlockContext") - ret0, _ := ret[0].(BlockContext) - return ret0 -} - -// GetBlockContext indicates an expected call of GetBlockContext. -func (mr *MockAccessibleStateMockRecorder) GetBlockContext() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockContext", reflect.TypeOf((*MockAccessibleState)(nil).GetBlockContext)) -} - -// GetChainConfig mocks base method. -func (m *MockAccessibleState) GetChainConfig() precompileconfig.ChainConfig { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChainConfig") - ret0, _ := ret[0].(precompileconfig.ChainConfig) - return ret0 -} - -// GetChainConfig indicates an expected call of GetChainConfig. -func (mr *MockAccessibleStateMockRecorder) GetChainConfig() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChainConfig", reflect.TypeOf((*MockAccessibleState)(nil).GetChainConfig)) -} - -// GetSnowContext mocks base method. -func (m *MockAccessibleState) GetSnowContext() *snow.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSnowContext") - ret0, _ := ret[0].(*snow.Context) - return ret0 -} - -// GetSnowContext indicates an expected call of GetSnowContext. -func (mr *MockAccessibleStateMockRecorder) GetSnowContext() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnowContext", reflect.TypeOf((*MockAccessibleState)(nil).GetSnowContext)) -} - -// GetStateDB mocks base method. -func (m *MockAccessibleState) GetStateDB() StateDB { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStateDB") - ret0, _ := ret[0].(StateDB) - return ret0 -} - -// GetStateDB indicates an expected call of GetStateDB. -func (mr *MockAccessibleStateMockRecorder) GetStateDB() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStateDB", reflect.TypeOf((*MockAccessibleState)(nil).GetStateDB)) -} - // MockStateDB is a mock of StateDB interface. type MockStateDB struct { ctrl *gomock.Controller recorder *MockStateDBMockRecorder + isgomock struct{} } // MockStateDBMockRecorder is the mock recorder for MockStateDB. @@ -235,7 +70,7 @@ func (mr *MockStateDBMockRecorder) AddBalanceMultiCoin(arg0, arg1, arg2 any) *go } // AddLog mocks base method. -func (m *MockStateDB) AddLog(arg0 *types.Log) { +func (m *MockStateDB) AddLog(arg0 *Log) { m.ctrl.T.Helper() m.ctrl.Call(m, "AddLog", arg0) } @@ -330,18 +165,18 @@ func (mr *MockStateDBMockRecorder) GetNonce(arg0 any) *gomock.Call { } // GetPredicateStorageSlots mocks base method. -func (m *MockStateDB) GetPredicateStorageSlots(arg0 common.Address, arg1 int) ([]byte, bool) { +func (m *MockStateDB) GetPredicateStorageSlots(address common.Address, index int) ([]byte, bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPredicateStorageSlots", arg0, arg1) + ret := m.ctrl.Call(m, "GetPredicateStorageSlots", address, index) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(bool) return ret0, ret1 } // GetPredicateStorageSlots indicates an expected call of GetPredicateStorageSlots. 
-func (mr *MockStateDBMockRecorder) GetPredicateStorageSlots(arg0, arg1 any) *gomock.Call { +func (mr *MockStateDBMockRecorder) GetPredicateStorageSlots(address, index any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPredicateStorageSlots", reflect.TypeOf((*MockStateDB)(nil).GetPredicateStorageSlots), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPredicateStorageSlots", reflect.TypeOf((*MockStateDB)(nil).GetPredicateStorageSlots), address, index) } // GetState mocks base method. @@ -397,15 +232,15 @@ func (mr *MockStateDBMockRecorder) SetNonce(arg0, arg1 any) *gomock.Call { } // SetPredicateStorageSlots mocks base method. -func (m *MockStateDB) SetPredicateStorageSlots(arg0 common.Address, arg1 [][]byte) { +func (m *MockStateDB) SetPredicateStorageSlots(address common.Address, predicates [][]byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetPredicateStorageSlots", arg0, arg1) + m.ctrl.Call(m, "SetPredicateStorageSlots", address, predicates) } // SetPredicateStorageSlots indicates an expected call of SetPredicateStorageSlots. -func (mr *MockStateDBMockRecorder) SetPredicateStorageSlots(arg0, arg1 any) *gomock.Call { +func (mr *MockStateDBMockRecorder) SetPredicateStorageSlots(address, predicates any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPredicateStorageSlots", reflect.TypeOf((*MockStateDB)(nil).SetPredicateStorageSlots), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPredicateStorageSlots", reflect.TypeOf((*MockStateDB)(nil).SetPredicateStorageSlots), address, predicates) } // SetState mocks base method. @@ -445,3 +280,170 @@ func (mr *MockStateDBMockRecorder) SubBalanceMultiCoin(arg0, arg1, arg2 any) *go mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubBalanceMultiCoin", reflect.TypeOf((*MockStateDB)(nil).SubBalanceMultiCoin), arg0, arg1, arg2) } + +// MockAccessibleState is a mock of AccessibleState interface. +type MockAccessibleState struct { + ctrl *gomock.Controller + recorder *MockAccessibleStateMockRecorder + isgomock struct{} +} + +// MockAccessibleStateMockRecorder is the mock recorder for MockAccessibleState. +type MockAccessibleStateMockRecorder struct { + mock *MockAccessibleState +} + +// NewMockAccessibleState creates a new mock instance. +func NewMockAccessibleState(ctrl *gomock.Controller) *MockAccessibleState { + mock := &MockAccessibleState{ctrl: ctrl} + mock.recorder = &MockAccessibleStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAccessibleState) EXPECT() *MockAccessibleStateMockRecorder { + return m.recorder +} + +// Call mocks base method. +func (m *MockAccessibleState) Call(addr common.Address, input []byte, gas uint64, value *uint256.Int, opts ...vm.CallOption) ([]byte, uint64, error) { + m.ctrl.T.Helper() + varargs := []any{addr, input, gas, value} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Call", varargs...) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(uint64) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// Call indicates an expected call of Call. +func (mr *MockAccessibleStateMockRecorder) Call(addr, input, gas, value any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{addr, input, gas, value}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Call", reflect.TypeOf((*MockAccessibleState)(nil).Call), varargs...) +} + +// GetBlockContext mocks base method. +func (m *MockAccessibleState) GetBlockContext() BlockContext { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockContext") + ret0, _ := ret[0].(BlockContext) + return ret0 +} + +// GetBlockContext indicates an expected call of GetBlockContext. +func (mr *MockAccessibleStateMockRecorder) GetBlockContext() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockContext", reflect.TypeOf((*MockAccessibleState)(nil).GetBlockContext)) +} + +// GetChainConfig mocks base method. +func (m *MockAccessibleState) GetChainConfig() precompileconfig.ChainConfig { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChainConfig") + ret0, _ := ret[0].(precompileconfig.ChainConfig) + return ret0 +} + +// GetChainConfig indicates an expected call of GetChainConfig. +func (mr *MockAccessibleStateMockRecorder) GetChainConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChainConfig", reflect.TypeOf((*MockAccessibleState)(nil).GetChainConfig)) +} + +// GetSnowContext mocks base method. +func (m *MockAccessibleState) GetSnowContext() *snow.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSnowContext") + ret0, _ := ret[0].(*snow.Context) + return ret0 +} + +// GetSnowContext indicates an expected call of GetSnowContext. +func (mr *MockAccessibleStateMockRecorder) GetSnowContext() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnowContext", reflect.TypeOf((*MockAccessibleState)(nil).GetSnowContext)) +} + +// GetStateDB mocks base method. +func (m *MockAccessibleState) GetStateDB() StateDB { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStateDB") + ret0, _ := ret[0].(StateDB) + return ret0 +} + +// GetStateDB indicates an expected call of GetStateDB. +func (mr *MockAccessibleStateMockRecorder) GetStateDB() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStateDB", reflect.TypeOf((*MockAccessibleState)(nil).GetStateDB)) +} + +// MockBlockContext is a mock of BlockContext interface. +type MockBlockContext struct { + ctrl *gomock.Controller + recorder *MockBlockContextMockRecorder + isgomock struct{} +} + +// MockBlockContextMockRecorder is the mock recorder for MockBlockContext. +type MockBlockContextMockRecorder struct { + mock *MockBlockContext +} + +// NewMockBlockContext creates a new mock instance. +func NewMockBlockContext(ctrl *gomock.Controller) *MockBlockContext { + mock := &MockBlockContext{ctrl: ctrl} + mock.recorder = &MockBlockContextMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBlockContext) EXPECT() *MockBlockContextMockRecorder { + return m.recorder +} + +// GetPredicateResults mocks base method. +func (m *MockBlockContext) GetPredicateResults(txHash common.Hash, precompileAddress common.Address) []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPredicateResults", txHash, precompileAddress) + ret0, _ := ret[0].([]byte) + return ret0 +} + +// GetPredicateResults indicates an expected call of GetPredicateResults. 
+func (mr *MockBlockContextMockRecorder) GetPredicateResults(txHash, precompileAddress any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPredicateResults", reflect.TypeOf((*MockBlockContext)(nil).GetPredicateResults), txHash, precompileAddress) +} + +// Number mocks base method. +func (m *MockBlockContext) Number() *big.Int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Number") + ret0, _ := ret[0].(*big.Int) + return ret0 +} + +// Number indicates an expected call of Number. +func (mr *MockBlockContextMockRecorder) Number() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Number", reflect.TypeOf((*MockBlockContext)(nil).Number)) +} + +// Timestamp mocks base method. +func (m *MockBlockContext) Timestamp() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Timestamp") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// Timestamp indicates an expected call of Timestamp. +func (mr *MockBlockContextMockRecorder) Timestamp() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Timestamp", reflect.TypeOf((*MockBlockContext)(nil).Timestamp)) +} diff --git a/precompile/contract/mocks_generate_test.go b/precompile/contract/mocks_generate_test.go new file mode 100644 index 0000000000..dbb1f56115 --- /dev/null +++ b/precompile/contract/mocks_generate_test.go @@ -0,0 +1,6 @@ +// Copyright (C) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package contract + +//go:generate go run go.uber.org/mock/mockgen -package=$GOPACKAGE -source=interfaces.go -destination=mocks.go -exclude_interfaces StatefulPrecompiledContract,StateReader,ConfigurationBlockContext,Configurator diff --git a/precompile/contracts/warp/README.md b/precompile/contracts/warp/README.md index 10e1daaa38..73ca165224 100644 --- a/precompile/contracts/warp/README.md +++ b/precompile/contracts/warp/README.md @@ -57,7 +57,7 @@ To use this function, the transaction must include the signed Avalanche Warp Mes This leads to the following advantages: 1. The EVM execution does not need to verify the Warp Message at runtime (no signature verification or external calls to the P-Chain) -2. The EVM can deterministically re-execute and re-verify blocks assuming the predicate was verified by the network (eg., in bootstrapping) +2. The EVM can deterministically re-execute and re-verify blocks assuming the predicate was verified by the network (e.g., in bootstrapping) This pre-verification is performed using the ProposerVM Block header during [block verification](../../../plugin/evm/block.go#L220) and [block building](../../../miner/worker.go#L200). 
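To make that advantage concrete, here is an illustrative sketch of the record-then-read flow behind predicate pre-verification (the names are hypothetical, not coreth's internals): each transaction/precompile pair is verified once when the block is verified or built, and execution later reads only the stored result bytes, matching the BlockContext.GetPredicateResults accessor in this diff.

```go
package example

import "github.com/ava-labs/libevm/common"

// predicateResults stores, per transaction and precompile address, the byte
// array produced when the block's predicates were verified.
type predicateResults map[common.Hash]map[common.Address][]byte

// record is called once per tx/precompile pair during block verification
// or block building.
func (r predicateResults) record(txHash common.Hash, addr common.Address, bits []byte) {
	if r[txHash] == nil {
		r[txHash] = make(map[common.Address][]byte)
	}
	r[txHash][addr] = bits
}

// lookup is what execution consults instead of re-verifying signatures,
// mirroring BlockContext.GetPredicateResults.
func (r predicateResults) lookup(txHash common.Hash, addr common.Address) []byte {
	return r[txHash][addr]
}
```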
diff --git a/precompile/contracts/warp/predicate_test.go b/precompile/contracts/warp/predicate_test.go index 04ddd46705..a5fe8265b7 100644 --- a/precompile/contracts/warp/predicate_test.go +++ b/precompile/contracts/warp/predicate_test.go @@ -93,7 +93,7 @@ func init() { } for _, testVdr := range testVdrs { - blsSignature := bls.Sign(testVdr.sk, unsignedMsg.Bytes()) + blsSignature := testVdr.sk.Sign(unsignedMsg.Bytes()) blsSignatures = append(blsSignatures, blsSignature) } @@ -102,7 +102,7 @@ func init() { type testValidator struct { nodeID ids.NodeID - sk *bls.SecretKey + sk bls.Signer vdr *avalancheWarp.Validator } @@ -111,13 +111,13 @@ func (v *testValidator) Compare(o *testValidator) int { } func newTestValidator() *testValidator { - sk, err := bls.NewSecretKey() + sk, err := bls.NewSigner() if err != nil { panic(err) } nodeID := ids.GenerateTestNodeID() - pk := bls.PublicFromSecretKey(sk) + pk := sk.PublicKey() return &testValidator{ nodeID: nodeID, sk: sk, @@ -240,7 +240,7 @@ func testWarpMessageFromPrimaryNetwork(t *testing.T, requirePrimaryNetworkSigner PublicKey: testVdrs[i].vdr.PublicKey, } getValidatorsOutput[testVdrs[i].nodeID] = validatorOutput - blsSignatures = append(blsSignatures, bls.Sign(testVdrs[i].sk, unsignedMsg.Bytes())) + blsSignatures = append(blsSignatures, testVdrs[i].sk.Sign(unsignedMsg.Bytes())) } aggregateSignature, err := bls.AggregateSignatures(blsSignatures) require.NoError(err) diff --git a/precompile/contracts/warp/signature_verification_test.go b/precompile/contracts/warp/signature_verification_test.go index 5b54cd29b8..d52f0a0f89 100644 --- a/precompile/contracts/warp/signature_verification_test.go +++ b/precompile/contracts/warp/signature_verification_test.go @@ -212,8 +212,8 @@ func TestSignatureVerification(t *testing.T) { signers.Add(1) unsignedBytes := unsignedMsg.Bytes() - vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) - vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) + vdr0Sig := testVdrs[0].sk.Sign(unsignedBytes) + vdr1Sig := testVdrs[1].sk.Sign(unsignedBytes) aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr0Sig, vdr1Sig}) require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} @@ -284,7 +284,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) unsignedBytes := unsignedMsg.Bytes() - vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) + vdr0Sig := testVdrs[0].sk.Sign(unsignedBytes) aggSigBytes := [bls.SignatureLen]byte{} copy(aggSigBytes[:], bls.SignatureToBytes(vdr0Sig)) @@ -323,10 +323,10 @@ func TestSignatureVerification(t *testing.T) { signers.Add(1) unsignedBytes := unsignedMsg.Bytes() - vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) + vdr0Sig := testVdrs[0].sk.Sign(unsignedBytes) // Give sig from vdr[2] even though the bit vector says it // should be from vdr[1] - vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + vdr2Sig := testVdrs[2].sk.Sign(unsignedBytes) aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr0Sig, vdr2Sig}) require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} @@ -367,7 +367,7 @@ func TestSignatureVerification(t *testing.T) { signers.Add(1) unsignedBytes := unsignedMsg.Bytes() - vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) + vdr0Sig := testVdrs[0].sk.Sign(unsignedBytes) // Don't give the sig from vdr[1] aggSigBytes := [bls.SignatureLen]byte{} copy(aggSigBytes[:], bls.SignatureToBytes(vdr0Sig)) @@ -407,11 +407,11 @@ func TestSignatureVerification(t *testing.T) { signers.Add(1) unsignedBytes := unsignedMsg.Bytes() - vdr0Sig := bls.Sign(testVdrs[0].sk, 
unsignedBytes) - vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) + vdr0Sig := testVdrs[0].sk.Sign(unsignedBytes) + vdr1Sig := testVdrs[1].sk.Sign(unsignedBytes) // Give sig from vdr[2] even though the bit vector doesn't have // it - vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + vdr2Sig := testVdrs[2].sk.Sign(unsignedBytes) aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr0Sig, vdr1Sig, vdr2Sig}) require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} @@ -454,8 +454,8 @@ func TestSignatureVerification(t *testing.T) { signers.Add(2) unsignedBytes := unsignedMsg.Bytes() - vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) - vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + vdr1Sig := testVdrs[1].sk.Sign(unsignedBytes) + vdr2Sig := testVdrs[2].sk.Sign(unsignedBytes) aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr1Sig, vdr2Sig}) require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} @@ -498,8 +498,8 @@ func TestSignatureVerification(t *testing.T) { signers.Add(2) unsignedBytes := unsignedMsg.Bytes() - vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) - vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + vdr1Sig := testVdrs[1].sk.Sign(unsignedBytes) + vdr2Sig := testVdrs[2].sk.Sign(unsignedBytes) aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr1Sig, vdr2Sig}) require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} @@ -559,8 +559,8 @@ func TestSignatureVerification(t *testing.T) { signers.Add(1) // vdr[2] unsignedBytes := unsignedMsg.Bytes() - vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) - vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + vdr1Sig := testVdrs[1].sk.Sign(unsignedBytes) + vdr2Sig := testVdrs[2].sk.Sign(unsignedBytes) aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr1Sig, vdr2Sig}) require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} @@ -621,7 +621,7 @@ func TestSignatureVerification(t *testing.T) { unsignedBytes := unsignedMsg.Bytes() // Because vdr[1] and vdr[2] share a key, only one of them sign. - vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + vdr2Sig := testVdrs[2].sk.Sign(unsignedBytes) aggSigBytes := [bls.SignatureLen]byte{} copy(aggSigBytes[:], bls.SignatureToBytes(vdr2Sig)) diff --git a/precompile/precompileconfig/config.go b/precompile/precompileconfig/config.go index 7853fac42d..7dd027aadc 100644 --- a/precompile/precompileconfig/config.go +++ b/precompile/precompileconfig/config.go @@ -5,8 +5,6 @@ package precompileconfig import ( - "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/vms/platformvm/warp" @@ -53,21 +51,14 @@ type Predicater interface { VerifyPredicate(predicateContext *PredicateContext, predicateBytes []byte) error } -// SharedMemoryWriter defines an interface to allow a precompile's Accepter to write operations -// into shared memory to be committed atomically on block accept. 
-type SharedMemoryWriter interface { - AddSharedMemoryRequests(chainID ids.ID, requests *atomic.Requests) -} - type WarpMessageWriter interface { AddMessage(unsignedMessage *warp.UnsignedMessage) error } // AcceptContext defines the context passed in to a precompileconfig's Accepter type AcceptContext struct { - SnowCtx *snow.Context - SharedMemory SharedMemoryWriter - Warp WarpMessageWriter + SnowCtx *snow.Context + Warp WarpMessageWriter } // Accepter is an optional interface for StatefulPrecompiledContracts to implement. diff --git a/precompile/precompileconfig/mocks.go b/precompile/precompileconfig/mocks.go index 764e6980c5..6843eaa6b4 100644 --- a/precompile/precompileconfig/mocks.go +++ b/precompile/precompileconfig/mocks.go @@ -3,7 +3,7 @@ // // Generated by this command: // -// mockgen -package=precompileconfig -destination=precompile/precompileconfig/mocks.go github.com/ava-labs/coreth/precompile/precompileconfig Predicater,Config,ChainConfig,Accepter +// mockgen -package=precompileconfig -destination=mocks.go . Predicater,Config,ChainConfig,Accepter // // Package precompileconfig is a generated GoMock package. @@ -20,6 +20,7 @@ import ( type MockPredicater struct { ctrl *gomock.Controller recorder *MockPredicaterMockRecorder + isgomock struct{} } // MockPredicaterMockRecorder is the mock recorder for MockPredicater. @@ -40,38 +41,39 @@ func (m *MockPredicater) EXPECT() *MockPredicaterMockRecorder { } // PredicateGas mocks base method. -func (m *MockPredicater) PredicateGas(arg0 []byte) (uint64, error) { +func (m *MockPredicater) PredicateGas(predicateBytes []byte) (uint64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PredicateGas", arg0) + ret := m.ctrl.Call(m, "PredicateGas", predicateBytes) ret0, _ := ret[0].(uint64) ret1, _ := ret[1].(error) return ret0, ret1 } // PredicateGas indicates an expected call of PredicateGas. -func (mr *MockPredicaterMockRecorder) PredicateGas(arg0 any) *gomock.Call { +func (mr *MockPredicaterMockRecorder) PredicateGas(predicateBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PredicateGas", reflect.TypeOf((*MockPredicater)(nil).PredicateGas), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PredicateGas", reflect.TypeOf((*MockPredicater)(nil).PredicateGas), predicateBytes) } // VerifyPredicate mocks base method. -func (m *MockPredicater) VerifyPredicate(arg0 *PredicateContext, arg1 []byte) error { +func (m *MockPredicater) VerifyPredicate(predicateContext *PredicateContext, predicateBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyPredicate", arg0, arg1) + ret := m.ctrl.Call(m, "VerifyPredicate", predicateContext, predicateBytes) ret0, _ := ret[0].(error) return ret0 } // VerifyPredicate indicates an expected call of VerifyPredicate. -func (mr *MockPredicaterMockRecorder) VerifyPredicate(arg0, arg1 any) *gomock.Call { +func (mr *MockPredicaterMockRecorder) VerifyPredicate(predicateContext, predicateBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyPredicate", reflect.TypeOf((*MockPredicater)(nil).VerifyPredicate), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyPredicate", reflect.TypeOf((*MockPredicater)(nil).VerifyPredicate), predicateContext, predicateBytes) } // MockConfig is a mock of Config interface. 
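Following the AcceptContext change above (SharedMemoryWriter removed, leaving only SnowCtx and Warp), an Accepter implementation now compiles against the slimmed struct. A minimal sketch; the no-op body is illustrative:

```go
package example

import (
	"github.com/ava-labs/coreth/precompile/precompileconfig"
	"github.com/ava-labs/libevm/common"
)

// noopAccepter satisfies precompileconfig.Accepter against the slimmed
// AcceptContext; a real precompile would emit warp messages through
// acceptCtx.Warp rather than writing to shared memory.
type noopAccepter struct{}

var _ precompileconfig.Accepter = noopAccepter{}

func (noopAccepter) Accept(
	acceptCtx *precompileconfig.AcceptContext,
	blockHash common.Hash,
	blockNumber uint64,
	txHash common.Hash,
	logIndex int,
	topics []common.Hash,
	logData []byte,
) error {
	_ = acceptCtx.SnowCtx // shared-memory requests can no longer be queued here
	return nil
}
```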
type MockConfig struct { ctrl *gomock.Controller recorder *MockConfigMockRecorder + isgomock struct{} } // MockConfigMockRecorder is the mock recorder for MockConfig. @@ -165,6 +167,7 @@ func (mr *MockConfigMockRecorder) Verify(arg0 any) *gomock.Call { type MockChainConfig struct { ctrl *gomock.Controller recorder *MockChainConfigMockRecorder + isgomock struct{} } // MockChainConfigMockRecorder is the mock recorder for MockChainConfig. @@ -185,23 +188,24 @@ func (m *MockChainConfig) EXPECT() *MockChainConfigMockRecorder { } // IsDurango mocks base method. -func (m *MockChainConfig) IsDurango(arg0 uint64) bool { +func (m *MockChainConfig) IsDurango(time uint64) bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsDurango", arg0) + ret := m.ctrl.Call(m, "IsDurango", time) ret0, _ := ret[0].(bool) return ret0 } // IsDurango indicates an expected call of IsDurango. -func (mr *MockChainConfigMockRecorder) IsDurango(arg0 any) *gomock.Call { +func (mr *MockChainConfigMockRecorder) IsDurango(time any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDurango", reflect.TypeOf((*MockChainConfig)(nil).IsDurango), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDurango", reflect.TypeOf((*MockChainConfig)(nil).IsDurango), time) } // MockAccepter is a mock of Accepter interface. type MockAccepter struct { ctrl *gomock.Controller recorder *MockAccepterMockRecorder + isgomock struct{} } // MockAccepterMockRecorder is the mock recorder for MockAccepter. @@ -222,15 +226,15 @@ func (m *MockAccepter) EXPECT() *MockAccepterMockRecorder { } // Accept mocks base method. -func (m *MockAccepter) Accept(arg0 *AcceptContext, arg1 common.Hash, arg2 uint64, arg3 common.Hash, arg4 int, arg5 []common.Hash, arg6 []byte) error { +func (m *MockAccepter) Accept(acceptCtx *AcceptContext, blockHash common.Hash, blockNumber uint64, txHash common.Hash, logIndex int, topics []common.Hash, logData []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Accept", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret := m.ctrl.Call(m, "Accept", acceptCtx, blockHash, blockNumber, txHash, logIndex, topics, logData) ret0, _ := ret[0].(error) return ret0 } // Accept indicates an expected call of Accept. -func (mr *MockAccepterMockRecorder) Accept(arg0, arg1, arg2, arg3, arg4, arg5, arg6 any) *gomock.Call { +func (mr *MockAccepterMockRecorder) Accept(acceptCtx, blockHash, blockNumber, txHash, logIndex, topics, logData any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockAccepter)(nil).Accept), arg0, arg1, arg2, arg3, arg4, arg5, arg6) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockAccepter)(nil).Accept), acceptCtx, blockHash, blockNumber, txHash, logIndex, topics, logData) } diff --git a/precompile/precompileconfig/mocks_generate_test.go b/precompile/precompileconfig/mocks_generate_test.go new file mode 100644 index 0000000000..9ca4ea38df --- /dev/null +++ b/precompile/precompileconfig/mocks_generate_test.go @@ -0,0 +1,6 @@ +// Copyright (C) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package precompileconfig + +//go:generate go run go.uber.org/mock/mockgen -package=$GOPACKAGE -destination=mocks.go . 
Predicater,Config,ChainConfig,Accepter diff --git a/rpc/handler.go b/rpc/handler.go index eba91eb990..427af29cd7 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -356,7 +356,7 @@ func (h *handler) addRequestOp(op *requestOp) { } } -// removeRequestOps stops waiting for the given request IDs. +// removeRequestOp stops waiting for the given request IDs. func (h *handler) removeRequestOp(op *requestOp) { for _, id := range op.ids { delete(h.respWait, string(id)) @@ -479,7 +479,7 @@ func (h *handler) startCallProc(fn func(*callProc)) { } } -// handleResponse processes method call responses. +// handleResponses processes method call responses. func (h *handler) handleResponses(batch []*jsonrpcMessage, handleCall func(*jsonrpcMessage)) { var resolvedops []*requestOp handleResp := func(msg *jsonrpcMessage) { diff --git a/rpc/types.go b/rpc/types.go index 49da20455f..38123bb430 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -62,7 +62,7 @@ type ServerCodec interface { type jsonWriter interface { // writeJSON writes a message to the connection. writeJSON(ctx context.Context, msg interface{}, isError bool) error - // writeJSON writes a message to the connection with the option of skipping the deadline. + // writeJSONSkipDeadline writes a message to the connection with the option of skipping the deadline. writeJSONSkipDeadline(ctx context.Context, msg interface{}, isError bool, skip bool) error // Closed returns a channel which is closed when the connection is closed. closed() <-chan interface{} diff --git a/scripts/build_docker_image.sh b/scripts/build_docker_image.sh deleted file mode 100755 index 9126a47a8d..0000000000 --- a/scripts/build_docker_image.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -# Avalanche root directory -CORETH_PATH=$( - cd "$(dirname "${BASH_SOURCE[0]}")" - cd .. 
&& pwd -) - -# Load the constants -source "$CORETH_PATH"/scripts/constants.sh - -# Load the versions -source "$CORETH_PATH"/scripts/versions.sh - -# WARNING: this will use the most recent commit even if there are un-committed changes present -BUILD_IMAGE_ID=${BUILD_IMAGE_ID:-"${CURRENT_BRANCH}"} -echo "Building Docker Image: $DOCKERHUB_REPO:$BUILD_IMAGE_ID based of AvalancheGo@$AVALANCHE_VERSION" -docker build -t "$DOCKERHUB_REPO:$BUILD_IMAGE_ID" "$CORETH_PATH" -f "$CORETH_PATH/Dockerfile" \ - --build-arg AVALANCHE_VERSION="$AVALANCHE_VERSION" \ - --build-arg CORETH_COMMIT="$CORETH_COMMIT" \ - --build-arg CURRENT_BRANCH="$CURRENT_BRANCH" diff --git a/scripts/build_test.sh b/scripts/build_test.sh index 460ebcd564..c3fb241349 100755 --- a/scripts/build_test.sh +++ b/scripts/build_test.sh @@ -41,7 +41,7 @@ do unexpected_failures=$( # First grep pattern corresponds to test failures, second pattern corresponds to test panics due to timeouts (grep "^--- FAIL" test.out | awk '{print $3}' || grep -E '^\s+Test.+ \(' test.out | awk '{print $1}') | - sort -u | comm -23 - ./scripts/known_flakes.txt + sort -u | comm -23 - <(sed 's/\r$//' ./scripts/known_flakes.txt) ) if [ -n "${unexpected_failures}" ]; then echo "Unexpected test failures: ${unexpected_failures}" diff --git a/scripts/eth-allowed-packages.txt b/scripts/eth-allowed-packages.txt index 6f69e52bcd..8412e8d94e 100644 --- a/scripts/eth-allowed-packages.txt +++ b/scripts/eth-allowed-packages.txt @@ -27,6 +27,7 @@ "github.com/ava-labs/libevm/libevm" "github.com/ava-labs/libevm/libevm/stateconf" "github.com/ava-labs/libevm/log" +"github.com/ava-labs/libevm/p2p/enode" "github.com/ava-labs/libevm/params" "github.com/ava-labs/libevm/rlp" "github.com/ava-labs/libevm/trie" diff --git a/scripts/known_flakes.txt b/scripts/known_flakes.txt index 05c41794ba..0f324a6714 100644 --- a/scripts/known_flakes.txt +++ b/scripts/known_flakes.txt @@ -5,7 +5,9 @@ TestMempoolAtmTxsAppGossipHandlingDiscardedTx TestMempoolEthTxsAppGossipHandling TestResumeSyncAccountsTrieInterrupted TestResyncNewRootAfterDeletes +TestTimedUnlock TestTransactionSkipIndexing TestVMShutdownWhileSyncing +TestWaitDeployedCornerCases TestWalletNotifications TestWebsocketLargeRead \ No newline at end of file diff --git a/scripts/lint_allowed_eth_imports.sh b/scripts/lint_allowed_eth_imports.sh index fcbfe7c6f4..be4af2bdf9 100755 --- a/scripts/lint_allowed_eth_imports.sh +++ b/scripts/lint_allowed_eth_imports.sh @@ -11,7 +11,7 @@ set -o pipefail # 4. Print out the difference between the search results and the list of specified allowed package imports from libevm. libevm_regexp='"github.com/ava-labs/libevm/.*"' allow_named_imports='eth\w\+ "' -extra_imports=$(grep -r --include='*.go' --exclude=mocks.go "${libevm_regexp}" -h | grep -v "${allow_named_imports}" | grep -o "${libevm_regexp}" | sort -u | comm -23 - ./scripts/eth-allowed-packages.txt) +extra_imports=$(find . -type f -name '*.go' ! -path './eth/protocols/snap/*' ! 
-name 'mocks.go' -print0 | xargs -0 grep "${libevm_regexp}" -h | grep -v "${allow_named_imports}" | grep -o "${libevm_regexp}" | sort -u | comm -23 - ./scripts/eth-allowed-packages.txt) if [ -n "${extra_imports}" ]; then echo "new ethereum imports should be added to ./scripts/eth-allowed-packages.txt to prevent accidental imports:" echo "${extra_imports}" diff --git a/scripts/mock.gen.sh b/scripts/mock.gen.sh deleted file mode 100755 index 87465d43a9..0000000000 --- a/scripts/mock.gen.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -# Root directory -CORETH_PATH=$( - cd "$(dirname "${BASH_SOURCE[0]}")" - cd .. && pwd -) - -if ! [[ "$0" =~ scripts/mock.gen.sh ]]; then - echo "must be run from repository root" - exit 255 -fi - -# https://github.com/uber-go/mock -go install -v go.uber.org/mock/mockgen@v0.4.0 - -if ! command -v go-license &>/dev/null; then - echo "go-license not found, installing..." - # https://github.com/palantir/go-license - go install -v github.com/palantir/go-license@v1.25.0 -fi - -# Load the versions -source "$CORETH_PATH"/scripts/versions.sh - -# Load the constants -source "$CORETH_PATH"/scripts/constants.sh - -# tuples of (source interface import path, comma-separated interface names, output file path) -input="scripts/mocks.mockgen.txt" -while IFS= read -r line; do - IFS='=' read -r src_import_path interface_name output_path <<<"${line}" - package_name=$(basename "$(dirname "$output_path")") - echo "Generating ${output_path}..." - mockgen -package="${package_name}" -destination="${output_path}" "${src_import_path}" "${interface_name}" - - go-license \ - --config=./header.yml \ - "${output_path}" -done <"$input" - -echo "SUCCESS" diff --git a/scripts/mocks.mockgen.txt b/scripts/mocks.mockgen.txt deleted file mode 100644 index 694343e40e..0000000000 --- a/scripts/mocks.mockgen.txt +++ /dev/null @@ -1,2 +0,0 @@ -github.com/ava-labs/coreth/precompile/precompileconfig=Predicater,Config,ChainConfig,Accepter=precompile/precompileconfig/mocks.go -github.com/ava-labs/coreth/precompile/contract=BlockContext,AccessibleState,StateDB=precompile/contract/mocks.go diff --git a/scripts/versions.sh b/scripts/versions.sh index fc1d045b95..5eed0c55ef 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -6,4 +6,4 @@ set -euo pipefail # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'0a4ba1bb'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'32f58b4f'} diff --git a/sync/handlers/leafs_request.go b/sync/handlers/leafs_request.go index c9df10ef10..1b628a8327 100644 --- a/sync/handlers/leafs_request.go +++ b/sync/handlers/leafs_request.go @@ -185,6 +185,30 @@ type responseBuilder struct { stats stats.LeafsRequestHandlerStats } +func NewResponseBuilder( + request *message.LeafsRequest, + response *message.LeafsResponse, + t *trie.Trie, + snap *snapshot.Tree, + keyLength int, + limit uint16, + stats stats.LeafsRequestHandlerStats, +) *responseBuilder { + return &responseBuilder{ + request: request, + response: response, + t: t, + snap: snap, + keyLength: keyLength, + limit: limit, + stats: stats, + } +} + +func (rb *responseBuilder) HandleRequest(ctx context.Context) error { + return rb.handleRequest(ctx) +} + func (rb *responseBuilder) handleRequest(ctx context.Context) error { // Read from snapshot if a [snapshot.Tree] was provided in initialization if rb.snap != nil { @@ -223,6 +247,8 @@ func (rb *responseBuilder) handleRequest(ctx context.Context) error { rb.stats.IncProofError() return err } + 
rb.response.More = true // set to signal a proof is needed + return nil } @@ -271,6 +297,7 @@ func (rb *responseBuilder) fillFromSnapshot(ctx context.Context) (bool, error) { rb.stats.IncSnapshotReadSuccess() return true, nil } + rb.response.More = true // set to signal a proof is needed rb.response.ProofVals, err = iterateVals(proof) if err != nil { rb.stats.IncProofError() @@ -436,7 +463,7 @@ func (rb *responseBuilder) fillFromTrie(ctx context.Context, end []byte) (bool, more := false for it.Next() { // if we're at the end, break this loop - if len(end) > 0 && bytes.Compare(it.Key, end) > 0 { + if len(rb.response.Keys) > 0 && len(end) > 0 && bytes.Compare(it.Key, end) > 0 { more = true break } @@ -487,7 +514,7 @@ func (rb *responseBuilder) readLeafsFromSnapshot(ctx context.Context) ([][]byte, defer snapIt.Release() for snapIt.Next() { // if we're at the end, break this loop - if len(rb.request.End) > 0 && bytes.Compare(snapIt.Key(), rb.request.End) > 0 { + if len(keys) > 0 && len(rb.request.End) > 0 && bytes.Compare(snapIt.Key(), rb.request.End) > 0 { break } // If we've returned enough data or run out of time, set the more flag and exit diff --git a/sync/statesync/state_syncer.go b/sync/statesync/state_syncer.go index 7031979208..d2f3159e27 100644 --- a/sync/statesync/state_syncer.go +++ b/sync/statesync/state_syncer.go @@ -132,7 +132,7 @@ func (t *stateSync) onStorageTrieFinished(root common.Hash) error { return nil } -// onMainTrieFinishes is called after the main trie finishes syncing. +// onMainTrieFinished is called after the main trie finishes syncing. func (t *stateSync) onMainTrieFinished() error { t.codeSyncer.notifyAccountTrieCompleted() diff --git a/sync/statesync/trie_sync_stats.go b/sync/statesync/trie_sync_stats.go index f12709ff8e..3bf124d55d 100644 --- a/sync/statesync/trie_sync_stats.go +++ b/sync/statesync/trie_sync_stats.go @@ -79,7 +79,7 @@ func (t *trieSyncStats) incLeafs(segment *trieSegment, count uint64, remaining u } } -// estimateSegmentsInProgressTime retrns the ETA for all trie segments +// estimateSegmentsInProgressTime returns the ETA for all trie segments // in progress to finish (uses the one with most remaining leafs to estimate). func (t *trieSyncStats) estimateSegmentsInProgressTime() time.Duration { if len(t.remainingLeafs) == 0 { diff --git a/tools.go b/tools.go new file mode 100644 index 0000000000..d7c65266dd --- /dev/null +++ b/tools.go @@ -0,0 +1,8 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package coreth + +import ( + _ "golang.org/x/tools/imports" // golang.org/x/tools to satisfy requirement for go.uber.org/mock/mockgen@v0.5 +) diff --git a/triedb/hashdb/database.go b/triedb/hashdb/database.go index 7005a44966..56beaa53ac 100644 --- a/triedb/hashdb/database.go +++ b/triedb/hashdb/database.go @@ -100,9 +100,9 @@ type cache interface { // Config contains the settings for database. 
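The hunk below renames ReferenceRoot so the atomicity guarantee is explicit at the call site; behavior is unchanged. A minimal sketch of constructing the config with the renamed field (the cache size and stats prefix are arbitrary example values):

```go
package example

import "github.com/ava-labs/coreth/triedb/hashdb"

// newHashDBConfig shows the renamed field; when set, Update still adds a
// reference from the root to the metaroot while holding the database lock.
func newHashDBConfig() *hashdb.Config {
	return &hashdb.Config{
		CleanCacheSize:                  256 * 1024 * 1024, // arbitrary example value
		StatsPrefix:                     "chain/triedb",    // illustrative prefix
		ReferenceRootAtomicallyOnUpdate: true,              // was: ReferenceRoot
	}
}
```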
type Config struct { - CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes - StatsPrefix string // Prefix for cache stats (disabled if empty) - ReferenceRoot bool // Whether to reference the root node on update + CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes + StatsPrefix string // Prefix for cache stats (disabled if empty) + ReferenceRootAtomicallyOnUpdate bool // Whether to reference the root node on update } func (c Config) BackendConstructor(diskdb ethdb.Database, config *triedb.Config) triedb.DBOverride { @@ -195,7 +195,7 @@ func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Databas resolver: resolver, cleans: cleans, dirties: make(map[common.Hash]*cachedNode), - referenceRoot: config.ReferenceRoot, + referenceRoot: config.ReferenceRootAtomicallyOnUpdate, } } @@ -645,7 +645,7 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool { // Update inserts the dirty nodes in provided nodeset into database and link the // account trie with multiple storage tries if necessary. -// If ReferenceRoot was enabled in the config, it will also add a reference from +// If ReferenceRootAtomicallyOnUpdate was enabled in the config, it will also add a reference from // the root to the metaroot while holding the db's lock. func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { // Ensure the parent state is present and signal a warning if not. diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go index 37be932816..bf1934c97b 100644 --- a/triedb/pathdb/database_test.go +++ b/triedb/pathdb/database_test.go @@ -228,7 +228,11 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode dirties = make(map[common.Hash]struct{}) ) for i := 0; i < 20; i++ { - switch rand.Intn(opLen) { + op := createAccountOp + if i > 0 { + op = rand.Intn(opLen) + } + switch op { case createAccountOp: // account creation addr := testutil.RandomAddress() @@ -315,7 +319,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin, nil) } -// lastRoot returns the latest root hash, or empty if nothing is cached. +// lastHash returns the latest root hash, or empty if nothing is cached. 
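The hashdb rename above is more than cosmetic: per the updated doc comment on Update, the root-to-metaroot reference is added while the db's lock is still held. The following toy model uses hypothetical types, not coreth's hashdb, to show why taking the reference inside the same critical section matters.

```go
package main

import (
	"fmt"
	"sync"
)

type refDB struct {
	mu   sync.Mutex
	refs map[string]int
}

func (db *refDB) update(root string, referenceRootAtomicallyOnUpdate bool) {
	db.mu.Lock()
	defer db.mu.Unlock()
	// ... insert dirty nodes for root here ...
	if referenceRootAtomicallyOnUpdate {
		// Referencing inside the same critical section closes the window in
		// which a concurrent dereference could garbage-collect the fresh root.
		db.refs[root]++
	}
}

func main() {
	db := &refDB{refs: map[string]int{}}
	db.update("0xabc", true)
	fmt.Println(db.refs["0xabc"]) // 1
}
```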
func (t *tester) lastHash() common.Hash { if len(t.roots) == 0 { return common.Hash{} diff --git a/utils/snow.go b/utils/snow.go index 77368f0750..36b9b7b7fb 100644 --- a/utils/snow.go +++ b/utils/snow.go @@ -4,30 +4,75 @@ package utils import ( + "context" + "errors" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/snow/validators/validatorstest" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +var ( + testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} + testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} + testChainID = ids.ID{'t', 'e', 's', 't', 'c', 'h', 'a', 'i', 'n'} ) func TestSnowContext() *snow.Context { - sk, err := bls.NewSecretKey() + sk, err := bls.NewSigner() if err != nil { panic(err) } - pk := bls.PublicFromSecretKey(sk) - return &snow.Context{ - NetworkID: 0, + pk := sk.PublicKey() + networkID := constants.UnitTestID + chainID := testChainID + + ctx := &snow.Context{ + NetworkID: networkID, SubnetID: ids.Empty, - ChainID: ids.Empty, - NodeID: ids.EmptyNodeID, + ChainID: chainID, + NodeID: ids.GenerateTestNodeID(), + XChainID: testXChainID, + CChainID: testCChainID, PublicKey: pk, + WarpSigner: warp.NewSigner(sk, networkID, chainID), Log: logging.NoLog{}, BCLookup: ids.NewAliaser(), Metrics: metrics.NewPrefixGatherer(), ChainDataDir: "", - ValidatorState: &validatorstest.State{}, + ValidatorState: NewTestValidatorState(), + } + + return ctx +} + +func NewTestValidatorState() *validatorstest.State { + return &validatorstest.State{ + GetCurrentHeightF: func(context.Context) (uint64, error) { + return 0, nil + }, + GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { + subnetID, ok := map[ids.ID]ids.ID{ + constants.PlatformChainID: constants.PrimaryNetworkID, + testXChainID: constants.PrimaryNetworkID, + testCChainID: constants.PrimaryNetworkID, + }[chainID] + if !ok { + return ids.Empty, errors.New("unknown chain") + } + return subnetID, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{}, nil + }, + GetCurrentValidatorSetF: func(context.Context, ids.ID) (map[ids.ID]*validators.GetCurrentValidatorOutput, uint64, error) { + return map[ids.ID]*validators.GetCurrentValidatorOutput{}, 0, nil + }, } } diff --git a/warp/aggregator/aggregator_test.go b/warp/aggregator/aggregator_test.go index a423f75b25..055d3edfa8 100644 --- a/warp/aggregator/aggregator_test.go +++ b/warp/aggregator/aggregator_test.go @@ -17,10 +17,10 @@ import ( avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" ) -func newValidator(t testing.TB, weight uint64) (*bls.SecretKey, *avalancheWarp.Validator) { - sk, err := bls.NewSecretKey() +func newValidator(t testing.TB, weight uint64) (bls.Signer, *avalancheWarp.Validator) { + sk, err := bls.NewSigner() require.NoError(t, err) - pk := bls.PublicFromSecretKey(sk) + pk := sk.PublicKey() return sk, &avalancheWarp.Validator{ PublicKey: pk, PublicKeyBytes: bls.PublicKeyToCompressedBytes(pk), @@ -43,17 +43,17 @@ func TestAggregateSignatures(t *testing.T) { vdr1sk, vdr1 := newValidator(t, vdrWeight) vdr2sk, vdr2 := newValidator(t, vdrWeight+1) vdr3sk, vdr3 := newValidator(t, 
vdrWeight-1) - sig1 := bls.Sign(vdr1sk, unsignedMsg.Bytes()) - sig2 := bls.Sign(vdr2sk, unsignedMsg.Bytes()) - sig3 := bls.Sign(vdr3sk, unsignedMsg.Bytes()) + sig1 := vdr1sk.Sign(unsignedMsg.Bytes()) + sig2 := vdr2sk.Sign(unsignedMsg.Bytes()) + sig3 := vdr3sk.Sign(unsignedMsg.Bytes()) vdrToSig := map[*avalancheWarp.Validator]*bls.Signature{ vdr1: sig1, vdr2: sig2, vdr3: sig3, } - nonVdrSk, err := bls.NewSecretKey() + nonVdrSk, err := bls.NewSigner() require.NoError(t, err) - nonVdrSig := bls.Sign(nonVdrSk, unsignedMsg.Bytes()) + nonVdrSig := nonVdrSk.Sign(unsignedMsg.Bytes()) vdrs := []*avalancheWarp.Validator{ { PublicKey: vdr1.PublicKey, @@ -230,7 +230,7 @@ func TestAggregateSignatures(t *testing.T) { expectedErr: nil, }, { - name: "early termination of signature fetching on parent context cancelation", + name: "early termination of signature fetching on parent context cancellation", contextWithCancelFunc: func() (context.Context, context.CancelFunc) { ctx, cancel := context.WithCancel(context.Background()) cancel() diff --git a/warp/aggregator/mock_signature_getter.go b/warp/aggregator/mock_signature_getter.go index 144d8a2f98..70365da0ce 100644 --- a/warp/aggregator/mock_signature_getter.go +++ b/warp/aggregator/mock_signature_getter.go @@ -1,5 +1,10 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/coreth/warp/aggregator (interfaces: SignatureGetter) +// Source: signature_getter.go +// +// Generated by this command: +// +// mockgen -package=aggregator -source=signature_getter.go -destination=mock_signature_getter.go +// // Package aggregator is a generated GoMock package. package aggregator @@ -18,6 +23,7 @@ import ( type MockSignatureGetter struct { ctrl *gomock.Controller recorder *MockSignatureGetterMockRecorder + isgomock struct{} } // MockSignatureGetterMockRecorder is the mock recorder for MockSignatureGetter. @@ -38,16 +44,55 @@ func (m *MockSignatureGetter) EXPECT() *MockSignatureGetterMockRecorder { } // GetSignature mocks base method. -func (m *MockSignatureGetter) GetSignature(arg0 context.Context, arg1 ids.NodeID, arg2 *warp.UnsignedMessage) (*bls.Signature, error) { +func (m *MockSignatureGetter) GetSignature(ctx context.Context, nodeID ids.NodeID, unsignedWarpMessage *warp.UnsignedMessage) (*bls.Signature, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSignature", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "GetSignature", ctx, nodeID, unsignedWarpMessage) ret0, _ := ret[0].(*bls.Signature) ret1, _ := ret[1].(error) return ret0, ret1 } // GetSignature indicates an expected call of GetSignature. -func (mr *MockSignatureGetterMockRecorder) GetSignature(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockSignatureGetterMockRecorder) GetSignature(ctx, nodeID, unsignedWarpMessage any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSignature", reflect.TypeOf((*MockSignatureGetter)(nil).GetSignature), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSignature", reflect.TypeOf((*MockSignatureGetter)(nil).GetSignature), ctx, nodeID, unsignedWarpMessage) +} + +// MockNetworkClient is a mock of NetworkClient interface. +type MockNetworkClient struct { + ctrl *gomock.Controller + recorder *MockNetworkClientMockRecorder + isgomock struct{} +} + +// MockNetworkClientMockRecorder is the mock recorder for MockNetworkClient. +type MockNetworkClientMockRecorder struct { + mock *MockNetworkClient +} + +// NewMockNetworkClient creates a new mock instance. 
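Since the mock above is now generated in source mode with named parameters, expectations read naturally in tests. A hedged usage sketch follows; the test name and error value are hypothetical, while the constructor and recorder come from the generated code shown here.

```go
package aggregator

import (
	"context"
	"errors"
	"testing"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"
)

func TestGetSignatureViaMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	getter := NewMockSignatureGetter(ctrl)

	nodeID := ids.GenerateTestNodeID()
	// Expect exactly one GetSignature call for this node, any context and
	// message, and simulate an unreachable peer.
	getter.EXPECT().
		GetSignature(gomock.Any(), nodeID, gomock.Any()).
		Return(nil, errors.New("node unreachable"))

	_, err := getter.GetSignature(context.Background(), nodeID, nil)
	require.Error(t, err)
}
```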
+func NewMockNetworkClient(ctrl *gomock.Controller) *MockNetworkClient { + mock := &MockNetworkClient{ctrl: ctrl} + mock.recorder = &MockNetworkClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockNetworkClient) EXPECT() *MockNetworkClientMockRecorder { + return m.recorder +} + +// SendAppRequest mocks base method. +func (m *MockNetworkClient) SendAppRequest(ctx context.Context, nodeID ids.NodeID, message []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendAppRequest", ctx, nodeID, message) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SendAppRequest indicates an expected call of SendAppRequest. +func (mr *MockNetworkClientMockRecorder) SendAppRequest(ctx, nodeID, message any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppRequest", reflect.TypeOf((*MockNetworkClient)(nil).SendAppRequest), ctx, nodeID, message) } diff --git a/warp/aggregator/mocks_generate_test.go b/warp/aggregator/mocks_generate_test.go new file mode 100644 index 0000000000..46388a6c7c --- /dev/null +++ b/warp/aggregator/mocks_generate_test.go @@ -0,0 +1,6 @@ +// Copyright (C) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package aggregator + +//go:generate go run go.uber.org/mock/mockgen -package=$GOPACKAGE -source=signature_getter.go -destination=mock_signature_getter.go diff --git a/warp/backend.go b/warp/backend.go index 3d83440bc0..77167f3fd8 100644 --- a/warp/backend.go +++ b/warp/backend.go @@ -11,20 +11,19 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/utils/crypto/bls" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" - "github.com/ava-labs/libevm/ethdb" "github.com/ava-labs/libevm/log" ) var ( _ Backend = &backend{} errParsingOffChainMessage = errors.New("failed to parse off-chain message") -) -const batchSize = ethdb.IdealBatchSize + messageCacheSize = 500 +) type BlockClient interface { GetAcceptedBlock(ctx context.Context, blockID ids.ID) (snowman.Block, error) @@ -36,19 +35,18 @@ type Backend interface { // AddMessage signs [unsignedMessage] and adds it to the warp backend database AddMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error - // GetMessageSignature returns the signature of the requested message hash. - GetMessageSignature(messageID ids.ID) ([bls.SignatureLen]byte, error) + // GetMessageSignature validates the message and returns the signature of the requested message. + GetMessageSignature(ctx context.Context, message *avalancheWarp.UnsignedMessage) ([]byte, error) - // GetBlockSignature returns the signature of the requested message hash. - GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) + // GetBlockSignature returns the signature of a hash payload containing blockID if it's the ID of an accepted block. 
+ GetBlockSignature(ctx context.Context, blockID ids.ID) ([]byte, error) // GetMessage retrieves the [unsignedMessage] from the warp backend database if available - // TODO: After E-Upgrade, the backend no longer needs to store the mapping from messageHash + // TODO: After Etna, the backend no longer needs to store the mapping from messageHash // to unsignedMessage (and this method can be removed). GetMessage(messageHash ids.ID) (*avalancheWarp.UnsignedMessage, error) - // Clear clears the entire db - Clear() error + acp118.Verifier } // backend implements Backend, keeps track of warp messages, and generates message signatures. @@ -58,10 +56,10 @@ type backend struct { db database.Database warpSigner avalancheWarp.Signer blockClient BlockClient - messageSignatureCache *cache.LRU[ids.ID, [bls.SignatureLen]byte] - blockSignatureCache *cache.LRU[ids.ID, [bls.SignatureLen]byte] + signatureCache cache.Cacher[ids.ID, []byte] messageCache *cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage] offchainAddressedCallMsgs map[ids.ID]*avalancheWarp.UnsignedMessage + stats *verifierStats } // NewBackend creates a new Backend, and initializes the signature cache and message tracking database. @@ -71,7 +69,7 @@ func NewBackend( warpSigner avalancheWarp.Signer, blockClient BlockClient, db database.Database, - cacheSize int, + signatureCache cache.Cacher[ids.ID, []byte], offchainMessages [][]byte, ) (Backend, error) { b := &backend{ @@ -80,9 +78,9 @@ func NewBackend( db: db, warpSigner: warpSigner, blockClient: blockClient, - messageSignatureCache: &cache.LRU[ids.ID, [bls.SignatureLen]byte]{Size: cacheSize}, - blockSignatureCache: &cache.LRU[ids.ID, [bls.SignatureLen]byte]{Size: cacheSize}, - messageCache: &cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage]{Size: cacheSize}, + signatureCache: signatureCache, + messageCache: &cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage]{Size: messageCacheSize}, + stats: newVerifierStats(), offchainAddressedCallMsgs: make(map[ids.ID]*avalancheWarp.UnsignedMessage), } return b, b.initOffChainMessages(offchainMessages) @@ -113,15 +111,9 @@ func (b *backend) initOffChainMessages(offchainMessages [][]byte) error { return nil } -func (b *backend) Clear() error { - b.messageSignatureCache.Flush() - b.blockSignatureCache.Flush() - b.messageCache.Flush() - return database.Clear(b.db, batchSize) -} - func (b *backend) AddMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error { messageID := unsignedMessage.ID() + log.Debug("Adding warp message to backend", "messageID", messageID) // In the case when a node restarts, and possibly changes its bls key, the cache gets emptied but the database does not. // So to avoid having incorrect signatures saved in the database after a bls key change, we save the full message in the database. 
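Construction under the new signature looks as follows. This is a sketch assuming the parameter types shown in this hunk; the point of the refactor is that the caller now owns the signature cache and injects it, rather than passing a size for NewBackend to build two internal caches, so the same cache can be shared with the acp118 handler.

```go
package warpsetup // hypothetical helper package, for illustration only

import (
	"github.com/ava-labs/avalanchego/cache"
	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/ids"
	avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp"
	"github.com/ava-labs/coreth/warp"
)

// newWarpBackend wires the refactored constructor: one injected cache now
// backs both message and block signatures, keyed by unsigned message ID.
func newWarpBackend(
	networkID uint32,
	chainID ids.ID,
	signer avalancheWarp.Signer,
	blocks warp.BlockClient,
	db database.Database,
) (warp.Backend, cache.Cacher[ids.ID, []byte], error) {
	sigCache := &cache.LRU[ids.ID, []byte]{Size: 500}
	b, err := warp.NewBackend(networkID, chainID, signer, blocks, db, sigCache, nil)
	return b, sigCache, err
}
```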
@@ -130,68 +122,52 @@ func (b *backend) AddMessage(unsignedMessage *avalancheWarp.UnsignedMessage) err return fmt.Errorf("failed to put warp signature in db: %w", err) } - var signature [bls.SignatureLen]byte - sig, err := b.warpSigner.Sign(unsignedMessage) - if err != nil { + if _, err := b.signMessage(unsignedMessage); err != nil { return fmt.Errorf("failed to sign warp message: %w", err) } - - copy(signature[:], sig) - b.messageSignatureCache.Put(messageID, signature) - log.Debug("Adding warp message to backend", "messageID", messageID) return nil } -func (b *backend) GetMessageSignature(messageID ids.ID) ([bls.SignatureLen]byte, error) { +func (b *backend) GetMessageSignature(ctx context.Context, unsignedMessage *avalancheWarp.UnsignedMessage) ([]byte, error) { + messageID := unsignedMessage.ID() + log.Debug("Getting warp message from backend", "messageID", messageID) - if sig, ok := b.messageSignatureCache.Get(messageID); ok { + if sig, ok := b.signatureCache.Get(messageID); ok { return sig, nil } - unsignedMessage, err := b.GetMessage(messageID) - if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to get warp message %s from db: %w", messageID.String(), err) + if err := b.Verify(ctx, unsignedMessage, nil); err != nil { + return nil, fmt.Errorf("failed to validate warp message: %w", err) } - - var signature [bls.SignatureLen]byte - sig, err := b.warpSigner.Sign(unsignedMessage) - if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to sign warp message: %w", err) - } - - copy(signature[:], sig) - b.messageSignatureCache.Put(messageID, signature) - return signature, nil + return b.signMessage(unsignedMessage) } -func (b *backend) GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) { +func (b *backend) GetBlockSignature(ctx context.Context, blockID ids.ID) ([]byte, error) { log.Debug("Getting block from backend", "blockID", blockID) - if sig, ok := b.blockSignatureCache.Get(blockID); ok { - return sig, nil - } - _, err := b.blockClient.GetAcceptedBlock(context.TODO(), blockID) - if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to get block %s: %w", blockID, err) - } - - var signature [bls.SignatureLen]byte blockHashPayload, err := payload.NewHash(blockID) if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to create new block hash payload: %w", err) + return nil, fmt.Errorf("failed to create new block hash payload: %w", err) } + unsignedMessage, err := avalancheWarp.NewUnsignedMessage(b.networkID, b.sourceChainID, blockHashPayload.Bytes()) if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to create new unsigned warp message: %w", err) + return nil, fmt.Errorf("failed to create new unsigned warp message: %w", err) } - sig, err := b.warpSigner.Sign(unsignedMessage) - if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to sign warp message: %w", err) + + if sig, ok := b.signatureCache.Get(unsignedMessage.ID()); ok { + return sig, nil + } + + if err := b.verifyBlockMessage(ctx, blockHashPayload); err != nil { + return nil, fmt.Errorf("failed to validate block message: %w", err) } - copy(signature[:], sig) - b.blockSignatureCache.Put(blockID, signature) - return signature, nil + sig, err := b.signMessage(unsignedMessage) + if err != nil { + return nil, fmt.Errorf("failed to sign block message: %w", err) + } + return sig, nil } func (b *backend) GetMessage(messageID ids.ID) (*avalancheWarp.UnsignedMessage, error) { @@ -204,7 +180,7 @@ func (b *backend) 
GetMessage(messageID ids.ID) (*avalancheWarp.UnsignedMessage, unsignedMessageBytes, err := b.db.Get(messageID[:]) if err != nil { - return nil, fmt.Errorf("failed to get warp message %s from db: %w", messageID.String(), err) + return nil, err } unsignedMessage, err := avalancheWarp.ParseUnsignedMessage(unsignedMessageBytes) @@ -215,3 +191,13 @@ func (b *backend) GetMessage(messageID ids.ID) (*avalancheWarp.UnsignedMessage, return unsignedMessage, nil } + +func (b *backend) signMessage(unsignedMessage *avalancheWarp.UnsignedMessage) ([]byte, error) { + sig, err := b.warpSigner.Sign(unsignedMessage) + if err != nil { + return nil, fmt.Errorf("failed to sign warp message: %w", err) + } + + b.signatureCache.Put(unsignedMessage.ID(), sig) + return sig, nil +} diff --git a/warp/backend_test.go b/warp/backend_test.go index 7b2208a2d4..cd7aa1ea76 100644 --- a/warp/backend_test.go +++ b/warp/backend_test.go @@ -4,13 +4,15 @@ package warp import ( + "context" "testing" + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/hashing" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/coreth/warp/warptest" @@ -36,57 +38,14 @@ func init() { } } -func TestClearDB(t *testing.T) { - db := memdb.New() - - sk, err := bls.NewSecretKey() - require.NoError(t, err) - warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) - backendIntf, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 500, nil) - require.NoError(t, err) - backend, ok := backendIntf.(*backend) - require.True(t, ok) - - // use multiple messages to test that all messages get cleared - payloads := [][]byte{[]byte("test1"), []byte("test2"), []byte("test3"), []byte("test4"), []byte("test5")} - messageIDs := []ids.ID{} - - // add all messages - for _, payload := range payloads { - unsignedMsg, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, payload) - require.NoError(t, err) - messageID := hashing.ComputeHash256Array(unsignedMsg.Bytes()) - messageIDs = append(messageIDs, messageID) - err = backend.AddMessage(unsignedMsg) - require.NoError(t, err) - // ensure that the message was added - _, err = backend.GetMessageSignature(messageID) - require.NoError(t, err) - } - - err = backend.Clear() - require.NoError(t, err) - require.Zero(t, backend.messageCache.Len()) - require.Zero(t, backend.messageSignatureCache.Len()) - require.Zero(t, backend.blockSignatureCache.Len()) - it := db.NewIterator() - defer it.Release() - require.False(t, it.Next()) - - // ensure all messages have been deleted - for _, messageID := range messageIDs { - _, err := backend.GetMessageSignature(messageID) - require.ErrorContains(t, err, "failed to get warp message") - } -} - func TestAddAndGetValidMessage(t *testing.T) { db := memdb.New() - sk, err := bls.NewSecretKey() + sk, err := bls.NewSigner() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 500, nil) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, messageSignatureCache, nil) require.NoError(t, err) // Add testUnsignedMessage to 
the warp backend @@ -94,8 +53,7 @@ func TestAddAndGetValidMessage(t *testing.T) { require.NoError(t, err) // Verify that a signature is returned successfully, and compare to expected signature. - messageID := testUnsignedMessage.ID() - signature, err := backend.GetMessageSignature(messageID) + signature, err := backend.GetMessageSignature(context.TODO(), testUnsignedMessage) require.NoError(t, err) expectedSig, err := warpSigner.Sign(testUnsignedMessage) @@ -106,15 +64,15 @@ func TestAddAndGetValidMessage(t *testing.T) { func TestAddAndGetUnknownMessage(t *testing.T) { db := memdb.New() - sk, err := bls.NewSecretKey() + sk, err := bls.NewSigner() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 500, nil) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, messageSignatureCache, nil) require.NoError(t, err) // Try getting a signature for a message that was not added. - messageID := testUnsignedMessage.ID() - _, err = backend.GetMessageSignature(messageID) + _, err = backend.GetMessageSignature(context.TODO(), testUnsignedMessage) require.Error(t, err) } @@ -125,10 +83,11 @@ func TestGetBlockSignature(t *testing.T) { blockClient := warptest.MakeBlockClient(blkID) db := memdb.New() - sk, err := bls.NewSecretKey() + sk, err := bls.NewSigner() require.NoError(err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) - backend, err := NewBackend(networkID, sourceChainID, warpSigner, blockClient, db, 500, nil) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} + backend, err := NewBackend(networkID, sourceChainID, warpSigner, blockClient, db, messageSignatureCache, nil) require.NoError(err) blockHashPayload, err := payload.NewHash(blkID) @@ -138,23 +97,24 @@ func TestGetBlockSignature(t *testing.T) { expectedSig, err := warpSigner.Sign(unsignedMessage) require.NoError(err) - signature, err := backend.GetBlockSignature(blkID) + signature, err := backend.GetBlockSignature(context.TODO(), blkID) require.NoError(err) require.Equal(expectedSig, signature[:]) - _, err = backend.GetBlockSignature(ids.GenerateTestID()) + _, err = backend.GetBlockSignature(context.TODO(), ids.GenerateTestID()) require.Error(err) } func TestZeroSizedCache(t *testing.T) { db := memdb.New() - sk, err := bls.NewSecretKey() + sk, err := bls.NewSigner() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) // Verify zero sized cache works normally, because the lru cache will be initialized to size 1 for any size parameter <= 0. - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 0, nil) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 0} + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, messageSignatureCache, nil) require.NoError(t, err) // Add testUnsignedMessage to the warp backend @@ -162,8 +122,7 @@ func TestZeroSizedCache(t *testing.T) { require.NoError(t, err) // Verify that a signature is returned successfully, and compare to expected signature. 
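The mechanical change repeated throughout these tests is the call shape of GetMessageSignature. A minimal contrast sketch, using an illustrative wrapper and context.TODO() as the tests do:

```go
package warpexample // illustrative only

import (
	"context"

	avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp"
	"github.com/ava-labs/coreth/warp"
)

// signMessage contrasts the old and new call shapes for GetMessageSignature.
func signMessage(b warp.Backend, msg *avalancheWarp.UnsignedMessage) ([]byte, error) {
	// old: sig, err := b.GetMessageSignature(msg.ID())
	// new: the caller supplies the parsed unsigned message plus a context,
	// so the backend can run its verification before signing.
	return b.GetMessageSignature(context.TODO(), msg)
}
```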
- messageID := testUnsignedMessage.ID() - signature, err := backend.GetMessageSignature(messageID) + signature, err := backend.GetMessageSignature(context.TODO(), testUnsignedMessage) require.NoError(t, err) expectedSig, err := warpSigner.Sign(testUnsignedMessage) @@ -177,7 +136,7 @@ func TestOffChainMessages(t *testing.T) { check func(require *require.Assertions, b Backend) err error } - sk, err := bls.NewSecretKey() + sk, err := bls.NewSigner() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) @@ -192,13 +151,19 @@ func TestOffChainMessages(t *testing.T) { require.NoError(err) require.Equal(testUnsignedMessage.Bytes(), msg.Bytes()) - signature, err := b.GetMessageSignature(testUnsignedMessage.ID()) + signature, err := b.GetMessageSignature(context.TODO(), testUnsignedMessage) require.NoError(err) expectedSignatureBytes, err := warpSigner.Sign(msg) require.NoError(err) require.Equal(expectedSignatureBytes, signature[:]) }, }, + "unknown message": { + check: func(require *require.Assertions, b Backend) { + _, err := b.GetMessage(testUnsignedMessage.ID()) + require.ErrorIs(err, database.ErrNotFound) + }, + }, "invalid message": { offchainMessages: [][]byte{{1, 2, 3}}, err: errParsingOffChainMessage, @@ -208,7 +173,8 @@ func TestOffChainMessages(t *testing.T) { require := require.New(t) db := memdb.New() - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 0, test.offchainMessages) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 0} + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, messageSignatureCache, test.offchainMessages) require.ErrorIs(err, test.err) if test.check != nil { test.check(require, backend) diff --git a/warp/handlers/signature_request.go b/warp/handlers/signature_request.go index 08fc6399aa..9a5dbfd77f 100644 --- a/warp/handlers/signature_request.go +++ b/warp/handlers/signature_request.go @@ -16,7 +16,7 @@ import ( ) // SignatureRequestHandler serves warp signature requests. It is a peer.RequestHandler for message.MessageSignatureRequest. -// TODO: After E-Upgrade, this handler can be removed and SignatureRequestHandlerP2P is sufficient. +// TODO: After Etna, this handler can be removed and SignatureRequestHandlerP2P is sufficient. 
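The new "unknown message" test case above relies on GetMessage now surfacing the raw database error instead of a wrapped string. A sketch of the resulting caller-side contract, with a hypothetical helper:

```go
package warpexample // illustrative only

import (
	"errors"

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/ids"
	avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp"
	"github.com/ava-labs/coreth/warp"
)

// lookupMessage distinguishes "not stored" from real failures: callers can
// now match database.ErrNotFound directly with errors.Is.
func lookupMessage(b warp.Backend, id ids.ID) (*avalancheWarp.UnsignedMessage, bool, error) {
	msg, err := b.GetMessage(id)
	if errors.Is(err, database.ErrNotFound) {
		return nil, false, nil // unknown message, not an internal failure
	}
	if err != nil {
		return nil, false, err
	}
	return msg, true, nil
}
```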
type SignatureRequestHandler struct { backend warp.Backend codec codec.Manager @@ -45,13 +45,20 @@ func (s *SignatureRequestHandler) OnMessageSignatureRequest(ctx context.Context, s.stats.UpdateMessageSignatureRequestTime(time.Since(startTime)) }() - signature, err := s.backend.GetMessageSignature(signatureRequest.MessageID) + var signature [bls.SignatureLen]byte + unsignedMessage, err := s.backend.GetMessage(signatureRequest.MessageID) if err != nil { - log.Debug("Unknown warp signature requested", "messageID", signatureRequest.MessageID) + log.Debug("Unknown warp message requested", "messageID", signatureRequest.MessageID) s.stats.IncMessageSignatureMiss() - signature = [bls.SignatureLen]byte{} } else { - s.stats.IncMessageSignatureHit() + sig, err := s.backend.GetMessageSignature(ctx, unsignedMessage) + if err != nil { + log.Debug("Unknown warp signature requested", "messageID", signatureRequest.MessageID) + s.stats.IncMessageSignatureMiss() + } else { + s.stats.IncMessageSignatureHit() + copy(signature[:], sig) + } } response := message.SignatureResponse{Signature: signature} @@ -73,13 +80,14 @@ func (s *SignatureRequestHandler) OnBlockSignatureRequest(ctx context.Context, n s.stats.UpdateBlockSignatureRequestTime(time.Since(startTime)) }() - signature, err := s.backend.GetBlockSignature(request.BlockID) + var signature [bls.SignatureLen]byte + sig, err := s.backend.GetBlockSignature(ctx, request.BlockID) if err != nil { log.Debug("Unknown warp signature requested", "blockID", request.BlockID) s.stats.IncBlockSignatureMiss() - signature = [bls.SignatureLen]byte{} } else { s.stats.IncBlockSignatureHit() + copy(signature[:], sig) } response := message.SignatureResponse{Signature: signature} diff --git a/warp/handlers/signature_request_p2p.go b/warp/handlers/signature_request_p2p.go deleted file mode 100644 index 47fe2d0908..0000000000 --- a/warp/handlers/signature_request_p2p.go +++ /dev/null @@ -1,151 +0,0 @@ -// (c) 2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package handlers - -import ( - "context" - "fmt" - "time" - - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/p2p" - "github.com/ava-labs/avalanchego/proto/pb/sdk" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/crypto/bls" - avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" - "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" - "github.com/ava-labs/coreth/warp" - "google.golang.org/protobuf/proto" -) - -var _ p2p.Handler = (*SignatureRequestHandlerP2P)(nil) - -const ( - ErrFailedToParse = iota - ErrFailedToGetSig - ErrFailedToMarshal -) - -// SignatureRequestHandlerP2P serves warp signature requests using the p2p -// framework from avalanchego. It is a peer.RequestHandler for -// message.MessageSignatureRequest. -type SignatureRequestHandlerP2P struct { - backend warp.Backend - codec codec.Manager - stats *handlerStats -} - -func NewSignatureRequestHandlerP2P(backend warp.Backend, codec codec.Manager) *SignatureRequestHandlerP2P { - return &SignatureRequestHandlerP2P{ - backend: backend, - codec: codec, - stats: newStats(), - } -} - -func (s *SignatureRequestHandlerP2P) AppRequest( - ctx context.Context, - nodeID ids.NodeID, - deadline time.Time, - requestBytes []byte, -) ([]byte, *common.AppError) { - // Per ACP-118, the requestBytes are the serialized form of - // sdk.SignatureRequest. 
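For reference, the ACP-118 framing that this now-deleted handler implemented, and that the avalanchego acp118 package takes over, is a plain protobuf round-trip. A sketch using the sdk types from the code below:

```go
package warpexample // illustrative only

import (
	"github.com/ava-labs/avalanchego/proto/pb/sdk"
	avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp"
	"google.golang.org/protobuf/proto"
)

// encodeSignatureRequest builds the request bytes: per ACP-118, they are the
// serialized form of sdk.SignatureRequest.
func encodeSignatureRequest(msg *avalancheWarp.UnsignedMessage) ([]byte, error) {
	return proto.Marshal(&sdk.SignatureRequest{Message: msg.Bytes()})
}

// decodeSignatureResponse unpacks the response bytes, the serialized form of
// sdk.SignatureResponse, and returns the raw signature.
func decodeSignatureResponse(respBytes []byte) ([]byte, error) {
	var resp sdk.SignatureResponse
	if err := proto.Unmarshal(respBytes, &resp); err != nil {
		return nil, err
	}
	return resp.Signature, nil
}
```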
- req := new(sdk.SignatureRequest) - if err := proto.Unmarshal(requestBytes, req); err != nil { - return nil, &common.AppError{ - Code: ErrFailedToParse, - Message: "failed to unmarshal request: " + err.Error(), - } - } - - unsignedMessage, err := avalancheWarp.ParseUnsignedMessage(req.Message) - if err != nil { - return nil, &common.AppError{ - Code: ErrFailedToParse, - Message: "failed to parse unsigned message: " + err.Error(), - } - } - parsed, err := payload.Parse(unsignedMessage.Payload) - if err != nil { - return nil, &common.AppError{ - Code: ErrFailedToParse, - Message: "failed to parse payload: " + err.Error(), - } - } - - var sig [bls.SignatureLen]byte - switch p := parsed.(type) { - case *payload.AddressedCall: - // Note we pass the unsigned message ID to GetMessageSignature since - // that is what the backend expects. - // However, we verify the types and format of the payload to ensure - // the message conforms to the ACP-118 spec. - sig, err = s.GetMessageSignature(unsignedMessage.ID()) - if err != nil { - s.stats.IncMessageSignatureMiss() - } else { - s.stats.IncMessageSignatureHit() - } - case *payload.Hash: - sig, err = s.GetBlockSignature(p.Hash) - if err != nil { - s.stats.IncBlockSignatureMiss() - } else { - s.stats.IncBlockSignatureHit() - } - default: - return nil, &common.AppError{ - Code: ErrFailedToParse, - Message: fmt.Sprintf("unknown payload type: %T", p), - } - } - if err != nil { - return nil, &common.AppError{ - Code: ErrFailedToGetSig, - Message: "failed to get signature: " + err.Error(), - } - } - - // Per ACP-118, the responseBytes are the serialized form of - // sdk.SignatureResponse. - resp := &sdk.SignatureResponse{Signature: sig[:]} - respBytes, err := proto.Marshal(resp) - if err != nil { - return nil, &common.AppError{ - Code: ErrFailedToMarshal, - Message: "failed to marshal response: " + err.Error(), - } - } - return respBytes, nil -} - -func (s *SignatureRequestHandlerP2P) GetMessageSignature(messageID ids.ID) ([bls.SignatureLen]byte, error) { - startTime := time.Now() - s.stats.IncMessageSignatureRequest() - - // Always report signature request time - defer func() { - s.stats.UpdateMessageSignatureRequestTime(time.Since(startTime)) - }() - - return s.backend.GetMessageSignature(messageID) -} - -func (s *SignatureRequestHandlerP2P) GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) { - startTime := time.Now() - s.stats.IncBlockSignatureRequest() - - // Always report signature request time - defer func() { - s.stats.UpdateBlockSignatureRequestTime(time.Since(startTime)) - }() - - return s.backend.GetBlockSignature(blockID) -} - -func (s *SignatureRequestHandlerP2P) AppGossip( - ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { -} diff --git a/warp/handlers/signature_request_p2p_test.go b/warp/handlers/signature_request_p2p_test.go deleted file mode 100644 index 36d6507771..0000000000 --- a/warp/handlers/signature_request_p2p_test.go +++ /dev/null @@ -1,232 +0,0 @@ -// (c) 2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
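The deleted tests below still use the pre-Signer bls API; the migration applied throughout the rest of this diff follows one pattern. A sketch, with return types as used elsewhere in the diff:

```go
package warpexample // illustrative only

import (
	"github.com/ava-labs/avalanchego/utils/crypto/bls"
)

// newSignerSketch captures the bls API migration: key generation returns a
// bls.Signer, and signing and public-key access become methods rather than
// package-level functions taking a secret key.
func newSignerSketch(payload []byte) (*bls.PublicKey, *bls.Signature, error) {
	signer, err := bls.NewSigner() // was: bls.NewSecretKey()
	if err != nil {
		return nil, nil, err
	}
	pk := signer.PublicKey()    // was: bls.PublicFromSecretKey(sk)
	sig := signer.Sign(payload) // was: bls.Sign(sk, payload)
	return pk, sig, nil
}
```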
- -package handlers - -import ( - "context" - "testing" - "time" - - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/proto/pb/sdk" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/crypto/bls" - avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" - "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" - "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ava-labs/coreth/utils" - "github.com/ava-labs/coreth/warp" - "github.com/ava-labs/coreth/warp/warptest" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" -) - -func TestMessageSignatureHandlerP2P(t *testing.T) { - database := memdb.New() - snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSecretKey() - require.NoError(t, err) - warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) - - addressedPayload, err := payload.NewAddressedCall([]byte{1, 2, 3}, []byte{1, 2, 3}) - require.NoError(t, err) - offchainMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, addressedPayload.Bytes()) - require.NoError(t, err) - - backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, database, 100, [][]byte{offchainMessage.Bytes()}) - require.NoError(t, err) - - offchainPayload, err := payload.NewAddressedCall([]byte{0, 0, 0}, []byte("test")) - require.NoError(t, err) - msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, offchainPayload.Bytes()) - require.NoError(t, err) - messageID := msg.ID() - require.NoError(t, backend.AddMessage(msg)) - signature, err := backend.GetMessageSignature(messageID) - require.NoError(t, err) - offchainSignature, err := backend.GetMessageSignature(offchainMessage.ID()) - require.NoError(t, err) - - unknownPayload, err := payload.NewAddressedCall([]byte{0, 0, 0}, []byte("unknown message")) - require.NoError(t, err) - unknownMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, unknownPayload.Bytes()) - require.NoError(t, err) - - tests := map[string]struct { - setup func() (request sdk.SignatureRequest, expectedResponse []byte) - verifyStats func(t *testing.T, stats *handlerStats) - err error - }{ - "known message": { - setup: func() (request sdk.SignatureRequest, expectedResponse []byte) { - return sdk.SignatureRequest{Message: msg.Bytes()}, signature[:] - }, - verifyStats: func(t *testing.T, stats *handlerStats) { - require.EqualValues(t, 1, stats.messageSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 1, stats.messageSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) - }, - }, - "offchain message": { - setup: func() (request sdk.SignatureRequest, expectedResponse []byte) { - return sdk.SignatureRequest{Message: offchainMessage.Bytes()}, offchainSignature[:] - }, - verifyStats: func(t *testing.T, stats *handlerStats) { - require.EqualValues(t, 1, stats.messageSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 1, stats.messageSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) - require.EqualValues(t, 0, 
stats.blockSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) - }, - }, - "unknown message": { - setup: func() (request sdk.SignatureRequest, expectedResponse []byte) { - return sdk.SignatureRequest{Message: unknownMessage.Bytes()}, nil - }, - verifyStats: func(t *testing.T, stats *handlerStats) { - require.EqualValues(t, 1, stats.messageSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureHit.Snapshot().Count()) - require.EqualValues(t, 1, stats.messageSignatureMiss.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) - }, - err: &common.AppError{Code: ErrFailedToGetSig}, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - handler := NewSignatureRequestHandlerP2P(backend, message.Codec) - handler.stats.Clear() - - request, expectedResponse := test.setup() - requestBytes, err := proto.Marshal(&request) - require.NoError(t, err) - responseBytes, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, requestBytes) - if test.err != nil { - require.ErrorIs(t, appErr, test.err) - } else { - require.Nil(t, appErr) - } - - test.verifyStats(t, handler.stats) - - // If the expected response is empty, assert that the handler returns an empty response and return early. - if len(expectedResponse) == 0 { - require.Len(t, responseBytes, 0, "expected response to be empty") - return - } - var response sdk.SignatureResponse - err = proto.Unmarshal(responseBytes, &response) - require.NoError(t, err, "error unmarshalling SignatureResponse") - - require.Equal(t, expectedResponse, response.Signature) - }) - } -} - -func TestBlockSignatureHandlerP2P(t *testing.T) { - database := memdb.New() - snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSecretKey() - require.NoError(t, err) - - warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) - blkID := ids.GenerateTestID() - blockClient := warptest.MakeBlockClient(blkID) - backend, err := warp.NewBackend( - snowCtx.NetworkID, - snowCtx.ChainID, - warpSigner, - blockClient, - database, - 100, - nil, - ) - require.NoError(t, err) - - signature, err := backend.GetBlockSignature(blkID) - require.NoError(t, err) - unknownBlockID := ids.GenerateTestID() - - toMessageBytes := func(id ids.ID) []byte { - idPayload, err := payload.NewHash(id) - require.NoError(t, err) - - msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, idPayload.Bytes()) - require.NoError(t, err) - - return msg.Bytes() - } - - tests := map[string]struct { - setup func() (request sdk.SignatureRequest, expectedResponse []byte) - verifyStats func(t *testing.T, stats *handlerStats) - err error - }{ - "known block": { - setup: func() (request sdk.SignatureRequest, expectedResponse []byte) { - return sdk.SignatureRequest{Message: toMessageBytes(blkID)}, signature[:] - }, - verifyStats: func(t *testing.T, stats *handlerStats) { - require.EqualValues(t, 0, stats.messageSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) - require.EqualValues(t, 1, stats.blockSignatureRequest.Snapshot().Count()) - 
require.EqualValues(t, 1, stats.blockSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) - }, - }, - "unknown block": { - setup: func() (request sdk.SignatureRequest, expectedResponse []byte) { - return sdk.SignatureRequest{Message: toMessageBytes(unknownBlockID)}, nil - }, - verifyStats: func(t *testing.T, stats *handlerStats) { - require.EqualValues(t, 0, stats.messageSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) - require.EqualValues(t, 1, stats.blockSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureHit.Snapshot().Count()) - require.EqualValues(t, 1, stats.blockSignatureMiss.Snapshot().Count()) - }, - err: &common.AppError{Code: ErrFailedToGetSig}, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - handler := NewSignatureRequestHandlerP2P(backend, message.Codec) - handler.stats.Clear() - - request, expectedResponse := test.setup() - requestBytes, err := proto.Marshal(&request) - require.NoError(t, err) - responseBytes, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, requestBytes) - if test.err != nil { - require.ErrorIs(t, appErr, test.err) - } else { - require.Nil(t, appErr) - } - - test.verifyStats(t, handler.stats) - - // If the expected response is empty, assert that the handler returns an empty response and return early. - if len(expectedResponse) == 0 { - require.Len(t, responseBytes, 0, "expected response to be empty") - return - } - var response sdk.SignatureResponse - err = proto.Unmarshal(responseBytes, &response) - require.NoError(t, err, "error unmarshalling SignatureResponse") - - require.Equal(t, expectedResponse, response.Signature) - }) - } -} diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index b50a1b519b..77a4af087e 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -7,6 +7,7 @@ import ( "context" "testing" + "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" @@ -22,7 +23,7 @@ import ( func TestMessageSignatureHandler(t *testing.T) { database := memdb.New() snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSecretKey() + blsSecretKey, err := bls.NewSigner() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) @@ -31,16 +32,17 @@ func TestMessageSignatureHandler(t *testing.T) { offchainMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, addressedPayload.Bytes()) require.NoError(t, err) - backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, database, 100, [][]byte{offchainMessage.Bytes()}) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 100} + backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, database, messageSignatureCache, [][]byte{offchainMessage.Bytes()}) require.NoError(t, err) msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, []byte("test")) require.NoError(t, err) messageID := msg.ID() require.NoError(t, backend.AddMessage(msg)) - signature, err := backend.GetMessageSignature(messageID) + 
signature, err := backend.GetMessageSignature(context.TODO(), msg) require.NoError(t, err) - offchainSignature, err := backend.GetMessageSignature(offchainMessage.ID()) + offchainSignature, err := backend.GetMessageSignature(context.TODO(), offchainMessage) require.NoError(t, err) unknownMessageID := ids.GenerateTestID() @@ -101,7 +103,6 @@ func TestMessageSignatureHandler(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { handler := NewSignatureRequestHandler(backend, message.Codec) - handler.stats.Clear() request, expectedResponse := test.setup() responseBytes, err := handler.OnMessageSignatureRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) @@ -126,24 +127,25 @@ func TestMessageSignatureHandler(t *testing.T) { func TestBlockSignatureHandler(t *testing.T) { database := memdb.New() snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSecretKey() + blsSecretKey, err := bls.NewSigner() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) blkID := ids.GenerateTestID() blockClient := warptest.MakeBlockClient(blkID) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 100} backend, err := warp.NewBackend( snowCtx.NetworkID, snowCtx.ChainID, warpSigner, blockClient, database, - 100, + messageSignatureCache, nil, ) require.NoError(t, err) - signature, err := backend.GetBlockSignature(blkID) + signature, err := backend.GetBlockSignature(context.TODO(), blkID) require.NoError(t, err) unknownMessageID := ids.GenerateTestID() @@ -188,7 +190,6 @@ func TestBlockSignatureHandler(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { handler := NewSignatureRequestHandler(backend, message.Codec) - handler.stats.Clear() request, expectedResponse := test.setup() responseBytes, err := handler.OnBlockSignatureRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) diff --git a/warp/handlers/stats.go b/warp/handlers/stats.go index 9e2ea373aa..ef3f31ae9a 100644 --- a/warp/handlers/stats.go +++ b/warp/handlers/stats.go @@ -24,14 +24,14 @@ type handlerStats struct { func newStats() *handlerStats { return &handlerStats{ - messageSignatureRequest: metrics.GetOrRegisterCounter("message_signature_request_count", nil), - messageSignatureHit: metrics.GetOrRegisterCounter("message_signature_request_hit", nil), - messageSignatureMiss: metrics.GetOrRegisterCounter("message_signature_request_miss", nil), - messageSignatureRequestDuration: metrics.GetOrRegisterGauge("message_signature_request_duration", nil), - blockSignatureRequest: metrics.GetOrRegisterCounter("block_signature_request_count", nil), - blockSignatureHit: metrics.GetOrRegisterCounter("block_signature_request_hit", nil), - blockSignatureMiss: metrics.GetOrRegisterCounter("block_signature_request_miss", nil), - blockSignatureRequestDuration: metrics.GetOrRegisterGauge("block_signature_request_duration", nil), + messageSignatureRequest: metrics.NewRegisteredCounter("message_signature_request_count", nil), + messageSignatureHit: metrics.NewRegisteredCounter("message_signature_request_hit", nil), + messageSignatureMiss: metrics.NewRegisteredCounter("message_signature_request_miss", nil), + messageSignatureRequestDuration: metrics.NewRegisteredGauge("message_signature_request_duration", nil), + blockSignatureRequest: metrics.NewRegisteredCounter("block_signature_request_count", nil), + blockSignatureHit: metrics.NewRegisteredCounter("block_signature_request_hit", nil), + blockSignatureMiss: 
metrics.NewRegisteredCounter("block_signature_request_miss", nil), + blockSignatureRequestDuration: metrics.NewRegisteredGauge("block_signature_request_duration", nil), } } @@ -47,13 +47,3 @@ func (h *handlerStats) IncBlockSignatureMiss() { h.blockSignatureMiss.Inc(1) func (h *handlerStats) UpdateBlockSignatureRequestTime(duration time.Duration) { h.blockSignatureRequestDuration.Inc(int64(duration)) } -func (h *handlerStats) Clear() { - h.messageSignatureRequest.Clear() - h.messageSignatureHit.Clear() - h.messageSignatureMiss.Clear() - h.messageSignatureRequestDuration.Update(0) - h.blockSignatureRequest.Clear() - h.blockSignatureHit.Clear() - h.blockSignatureMiss.Clear() - h.blockSignatureRequestDuration.Update(0) -} diff --git a/warp/service.go b/warp/service.go index 8e0289ddf2..7e4f11c98f 100644 --- a/warp/service.go +++ b/warp/service.go @@ -54,7 +54,11 @@ func (a *API) GetMessage(ctx context.Context, messageID ids.ID) (hexutil.Bytes, // GetMessageSignature returns the BLS signature associated with a messageID. func (a *API) GetMessageSignature(ctx context.Context, messageID ids.ID) (hexutil.Bytes, error) { - signature, err := a.backend.GetMessageSignature(messageID) + unsignedMessage, err := a.backend.GetMessage(messageID) + if err != nil { + return nil, fmt.Errorf("failed to get message %s with error %w", messageID, err) + } + signature, err := a.backend.GetMessageSignature(ctx, unsignedMessage) if err != nil { return nil, fmt.Errorf("failed to get signature for message %s with error %w", messageID, err) } @@ -63,7 +67,7 @@ func (a *API) GetMessageSignature(ctx context.Context, messageID ids.ID) (hexuti // GetBlockSignature returns the BLS signature associated with a blockID. func (a *API) GetBlockSignature(ctx context.Context, blockID ids.ID) (hexutil.Bytes, error) { - signature, err := a.backend.GetBlockSignature(blockID) + signature, err := a.backend.GetBlockSignature(ctx, blockID) if err != nil { return nil, fmt.Errorf("failed to get signature for block %s with error %w", blockID, err) } diff --git a/warp/verifier_backend.go b/warp/verifier_backend.go new file mode 100644 index 0000000000..3c8427b8ff --- /dev/null +++ b/warp/verifier_backend.go @@ -0,0 +1,70 @@ +// (c) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package warp + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/snow/engine/common" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" +) + +const ( + ParseErrCode = iota + 1 + VerifyErrCode +) + +// Verify verifies the signature of the message +// It also implements the acp118.Verifier interface +func (b *backend) Verify(ctx context.Context, unsignedMessage *avalancheWarp.UnsignedMessage, _ []byte) *common.AppError { + messageID := unsignedMessage.ID() + // Known on-chain messages should be signed + if _, err := b.GetMessage(messageID); err == nil { + return nil + } else if err != database.ErrNotFound { + return &common.AppError{ + Code: ParseErrCode, + Message: fmt.Sprintf("failed to get message %s: %s", messageID, err.Error()), + } + } + + parsed, err := payload.Parse(unsignedMessage.Payload) + if err != nil { + b.stats.IncMessageParseFail() + return &common.AppError{ + Code: ParseErrCode, + Message: "failed to parse payload: " + err.Error(), + } + } + + switch p := parsed.(type) { + case *payload.Hash: + return b.verifyBlockMessage(ctx, p) + default: + b.stats.IncMessageParseFail() + return &common.AppError{ + Code: ParseErrCode, + Message: fmt.Sprintf("unknown payload type: %T", p), + } + } +} + +// verifyBlockMessage returns nil if blockHashPayload contains the ID +// of an accepted block indicating it should be signed by the VM. +func (b *backend) verifyBlockMessage(ctx context.Context, blockHashPayload *payload.Hash) *common.AppError { + blockID := blockHashPayload.Hash + _, err := b.blockClient.GetAcceptedBlock(ctx, blockID) + if err != nil { + b.stats.IncBlockValidationFail() + return &common.AppError{ + Code: VerifyErrCode, + Message: fmt.Sprintf("failed to get block %s: %s", blockID, err.Error()), + } + } + + return nil +} diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go new file mode 100644 index 0000000000..a58726aa0f --- /dev/null +++ b/warp/verifier_backend_test.go @@ -0,0 +1,255 @@ +// (c) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
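With Verify in place, the backend satisfies acp118.Verifier, so the avalanchego handler can serve signature requests directly, as the tests below exercise. A wiring sketch, assuming (as the tests suggest) that the handler returned by acp118.NewCachedHandler satisfies p2p.Handler:

```go
package warpexample // illustrative only

import (
	"github.com/ava-labs/avalanchego/cache"
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/network/p2p"
	"github.com/ava-labs/avalanchego/network/p2p/acp118"
	avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp"
	"github.com/ava-labs/coreth/warp"
)

// newSignatureHandler ties the pieces together: the backend doubles as the
// acp118 verifier, and the same signature cache is shared between the
// handler and the backend so hits never re-verify or re-sign.
func newSignatureHandler(
	backend warp.Backend,
	signer avalancheWarp.Signer,
	sigCache cache.Cacher[ids.ID, []byte],
) p2p.Handler {
	return acp118.NewCachedHandler(sigCache, backend, signer)
}
```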
+ +package warp + +import ( + "context" + "testing" + "time" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/acp118" + "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/coreth/utils" + "github.com/ava-labs/coreth/warp/warptest" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +func TestAddressedCallSignatures(t *testing.T) { + database := memdb.New() + snowCtx := utils.TestSnowContext() + blsSecretKey, err := bls.NewSigner() + require.NoError(t, err) + warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) + + offChainPayload, err := payload.NewAddressedCall([]byte{1, 2, 3}, []byte{1, 2, 3}) + require.NoError(t, err) + offchainMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, offChainPayload.Bytes()) + require.NoError(t, err) + offchainSignature, err := warpSigner.Sign(offchainMessage) + require.NoError(t, err) + + tests := map[string]struct { + setup func(backend Backend) (request []byte, expectedResponse []byte) + verifyStats func(t *testing.T, stats *verifierStats) + err error + }{ + "known message": { + setup: func(backend Backend) (request []byte, expectedResponse []byte) { + knownPayload, err := payload.NewAddressedCall([]byte{0, 0, 0}, []byte("test")) + require.NoError(t, err) + msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, knownPayload.Bytes()) + require.NoError(t, err) + signature, err := warpSigner.Sign(msg) + require.NoError(t, err) + + backend.AddMessage(msg) + return msg.Bytes(), signature[:] + }, + verifyStats: func(t *testing.T, stats *verifierStats) { + require.EqualValues(t, 0, stats.messageParseFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockValidationFail.Snapshot().Count()) + }, + }, + "offchain message": { + setup: func(_ Backend) (request []byte, expectedResponse []byte) { + return offchainMessage.Bytes(), offchainSignature[:] + }, + verifyStats: func(t *testing.T, stats *verifierStats) { + require.EqualValues(t, 0, stats.messageParseFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockValidationFail.Snapshot().Count()) + }, + }, + "unknown message": { + setup: func(_ Backend) (request []byte, expectedResponse []byte) { + unknownPayload, err := payload.NewAddressedCall([]byte{0, 0, 0}, []byte("unknown message")) + require.NoError(t, err) + unknownMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, unknownPayload.Bytes()) + require.NoError(t, err) + return unknownMessage.Bytes(), nil + }, + verifyStats: func(t *testing.T, stats *verifierStats) { + require.EqualValues(t, 1, stats.messageParseFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockValidationFail.Snapshot().Count()) + }, + err: &common.AppError{Code: ParseErrCode}, + }, + } + + for name, test := range tests { + for _, withCache := range []bool{true, false} { + if withCache { + name += "_with_cache" + } else { + name += "_no_cache" + } + t.Run(name, func(t *testing.T) { + var sigCache cache.Cacher[ids.ID, []byte] + if withCache { + sigCache = &cache.LRU[ids.ID, []byte]{Size: 100} + } else { + sigCache = 
+				warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, database, sigCache, [][]byte{offchainMessage.Bytes()})
+				require.NoError(t, err)
+				handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner)
+
+				requestBytes, expectedResponse := test.setup(warpBackend)
+				protoMsg := &sdk.SignatureRequest{Message: requestBytes}
+				protoBytes, err := proto.Marshal(protoMsg)
+				require.NoError(t, err)
+				responseBytes, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, protoBytes)
+				if test.err != nil {
+					require.Error(t, appErr)
+					require.ErrorIs(t, appErr, test.err)
+				} else {
+					require.Nil(t, appErr)
+				}
+
+				test.verifyStats(t, warpBackend.(*backend).stats)
+
+				// If the expected response is empty, assert that the handler returns an empty response and return early.
+				if len(expectedResponse) == 0 {
+					require.Len(t, responseBytes, 0, "expected response to be empty")
+					return
+				}
+				// Check that the signature cache was populated only when a cache is in use.
+				if withCache {
+					require.NotZero(t, warpBackend.(*backend).signatureCache.Len())
+				} else {
+					require.Zero(t, warpBackend.(*backend).signatureCache.Len())
+				}
+				response := &sdk.SignatureResponse{}
+				require.NoError(t, proto.Unmarshal(responseBytes, response), "error unmarshalling SignatureResponse")
+
+				require.Equal(t, expectedResponse, response.Signature)
+			})
+		}
+	}
+}
+
+func TestBlockSignatures(t *testing.T) {
+	database := memdb.New()
+	snowCtx := utils.TestSnowContext()
+	blsSecretKey, err := bls.NewSigner()
+	require.NoError(t, err)
+
+	warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID)
+	knownBlkID := ids.GenerateTestID()
+	blockClient := warptest.MakeBlockClient(knownBlkID)
+
+	toMessageBytes := func(id ids.ID) []byte {
+		idPayload, err := payload.NewHash(id)
+		require.NoError(t, err)
+
+		msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, idPayload.Bytes())
+		require.NoError(t, err)
+
+		return msg.Bytes()
+	}
+
+	tests := map[string]struct {
+		setup       func() (request []byte, expectedResponse []byte)
+		verifyStats func(t *testing.T, stats *verifierStats)
+		err         error
+	}{
+		"known block": {
+			setup: func() (request []byte, expectedResponse []byte) {
+				hashPayload, err := payload.NewHash(knownBlkID)
+				require.NoError(t, err)
+				unsignedMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, hashPayload.Bytes())
+				require.NoError(t, err)
+				signature, err := warpSigner.Sign(unsignedMessage)
+				require.NoError(t, err)
+				return toMessageBytes(knownBlkID), signature[:]
+			},
+			verifyStats: func(t *testing.T, stats *verifierStats) {
+				require.EqualValues(t, 0, stats.blockValidationFail.Snapshot().Count())
+				require.EqualValues(t, 0, stats.messageParseFail.Snapshot().Count())
+			},
+		},
+		"unknown block": {
+			setup: func() (request []byte, expectedResponse []byte) {
+				unknownBlockID := ids.GenerateTestID()
+				return toMessageBytes(unknownBlockID), nil
+			},
+			verifyStats: func(t *testing.T, stats *verifierStats) {
+				require.EqualValues(t, 1, stats.blockValidationFail.Snapshot().Count())
+				require.EqualValues(t, 0, stats.messageParseFail.Snapshot().Count())
+			},
+			err: &common.AppError{Code: VerifyErrCode},
+		},
+	}
+
+	for name, test := range tests {
+		for _, withCache := range []bool{true, false} {
+			// Derive the subtest name without mutating the map key, so the
+			// second iteration does not inherit the first iteration's suffix.
+			testName := name + "_no_cache"
+			if withCache {
+				testName = name + "_with_cache"
+			}
+			t.Run(testName, func(t *testing.T) {
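+				// Same handler wiring as in TestAddressedCallSignatures, but the
+				// backend uses a block client that only recognizes knownBlkID.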
+				var sigCache cache.Cacher[ids.ID, []byte]
+				if withCache {
+					sigCache = &cache.LRU[ids.ID, []byte]{Size: 100}
+				} else {
+					sigCache = &cache.Empty[ids.ID, []byte]{}
+				}
+				warpBackend, err := NewBackend(
+					snowCtx.NetworkID,
+					snowCtx.ChainID,
+					warpSigner,
+					blockClient,
+					database,
+					sigCache,
+					nil,
+				)
+				require.NoError(t, err)
+				handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner)
+
+				requestBytes, expectedResponse := test.setup()
+				protoMsg := &sdk.SignatureRequest{Message: requestBytes}
+				protoBytes, err := proto.Marshal(protoMsg)
+				require.NoError(t, err)
+				responseBytes, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, protoBytes)
+				if test.err != nil {
+					require.Error(t, appErr)
+					require.ErrorIs(t, appErr, test.err)
+				} else {
+					require.Nil(t, appErr)
+				}
+
+				test.verifyStats(t, warpBackend.(*backend).stats)
+
+				// If the expected response is empty, assert that the handler returns an empty response and return early.
+				if len(expectedResponse) == 0 {
+					require.Len(t, responseBytes, 0, "expected response to be empty")
+					return
+				}
+				// Check that the signature cache was populated only when a cache is in use.
+				if withCache {
+					require.NotZero(t, warpBackend.(*backend).signatureCache.Len())
+				} else {
+					require.Zero(t, warpBackend.(*backend).signatureCache.Len())
+				}
+				var response sdk.SignatureResponse
+				err = proto.Unmarshal(responseBytes, &response)
+				require.NoError(t, err, "error unmarshalling SignatureResponse")
+				require.Equal(t, expectedResponse, response.Signature)
+			})
+		}
+	}
+}
diff --git a/warp/verifier_stats.go b/warp/verifier_stats.go
new file mode 100644
index 0000000000..980d464429
--- /dev/null
+++ b/warp/verifier_stats.go
@@ -0,0 +1,29 @@
+// (c) 2023, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package warp
+
+import (
+	"github.com/ava-labs/coreth/metrics"
+)
+
+type verifierStats struct {
+	// messageParseFail counts signature requests whose payload could not be parsed.
+	messageParseFail metrics.Counter
+	// blockValidationFail counts block-hash requests whose block is not accepted.
+	blockValidationFail metrics.Counter
+}
+
+func newVerifierStats() *verifierStats {
+	return &verifierStats{
+		messageParseFail:    metrics.NewRegisteredCounter("warp_backend_message_parse_fail", nil),
+		blockValidationFail: metrics.NewRegisteredCounter("warp_backend_block_validation_fail", nil),
+	}
+}
+
+func (h *verifierStats) IncBlockValidationFail() {
+	h.blockValidationFail.Inc(1)
+}
+
+func (h *verifierStats) IncMessageParseFail() {
+	h.messageParseFail.Inc(1)
+}
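+
+// A minimal sketch of how Verify (see verifier_backend.go) reports failures
+// through these helpers:
+//
+//	b.stats.IncMessageParseFail()    // payload failed to parse
+//	b.stats.IncBlockValidationFail() // block hash does not match an accepted block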