From 65f999e92f686d6f0750ded524d9376fe4b8fa77 Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Thu, 20 Feb 2025 04:08:25 -0800 Subject: [PATCH] Merge master (2e4e6bd) into libevm (#812) At commit https://github.com/ava-labs/coreth/commit/2e4e6bd85132e7c655775e1b62a6397e0270cdb5 --- README.md | 4 +- RELEASES.md | 9 + consensus/dummy/consensus.go | 157 ++-- consensus/dummy/consensus_test.go | 72 +- consensus/dummy/dynamic_fee_window.go | 88 ++ consensus/dummy/dynamic_fee_window_test.go | 246 ++++++ consensus/dummy/dynamic_fees.go | 413 +++++----- consensus/dummy/dynamic_fees_test.go | 323 ++------ core/blockchain.go | 2 +- core/chain_makers.go | 16 +- core/main_test.go | 2 +- core/rawdb/schema.go | 2 +- core/state/snapshot/snapshot.go | 6 +- core/state_manager.go | 6 +- core/state_manager_test.go | 6 +- core/state_processor_test.go | 5 +- core/test_blockchain.go | 10 +- core/txpool/blobpool/blobpool.go | 6 +- core/txpool/blobpool/blobpool_test.go | 2 +- core/txpool/blobpool/metrics.go | 2 +- core/txpool/legacypool/legacypool.go | 18 +- core/txpool/txpool.go | 2 +- eth/api_backend.go | 15 + eth/backend.go | 11 +- eth/ethconfig/config.go | 5 + eth/gasprice/feehistory.go | 7 + eth/gasprice/gasprice.go | 3 +- eth/tracers/api.go | 4 + go.mod | 11 +- go.sum | 6 +- internal/ethapi/api.coreth.go | 134 ++++ internal/ethapi/api.coreth_test.go | 130 +++ internal/ethapi/api.go | 13 +- internal/ethapi/api_extra.go | 44 + internal/ethapi/api_extra_test.go | 132 +++ internal/ethapi/api_test.go | 6 + internal/ethapi/backend.go | 2 + internal/ethapi/mocks_generate_test.go | 3 + internal/ethapi/mocks_test.go | 757 ++++++++++++++++++ libevm/options/options.go | 42 + libevm/sync/sync.go | 52 ++ metrics/FORK.md | 1 - metrics/LICENSE | 29 - metrics/README.md | 102 --- metrics/config.go | 43 - metrics/counter.go | 112 --- metrics/counter_float64.go | 126 --- metrics/counter_float_64_test.go | 99 --- metrics/counter_test.go | 77 -- metrics/cpu.go | 35 - metrics/cpu_disabled.go | 34 - 
metrics/cpu_enabled.go | 54 -- metrics/cputime_nop.go | 36 - metrics/cputime_unix.go | 46 -- metrics/debug.go | 76 -- metrics/debug_test.go | 48 -- metrics/disk.go | 35 - metrics/disk_linux.go | 82 -- metrics/disk_nop.go | 37 - metrics/ewma.go | 111 --- metrics/ewma_test.go | 89 -- metrics/gauge.go | 98 --- metrics/gauge_float64.go | 73 -- metrics/gauge_float64_test.go | 51 -- metrics/gauge_info.go | 84 -- metrics/gauge_info_test.go | 36 - metrics/gauge_test.go | 31 - metrics/graphite.go | 117 --- metrics/graphite_test.go | 22 - metrics/healthcheck.go | 61 -- metrics/histogram.go | 73 -- metrics/histogram_test.go | 95 --- metrics/inactive.go | 48 -- metrics/init_test.go | 5 - metrics/json.go | 31 - metrics/json_test.go | 28 - metrics/log.go | 86 -- metrics/memory.md | 285 ------- metrics/meter.go | 189 ----- metrics/meter_test.go | 89 -- metrics/metrics.go | 18 - metrics/metrics_test.go | 99 --- metrics/opentsdb.go | 128 --- metrics/opentsdb_test.go | 67 -- metrics/prometheus/interfaces.go | 12 + metrics/prometheus/prometheus.go | 332 ++++---- metrics/prometheus/prometheus_test.go | 100 +-- metrics/prometheus/testdata/prometheus.want | 70 -- metrics/registry.go | 372 --------- metrics/registry_test.go | 335 -------- metrics/resetting_sample.go | 24 - metrics/resetting_timer.go | 171 ---- metrics/resetting_timer_test.go | 197 ----- metrics/sample.go | 446 ----------- metrics/sample_test.go | 360 --------- metrics/syslog.go | 83 -- metrics/testdata/opentsb.want | 23 - metrics/timer.go | 182 ----- metrics/timer_test.go | 114 --- metrics/validate.sh | 10 - metrics/writer.go | 100 --- metrics/writer_test.go | 23 - miner/ordering_ext.go | 15 - miner/worker.go | 16 +- params/avalanche_params.go | 21 +- params/config.go | 94 ++- params/config_extra.go | 39 +- params/extras/network_upgrades.go | 19 +- peer/peer_tracker.go | 2 +- peer/stats/stats.go | 2 +- plugin/evm/acp176/acp176.go | 182 +++++ plugin/evm/acp176/acp176_test.go | 704 ++++++++++++++++ plugin/evm/ap4/cost.go | 
88 ++ plugin/evm/ap4/cost_test.go | 77 ++ plugin/evm/api.go | 211 +---- plugin/evm/atomic/mempool.go | 2 +- plugin/evm/atomic_backend.go | 11 - plugin/evm/atomic_syncer.go | 5 +- plugin/evm/atomic_syncer_test.go | 4 +- plugin/evm/atomic_trie.go | 19 +- plugin/evm/atomic_trie_test.go | 170 ++++ plugin/evm/block_verification.go | 35 +- plugin/evm/client/client.go | 141 ---- plugin/evm/client/client_interface_test.go | 17 - plugin/evm/config/config.go | 8 + plugin/evm/gossip.go | 2 +- plugin/evm/header/block_gas_cost.go | 68 ++ plugin/evm/header/block_gas_cost_test.go | 161 ++++ plugin/evm/header/extra.go | 58 ++ plugin/evm/header/extra_test.go | 104 +++ plugin/evm/network_handler.go | 2 +- plugin/evm/syncervm_client.go | 9 +- plugin/evm/syncervm_test.go | 12 +- plugin/evm/testutils/metrics.go | 20 + plugin/evm/user.go | 137 ---- plugin/evm/validators.go | 19 + plugin/evm/vm.go | 96 +-- plugin/evm/vm_test.go | 33 +- plugin/evm/vm_warp_test.go | 7 +- precompile/contracts/warp/README.md | 32 +- precompile/contracts/warp/config.go | 38 +- precompile/contracts/warp/contract_test.go | 5 +- precompile/contracts/warp/predicate_test.go | 3 +- .../warp/signature_verification_test.go | 59 +- rpc/handler.go | 2 +- rpc/http.go | 2 +- rpc/metrics.go | 2 +- scripts/eth-allowed-packages.txt | 1 + scripts/lint_allowed_eth_imports.sh | 4 +- scripts/versions.sh | 2 +- sync/client/stats/stats.go | 2 +- sync/handlers/stats/stats.go | 2 +- sync/statesync/trie_sync_stats.go | 2 +- sync/statesync/trie_sync_stats_test.go | 2 +- triedb/hashdb/database.go | 2 +- triedb/pathdb/metrics.go | 2 +- utils/metered_cache.go | 2 +- utils/numbers.go | 18 + utils/numbers_test.go | 93 +++ utils/snow.go | 4 +- warp/aggregator/aggregator_test.go | 5 +- warp/backend_test.go | 12 +- warp/handlers/signature_request_test.go | 10 +- warp/handlers/stats.go | 2 +- warp/service.go | 10 +- warp/verifier_backend_test.go | 11 +- warp/verifier_stats.go | 2 +- 167 files changed, 4445 insertions(+), 7310 deletions(-) 
create mode 100644 consensus/dummy/dynamic_fee_window.go create mode 100644 consensus/dummy/dynamic_fee_window_test.go create mode 100644 internal/ethapi/api.coreth.go create mode 100644 internal/ethapi/api.coreth_test.go create mode 100644 internal/ethapi/api_extra_test.go create mode 100644 internal/ethapi/mocks_generate_test.go create mode 100644 internal/ethapi/mocks_test.go create mode 100644 libevm/options/options.go create mode 100644 libevm/sync/sync.go delete mode 100644 metrics/FORK.md delete mode 100644 metrics/LICENSE delete mode 100644 metrics/README.md delete mode 100644 metrics/config.go delete mode 100644 metrics/counter.go delete mode 100644 metrics/counter_float64.go delete mode 100644 metrics/counter_float_64_test.go delete mode 100644 metrics/counter_test.go delete mode 100644 metrics/cpu.go delete mode 100644 metrics/cpu_disabled.go delete mode 100644 metrics/cpu_enabled.go delete mode 100644 metrics/cputime_nop.go delete mode 100644 metrics/cputime_unix.go delete mode 100644 metrics/debug.go delete mode 100644 metrics/debug_test.go delete mode 100644 metrics/disk.go delete mode 100644 metrics/disk_linux.go delete mode 100644 metrics/disk_nop.go delete mode 100644 metrics/ewma.go delete mode 100644 metrics/ewma_test.go delete mode 100644 metrics/gauge.go delete mode 100644 metrics/gauge_float64.go delete mode 100644 metrics/gauge_float64_test.go delete mode 100644 metrics/gauge_info.go delete mode 100644 metrics/gauge_info_test.go delete mode 100644 metrics/gauge_test.go delete mode 100644 metrics/graphite.go delete mode 100644 metrics/graphite_test.go delete mode 100644 metrics/healthcheck.go delete mode 100644 metrics/histogram.go delete mode 100644 metrics/histogram_test.go delete mode 100644 metrics/inactive.go delete mode 100644 metrics/init_test.go delete mode 100644 metrics/json.go delete mode 100644 metrics/json_test.go delete mode 100644 metrics/log.go delete mode 100644 metrics/memory.md delete mode 100644 metrics/meter.go delete mode 
100644 metrics/meter_test.go delete mode 100644 metrics/metrics.go delete mode 100644 metrics/metrics_test.go delete mode 100644 metrics/opentsdb.go delete mode 100644 metrics/opentsdb_test.go create mode 100644 metrics/prometheus/interfaces.go delete mode 100644 metrics/prometheus/testdata/prometheus.want delete mode 100644 metrics/registry.go delete mode 100644 metrics/registry_test.go delete mode 100644 metrics/resetting_sample.go delete mode 100644 metrics/resetting_timer.go delete mode 100644 metrics/resetting_timer_test.go delete mode 100644 metrics/sample.go delete mode 100644 metrics/sample_test.go delete mode 100644 metrics/syslog.go delete mode 100644 metrics/testdata/opentsb.want delete mode 100644 metrics/timer.go delete mode 100644 metrics/timer_test.go delete mode 100755 metrics/validate.sh delete mode 100644 metrics/writer.go delete mode 100644 metrics/writer_test.go delete mode 100644 miner/ordering_ext.go create mode 100644 plugin/evm/acp176/acp176.go create mode 100644 plugin/evm/acp176/acp176_test.go create mode 100644 plugin/evm/ap4/cost.go create mode 100644 plugin/evm/ap4/cost_test.go delete mode 100644 plugin/evm/client/client_interface_test.go create mode 100644 plugin/evm/header/block_gas_cost.go create mode 100644 plugin/evm/header/block_gas_cost_test.go create mode 100644 plugin/evm/header/extra.go create mode 100644 plugin/evm/header/extra_test.go create mode 100644 plugin/evm/testutils/metrics.go delete mode 100644 plugin/evm/user.go create mode 100644 plugin/evm/validators.go create mode 100644 utils/numbers_test.go diff --git a/README.md b/README.md index 40cd3a8407..08ea17aa62 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Coreth and the C-Chain -[Avalanche](https://docs.avax.network/intro) is a network composed of multiple blockchains. +[Avalanche](https://www.avax.network/) is a network composed of multiple blockchains. 
Each blockchain is an instance of a Virtual Machine (VM), much like an object in an object-oriented language is an instance of a class. That is, the VM defines the behavior of the blockchain. Coreth (from core Ethereum) is the [Virtual Machine (VM)](https://docs.avax.network/learn/virtual-machines) that defines the Contract Chain (C-Chain). @@ -39,7 +39,7 @@ Full documentation for the C-Chain's API can be found [here.](https://docs.avax. ## Compatibility -The C-Chain is compatible with almost all Ethereum tooling, including [Core,](https://docs.avax.network/build/dapp/launch-dapp#through-core) [Metamask,](https://docs.avax.network/build/dapp/launch-dapp#through-metamask) [Remix](https://docs.avax.network/build/tutorials/smart-contracts/deploy-a-smart-contract-on-avalanche-using-remix-and-metamask) and [Truffle.](https://docs.avax.network/build/tutorials/smart-contracts/using-truffle-with-the-avalanche-c-chain) +The C-Chain is compatible with almost all Ethereum tooling, including [Core,](https://docs.avax.network/build/dapp/launch-dapp#through-core) [Metamask,](https://docs.avax.network/build/dapp/launch-dapp#through-metamask) [Remix](https://docs.avax.network/dapps/smart-contract-dev/deploy-with-remix-ide) and [Truffle.](https://docs.avax.network/build/tutorials/smart-contracts/using-truffle-with-the-avalanche-c-chain) ## Differences Between Avalanche C-Chain and Ethereum diff --git a/RELEASES.md b/RELEASES.md index bce253816e..0cce20cbc4 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,6 +1,15 @@ # Release Notes +## Pending Release +- Bump golang version to v1.23.6 +- Bump golangci-lint to v1.63 and add linters + ## [v0.14.1](https://github.com/ava-labs/coreth/releases/tag/v0.14.1) + +- Removed deprecated `ExportKey`, `ExportAVAX`, `Export`, `ImportKey`, `ImportAVAX`, `Import` APIs +- IMPORTANT: `eth_getProof` calls for historical state will be rejected by default. 
+ - On archive nodes (`"pruning-enabled": false`): queries for historical proofs for state older than approximately 24 hours preceding the last accepted block will be rejected by default. This can be adjusted with the new option `historical-proof-query-window` which defines the number of blocks before the last accepted block which should be accepted for state proof queries, or set to `0` to accept any block number state query (previous behavior). + - On `pruning` nodes: queries for proofs past the tip buffer (32 blocks) will be rejected. This is in support of moving to a path based storage scheme, which does not support historical state proofs. - Remove API eth_getAssetBalance that was used to query ANT balances (deprecated since v0.10.0) - Remove legacy gossip handler and metrics (deprecated since v0.10.0) - Refactored trie_prefetcher.go to be structurally similar to [upstream](https://github.com/ethereum/go-ethereum/tree/v1.13.14). diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go index 822ea8fe22..ae60583bfe 100644 --- a/consensus/dummy/consensus.go +++ b/consensus/dummy/consensus.go @@ -10,14 +10,18 @@ import ( "math/big" "time" + "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/consensus/misc/eip4844" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/utils" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/trie" + + customheader "github.com/ava-labs/coreth/plugin/evm/header" ) var ( @@ -26,7 +30,6 @@ var ( errInvalidBlockTime = errors.New("timestamp less than parent's") errUnclesUnsupported = errors.New("uncles unsupported") errBlockGasCostNil = errors.New("block gas cost is nil") - errBlockGasCostTooLarge = errors.New("block gas cost is not uint64") errBaseFeeNil = errors.New("base fee is nil") 
errExtDataGasUsedNil = errors.New("extDataGasUsed is nil") errExtDataGasUsedTooLarge = errors.New("extDataGasUsed is not uint64") @@ -130,41 +133,29 @@ func (eng *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, header } } else { // Verify that the gas limit remains within allowed bounds - diff := int64(parent.GasLimit) - int64(header.GasLimit) - if diff < 0 { - diff *= -1 - } + diff := math.AbsDiff(parent.GasLimit, header.GasLimit) limit := parent.GasLimit / params.GasLimitBoundDivisor - - if uint64(diff) >= limit || header.GasLimit < params.MinGasLimit { + if diff >= limit || header.GasLimit < params.MinGasLimit { return fmt.Errorf("invalid gas limit: have %d, want %d += %d", header.GasLimit, parent.GasLimit, limit) } } - if !configExtra.IsApricotPhase3(header.Time) { - // Verify BaseFee is not present before AP3 - if header.BaseFee != nil { - return fmt.Errorf("invalid baseFee before fork: have %d, want ", header.BaseFee) - } - } else { - // Verify baseFee and rollupWindow encoding as part of header verification - // starting in AP3 - expectedRollupWindowBytes, expectedBaseFee, err := CalcBaseFee(config, parent, header.Time) - if err != nil { - return fmt.Errorf("failed to calculate base fee: %w", err) - } - if len(header.Extra) < len(expectedRollupWindowBytes) || !bytes.Equal(expectedRollupWindowBytes, header.Extra[:len(expectedRollupWindowBytes)]) { - return fmt.Errorf("expected rollup window bytes: %x, found %x", expectedRollupWindowBytes, header.Extra) - } - if header.BaseFee == nil { - return errors.New("expected baseFee to be non-nil") - } - if bfLen := header.BaseFee.BitLen(); bfLen > 256 { - return fmt.Errorf("too large base fee: bitlen %d", bfLen) - } - if header.BaseFee.Cmp(expectedBaseFee) != 0 { - return fmt.Errorf("expected base fee (%d), found (%d)", expectedBaseFee, header.BaseFee) - } + // Verify header.Extra matches the expected value. 
+ expectedExtraPrefix, err := CalcExtraPrefix(config, parent, header.Time) + if err != nil { + return fmt.Errorf("failed to calculate extra prefix: %w", err) + } + if !bytes.HasPrefix(header.Extra, expectedExtraPrefix) { + return fmt.Errorf("expected header.Extra to have prefix: %x, found %x", expectedExtraPrefix, header.Extra) + } + + // Verify header.BaseFee matches the expected value. + expectedBaseFee, err := CalcBaseFee(config, parent, header.Time) + if err != nil { + return fmt.Errorf("failed to calculate base fee: %w", err) + } + if !utils.BigEqual(header.BaseFee, expectedBaseFee) { + return fmt.Errorf("expected base fee %d, found %d", expectedBaseFee, header.BaseFee) } // Verify BlockGasCost, ExtDataGasUsed not present before AP4 @@ -179,27 +170,15 @@ func (eng *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, header } // Enforce BlockGasCost constraints - blockGasCostStep := ApricotPhase4BlockGasCostStep - if configExtra.IsApricotPhase5(header.Time) { - blockGasCostStep = ApricotPhase5BlockGasCostStep - } - expectedBlockGasCost := calcBlockGasCost( - ApricotPhase4TargetBlockRate, - ApricotPhase4MinBlockGasCost, - ApricotPhase4MaxBlockGasCost, - blockGasCostStep, - parent.BlockGasCost, - parent.Time, header.Time, + expectedBlockGasCost := customheader.BlockGasCost( + configExtra, + parent, + header.Time, ) - if header.BlockGasCost == nil { - return errBlockGasCostNil - } - if !header.BlockGasCost.IsUint64() { - return errBlockGasCostTooLarge - } - if header.BlockGasCost.Cmp(expectedBlockGasCost) != 0 { + if !utils.BigEqualUint64(header.BlockGasCost, expectedBlockGasCost) { return fmt.Errorf("invalid block gas cost: have %d, want %d", header.BlockGasCost, expectedBlockGasCost) } + // ExtDataGasUsed correctness is checked during block validation // (when the validator has access to the block contents) if header.ExtDataGasUsed == nil { @@ -213,26 +192,18 @@ func (eng *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, header // 
modified from consensus.go func (eng *DummyEngine) verifyHeader(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Header, uncle bool) error { - config := chain.Config() - configExtra := params.GetExtra(config) // Ensure that we do not verify an uncle if uncle { return errUnclesUnsupported } - switch { - case configExtra.IsDurango(header.Time): - if len(header.Extra) < params.DynamicFeeExtraDataSize { - return fmt.Errorf("expected extra-data field length >= %d, found %d", params.DynamicFeeExtraDataSize, len(header.Extra)) - } - case configExtra.IsApricotPhase3(header.Time): - if len(header.Extra) != params.DynamicFeeExtraDataSize { - return fmt.Errorf("expected extra-data field to be: %d, but found %d", params.DynamicFeeExtraDataSize, len(header.Extra)) - } - default: - if uint64(len(header.Extra)) > params.MaximumExtraDataSize { - return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize) - } + + // Verify the extra data is well-formed. 
+ config := chain.Config() + rules := params.GetExtra(config).GetAvalancheRules(header.Time) + if err := customheader.VerifyExtra(rules, header.Extra); err != nil { + return err } + // Ensure gas-related header fields are correct if err := eng.verifyHeaderGasFields(config, header, parent); err != nil { return err @@ -252,7 +223,7 @@ func (eng *DummyEngine) verifyHeader(chain consensus.ChainHeaderReader, header * return consensus.ErrInvalidNumber } // Verify the existence / non-existence of excessBlobGas - cancun := chain.Config().IsCancun(header.Number, header.Time) + cancun := config.IsCancun(header.Number, header.Time) if !cancun { switch { case header.ExcessBlobGas != nil: @@ -394,8 +365,10 @@ func (eng *DummyEngine) Finalize(chain consensus.ChainHeaderReader, block *types return err } } + configExtra := params.GetExtra(chain.Config()) - if configExtra.IsApricotPhase4(block.Time()) { + timestamp := block.Time() + if configExtra.IsApricotPhase4(timestamp) { // Validate extDataGasUsed and BlockGasCost match expectations // // NOTE: This is a duplicate check of what is already performed in @@ -406,28 +379,22 @@ func (eng *DummyEngine) Finalize(chain consensus.ChainHeaderReader, block *types if blockExtDataGasUsed := block.ExtDataGasUsed(); blockExtDataGasUsed == nil || !blockExtDataGasUsed.IsUint64() || blockExtDataGasUsed.Cmp(extDataGasUsed) != 0 { return fmt.Errorf("invalid extDataGasUsed: have %d, want %d", blockExtDataGasUsed, extDataGasUsed) } - blockGasCostStep := ApricotPhase4BlockGasCostStep - if configExtra.IsApricotPhase5(block.Time()) { - blockGasCostStep = ApricotPhase5BlockGasCostStep - } - // Calculate the expected blockGasCost for this block. - // Note: this is a deterministic transition that defines an exact block fee for this block. 
- blockGasCost := calcBlockGasCost( - ApricotPhase4TargetBlockRate, - ApricotPhase4MinBlockGasCost, - ApricotPhase4MaxBlockGasCost, - blockGasCostStep, - parent.BlockGasCost, - parent.Time, block.Time(), + + // Verify the BlockGasCost set in the header matches the expected value. + blockGasCost := block.BlockGasCost() + expectedBlockGasCost := customheader.BlockGasCost( + configExtra, + parent, + timestamp, ) - // Verify the BlockGasCost set in the header matches the calculated value. - if blockBlockGasCost := block.BlockGasCost(); blockBlockGasCost == nil || !blockBlockGasCost.IsUint64() || blockBlockGasCost.Cmp(blockGasCost) != 0 { - return fmt.Errorf("invalid blockGasCost: have %d, want %d", blockBlockGasCost, blockGasCost) + if !utils.BigEqualUint64(blockGasCost, expectedBlockGasCost) { + return fmt.Errorf("invalid blockGasCost: have %d, want %d", blockGasCost, expectedBlockGasCost) } + // Verify the block fee was paid. if err := eng.verifyBlockFee( block.BaseFee(), - block.BlockGasCost(), + blockGasCost, block.Transactions(), receipts, contribution, @@ -453,25 +420,21 @@ func (eng *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, h return nil, err } } - chainConfigExtra := params.GetExtra(chain.Config()) - if chainConfigExtra.IsApricotPhase4(header.Time) { + + configExtra := params.GetExtra(chain.Config()) + if configExtra.IsApricotPhase4(header.Time) { header.ExtDataGasUsed = extDataGasUsed if header.ExtDataGasUsed == nil { header.ExtDataGasUsed = new(big.Int).Set(common.Big0) } - blockGasCostStep := ApricotPhase4BlockGasCostStep - if chainConfigExtra.IsApricotPhase5(header.Time) { - blockGasCostStep = ApricotPhase5BlockGasCostStep - } // Calculate the required block gas cost for this block. 
- header.BlockGasCost = calcBlockGasCost( - ApricotPhase4TargetBlockRate, - ApricotPhase4MinBlockGasCost, - ApricotPhase4MaxBlockGasCost, - blockGasCostStep, - parent.BlockGasCost, - parent.Time, header.Time, + blockGasCost := customheader.BlockGasCost( + configExtra, + parent, + header.Time, ) + header.BlockGasCost = new(big.Int).SetUint64(blockGasCost) + // Verify that this block covers the block fee. if err := eng.verifyBlockFee( header.BaseFee, @@ -489,7 +452,7 @@ func (eng *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, h // Header seems complete, assemble into a block and return return types.NewBlockWithExtData( header, txs, uncles, receipts, trie.NewStackTrie(nil), - extraData, chainConfigExtra.IsApricotPhase1(header.Time), + extraData, configExtra.IsApricotPhase1(header.Time), ), nil } diff --git a/consensus/dummy/consensus_test.go b/consensus/dummy/consensus_test.go index 5c7c69cc3a..76b3fb7a5e 100644 --- a/consensus/dummy/consensus_test.go +++ b/consensus/dummy/consensus_test.go @@ -9,24 +9,25 @@ import ( "testing" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/plugin/evm/ap4" + "github.com/ava-labs/coreth/plugin/evm/header" "github.com/ava-labs/libevm/common" ) func TestVerifyBlockFee(t *testing.T) { tests := map[string]struct { - baseFee *big.Int - parentBlockGasCost *big.Int - parentTime, currentTime uint64 - txs []*types.Transaction - receipts []*types.Receipt - extraStateContribution *big.Int - shouldErr bool + baseFee *big.Int + parentBlockGasCost *big.Int + timeElapsed uint64 + txs []*types.Transaction + receipts []*types.Receipt + extraStateContribution *big.Int + shouldErr bool }{ "tx only base fee": { baseFee: big.NewInt(100), parentBlockGasCost: big.NewInt(0), - parentTime: 10, - currentTime: 10, + timeElapsed: 0, txs: []*types.Transaction{ types.NewTransaction(0, common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), big.NewInt(0), 100, big.NewInt(100), nil), }, @@ -39,8 +40,7 @@ func 
TestVerifyBlockFee(t *testing.T) { "tx covers exactly block fee": { baseFee: big.NewInt(100), parentBlockGasCost: big.NewInt(0), - parentTime: 10, - currentTime: 10, + timeElapsed: 0, txs: []*types.Transaction{ types.NewTransaction(0, common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), big.NewInt(0), 100_000, big.NewInt(200), nil), }, @@ -53,8 +53,7 @@ func TestVerifyBlockFee(t *testing.T) { "txs share block fee": { baseFee: big.NewInt(100), parentBlockGasCost: big.NewInt(0), - parentTime: 10, - currentTime: 10, + timeElapsed: 0, txs: []*types.Transaction{ types.NewTransaction(0, common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), big.NewInt(0), 100_000, big.NewInt(200), nil), types.NewTransaction(1, common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), big.NewInt(0), 100_000, big.NewInt(100), nil), @@ -69,8 +68,7 @@ func TestVerifyBlockFee(t *testing.T) { "txs split block fee": { baseFee: big.NewInt(100), parentBlockGasCost: big.NewInt(0), - parentTime: 10, - currentTime: 10, + timeElapsed: 0, txs: []*types.Transaction{ types.NewTransaction(0, common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), big.NewInt(0), 100_000, big.NewInt(150), nil), types.NewTransaction(1, common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), big.NewInt(0), 100_000, big.NewInt(150), nil), @@ -85,8 +83,7 @@ func TestVerifyBlockFee(t *testing.T) { "split block fee with extra state contribution": { baseFee: big.NewInt(100), parentBlockGasCost: big.NewInt(0), - parentTime: 10, - currentTime: 10, + timeElapsed: 0, txs: []*types.Transaction{ types.NewTransaction(0, common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), big.NewInt(0), 100_000, big.NewInt(150), nil), }, @@ -99,8 +96,7 @@ func TestVerifyBlockFee(t *testing.T) { "extra state contribution insufficient": { baseFee: big.NewInt(100), parentBlockGasCost: big.NewInt(0), - parentTime: 10, - currentTime: 10, + timeElapsed: 0, txs: nil, receipts: nil, 
extraStateContribution: big.NewInt(9_999_999), @@ -109,8 +105,7 @@ func TestVerifyBlockFee(t *testing.T) { "negative extra state contribution": { baseFee: big.NewInt(100), parentBlockGasCost: big.NewInt(0), - parentTime: 10, - currentTime: 10, + timeElapsed: 0, txs: nil, receipts: nil, extraStateContribution: big.NewInt(-1), @@ -119,8 +114,7 @@ func TestVerifyBlockFee(t *testing.T) { "extra state contribution covers block fee": { baseFee: big.NewInt(100), parentBlockGasCost: big.NewInt(0), - parentTime: 10, - currentTime: 10, + timeElapsed: 0, txs: nil, receipts: nil, extraStateContribution: big.NewInt(10_000_000), @@ -129,8 +123,7 @@ func TestVerifyBlockFee(t *testing.T) { "extra state contribution covers more than block fee": { baseFee: big.NewInt(100), parentBlockGasCost: big.NewInt(0), - parentTime: 10, - currentTime: 10, + timeElapsed: 0, txs: nil, receipts: nil, extraStateContribution: big.NewInt(10_000_001), @@ -139,8 +132,7 @@ func TestVerifyBlockFee(t *testing.T) { "tx only base fee after full time window": { baseFee: big.NewInt(100), parentBlockGasCost: big.NewInt(500_000), - parentTime: 10, - currentTime: 22, // 2s target + 10 + timeElapsed: ap4.TargetBlockRate + 10, txs: []*types.Transaction{ types.NewTransaction(0, common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), big.NewInt(0), 100, big.NewInt(100), nil), }, @@ -153,8 +145,7 @@ func TestVerifyBlockFee(t *testing.T) { "tx only base fee after large time window": { baseFee: big.NewInt(100), parentBlockGasCost: big.NewInt(100_000), - parentTime: 0, - currentTime: math.MaxUint64, + timeElapsed: math.MaxUint64, txs: []*types.Transaction{ types.NewTransaction(0, common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), big.NewInt(0), 100, big.NewInt(100), nil), }, @@ -164,30 +155,19 @@ func TestVerifyBlockFee(t *testing.T) { extraStateContribution: nil, shouldErr: false, }, - "parent time > current time": { - baseFee: big.NewInt(100), - parentBlockGasCost: big.NewInt(0), - parentTime: 
11, - currentTime: 10, - txs: nil, - receipts: nil, - extraStateContribution: big.NewInt(10_000_000), - shouldErr: false, - }, } for name, test := range tests { t.Run(name, func(t *testing.T) { - blockGasCost := calcBlockGasCost( - ApricotPhase4TargetBlockRate, - ApricotPhase4MinBlockGasCost, - ApricotPhase4MaxBlockGasCost, - ApricotPhase4BlockGasCostStep, + blockGasCost := header.BlockGasCostWithStep( test.parentBlockGasCost, - test.parentTime, test.currentTime, + ap4.BlockGasCostStep, + test.timeElapsed, ) + bigBlockGasCost := new(big.Int).SetUint64(blockGasCost) + engine := NewFaker() - if err := engine.verifyBlockFee(test.baseFee, blockGasCost, test.txs, test.receipts, test.extraStateContribution); err != nil { + if err := engine.verifyBlockFee(test.baseFee, bigBlockGasCost, test.txs, test.receipts, test.extraStateContribution); err != nil { if !test.shouldErr { t.Fatalf("Unexpected error: %s", err) } diff --git a/consensus/dummy/dynamic_fee_window.go b/consensus/dummy/dynamic_fee_window.go new file mode 100644 index 0000000000..81d61c5c78 --- /dev/null +++ b/consensus/dummy/dynamic_fee_window.go @@ -0,0 +1,88 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package dummy + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/libevm/common/math" +) + +var ErrDynamicFeeWindowInsufficientLength = errors.New("insufficient length for dynamic fee window") + +// DynamicFeeWindow is a window of the last [params.RollupWindow] seconds of gas +// usage. +// +// Index 0 is the oldest entry, and [params.RollupWindow]-1 is the current +// entry. 
+type DynamicFeeWindow [params.RollupWindow]uint64 + +func ParseDynamicFeeWindow(bytes []byte) (DynamicFeeWindow, error) { + if len(bytes) < params.DynamicFeeExtraDataSize { + return DynamicFeeWindow{}, fmt.Errorf("%w: expected at least %d bytes but got %d bytes", + ErrDynamicFeeWindowInsufficientLength, + params.DynamicFeeExtraDataSize, + len(bytes), + ) + } + + var window DynamicFeeWindow + for i := range window { + offset := i * wrappers.LongLen + window[i] = binary.BigEndian.Uint64(bytes[offset:]) + } + return window, nil +} + +// Add adds the amounts to the most recent entry in the window. +// +// If the most recent entry overflows, it is set to [math.MaxUint64]. +func (w *DynamicFeeWindow) Add(amounts ...uint64) { + const lastIndex uint = params.RollupWindow - 1 + w[lastIndex] = add(w[lastIndex], amounts...) +} + +// Shift removes the oldest n entries from the window and adds n new empty +// entries. +func (w *DynamicFeeWindow) Shift(n uint64) { + if n >= params.RollupWindow { + *w = DynamicFeeWindow{} + return + } + + var newWindow DynamicFeeWindow + copy(newWindow[:], w[n:]) + *w = newWindow +} + +// Sum returns the sum of all the entries in the window. +// +// If the sum overflows, [math.MaxUint64] is returned. +func (w *DynamicFeeWindow) Sum() uint64 { + return add(0, w[:]...) 
+} + +func (w *DynamicFeeWindow) Bytes() []byte { + bytes := make([]byte, params.DynamicFeeExtraDataSize) + for i, v := range w { + offset := i * wrappers.LongLen + binary.BigEndian.PutUint64(bytes[offset:], v) + } + return bytes +} + +func add(sum uint64, values ...uint64) uint64 { + var overflow bool + for _, v := range values { + sum, overflow = math.SafeAdd(sum, v) + if overflow { + return math.MaxUint64 + } + } + return sum +} diff --git a/consensus/dummy/dynamic_fee_window_test.go b/consensus/dummy/dynamic_fee_window_test.go new file mode 100644 index 0000000000..b93dce9d64 --- /dev/null +++ b/consensus/dummy/dynamic_fee_window_test.go @@ -0,0 +1,246 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package dummy + +import ( + "strconv" + "testing" + + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/libevm/common/math" + "github.com/stretchr/testify/require" +) + +func TestDynamicFeeWindow_Add(t *testing.T) { + tests := []struct { + name string + window DynamicFeeWindow + amount uint64 + expected DynamicFeeWindow + }{ + { + name: "normal_addition", + window: DynamicFeeWindow{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + }, + amount: 5, + expected: DynamicFeeWindow{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 15, + }, + }, + { + name: "amount_overflow", + window: DynamicFeeWindow{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + }, + amount: math.MaxUint64, + expected: DynamicFeeWindow{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, math.MaxUint64, + }, + }, + { + name: "window_overflow", + window: DynamicFeeWindow{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, math.MaxUint64, + }, + amount: 5, + expected: DynamicFeeWindow{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, math.MaxUint64, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.window.Add(test.amount) + require.Equal(t, test.expected, test.window) + }) + } +} + +func TestDynamicFeeWindow_Shift(t *testing.T) { + window := DynamicFeeWindow{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + } + tests := 
[]struct { + n uint64 + expected DynamicFeeWindow + }{ + { + n: 0, + expected: window, + }, + { + n: 1, + expected: DynamicFeeWindow{ + 2, 3, 4, 5, 6, 7, 8, 9, 10, + }, + }, + { + n: 2, + expected: DynamicFeeWindow{ + 3, 4, 5, 6, 7, 8, 9, 10, + }, + }, + { + n: 3, + expected: DynamicFeeWindow{ + 4, 5, 6, 7, 8, 9, 10, + }, + }, + { + n: 4, + expected: DynamicFeeWindow{ + 5, 6, 7, 8, 9, 10, + }, + }, + { + n: 5, + expected: DynamicFeeWindow{ + 6, 7, 8, 9, 10, + }, + }, + { + n: 6, + expected: DynamicFeeWindow{ + 7, 8, 9, 10, + }, + }, + { + n: 7, + expected: DynamicFeeWindow{ + 8, 9, 10, + }, + }, + { + n: 8, + expected: DynamicFeeWindow{ + 9, 10, + }, + }, + { + n: 9, + expected: DynamicFeeWindow{ + 10, + }, + }, + { + n: 10, + expected: DynamicFeeWindow{}, + }, + { + n: 100, + expected: DynamicFeeWindow{}, + }, + } + for _, test := range tests { + t.Run(strconv.FormatUint(test.n, 10), func(t *testing.T) { + window := window + window.Shift(test.n) + require.Equal(t, test.expected, window) + }) + } +} + +func TestDynamicFeeWindow_Sum(t *testing.T) { + tests := []struct { + name string + window DynamicFeeWindow + expected uint64 + }{ + { + name: "empty", + window: DynamicFeeWindow{}, + expected: 0, + }, + { + name: "full", + window: DynamicFeeWindow{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + }, + expected: 55, + }, + { + name: "overflow", + window: DynamicFeeWindow{ + math.MaxUint64, 2, 3, 4, 5, 6, 7, 8, 9, 10, + }, + expected: math.MaxUint64, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require.Equal(t, test.expected, test.window.Sum()) + }) + } +} + +func TestDynamicFeeWindow_Bytes(t *testing.T) { + tests := []struct { + name string + bytes []byte + window DynamicFeeWindow + parseErr error + }{ + { + name: "insufficient_length", + bytes: make([]byte, params.DynamicFeeExtraDataSize-1), + parseErr: ErrDynamicFeeWindowInsufficientLength, + }, + { + name: "zero_window", + bytes: make([]byte, params.DynamicFeeExtraDataSize), + window: 
DynamicFeeWindow{}, + }, + { + name: "truncate_bytes", + bytes: []byte{ + params.DynamicFeeExtraDataSize: 1, + }, + window: DynamicFeeWindow{}, + }, + { + name: "endianess", + bytes: []byte{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, + 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, + 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + }, + window: DynamicFeeWindow{ + 0x0102030405060708, + 0x1112131415161718, + 0x2122232425262728, + 0x3132333435363738, + 0x4142434445464748, + 0x5152535455565758, + 0x6162636465666768, + 0x7172737475767778, + 0x8182838485868788, + 0x9192939495969798, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + window, err := ParseDynamicFeeWindow(test.bytes) + require.Equal(test.window, window) + require.ErrorIs(err, test.parseErr) + if test.parseErr != nil { + return + } + + expectedBytes := test.bytes[:params.DynamicFeeExtraDataSize] + bytes := window.Bytes() + require.Equal(expectedBytes, bytes) + }) + } +} diff --git a/consensus/dummy/dynamic_fees.go b/consensus/dummy/dynamic_fees.go index 0d3355a97f..0b066203c4 100644 --- a/consensus/dummy/dynamic_fees.go +++ b/consensus/dummy/dynamic_fees.go @@ -4,150 +4,112 @@ package dummy import ( - "encoding/binary" + "errors" "fmt" "math/big" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/params/extras" + "github.com/ava-labs/coreth/plugin/evm/ap4" + "github.com/ava-labs/coreth/plugin/evm/header" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/common/math" ) 
+const ApricotPhase3BlockGasFee = 1_000_000 + var ( + MaxUint256Plus1 = new(big.Int).Lsh(common.Big1, 256) + MaxUint256 = new(big.Int).Sub(MaxUint256Plus1, common.Big1) + ApricotPhase3MinBaseFee = big.NewInt(params.ApricotPhase3MinBaseFee) ApricotPhase3MaxBaseFee = big.NewInt(params.ApricotPhase3MaxBaseFee) - ApricotPhase4MinBaseFee = big.NewInt(params.ApricotPhase4MinBaseFee) - ApricotPhase4MaxBaseFee = big.NewInt(params.ApricotPhase4MaxBaseFee) + ApricotPhase4MinBaseFee = big.NewInt(ap4.MinBaseFee) + ApricotPhase4MaxBaseFee = big.NewInt(ap4.MaxBaseFee) ApricotPhase3InitialBaseFee = big.NewInt(params.ApricotPhase3InitialBaseFee) EtnaMinBaseFee = big.NewInt(params.EtnaMinBaseFee) - ApricotPhase4BaseFeeChangeDenominator = new(big.Int).SetUint64(params.ApricotPhase4BaseFeeChangeDenominator) + ApricotPhase3BaseFeeChangeDenominator = new(big.Int).SetUint64(params.ApricotPhase3BaseFeeChangeDenominator) ApricotPhase5BaseFeeChangeDenominator = new(big.Int).SetUint64(params.ApricotPhase5BaseFeeChangeDenominator) - ApricotPhase3BlockGasFee uint64 = 1_000_000 - ApricotPhase4MinBlockGasCost = new(big.Int).Set(common.Big0) - ApricotPhase4MaxBlockGasCost = big.NewInt(1_000_000) - ApricotPhase4BlockGasCostStep = big.NewInt(50_000) - ApricotPhase4TargetBlockRate uint64 = 2 // in seconds - ApricotPhase5BlockGasCostStep = big.NewInt(200_000) - rollupWindow uint64 = 10 + errEstimateBaseFeeWithoutActivation = errors.New("cannot estimate base fee for chain without apricot phase 3 scheduled") ) -// CalcBaseFee takes the previous header and the timestamp of its child block -// and calculates the expected base fee as well as the encoding of the past -// pricing information for the child block. 
-// CalcBaseFee should only be called if [timestamp] >= [config.ApricotPhase3Timestamp] -func CalcBaseFee(config *params.ChainConfig, parent *types.Header, timestamp uint64) ([]byte, *big.Int, error) { - // If the current block is the first EIP-1559 block, or it is the genesis block - // return the initial slice and initial base fee. - var ( - configExtra = params.GetExtra(config) - isApricotPhase3 = configExtra.IsApricotPhase3(parent.Time) - isApricotPhase4 = configExtra.IsApricotPhase4(parent.Time) - isApricotPhase5 = configExtra.IsApricotPhase5(parent.Time) - isEtna = configExtra.IsEtna(parent.Time) - ) - if !isApricotPhase3 || parent.Number.Cmp(common.Big0) == 0 { - initialSlice := make([]byte, params.DynamicFeeExtraDataSize) - initialBaseFee := big.NewInt(params.ApricotPhase3InitialBaseFee) - return initialSlice, initialBaseFee, nil +// CalcExtraPrefix takes the previous header and the timestamp of its child +// block and calculates the expected extra prefix for the child block. +func CalcExtraPrefix( + config *params.ChainConfig, + parent *types.Header, + timestamp uint64, +) ([]byte, error) { + configExtra := params.GetExtra(config) + switch { + case configExtra.IsApricotPhase3(timestamp): + window, err := calcFeeWindow(configExtra, parent, timestamp) + if err != nil { + return nil, fmt.Errorf("failed to calculate fee window: %w", err) + } + return window.Bytes(), nil + default: + // Prior to AP3 there was no expected extra prefix. + return nil, nil } +} - if uint64(len(parent.Extra)) < params.DynamicFeeExtraDataSize { - return nil, nil, fmt.Errorf("expected length of parent extra data to be %d, but found %d", params.DynamicFeeExtraDataSize, len(parent.Extra)) +// CalcBaseFee takes the previous header and the timestamp of its child block +// and calculates the expected base fee for the child block. +// +// Prior to AP3, the returned base fee will be nil. 
+func CalcBaseFee(config *params.ChainConfig, parent *types.Header, timestamp uint64) (*big.Int, error) { + configExtra := params.GetExtra(config) + switch { + case configExtra.IsApricotPhase3(timestamp): + return calcBaseFeeWithWindow(configExtra, parent, timestamp) + default: + // Prior to AP3 the expected base fee is nil. + return nil, nil } - dynamicFeeWindow := parent.Extra[:params.DynamicFeeExtraDataSize] +} - if timestamp < parent.Time { - return nil, nil, fmt.Errorf("cannot calculate base fee for timestamp (%d) prior to parent timestamp (%d)", timestamp, parent.Time) +// calcBaseFeeWithWindow should only be called if `timestamp` >= `config.ApricotPhase3Timestamp` +func calcBaseFeeWithWindow(config *extras.ChainConfig, parent *types.Header, timestamp uint64) (*big.Int, error) { + // If the current block is the first EIP-1559 block, or it is the genesis block + // return the initial slice and initial base fee. + if !config.IsApricotPhase3(parent.Time) || parent.Number.Cmp(common.Big0) == 0 { + return big.NewInt(params.ApricotPhase3InitialBaseFee), nil } - roll := timestamp - parent.Time - // roll the window over by the difference between the timestamps to generate - // the new rollup window. 
- newRollupWindow, err := rollLongWindow(dynamicFeeWindow, int(roll)) + dynamicFeeWindow, err := calcFeeWindow(config, parent, timestamp) if err != nil { - return nil, nil, err + return nil, err } - // If AP5, use a less responsive [BaseFeeChangeDenominator] and a higher gas + // If AP5, use a less responsive BaseFeeChangeDenominator and a higher gas // block limit var ( - baseFee = new(big.Int).Set(parent.BaseFee) - baseFeeChangeDenominator = ApricotPhase4BaseFeeChangeDenominator - parentGasTarget = params.ApricotPhase3TargetGas + isApricotPhase5 = config.IsApricotPhase5(parent.Time) + baseFeeChangeDenominator = ApricotPhase3BaseFeeChangeDenominator + parentGasTarget uint64 = params.ApricotPhase3TargetGas ) if isApricotPhase5 { baseFeeChangeDenominator = ApricotPhase5BaseFeeChangeDenominator parentGasTarget = params.ApricotPhase5TargetGas } - parentGasTargetBig := new(big.Int).SetUint64(parentGasTarget) - - // Add in the gas used by the parent block in the correct place - // If the parent consumed gas within the rollup window, add the consumed - // gas in. - if roll < rollupWindow { - var blockGasCost, parentExtraStateGasUsed uint64 - switch { - case isApricotPhase5: - // [blockGasCost] has been removed in AP5, so it is left as 0. - - // At the start of a new network, the parent - // may not have a populated [ExtDataGasUsed]. - if parent.ExtDataGasUsed != nil { - parentExtraStateGasUsed = parent.ExtDataGasUsed.Uint64() - } - case isApricotPhase4: - // The [blockGasCost] is paid by the effective tips in the block using - // the block's value of [baseFee]. - blockGasCost = calcBlockGasCost( - ApricotPhase4TargetBlockRate, - ApricotPhase4MinBlockGasCost, - ApricotPhase4MaxBlockGasCost, - ApricotPhase4BlockGasCostStep, - parent.BlockGasCost, - parent.Time, timestamp, - ).Uint64() - - // On the boundary of AP3 and AP4 or at the start of a new network, the parent - // may not have a populated [ExtDataGasUsed]. 
- if parent.ExtDataGasUsed != nil { - parentExtraStateGasUsed = parent.ExtDataGasUsed.Uint64() - } - default: - blockGasCost = ApricotPhase3BlockGasFee - } - - // Compute the new state of the gas rolling window. - addedGas, overflow := math.SafeAdd(parent.GasUsed, parentExtraStateGasUsed) - if overflow { - addedGas = math.MaxUint64 - } - - // Only add the [blockGasCost] to the gas used if it isn't AP5 - if !isApricotPhase5 { - addedGas, overflow = math.SafeAdd(addedGas, blockGasCost) - if overflow { - addedGas = math.MaxUint64 - } - } - - slot := rollupWindow - 1 - roll - start := slot * wrappers.LongLen - updateLongWindow(newRollupWindow, start, addedGas) - } // Calculate the amount of gas consumed within the rollup window. - totalGas := sumLongWindow(newRollupWindow, int(rollupWindow)) - + var ( + baseFee = new(big.Int).Set(parent.BaseFee) + totalGas = dynamicFeeWindow.Sum() + ) if totalGas == parentGasTarget { - return newRollupWindow, baseFee, nil + return baseFee, nil } - num := new(big.Int) - + var ( + num = new(big.Int) + parentGasTargetBig = new(big.Int).SetUint64(parentGasTarget) + ) if totalGas > parentGasTarget { // If the parent block used more gas than its target, the baseFee should increase. num.SetUint64(totalGas - parentGasTarget) @@ -164,41 +126,139 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header, timestamp uin num.Div(num, parentGasTargetBig) num.Div(num, baseFeeChangeDenominator) baseFeeDelta := math.BigMax(num, common.Big1) - // If [roll] is greater than [rollupWindow], apply the state transition to the base fee to account - // for the interval during which no blocks were produced. - // We use roll/rollupWindow, so that the transition is applied for every [rollupWindow] seconds - // that has elapsed between the parent and this block. 
- if roll > rollupWindow { - // Note: roll/rollupWindow must be greater than 1 since we've checked that roll > rollupWindow - baseFeeDelta = new(big.Int).Mul(baseFeeDelta, new(big.Int).SetUint64(roll/rollupWindow)) + + if timestamp < parent.Time { + // This should never happen as the fee window calculations should + // have already failed, but it is kept for clarity. + return nil, fmt.Errorf("cannot calculate base fee for timestamp %d prior to parent timestamp %d", + timestamp, + parent.Time, + ) + } + + // If timeElapsed is greater than [params.RollupWindow], apply the + // state transition to the base fee to account for the interval during + // which no blocks were produced. + // + // We use timeElapsed/params.RollupWindow, so that the transition is + // applied for every [params.RollupWindow] seconds that has elapsed + // between the parent and this block. + var ( + timeElapsed = timestamp - parent.Time + windowsElapsed = timeElapsed / params.RollupWindow + ) + if windowsElapsed > 1 { + bigWindowsElapsed := new(big.Int).SetUint64(windowsElapsed) + // Because baseFeeDelta could actually be [common.Big1], we must not + // modify the existing value of `baseFeeDelta` but instead allocate + // a new one. 
+ baseFeeDelta = new(big.Int).Mul(baseFeeDelta, bigWindowsElapsed) } baseFee.Sub(baseFee, baseFeeDelta) } // Ensure that the base fee does not increase/decrease outside of the bounds switch { - case isEtna: - baseFee = selectBigWithinBounds(EtnaMinBaseFee, baseFee, nil) + case config.IsEtna(parent.Time): + baseFee = selectBigWithinBounds(EtnaMinBaseFee, baseFee, MaxUint256) case isApricotPhase5: - baseFee = selectBigWithinBounds(ApricotPhase4MinBaseFee, baseFee, nil) - case isApricotPhase4: + baseFee = selectBigWithinBounds(ApricotPhase4MinBaseFee, baseFee, MaxUint256) + case config.IsApricotPhase4(parent.Time): baseFee = selectBigWithinBounds(ApricotPhase4MinBaseFee, baseFee, ApricotPhase4MaxBaseFee) default: baseFee = selectBigWithinBounds(ApricotPhase3MinBaseFee, baseFee, ApricotPhase3MaxBaseFee) } - return newRollupWindow, baseFee, nil + return baseFee, nil } -// EstimateNextBaseFee attempts to estimate the next base fee based on a block with [parent] being built at -// [timestamp]. -// If [timestamp] is less than the timestamp of [parent], then it uses the same timestamp as parent. -// Warning: This function should only be used in estimation and should not be used when calculating the canonical -// base fee for a subsequent block. -func EstimateNextBaseFee(config *params.ChainConfig, parent *types.Header, timestamp uint64) ([]byte, *big.Int, error) { +// calcFeeWindow takes the previous header and the timestamp of its child block +// and calculates the expected fee window. +// +// calcFeeWindow should only be called if timestamp >= config.ApricotPhase3Timestamp +func calcFeeWindow( + config *extras.ChainConfig, + parent *types.Header, + timestamp uint64, +) (DynamicFeeWindow, error) { + // If the current block is the first EIP-1559 block, or it is the genesis block + // return the initial window. 
+ if !config.IsApricotPhase3(parent.Time) || parent.Number.Cmp(common.Big0) == 0 { + return DynamicFeeWindow{}, nil + } + + dynamicFeeWindow, err := ParseDynamicFeeWindow(parent.Extra) + if err != nil { + return DynamicFeeWindow{}, err + } + if timestamp < parent.Time { - timestamp = parent.Time + return DynamicFeeWindow{}, fmt.Errorf("cannot calculate fee window for timestamp %d prior to parent timestamp %d", + timestamp, + parent.Time, + ) } + timeElapsed := timestamp - parent.Time + + // Add in parent's consumed gas + var blockGasCost, parentExtraStateGasUsed uint64 + switch { + case config.IsApricotPhase5(parent.Time): + // blockGasCost is not included in the fee window after AP5, so it is + // left as 0. + + // At the start of a new network, the parent + // may not have a populated ExtDataGasUsed. + if parent.ExtDataGasUsed != nil { + parentExtraStateGasUsed = parent.ExtDataGasUsed.Uint64() + } + case config.IsApricotPhase4(parent.Time): + // The blockGasCost is paid by the effective tips in the block using + // the block's value of baseFee. + // + // Although the child block may be in AP5 here, the blockGasCost is + // still calculated using the AP4 step. This is different than the + // actual BlockGasCost calculation used for the child block. This + // behavior is kept to preserve the original behavior of this function. + blockGasCost = header.BlockGasCostWithStep( + parent.BlockGasCost, + ap4.BlockGasCostStep, + timeElapsed, + ) + + // On the boundary of AP3 and AP4 or at the start of a new network, the + // parent may not have a populated ExtDataGasUsed. + if parent.ExtDataGasUsed != nil { + parentExtraStateGasUsed = parent.ExtDataGasUsed.Uint64() + } + default: + blockGasCost = ApricotPhase3BlockGasFee + } + + // Compute the new state of the gas rolling window. + dynamicFeeWindow.Add(parent.GasUsed, parentExtraStateGasUsed, blockGasCost) + + // roll the window over by the timeElapsed to generate the new rollup + // window. 
+ dynamicFeeWindow.Shift(timeElapsed) + return dynamicFeeWindow, nil +} + +// EstimateNextBaseFee attempts to estimate the base fee of a block built at +// `timestamp` on top of `parent`. +// +// If timestamp is before parent.Time or the AP3 activation time, then timestamp +// is set to the maximum of parent.Time and the AP3 activation time. +// +// Warning: This function should only be used in estimation and should not be +// used when calculating the canonical base fee for a block. +func EstimateNextBaseFee(config *params.ChainConfig, parent *types.Header, timestamp uint64) (*big.Int, error) { + configExtra := params.GetExtra(config) + if configExtra.ApricotPhase3BlockTimestamp == nil { + return nil, errEstimateBaseFeeWithoutActivation + } + + timestamp = max(timestamp, parent.Time, *configExtra.ApricotPhase3BlockTimestamp) return CalcBaseFee(config, parent, timestamp) } @@ -216,113 +276,6 @@ func selectBigWithinBounds(lowerBound, value, upperBound *big.Int) *big.Int { } } -// rollWindow rolls the longs within [consumptionWindow] over by [roll] places. 
-// For example, if there are 4 longs encoded in a 32 byte slice, rollWindow would -// have the following effect: -// Original: -// [1, 2, 3, 4] -// Roll = 0 -// [1, 2, 3, 4] -// Roll = 1 -// [2, 3, 4, 0] -// Roll = 2 -// [3, 4, 0, 0] -// Roll = 3 -// [4, 0, 0, 0] -// Roll >= 4 -// [0, 0, 0, 0] -// Assumes that [roll] is greater than or equal to 0 -func rollWindow(consumptionWindow []byte, size, roll int) ([]byte, error) { - if len(consumptionWindow)%size != 0 { - return nil, fmt.Errorf("expected consumption window length (%d) to be a multiple of size (%d)", len(consumptionWindow), size) - } - - // Note: make allocates a zeroed array, so we are guaranteed - // that what we do not copy into, will be set to 0 - res := make([]byte, len(consumptionWindow)) - bound := roll * size - if bound > len(consumptionWindow) { - return res, nil - } - copy(res[:], consumptionWindow[roll*size:]) - return res, nil -} - -func rollLongWindow(consumptionWindow []byte, roll int) ([]byte, error) { - // Passes in [wrappers.LongLen] as the size of the individual value to be rolled over - // so that it can be used to roll an array of long values. - return rollWindow(consumptionWindow, wrappers.LongLen, roll) -} - -// sumLongWindow sums [numLongs] encoded in [window]. Assumes that the length of [window] -// is sufficient to contain [numLongs] or else this function panics. -// If an overflow occurs, while summing the contents, the maximum uint64 value is returned. -func sumLongWindow(window []byte, numLongs int) uint64 { - var ( - sum uint64 = 0 - overflow bool - ) - for i := 0; i < numLongs; i++ { - // If an overflow occurs while summing the elements of the window, return the maximum - // uint64 value immediately. - sum, overflow = math.SafeAdd(sum, binary.BigEndian.Uint64(window[wrappers.LongLen*i:])) - if overflow { - return math.MaxUint64 - } - } - return sum -} - -// updateLongWindow adds [gasConsumed] in at index within [window]. -// Assumes that [index] has already been validated. 
-// If an overflow occurs, the maximum uint64 value is used. -func updateLongWindow(window []byte, start uint64, gasConsumed uint64) { - prevGasConsumed := binary.BigEndian.Uint64(window[start:]) - - totalGasConsumed, overflow := math.SafeAdd(prevGasConsumed, gasConsumed) - if overflow { - totalGasConsumed = math.MaxUint64 - } - binary.BigEndian.PutUint64(window[start:], totalGasConsumed) -} - -// calcBlockGasCost calculates the required block gas cost. If [parentTime] -// > [currentTime], the timeElapsed will be treated as 0. -func calcBlockGasCost( - targetBlockRate uint64, - minBlockGasCost *big.Int, - maxBlockGasCost *big.Int, - blockGasCostStep *big.Int, - parentBlockGasCost *big.Int, - parentTime, currentTime uint64, -) *big.Int { - // Handle AP3/AP4 boundary by returning the minimum value as the boundary. - if parentBlockGasCost == nil { - return new(big.Int).Set(minBlockGasCost) - } - - // Treat an invalid parent/current time combination as 0 elapsed time. - var timeElapsed uint64 - if parentTime <= currentTime { - timeElapsed = currentTime - parentTime - } - - var blockGasCost *big.Int - if timeElapsed < targetBlockRate { - blockGasCostDelta := new(big.Int).Mul(blockGasCostStep, new(big.Int).SetUint64(targetBlockRate-timeElapsed)) - blockGasCost = new(big.Int).Add(parentBlockGasCost, blockGasCostDelta) - } else { - blockGasCostDelta := new(big.Int).Mul(blockGasCostStep, new(big.Int).SetUint64(timeElapsed-targetBlockRate)) - blockGasCost = new(big.Int).Sub(parentBlockGasCost, blockGasCostDelta) - } - - blockGasCost = selectBigWithinBounds(minBlockGasCost, blockGasCost, maxBlockGasCost) - if !blockGasCost.IsUint64() { - blockGasCost = new(big.Int).SetUint64(math.MaxUint64) - } - return blockGasCost -} - // MinRequiredTip is the estimated minimum tip a transaction would have // needed to pay to be included in a given block (assuming it paid a tip // proportional to its gas usage). 
In reality, there is no minimum tip that diff --git a/consensus/dummy/dynamic_fees_test.go b/consensus/dummy/dynamic_fees_test.go index 69f148eea4..59021e69cc 100644 --- a/consensus/dummy/dynamic_fees_test.go +++ b/consensus/dummy/dynamic_fees_test.go @@ -4,12 +4,13 @@ package dummy import ( - "encoding/binary" "math/big" "testing" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/params/extras" + "github.com/ava-labs/coreth/plugin/evm/ap4" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/common/math" "github.com/ava-labs/libevm/log" @@ -17,83 +18,6 @@ import ( "github.com/stretchr/testify/require" ) -func testRollup(t *testing.T, longs []uint64, roll int) { - slice := make([]byte, len(longs)*8) - numLongs := len(longs) - for i := 0; i < numLongs; i++ { - binary.BigEndian.PutUint64(slice[8*i:], longs[i]) - } - - newSlice, err := rollLongWindow(slice, roll) - if err != nil { - t.Fatal(err) - } - // numCopies is the number of longs that should have been copied over from the previous - // slice as opposed to being left empty. 
- numCopies := numLongs - roll - for i := 0; i < numLongs; i++ { - // Extract the long value that is encoded at position [i] in [newSlice] - num := binary.BigEndian.Uint64(newSlice[8*i:]) - // If the current index is past the point where we should have copied the value - // over from the previous slice, assert that the value encoded in [newSlice] - // is 0 - if i >= numCopies { - if num != 0 { - t.Errorf("Expected num encoded in newSlice at position %d to be 0, but found %d", i, num) - } - } else { - // Otherwise, check that the value was copied over correctly - prevIndex := i + roll - prevNum := longs[prevIndex] - if prevNum != num { - t.Errorf("Expected num encoded in new slice at position %d to be %d, but found %d", i, prevNum, num) - } - } - } -} - -func TestRollupWindow(t *testing.T) { - type test struct { - longs []uint64 - roll int - } - - var tests []test = []test{ - { - []uint64{1, 2, 3, 4}, - 0, - }, - { - []uint64{1, 2, 3, 4}, - 1, - }, - { - []uint64{1, 2, 3, 4}, - 2, - }, - { - []uint64{1, 2, 3, 4}, - 3, - }, - { - []uint64{1, 2, 3, 4}, - 4, - }, - { - []uint64{1, 2, 3, 4}, - 5, - }, - { - []uint64{121, 232, 432}, - 2, - }, - } - - for _, test := range tests { - testRollup(t, test.longs, test.roll) - } -} - type blockDefinition struct { timestamp uint64 gasUsed uint64 @@ -206,7 +130,11 @@ func testDynamicFeesStaysWithinRange(t *testing.T, test test) { } for index, block := range blocks[1:] { - nextExtraData, nextBaseFee, err := CalcBaseFee(params.TestApricotPhase3Config, header, block.timestamp) + nextExtraData, err := CalcExtraPrefix(params.TestApricotPhase3Config, header, block.timestamp) + if err != nil { + t.Fatalf("Failed to calculate extra prefix at index %d: %s", index, err) + } + nextBaseFee, err := CalcBaseFee(params.TestApricotPhase3Config, header, block.timestamp) if err != nil { t.Fatalf("Failed to calculate base fee at index %d: %s", index, err) } @@ -227,53 +155,6 @@ func testDynamicFeesStaysWithinRange(t *testing.T, test test) { } } 
-func TestLongWindow(t *testing.T) { - longs := []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} - sumLongs := uint64(0) - longWindow := make([]byte, 10*8) - for i, long := range longs { - sumLongs = sumLongs + long - binary.BigEndian.PutUint64(longWindow[i*8:], long) - } - - sum := sumLongWindow(longWindow, 10) - if sum != sumLongs { - t.Fatalf("Expected sum to be %d but found %d", sumLongs, sum) - } - - for i := uint64(0); i < 10; i++ { - updateLongWindow(longWindow, i*8, i) - sum = sumLongWindow(longWindow, 10) - sumLongs += i - - if sum != sumLongs { - t.Fatalf("Expected sum to be %d but found %d (iteration: %d)", sumLongs, sum, i) - } - } -} - -func TestLongWindowOverflow(t *testing.T) { - longs := []uint64{0, 0, 0, 0, 0, 0, 0, 0, 2, math.MaxUint64 - 1} - longWindow := make([]byte, 10*8) - for i, long := range longs { - binary.BigEndian.PutUint64(longWindow[i*8:], long) - } - - sum := sumLongWindow(longWindow, 10) - if sum != math.MaxUint64 { - t.Fatalf("Expected sum to be maxUint64 (%d), but found %d", uint64(math.MaxUint64), sum) - } - - for i := uint64(0); i < 10; i++ { - updateLongWindow(longWindow, i*8, i) - sum = sumLongWindow(longWindow, 10) - - if sum != math.MaxUint64 { - t.Fatalf("Expected sum to be maxUint64 (%d), but found %d", uint64(math.MaxUint64), sum) - } - } -} - func TestSelectBigWithinBounds(t *testing.T) { type test struct { lower, value, upper, expected *big.Int @@ -411,7 +292,9 @@ func TestCalcBaseFeeAP4(t *testing.T) { for index, event := range events { block := event.block - nextExtraData, nextBaseFee, err := CalcBaseFee(params.TestApricotPhase4Config, header, block.timestamp) + nextExtraData, err := CalcExtraPrefix(params.TestApricotPhase4Config, header, block.timestamp) + assert.NoError(t, err) + nextBaseFee, err := CalcBaseFee(params.TestApricotPhase4Config, header, block.timestamp) assert.NoError(t, err) log.Info("Update", "baseFee", nextBaseFee) header = &types.Header{ @@ -422,7 +305,9 @@ func TestCalcBaseFeeAP4(t *testing.T) { Extra: 
nextExtraData, } - nextExtraData, nextBaseFee, err = CalcBaseFee(params.TestApricotPhase4Config, extDataHeader, block.timestamp) + nextExtraData, err = CalcExtraPrefix(params.TestApricotPhase4Config, extDataHeader, block.timestamp) + assert.NoError(t, err) + nextBaseFee, err = CalcBaseFee(params.TestApricotPhase4Config, extDataHeader, block.timestamp) assert.NoError(t, err) log.Info("Update", "baseFee (w/extData)", nextBaseFee) extDataHeader = &types.Header{ @@ -438,108 +323,6 @@ func TestCalcBaseFeeAP4(t *testing.T) { } } -func TestCalcBlockGasCost(t *testing.T) { - tests := map[string]struct { - parentBlockGasCost *big.Int - parentTime, currentTime uint64 - - expected *big.Int - }{ - "Nil parentBlockGasCost": { - parentBlockGasCost: nil, - parentTime: 1, - currentTime: 1, - expected: ApricotPhase4MinBlockGasCost, - }, - "Same timestamp from 0": { - parentBlockGasCost: big.NewInt(0), - parentTime: 1, - currentTime: 1, - expected: big.NewInt(100_000), - }, - "1s from 0": { - parentBlockGasCost: big.NewInt(0), - parentTime: 1, - currentTime: 2, - expected: big.NewInt(50_000), - }, - "Same timestamp from non-zero": { - parentBlockGasCost: big.NewInt(50_000), - parentTime: 1, - currentTime: 1, - expected: big.NewInt(150_000), - }, - "0s Difference (MAX)": { - parentBlockGasCost: big.NewInt(1_000_000), - parentTime: 1, - currentTime: 1, - expected: big.NewInt(1_000_000), - }, - "1s Difference (MAX)": { - parentBlockGasCost: big.NewInt(1_000_000), - parentTime: 1, - currentTime: 2, - expected: big.NewInt(1_000_000), - }, - "2s Difference": { - parentBlockGasCost: big.NewInt(900_000), - parentTime: 1, - currentTime: 3, - expected: big.NewInt(900_000), - }, - "3s Difference": { - parentBlockGasCost: big.NewInt(1_000_000), - parentTime: 1, - currentTime: 4, - expected: big.NewInt(950_000), - }, - "10s Difference": { - parentBlockGasCost: big.NewInt(1_000_000), - parentTime: 1, - currentTime: 11, - expected: big.NewInt(600_000), - }, - "20s Difference": { - 
parentBlockGasCost: big.NewInt(1_000_000), - parentTime: 1, - currentTime: 21, - expected: big.NewInt(100_000), - }, - "22s Difference": { - parentBlockGasCost: big.NewInt(1_000_000), - parentTime: 1, - currentTime: 23, - expected: big.NewInt(0), - }, - "23s Difference": { - parentBlockGasCost: big.NewInt(1_000_000), - parentTime: 1, - currentTime: 24, - expected: big.NewInt(0), - }, - "-1s Difference": { - parentBlockGasCost: big.NewInt(50_000), - parentTime: 1, - currentTime: 0, - expected: big.NewInt(150_000), - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - assert.Zero(t, test.expected.Cmp(calcBlockGasCost( - ApricotPhase4TargetBlockRate, - ApricotPhase4MinBlockGasCost, - ApricotPhase4MaxBlockGasCost, - ApricotPhase4BlockGasCostStep, - test.parentBlockGasCost, - test.parentTime, - test.currentTime, - ))) - }) - } -} - func TestDynamicFeesEtna(t *testing.T) { require := require.New(t) header := &types.Header{ @@ -547,7 +330,9 @@ func TestDynamicFeesEtna(t *testing.T) { } timestamp := uint64(1) - extra, nextBaseFee, err := CalcBaseFee(params.TestEtnaChainConfig, header, timestamp) + extra, err := CalcExtraPrefix(params.TestEtnaChainConfig, header, timestamp) + require.NoError(err) + nextBaseFee, err := CalcBaseFee(params.TestEtnaChainConfig, header, timestamp) require.NoError(err) // Genesis matches the initial base fee require.Equal(params.ApricotPhase3InitialBaseFee, nextBaseFee.Int64()) @@ -559,11 +344,11 @@ func TestDynamicFeesEtna(t *testing.T) { BaseFee: nextBaseFee, Extra: extra, } - _, nextBaseFee, err = CalcBaseFee(params.TestEtnaChainConfig, header, timestamp) + nextBaseFee, err = CalcBaseFee(params.TestEtnaChainConfig, header, timestamp) require.NoError(err) // After some time has passed in the Etna phase, the base fee should drop // lower than the prior base fee minimum. 
- require.Less(nextBaseFee.Int64(), params.ApricotPhase4MinBaseFee) + require.Less(nextBaseFee.Int64(), int64(ap4.MinBaseFee)) } func TestCalcBaseFeeRegression(t *testing.T) { @@ -578,7 +363,77 @@ func TestCalcBaseFeeRegression(t *testing.T) { Extra: make([]byte, params.DynamicFeeExtraDataSize), } - _, _, err := CalcBaseFee(params.TestChainConfig, parentHeader, timestamp) + _, err := CalcBaseFee(params.TestChainConfig, parentHeader, timestamp) require.NoError(t, err) require.Equalf(t, 0, common.Big1.Cmp(big.NewInt(1)), "big1 should be 1, got %s", common.Big1) } + +func TestEstimateNextBaseFee(t *testing.T) { + tests := []struct { + name string + + upgrades extras.NetworkUpgrades + + parentTime uint64 + parentNumber int64 + parentExtra []byte + parentBaseFee *big.Int + parentGasUsed uint64 + parentExtDataGasUsed *big.Int + + timestamp uint64 + + want *big.Int + wantErr error + }{ + { + name: "ap3", + upgrades: params.GetExtra(params.TestApricotPhase3Config).NetworkUpgrades, + parentNumber: 1, + parentExtra: (&DynamicFeeWindow{}).Bytes(), + parentBaseFee: big.NewInt(params.ApricotPhase3MaxBaseFee), + timestamp: 1, + want: func() *big.Int { + const ( + gasTarget = params.ApricotPhase3TargetGas + gasUsed = ApricotPhase3BlockGasFee + amountUnderTarget = gasTarget - gasUsed + parentBaseFee = params.ApricotPhase3MaxBaseFee + smoothingFactor = params.ApricotPhase3BaseFeeChangeDenominator + baseFeeFractionUnderTarget = amountUnderTarget * parentBaseFee / gasTarget + delta = baseFeeFractionUnderTarget / smoothingFactor + baseFee = parentBaseFee - delta + ) + return big.NewInt(baseFee) + }(), + }, + { + name: "ap3_not_scheduled", + upgrades: params.GetExtra(params.TestApricotPhase2Config).NetworkUpgrades, + wantErr: errEstimateBaseFeeWithoutActivation, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + config := params.WithExtra( + ¶ms.ChainConfig{}, + &extras.ChainConfig{ + NetworkUpgrades: test.upgrades, + }) + 
parentHeader := &types.Header{ + Time: test.parentTime, + Number: big.NewInt(test.parentNumber), + Extra: test.parentExtra, + BaseFee: test.parentBaseFee, + GasUsed: test.parentGasUsed, + ExtDataGasUsed: test.parentExtDataGasUsed, + } + + got, err := EstimateNextBaseFee(config, parentHeader, test.timestamp) + require.ErrorIs(err, test.wantErr) + require.Equal(test.want, got) + }) + } +} diff --git a/core/blockchain.go b/core/blockchain.go index 6998fbb57a..6b2b7d4cf5 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -46,7 +46,6 @@ import ( "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/internal/version" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/triedb/hashdb" "github.com/ava-labs/coreth/triedb/pathdb" @@ -56,6 +55,7 @@ import ( "github.com/ava-labs/libevm/ethdb" "github.com/ava-labs/libevm/event" "github.com/ava-labs/libevm/log" + "github.com/ava-labs/libevm/metrics" "github.com/ava-labs/libevm/trie" "github.com/ava-labs/libevm/triedb" ) diff --git a/core/chain_makers.go b/core/chain_makers.go index 9a2d833cd7..6e3ba925f1 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -392,13 +392,17 @@ func (cm *chainMaker) makeHeader(parent *types.Block, gap uint64, state *state.S Number: new(big.Int).Add(parent.Number(), common.Big1), Time: time, } - if configExtra.IsApricotPhase3(time) { - var err error - header.Extra, header.BaseFee, err = dummy.CalcBaseFee(cm.config, parent.Header(), time) - if err != nil { - panic(err) - } + + var err error + header.Extra, err = dummy.CalcExtraPrefix(cm.config, parent.Header(), time) + if err != nil { + panic(err) } + header.BaseFee, err = dummy.CalcBaseFee(cm.config, parent.Header(), time) + if err != nil { + panic(err) + } + if cm.config.IsCancun(header.Number, header.Time) { var ( parentExcessBlobGas uint64 diff --git a/core/main_test.go b/core/main_test.go index 
1d0e299f4a..6ea70c4b6c 100644 --- a/core/main_test.go +++ b/core/main_test.go @@ -15,7 +15,7 @@ func TestMain(m *testing.M) { opts := []goleak.Option{ // No good way to shut down these goroutines: goleak.IgnoreTopFunction("github.com/ava-labs/coreth/core/state/snapshot.(*diskLayer).generate"), - goleak.IgnoreTopFunction("github.com/ava-labs/coreth/metrics.(*meterArbiter).tick"), + goleak.IgnoreTopFunction("github.com/ava-labs/libevm/metrics.(*meterArbiter).tick"), goleak.IgnoreTopFunction("github.com/syndtr/goleveldb/leveldb.(*DB).mpoolDrain"), } goleak.VerifyTestMain(m, opts...) diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 290a565b19..6fa4741a2e 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -32,9 +32,9 @@ import ( "encoding/binary" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/crypto" + "github.com/ava-labs/libevm/metrics" ) // The fields below define the low level database schema prefixing. 
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index b025498e0b..917bbf368c 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -35,12 +35,12 @@ import ( "time" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/libevm/common" ethsnapshot "github.com/ava-labs/libevm/core/state/snapshot" "github.com/ava-labs/libevm/ethdb" "github.com/ava-labs/libevm/libevm/stateconf" "github.com/ava-labs/libevm/log" + "github.com/ava-labs/libevm/metrics" "github.com/ava-labs/libevm/triedb" ) @@ -920,6 +920,8 @@ func (t *Tree) disklayer() *diskLayer { case *diskLayer: return layer case *diffLayer: + layer.lock.RLock() + defer layer.lock.RUnlock() return layer.origin default: panic(fmt.Sprintf("%T: undefined layer", snap)) @@ -951,7 +953,7 @@ func (t *Tree) generating() (bool, error) { return layer.genMarker != nil, nil } -// DiskRoot is a external helper function to return the disk layer root. +// DiskRoot is an external helper function to return the disk layer root. func (t *Tree) DiskRoot() common.Hash { t.lock.Lock() defer t.lock.Unlock() diff --git a/core/state_manager.go b/core/state_manager.go index 73dc6d3806..30b20be86a 100644 --- a/core/state_manager.go +++ b/core/state_manager.go @@ -41,12 +41,12 @@ func init() { } const ( - // tipBufferSize is the number of recent accepted tries to keep in the TrieDB + // TipBufferSize is the number of recent accepted tries to keep in the TrieDB // dirties cache at tip (only applicable in [pruning] mode). // // Keeping extra tries around at tip enables clients to query data from // recent trie roots. 
- tipBufferSize = 32 + TipBufferSize = 32 // flushWindow is the distance to the [commitInterval] when we start // optimistically flushing trie nodes to disk (only applicable in [pruning] @@ -79,7 +79,7 @@ func NewTrieWriter(db TrieDB, config *CacheConfig) TrieWriter { targetCommitSize: common.StorageSize(config.TrieDirtyCommitTarget) * 1024 * 1024, imageCap: 4 * 1024 * 1024, commitInterval: config.CommitInterval, - tipBuffer: NewBoundedBuffer(tipBufferSize, db.Dereference), + tipBuffer: NewBoundedBuffer(TipBufferSize, db.Dereference), } cm.flushStepSize = (cm.memoryCap - cm.targetCommitSize) / common.StorageSize(flushWindow) return cm diff --git a/core/state_manager_test.go b/core/state_manager_test.go index 32f74c2dda..ea91e0d113 100644 --- a/core/state_manager_test.go +++ b/core/state_manager_test.go @@ -53,10 +53,10 @@ func TestCappedMemoryTrieWriter(t *testing.T) { assert.Equal(common.Hash{}, m.LastCommit, "should not have committed block on insert") w.AcceptTrie(block) - if i <= tipBufferSize { + if i <= TipBufferSize { assert.Equal(common.Hash{}, m.LastDereference, "should not have dereferenced block on accept") } else { - assert.Equal(common.BigToHash(big.NewInt(int64(i-tipBufferSize))), m.LastDereference, "should have dereferenced old block on last accept") + assert.Equal(common.BigToHash(big.NewInt(int64(i-TipBufferSize))), m.LastDereference, "should have dereferenced old block on last accept") m.LastDereference = common.Hash{} } if i < int(cacheConfig.CommitInterval) { @@ -77,7 +77,7 @@ func TestNoPruningTrieWriter(t *testing.T) { m := &MockTrieDB{} w := NewTrieWriter(m, &CacheConfig{}) assert := assert.New(t) - for i := 0; i < tipBufferSize+1; i++ { + for i := 0; i < TipBufferSize+1; i++ { bigI := big.NewInt(int64(i)) block := types.NewBlock( &types.Header{ diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 64179f4290..f496935b38 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -372,9 +372,8 @@ 
func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr Time: parent.Time() + 10, UncleHash: types.EmptyUncleHash, } - if params.GetExtra(config).IsApricotPhase3(header.Time) { - header.Extra, header.BaseFee, _ = dummy.CalcBaseFee(config, parent.Header(), header.Time) - } + header.Extra, _ = dummy.CalcExtraPrefix(config, parent.Header(), header.Time) + header.BaseFee, _ = dummy.CalcBaseFee(config, parent.Header(), header.Time) if params.GetExtra(config).IsApricotPhase4(header.Time) { header.BlockGasCost = big.NewInt(0) header.ExtDataGasUsed = big.NewInt(0) diff --git a/core/test_blockchain.go b/core/test_blockchain.go index 254d19ff43..3960a10f5f 100644 --- a/core/test_blockchain.go +++ b/core/test_blockchain.go @@ -67,6 +67,10 @@ var tests = []ChainTest{ "EmptyBlocks", TestEmptyBlocks, }, + { + "ReorgReInsert", + TestReorgReInsert, + }, { "AcceptBlockIdenticalStateRoot", TestAcceptBlockIdenticalStateRoot, @@ -880,9 +884,6 @@ func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, gspec *Genes key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. 
- genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -892,7 +893,6 @@ func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, gspec *Genes Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: types.GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.ToBlock() blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { @@ -902,7 +902,7 @@ func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, gspec *Genes signer := types.HomesteadSigner{} numBlocks := 3 - chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks, 10, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(i int, gen *BlockGen) { // Generate a transaction to create a unique block tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 63b363ca1c..5c945b0a4a 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -45,11 +45,11 @@ import ( "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/event" "github.com/ava-labs/libevm/log" + "github.com/ava-labs/libevm/metrics" "github.com/ava-labs/libevm/rlp" "github.com/holiman/billy" "github.com/holiman/uint256" @@ -410,7 +410,7 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserve txpool.Addres for addr := range p.index { p.recheck(addr, nil) } - _, baseFee, err := dummy.EstimateNextBaseFee( + baseFee, err := dummy.EstimateNextBaseFee( p.chain.Config(), p.head, uint64(time.Now().Unix()), @@ -840,7 +840,7 @@ func (p *BlobPool) Reset(oldHead, newHead 
*types.Header) { if p.chain.Config().IsCancun(p.head.Number, p.head.Time) { p.limbo.finalize(p.chain.CurrentFinalBlock()) } - _, baseFeeBig, err := dummy.EstimateNextBaseFee( + baseFeeBig, err := dummy.EstimateNextBaseFee( p.chain.Config(), p.head, uint64(time.Now().Unix()), diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go index a33dcbd56c..b1958a29b9 100644 --- a/core/txpool/blobpool/blobpool_test.go +++ b/core/txpool/blobpool/blobpool_test.go @@ -121,7 +121,7 @@ func (bc *testBlockChain) CurrentBlock() *types.Header { BaseFee: mid, Extra: make([]byte, params.DynamicFeeExtraDataSize), } - _, baseFee, err := dummy.CalcBaseFee( + baseFee, err := dummy.CalcBaseFee( bc.config, parent, blockTime, ) if err != nil { diff --git a/core/txpool/blobpool/metrics.go b/core/txpool/blobpool/metrics.go index d2d2b7e23c..ccd4dbb574 100644 --- a/core/txpool/blobpool/metrics.go +++ b/core/txpool/blobpool/metrics.go @@ -26,7 +26,7 @@ package blobpool -import "github.com/ava-labs/coreth/metrics" +import "github.com/ava-labs/libevm/metrics" var ( // datacapGauge tracks the user's configured capacity for the blob pool. 
It diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index c8c4d70c72..e25ed902f2 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -41,13 +41,13 @@ import ( "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/utils" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/common/prque" "github.com/ava-labs/libevm/event" "github.com/ava-labs/libevm/log" + "github.com/ava-labs/libevm/metrics" "github.com/holiman/uint256" ) @@ -227,9 +227,6 @@ type LegacyPool struct { signer types.Signer mu sync.RWMutex - // [currentStateLock] is required to allow concurrent access to address nonces - // and balances during reorgs and gossip handling. - currentStateLock sync.Mutex // closed when the transaction pool is stopped. Any goroutine can listen // to this to be notified if it should shut down. generalShutdownChan chan struct{} @@ -685,9 +682,6 @@ func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) erro // validateTx checks whether a transaction is valid according to the consensus // rules and adheres to some heuristic limits of the local node (price and size). 
func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error { - pool.currentStateLock.Lock() - defer pool.currentStateLock.Unlock() - opts := &txpool.ValidationOptionsWithState{ State: pool.currentState, Rules: pool.chainconfig.Rules( @@ -1501,9 +1495,7 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { return } pool.currentHead.Store(newHead) - pool.currentStateLock.Lock() pool.currentState = statedb - pool.currentStateLock.Unlock() pool.pendingNonces = newNoncer(statedb) // Inject any transactions discarded due to reorgs @@ -1516,9 +1508,6 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { // future queue to the set of pending transactions. During this process, all // invalidated transactions (low nonce, low balance) are deleted. func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction { - pool.currentStateLock.Lock() - defer pool.currentStateLock.Unlock() - // Track the promoted transactions to broadcast them at once var promoted []*types.Transaction @@ -1725,9 +1714,6 @@ func (pool *LegacyPool) truncateQueue() { // is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful // to trigger a re-heap is this function func (pool *LegacyPool) demoteUnexecutables() { - pool.currentStateLock.Lock() - defer pool.currentStateLock.Unlock() - // Iterate over all accounts and demote any non-executable transactions gasLimit := pool.currentHead.Load().GasLimit for addr, list := range pool.pending { @@ -1831,7 +1817,7 @@ func (pool *LegacyPool) updateBaseFee() { // assumes lock is already held func (pool *LegacyPool) updateBaseFeeAt(head *types.Header) error { - _, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, head, uint64(time.Now().Unix())) + baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, head, uint64(time.Now().Unix())) if err != nil { return err } diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index 
fc4ecf8bf5..de7ed76955 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -35,10 +35,10 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/event" "github.com/ava-labs/libevm/log" + "github.com/ava-labs/libevm/metrics" ) var ( diff --git a/eth/api_backend.go b/eth/api_backend.go index 4cd3444ef4..629aaa9f5b 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -60,6 +60,10 @@ type EthAPIBackend struct { allowUnfinalizedQueries bool eth *Ethereum gpo *gasprice.Oracle + + // historicalProofQueryWindow is the number of blocks before the last accepted block to be accepted for + // state queries when running archive mode. + historicalProofQueryWindow uint64 } // ChainConfig returns the active chain configuration. @@ -67,6 +71,17 @@ func (b *EthAPIBackend) ChainConfig() *params.ChainConfig { return b.eth.blockchain.Config() } +// IsArchive returns true if the node is running in archive mode, false otherwise. +func (b *EthAPIBackend) IsArchive() bool { + return !b.eth.config.Pruning +} + +// HistoricalProofQueryWindow returns the number of blocks before the last accepted block to be accepted for state queries. +// It returns 0 to indicate to accept any block number for state queries. 
+func (b *EthAPIBackend) HistoricalProofQueryWindow() uint64 { + return b.historicalProofQueryWindow +} + func (b *EthAPIBackend) IsAllowUnfinalizedQueries() bool { return b.allowUnfinalizedQueries } diff --git a/eth/backend.go b/eth/backend.go index 50c22cd232..59ec5bd056 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -263,11 +263,12 @@ func New( } eth.APIBackend = &EthAPIBackend{ - extRPCEnabled: stack.Config().ExtRPCEnabled(), - allowUnprotectedTxs: config.AllowUnprotectedTxs, - allowUnprotectedTxHashes: allowUnprotectedTxHashes, - allowUnfinalizedQueries: config.AllowUnfinalizedQueries, - eth: eth, + extRPCEnabled: stack.Config().ExtRPCEnabled(), + allowUnprotectedTxs: config.AllowUnprotectedTxs, + allowUnprotectedTxHashes: allowUnprotectedTxHashes, + allowUnfinalizedQueries: config.AllowUnfinalizedQueries, + historicalProofQueryWindow: config.HistoricalProofQueryWindow, + eth: eth, } if config.AllowUnprotectedTxs { log.Info("Unprotected transactions allowed") diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 6032bc0e0b..b358e9aef0 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -137,6 +137,11 @@ type Config struct { // AllowUnfinalizedQueries allow unfinalized queries AllowUnfinalizedQueries bool + // HistoricalProofQueryWindow is the number of blocks before the last accepted block to be accepted for state queries. + // For archive nodes, it defaults to 43200 and can be set to 0 to indicate to accept any block query. + // For non-archive nodes, it is forcibly set to the value of [core.TipBufferSize]. + HistoricalProofQueryWindow uint64 + // AllowUnprotectedTxs allow unprotected transactions to be locally issued. // Unprotected transactions are transactions that are signed without EIP-155 // replay protection. 
diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index cd955414a9..1a922797e9 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -45,6 +45,10 @@ var ( errBeyondHistoricalLimit = errors.New("request beyond historical limit") ) +const ( + maxQueryLimit = 100 +) + // txGasAndReward is sorted in ascending order based on reward type txGasAndReward struct { gasUsed uint64 @@ -173,6 +177,9 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks uint64, unresolvedL if blocks < 1 { return common.Big0, nil, nil, nil, nil // returning with no data and no error means there are no retrievable blocks } + if len(rewardPercentiles) > maxQueryLimit { + return common.Big0, nil, nil, nil, fmt.Errorf("%w: over the query limit %d", errInvalidPercentile, maxQueryLimit) + } if blocks > oracle.maxCallBlockHistory { log.Warn("Sanitizing fee history length", "requested", blocks, "truncated", oracle.maxCallBlockHistory) blocks = oracle.maxCallBlockHistory diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go index ca8fff8bf1..206c116b0b 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -252,8 +252,7 @@ func (oracle *Oracle) estimateNextBaseFee(ctx context.Context) (*big.Int, error) // If the block does have a baseFee, calculate the next base fee // based on the current time and add it to the tip to estimate the // total gas price estimate. - _, nextBaseFee, err := dummy.EstimateNextBaseFee(oracle.backend.ChainConfig(), header, oracle.clock.Unix()) - return nextBaseFee, err + return dummy.EstimateNextBaseFee(oracle.backend.ChainConfig(), header, oracle.clock.Unix()) } // SuggestPrice returns an estimated price for legacy transactions. 
diff --git a/eth/tracers/api.go b/eth/tracers/api.go index b439f3171d..83481901cf 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -1108,6 +1108,10 @@ func overrideConfig(original *params.ChainConfig, override *params.ChainConfig) params.GetExtra(copy).EtnaTimestamp = timestamp canon = false } + if timestamp := overrideExtra.FUpgradeTimestamp; timestamp != nil { + params.GetExtra(copy).FUpgradeTimestamp = timestamp + canon = false + } if timestamp := override.CancunTime; timestamp != nil { copy.CancunTime = timestamp canon = false diff --git a/go.mod b/go.mod index e498b18a3e..ac07b891e9 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,10 @@ module github.com/ava-labs/coreth -go 1.23 - -toolchain go1.23.6 +go 1.23.6 require ( github.com/VictoriaMetrics/fastcache v1.12.1 - github.com/ava-labs/avalanchego v1.12.1-0.20250107220127-32f58b4fa9c8 + github.com/ava-labs/avalanchego v1.12.3-0.20250219020546-daac8c8bbd8c github.com/ava-labs/libevm v1.13.14-0.2.0.rc.3 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 @@ -23,7 +21,6 @@ require ( github.com/olekukonko/tablewriter v0.0.5 github.com/prometheus/client_golang v1.16.0 github.com/prometheus/client_model v0.3.0 - github.com/shirou/gopsutil v3.21.11+incompatible github.com/spf13/cast v1.5.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.12.0 @@ -35,7 +32,6 @@ require ( golang.org/x/crypto v0.31.0 golang.org/x/exp v0.0.0-20231127185646-65229373498e golang.org/x/sync v0.10.0 - golang.org/x/sys v0.28.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.22.0 google.golang.org/protobuf v1.34.2 @@ -94,7 +90,6 @@ require ( github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect - github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect 
github.com/pkg/errors v0.9.1 // indirect @@ -104,6 +99,7 @@ require ( github.com/rivo/uniseg v0.2.0 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/status-im/keycard-go v0.2.0 // indirect @@ -126,6 +122,7 @@ require ( go.uber.org/zap v1.26.0 // indirect golang.org/x/mod v0.18.0 // indirect golang.org/x/net v0.33.0 // indirect + golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect gonum.org/v1/gonum v0.11.0 // indirect diff --git a/go.sum b/go.sum index ee1a057217..77c1af95c0 100644 --- a/go.sum +++ b/go.sum @@ -56,8 +56,8 @@ github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.12.1-0.20250107220127-32f58b4fa9c8 h1:qN3MOBHB//Ynhgt5Vys3iVe42Sr0EWSeN18VL3ecXzE= -github.com/ava-labs/avalanchego v1.12.1-0.20250107220127-32f58b4fa9c8/go.mod h1:2B7+E5neLvkOr2zursGhebjU26d4AfB7RazPxBs8hHg= +github.com/ava-labs/avalanchego v1.12.3-0.20250219020546-daac8c8bbd8c h1:LAKh2eokSOtM5TRiMtGnl+vaTJa72dL/i/ZitSMR23w= +github.com/ava-labs/avalanchego v1.12.3-0.20250219020546-daac8c8bbd8c/go.mod h1:5e246cVO15EOZp7q3Dx2wRznTXMUgLrbWgnAd1RNgKU= github.com/ava-labs/libevm v1.13.14-0.2.0.rc.3 h1:1CWGo2icnX9dRqGQl7CFywYGIZWxe+ucy0w8NAsVTWE= github.com/ava-labs/libevm v1.13.14-0.2.0.rc.3/go.mod h1:+Iol+sVQ1KyoBsHf3veyrBmHCXr3xXRWq6ZXkgVfNLU= github.com/aymerick/raymond 
v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= @@ -424,8 +424,6 @@ github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5Vgl github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= diff --git a/internal/ethapi/api.coreth.go b/internal/ethapi/api.coreth.go new file mode 100644 index 0000000000..13cc3f91e3 --- /dev/null +++ b/internal/ethapi/api.coreth.go @@ -0,0 +1,134 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ethapi + +import ( + "context" + "fmt" + "math/big" + + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/libevm/common/hexutil" + "github.com/ava-labs/libevm/common/math" +) + +const ( + nAVAX = 1_000_000_000 + + minBaseFee = params.EtnaMinBaseFee // 1 nAVAX + maxNormalBaseFee = 100 * nAVAX + + minGasTip = 1 // 1 wei + maxNormalGasTip = 20 * nAVAX + + slowFeeNumerator = 19 // 19/20 = 0.95 + fastFeeNumerator = 21 // 21/20 = 1.05 + feeDenominator = 20 +) + +var ( + bigMinBaseFee = big.NewInt(minBaseFee) + bigMaxNormalBaseFee = big.NewInt(maxNormalBaseFee) + + bigMinGasTip = big.NewInt(minGasTip) + bigMaxNormalGasTip = big.NewInt(maxNormalGasTip) + + bigSlowFeeNumerator = big.NewInt(slowFeeNumerator) + bigFastFeeNumerator = big.NewInt(fastFeeNumerator) + bigFeeDenominator = big.NewInt(feeDenominator) +) + +type Price struct { + GasTip *hexutil.Big `json:"maxPriorityFeePerGas"` + GasFee *hexutil.Big `json:"maxFeePerGas"` +} + +type PriceOptions struct { + Slow *Price `json:"slow"` + Normal *Price `json:"normal"` + Fast *Price `json:"fast"` +} + +// SuggestPriceOptions returns suggestions for what to display to a user for +// current transaction fees. +func (s *EthereumAPI) SuggestPriceOptions(ctx context.Context) (*PriceOptions, error) { + baseFee, err := s.b.EstimateBaseFee(ctx) + if err != nil { + return nil, fmt.Errorf("failed to estimate base fee: %w", err) + } + gasTip, err := s.b.SuggestGasTipCap(ctx) + if err != nil { + return nil, fmt.Errorf("failed to suggest gas tip cap: %w", err) + } + + // If the chain isn't running with dynamic fees, return nil. 
+ if baseFee == nil || gasTip == nil { + return nil, nil + } + + baseFees := calculateFeeSpeeds( + bigMinBaseFee, + baseFee, + bigMaxNormalBaseFee, + ) + gasTips := calculateFeeSpeeds( + bigMinGasTip, + gasTip, + bigMaxNormalGasTip, + ) + slowGasFee := new(big.Int).Add(baseFees.slow, gasTips.slow) + normalGasFee := new(big.Int).Add(baseFees.normal, gasTips.normal) + fastGasFee := new(big.Int).Add(baseFees.fast, gasTips.fast) + return &PriceOptions{ + Slow: &Price{ + GasTip: (*hexutil.Big)(gasTips.slow), + GasFee: (*hexutil.Big)(slowGasFee), + }, + Normal: &Price{ + GasTip: (*hexutil.Big)(gasTips.normal), + GasFee: (*hexutil.Big)(normalGasFee), + }, + Fast: &Price{ + GasTip: (*hexutil.Big)(gasTips.fast), + GasFee: (*hexutil.Big)(fastGasFee), + }, + }, nil +} + +type feeSpeeds struct { + slow *big.Int + normal *big.Int + fast *big.Int +} + +// calculateFeeSpeeds returns the slow, normal, and fast price options for a +// given min, estimate, and max, +// +// slow = max(0.95 * min(estimate, maxFee), minFee) +// normal = min(estimate, maxFee) +// fast = 1.05 * estimate +func calculateFeeSpeeds( + minFee *big.Int, + estimate *big.Int, + maxFee *big.Int, +) feeSpeeds { + // Cap the fee to keep slow and normal options reasonable during fee spikes. + cappedFee := math.BigMin(estimate, maxFee) + + slowFee := new(big.Int).Set(cappedFee) + slowFee.Mul(slowFee, bigSlowFeeNumerator) + slowFee.Div(slowFee, bigFeeDenominator) + slowFee = math.BigMax(slowFee, minFee) + + normalFee := cappedFee + + fastFee := new(big.Int).Set(estimate) + fastFee.Mul(fastFee, bigFastFeeNumerator) + fastFee.Div(fastFee, bigFeeDenominator) + return feeSpeeds{ + slow: slowFee, + normal: normalFee, + fast: fastFee, + } +} diff --git a/internal/ethapi/api.coreth_test.go b/internal/ethapi/api.coreth_test.go new file mode 100644 index 0000000000..4888e0a364 --- /dev/null +++ b/internal/ethapi/api.coreth_test.go @@ -0,0 +1,130 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package ethapi + +import ( + "context" + "math/big" + "testing" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/common/hexutil" + "github.com/stretchr/testify/require" +) + +type testSuggestPriceOptionsBackend struct { + Backend // embed the interface to avoid implementing unused methods + + estimateBaseFee *big.Int + suggestGasTipCap *big.Int +} + +func (b *testSuggestPriceOptionsBackend) EstimateBaseFee(context.Context) (*big.Int, error) { + return b.estimateBaseFee, nil +} + +func (b *testSuggestPriceOptionsBackend) SuggestGasTipCap(context.Context) (*big.Int, error) { + return b.suggestGasTipCap, nil +} + +func TestSuggestPriceOptions(t *testing.T) { + tests := []struct { + name string + estimateBaseFee *big.Int + suggestGasTipCap *big.Int + want *PriceOptions + }{ + { + name: "nil_base_fee", + estimateBaseFee: nil, + suggestGasTipCap: common.Big1, + want: nil, + }, + { + name: "nil_tip_cap", + estimateBaseFee: common.Big1, + suggestGasTipCap: nil, + want: nil, + }, + { + name: "minimum_values", + estimateBaseFee: bigMinBaseFee, + suggestGasTipCap: bigMinGasTip, + want: &PriceOptions{ + Slow: newPrice( + minGasTip, + minBaseFee+minGasTip, + ), + Normal: newPrice( + minGasTip, + minBaseFee+minGasTip, + ), + Fast: newPrice( + minGasTip, + (fastFeeNumerator*minBaseFee)/feeDenominator+(fastFeeNumerator*minGasTip)/feeDenominator, + ), + }, + }, + { + name: "maximum_values", + estimateBaseFee: bigMaxNormalBaseFee, + suggestGasTipCap: bigMaxNormalGasTip, + want: &PriceOptions{ + Slow: newPrice( + (slowFeeNumerator*maxNormalGasTip)/feeDenominator, + (slowFeeNumerator*maxNormalBaseFee)/feeDenominator+(slowFeeNumerator*maxNormalGasTip)/feeDenominator, + ), + Normal: newPrice( + maxNormalGasTip, + maxNormalBaseFee+maxNormalGasTip, + ), + Fast: newPrice( + (fastFeeNumerator*maxNormalGasTip)/feeDenominator, + 
(fastFeeNumerator*maxNormalBaseFee)/feeDenominator+(fastFeeNumerator*maxNormalGasTip)/feeDenominator, + ), + }, + }, + { + name: "double_maximum_values", + estimateBaseFee: big.NewInt(2 * maxNormalBaseFee), + suggestGasTipCap: big.NewInt(2 * maxNormalGasTip), + want: &PriceOptions{ + Slow: newPrice( + (slowFeeNumerator*maxNormalGasTip)/feeDenominator, + (slowFeeNumerator*maxNormalBaseFee)/feeDenominator+(slowFeeNumerator*maxNormalGasTip)/feeDenominator, + ), + Normal: newPrice( + maxNormalGasTip, + maxNormalBaseFee+maxNormalGasTip, + ), + Fast: newPrice( + (fastFeeNumerator*2*maxNormalGasTip)/feeDenominator, + (fastFeeNumerator*2*maxNormalBaseFee)/feeDenominator+(fastFeeNumerator*2*maxNormalGasTip)/feeDenominator, + ), + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + backend := &testSuggestPriceOptionsBackend{ + estimateBaseFee: test.estimateBaseFee, + suggestGasTipCap: test.suggestGasTipCap, + } + api := NewEthereumAPI(backend) + + got, err := api.SuggestPriceOptions(context.Background()) + require.NoError(err) + require.Equal(test.want, got) + }) + } +} + +func newPrice(gasTip, gasFee int64) *Price { + return &Price{ + GasTip: (*hexutil.Big)(big.NewInt(gasTip)), + GasFee: (*hexutil.Big)(big.NewInt(gasFee)), + } +} diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index a39fd3fbef..f99d40be29 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -676,7 +676,14 @@ func (n *proofList) Delete(key []byte) error { } // GetProof returns the Merkle-proof for a given account and optionally some storage keys. +// If the requested block is part of historical blocks and the node does not accept +// getting proofs for historical blocks, an error is returned. 
func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNrOrHash rpc.BlockNumberOrHash) (*AccountResult, error) { + err := s.stateQueryBlockNumberAllowed(blockNrOrHash) + if err != nil { + return nil, fmt.Errorf("historical proof query not allowed: %s", err) + } + var ( keys = make([]common.Hash, len(storageKeys)) keyLengths = make([]int, len(storageKeys)) @@ -2012,7 +2019,7 @@ func (api *DebugAPI) GetRawHeader(ctx context.Context, blockNrOrHash rpc.BlockNu hash = h } else { block, err := api.b.BlockByNumberOrHash(ctx, blockNrOrHash) - if err != nil { + if block == nil || err != nil { return nil, err } hash = block.Hash() @@ -2031,7 +2038,7 @@ func (api *DebugAPI) GetRawBlock(ctx context.Context, blockNrOrHash rpc.BlockNum hash = h } else { block, err := api.b.BlockByNumberOrHash(ctx, blockNrOrHash) - if err != nil { + if block == nil || err != nil { return nil, err } hash = block.Hash() @@ -2050,7 +2057,7 @@ func (api *DebugAPI) GetRawReceipts(ctx context.Context, blockNrOrHash rpc.Block hash = h } else { block, err := api.b.BlockByNumberOrHash(ctx, blockNrOrHash) - if err != nil { + if block == nil || err != nil { return nil, err } hash = block.Hash() diff --git a/internal/ethapi/api_extra.go b/internal/ethapi/api_extra.go index 109b1ac2f4..5a71fc0789 100644 --- a/internal/ethapi/api_extra.go +++ b/internal/ethapi/api_extra.go @@ -5,6 +5,7 @@ package ethapi import ( "context" + "errors" "fmt" "github.com/ava-labs/coreth/core" @@ -92,3 +93,46 @@ func (s *BlockChainAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, erro } return results, nil } + +// stateQueryBlockNumberAllowed returns a nil error if: +// - the node is configured to accept any state query (the query window is zero) +// - the block given has its number within the query window before the last accepted block. +// This query window is set to [core.TipBufferSize] when running in a non-archive mode. 
+// +// Otherwise, it returns a non-nil error containing block number information. +func (s *BlockChainAPI) stateQueryBlockNumberAllowed(blockNumOrHash rpc.BlockNumberOrHash) (err error) { + queryWindow := uint64(core.TipBufferSize) + if s.b.IsArchive() { + queryWindow = s.b.HistoricalProofQueryWindow() + if queryWindow == 0 { + return nil + } + } + + lastAcceptedNumber := s.b.LastAcceptedBlock().NumberU64() + + var number uint64 + if blockNumOrHash.BlockNumber != nil { + number = uint64(blockNumOrHash.BlockNumber.Int64()) + } else if blockHash, ok := blockNumOrHash.Hash(); ok { + block, err := s.b.BlockByHash(context.Background(), blockHash) + if err != nil { + return fmt.Errorf("failed to get block from hash: %s", err) + } else if block == nil { + return fmt.Errorf("block from hash %s doesn't exist", blockHash) + } + number = block.NumberU64() + } else { + return errors.New("block number or hash not provided") + } + + var oldestAllowed uint64 + if lastAcceptedNumber > queryWindow { + oldestAllowed = lastAcceptedNumber - queryWindow + } + if number >= oldestAllowed { + return nil + } + return fmt.Errorf("block number %d is before the oldest allowed block number %d (window of %d blocks)", + number, oldestAllowed, queryWindow) +} diff --git a/internal/ethapi/api_extra_test.go b/internal/ethapi/api_extra_test.go new file mode 100644 index 0000000000..45ac7085dd --- /dev/null +++ b/internal/ethapi/api_extra_test.go @@ -0,0 +1,132 @@ +// (c) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ethapi + +import ( + "fmt" + "math/big" + "testing" + + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/rpc" + "github.com/ava-labs/libevm/common" + "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" +) + +func TestBlockChainAPI_stateQueryBlockNumberAllowed(t *testing.T) { + t.Parallel() + + const queryWindow uint64 = 1024 + + makeBlockWithNumber := func(number uint64) *types.Block { + header := &types.Header{ + Number: big.NewInt(int64(number)), + } + return types.NewBlock(header, nil, nil, nil, nil) + } + + testCases := map[string]struct { + blockNumOrHash rpc.BlockNumberOrHash + makeBackend func(ctrl *gomock.Controller) *MockBackend + wantErrMessage string + }{ + "zero_query_window": { + blockNumOrHash: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(1000)), + makeBackend: func(ctrl *gomock.Controller) *MockBackend { + backend := NewMockBackend(ctrl) + backend.EXPECT().IsArchive().Return(true) + backend.EXPECT().HistoricalProofQueryWindow().Return(uint64(0)) + return backend + }, + }, + "block_number_allowed_below_window": { + blockNumOrHash: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(1000)), + makeBackend: func(ctrl *gomock.Controller) *MockBackend { + backend := NewMockBackend(ctrl) + backend.EXPECT().IsArchive().Return(true) + backend.EXPECT().HistoricalProofQueryWindow().Return(queryWindow) + backend.EXPECT().LastAcceptedBlock().Return(makeBlockWithNumber(1020)) + return backend + }, + }, + "block_number_allowed": { + blockNumOrHash: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(2000)), + makeBackend: func(ctrl *gomock.Controller) *MockBackend { + backend := NewMockBackend(ctrl) + backend.EXPECT().IsArchive().Return(true) + backend.EXPECT().HistoricalProofQueryWindow().Return(queryWindow) + backend.EXPECT().LastAcceptedBlock().Return(makeBlockWithNumber(2200)) + return backend + }, + }, + "block_number_allowed_by_hash": { + blockNumOrHash: rpc.BlockNumberOrHashWithHash(common.Hash{99}, false), + 
makeBackend: func(ctrl *gomock.Controller) *MockBackend { + backend := NewMockBackend(ctrl) + backend.EXPECT().IsArchive().Return(true) + backend.EXPECT().HistoricalProofQueryWindow().Return(queryWindow) + backend.EXPECT().LastAcceptedBlock().Return(makeBlockWithNumber(2200)) + backend.EXPECT(). + BlockByHash(gomock.Any(), gomock.Any()). + Return(makeBlockWithNumber(2000), nil) + return backend + }, + }, + "block_number_allowed_by_hash_error": { + blockNumOrHash: rpc.BlockNumberOrHashWithHash(common.Hash{99}, false), + makeBackend: func(ctrl *gomock.Controller) *MockBackend { + backend := NewMockBackend(ctrl) + backend.EXPECT().IsArchive().Return(true) + backend.EXPECT().HistoricalProofQueryWindow().Return(queryWindow) + backend.EXPECT().LastAcceptedBlock().Return(makeBlockWithNumber(2200)) + backend.EXPECT(). + BlockByHash(gomock.Any(), gomock.Any()). + Return(nil, fmt.Errorf("test error")) + return backend + }, + wantErrMessage: "failed to get block from hash: test error", + }, + "block_number_out_of_window": { + blockNumOrHash: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(1000)), + makeBackend: func(ctrl *gomock.Controller) *MockBackend { + backend := NewMockBackend(ctrl) + backend.EXPECT().IsArchive().Return(true) + backend.EXPECT().HistoricalProofQueryWindow().Return(queryWindow) + backend.EXPECT().LastAcceptedBlock().Return(makeBlockWithNumber(2200)) + return backend + }, + wantErrMessage: "block number 1000 is before the oldest allowed block number 1176 (window of 1024 blocks)", + }, + "block_number_out_of_window_non_archive": { + blockNumOrHash: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(1000)), + makeBackend: func(ctrl *gomock.Controller) *MockBackend { + backend := NewMockBackend(ctrl) + backend.EXPECT().IsArchive().Return(false) + // query window is 32 as set to core.TipBufferSize + backend.EXPECT().LastAcceptedBlock().Return(makeBlockWithNumber(1033)) + return backend + }, + wantErrMessage: "block number 1000 is before the oldest allowed block 
number 1001 (window of 32 blocks)", + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + + api := &BlockChainAPI{ + b: testCase.makeBackend(ctrl), + } + + err := api.stateQueryBlockNumberAllowed(testCase.blockNumOrHash) + if testCase.wantErrMessage == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, testCase.wantErrMessage) + } + }) + } +} diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index c3a6c7c880..7c93b69d39 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -625,6 +625,12 @@ func (b testBackend) LastAcceptedBlock() *types.Block { panic("implement me") } func (b testBackend) SuggestPrice(ctx context.Context) (*big.Int, error) { panic("implement me") } +func (b testBackend) IsArchive() bool { + panic("implement me") +} +func (b testBackend) HistoricalProofQueryWindow() (queryWindow uint64) { + panic("implement me") +} func TestEstimateGas(t *testing.T) { t.Parallel() diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 5758b3b9d1..e64aeb75d9 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -80,6 +80,8 @@ type Backend interface { SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription BadBlocks() ([]*types.Block, []*core.BadBlockReason) + IsArchive() bool + HistoricalProofQueryWindow() uint64 // Transaction pool API SendTx(ctx context.Context, signedTx *types.Transaction) error diff --git a/internal/ethapi/mocks_generate_test.go b/internal/ethapi/mocks_generate_test.go new file mode 100644 index 0000000000..7784142351 --- /dev/null +++ b/internal/ethapi/mocks_generate_test.go @@ -0,0 +1,3 @@ +package ethapi + +//go:generate go run go.uber.org/mock/mockgen -package=$GOPACKAGE -destination=mocks_test.go . 
Backend diff --git a/internal/ethapi/mocks_test.go b/internal/ethapi/mocks_test.go new file mode 100644 index 0000000000..3d46795f73 --- /dev/null +++ b/internal/ethapi/mocks_test.go @@ -0,0 +1,757 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/coreth/internal/ethapi (interfaces: Backend) +// +// Generated by this command: +// +// mockgen -package=ethapi -destination=mocks_test.go . Backend +// + +// Package ethapi is a generated GoMock package. +package ethapi + +import ( + context "context" + big "math/big" + reflect "reflect" + time "time" + + consensus "github.com/ava-labs/coreth/consensus" + core "github.com/ava-labs/coreth/core" + bloombits "github.com/ava-labs/coreth/core/bloombits" + state "github.com/ava-labs/coreth/core/state" + types "github.com/ava-labs/coreth/core/types" + params "github.com/ava-labs/coreth/params" + rpc "github.com/ava-labs/coreth/rpc" + accounts "github.com/ava-labs/libevm/accounts" + common "github.com/ava-labs/libevm/common" + vm "github.com/ava-labs/libevm/core/vm" + ethdb "github.com/ava-labs/libevm/ethdb" + event "github.com/ava-labs/libevm/event" + gomock "go.uber.org/mock/gomock" +) + +// MockBackend is a mock of Backend interface. +type MockBackend struct { + ctrl *gomock.Controller + recorder *MockBackendMockRecorder + isgomock struct{} +} + +// MockBackendMockRecorder is the mock recorder for MockBackend. +type MockBackendMockRecorder struct { + mock *MockBackend +} + +// NewMockBackend creates a new mock instance. +func NewMockBackend(ctrl *gomock.Controller) *MockBackend { + mock := &MockBackend{ctrl: ctrl} + mock.recorder = &MockBackendMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBackend) EXPECT() *MockBackendMockRecorder { + return m.recorder +} + +// AccountManager mocks base method. 
+func (m *MockBackend) AccountManager() *accounts.Manager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AccountManager") + ret0, _ := ret[0].(*accounts.Manager) + return ret0 +} + +// AccountManager indicates an expected call of AccountManager. +func (mr *MockBackendMockRecorder) AccountManager() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AccountManager", reflect.TypeOf((*MockBackend)(nil).AccountManager)) +} + +// BadBlocks mocks base method. +func (m *MockBackend) BadBlocks() ([]*types.Block, []*core.BadBlockReason) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BadBlocks") + ret0, _ := ret[0].([]*types.Block) + ret1, _ := ret[1].([]*core.BadBlockReason) + return ret0, ret1 +} + +// BadBlocks indicates an expected call of BadBlocks. +func (mr *MockBackendMockRecorder) BadBlocks() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BadBlocks", reflect.TypeOf((*MockBackend)(nil).BadBlocks)) +} + +// BlockByHash mocks base method. +func (m *MockBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlockByHash", ctx, hash) + ret0, _ := ret[0].(*types.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BlockByHash indicates an expected call of BlockByHash. +func (mr *MockBackendMockRecorder) BlockByHash(ctx, hash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockByHash", reflect.TypeOf((*MockBackend)(nil).BlockByHash), ctx, hash) +} + +// BlockByNumber mocks base method. +func (m *MockBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlockByNumber", ctx, number) + ret0, _ := ret[0].(*types.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BlockByNumber indicates an expected call of BlockByNumber. 
+func (mr *MockBackendMockRecorder) BlockByNumber(ctx, number any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockByNumber", reflect.TypeOf((*MockBackend)(nil).BlockByNumber), ctx, number) +} + +// BlockByNumberOrHash mocks base method. +func (m *MockBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlockByNumberOrHash", ctx, blockNrOrHash) + ret0, _ := ret[0].(*types.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BlockByNumberOrHash indicates an expected call of BlockByNumberOrHash. +func (mr *MockBackendMockRecorder) BlockByNumberOrHash(ctx, blockNrOrHash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockByNumberOrHash", reflect.TypeOf((*MockBackend)(nil).BlockByNumberOrHash), ctx, blockNrOrHash) +} + +// BloomStatus mocks base method. +func (m *MockBackend) BloomStatus() (uint64, uint64) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BloomStatus") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(uint64) + return ret0, ret1 +} + +// BloomStatus indicates an expected call of BloomStatus. +func (mr *MockBackendMockRecorder) BloomStatus() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BloomStatus", reflect.TypeOf((*MockBackend)(nil).BloomStatus)) +} + +// ChainConfig mocks base method. +func (m *MockBackend) ChainConfig() *params.ChainConfig { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainConfig") + ret0, _ := ret[0].(*params.ChainConfig) + return ret0 +} + +// ChainConfig indicates an expected call of ChainConfig. +func (mr *MockBackendMockRecorder) ChainConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainConfig", reflect.TypeOf((*MockBackend)(nil).ChainConfig)) +} + +// ChainDb mocks base method. 
+func (m *MockBackend) ChainDb() ethdb.Database { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainDb") + ret0, _ := ret[0].(ethdb.Database) + return ret0 +} + +// ChainDb indicates an expected call of ChainDb. +func (mr *MockBackendMockRecorder) ChainDb() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainDb", reflect.TypeOf((*MockBackend)(nil).ChainDb)) +} + +// CurrentBlock mocks base method. +func (m *MockBackend) CurrentBlock() *types.Header { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CurrentBlock") + ret0, _ := ret[0].(*types.Header) + return ret0 +} + +// CurrentBlock indicates an expected call of CurrentBlock. +func (mr *MockBackendMockRecorder) CurrentBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentBlock", reflect.TypeOf((*MockBackend)(nil).CurrentBlock)) +} + +// CurrentHeader mocks base method. +func (m *MockBackend) CurrentHeader() *types.Header { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CurrentHeader") + ret0, _ := ret[0].(*types.Header) + return ret0 +} + +// CurrentHeader indicates an expected call of CurrentHeader. +func (mr *MockBackendMockRecorder) CurrentHeader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentHeader", reflect.TypeOf((*MockBackend)(nil).CurrentHeader)) +} + +// Engine mocks base method. +func (m *MockBackend) Engine() consensus.Engine { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Engine") + ret0, _ := ret[0].(consensus.Engine) + return ret0 +} + +// Engine indicates an expected call of Engine. +func (mr *MockBackendMockRecorder) Engine() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Engine", reflect.TypeOf((*MockBackend)(nil).Engine)) +} + +// EstimateBaseFee mocks base method. 
+func (m *MockBackend) EstimateBaseFee(ctx context.Context) (*big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EstimateBaseFee", ctx) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EstimateBaseFee indicates an expected call of EstimateBaseFee. +func (mr *MockBackendMockRecorder) EstimateBaseFee(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EstimateBaseFee", reflect.TypeOf((*MockBackend)(nil).EstimateBaseFee), ctx) +} + +// ExtRPCEnabled mocks base method. +func (m *MockBackend) ExtRPCEnabled() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExtRPCEnabled") + ret0, _ := ret[0].(bool) + return ret0 +} + +// ExtRPCEnabled indicates an expected call of ExtRPCEnabled. +func (mr *MockBackendMockRecorder) ExtRPCEnabled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExtRPCEnabled", reflect.TypeOf((*MockBackend)(nil).ExtRPCEnabled)) +} + +// FeeHistory mocks base method. +func (m *MockBackend) FeeHistory(ctx context.Context, blockCount uint64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FeeHistory", ctx, blockCount, lastBlock, rewardPercentiles) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].([][]*big.Int) + ret2, _ := ret[2].([]*big.Int) + ret3, _ := ret[3].([]float64) + ret4, _ := ret[4].(error) + return ret0, ret1, ret2, ret3, ret4 +} + +// FeeHistory indicates an expected call of FeeHistory. +func (mr *MockBackendMockRecorder) FeeHistory(ctx, blockCount, lastBlock, rewardPercentiles any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FeeHistory", reflect.TypeOf((*MockBackend)(nil).FeeHistory), ctx, blockCount, lastBlock, rewardPercentiles) +} + +// GetBody mocks base method. 
+func (m *MockBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBody", ctx, hash, number) + ret0, _ := ret[0].(*types.Body) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBody indicates an expected call of GetBody. +func (mr *MockBackendMockRecorder) GetBody(ctx, hash, number any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBody", reflect.TypeOf((*MockBackend)(nil).GetBody), ctx, hash, number) +} + +// GetEVM mocks base method. +func (m *MockBackend) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEVM", ctx, msg, state, header, vmConfig, blockCtx) + ret0, _ := ret[0].(*vm.EVM) + return ret0 +} + +// GetEVM indicates an expected call of GetEVM. +func (mr *MockBackendMockRecorder) GetEVM(ctx, msg, state, header, vmConfig, blockCtx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEVM", reflect.TypeOf((*MockBackend)(nil).GetEVM), ctx, msg, state, header, vmConfig, blockCtx) +} + +// GetLogs mocks base method. +func (m *MockBackend) GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLogs", ctx, blockHash, number) + ret0, _ := ret[0].([][]*types.Log) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLogs indicates an expected call of GetLogs. +func (mr *MockBackendMockRecorder) GetLogs(ctx, blockHash, number any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogs", reflect.TypeOf((*MockBackend)(nil).GetLogs), ctx, blockHash, number) +} + +// GetPoolNonce mocks base method. 
+func (m *MockBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPoolNonce", ctx, addr) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPoolNonce indicates an expected call of GetPoolNonce. +func (mr *MockBackendMockRecorder) GetPoolNonce(ctx, addr any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPoolNonce", reflect.TypeOf((*MockBackend)(nil).GetPoolNonce), ctx, addr) +} + +// GetPoolTransaction mocks base method. +func (m *MockBackend) GetPoolTransaction(txHash common.Hash) *types.Transaction { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPoolTransaction", txHash) + ret0, _ := ret[0].(*types.Transaction) + return ret0 +} + +// GetPoolTransaction indicates an expected call of GetPoolTransaction. +func (mr *MockBackendMockRecorder) GetPoolTransaction(txHash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPoolTransaction", reflect.TypeOf((*MockBackend)(nil).GetPoolTransaction), txHash) +} + +// GetPoolTransactions mocks base method. +func (m *MockBackend) GetPoolTransactions() (types.Transactions, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPoolTransactions") + ret0, _ := ret[0].(types.Transactions) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPoolTransactions indicates an expected call of GetPoolTransactions. +func (mr *MockBackendMockRecorder) GetPoolTransactions() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPoolTransactions", reflect.TypeOf((*MockBackend)(nil).GetPoolTransactions)) +} + +// GetReceipts mocks base method. 
+func (m *MockBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetReceipts", ctx, hash) + ret0, _ := ret[0].(types.Receipts) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetReceipts indicates an expected call of GetReceipts. +func (mr *MockBackendMockRecorder) GetReceipts(ctx, hash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReceipts", reflect.TypeOf((*MockBackend)(nil).GetReceipts), ctx, hash) +} + +// GetTransaction mocks base method. +func (m *MockBackend) GetTransaction(ctx context.Context, txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTransaction", ctx, txHash) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(*types.Transaction) + ret2, _ := ret[2].(common.Hash) + ret3, _ := ret[3].(uint64) + ret4, _ := ret[4].(uint64) + ret5, _ := ret[5].(error) + return ret0, ret1, ret2, ret3, ret4, ret5 +} + +// GetTransaction indicates an expected call of GetTransaction. +func (mr *MockBackendMockRecorder) GetTransaction(ctx, txHash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTransaction", reflect.TypeOf((*MockBackend)(nil).GetTransaction), ctx, txHash) +} + +// HeaderByHash mocks base method. +func (m *MockBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeaderByHash", ctx, hash) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeaderByHash indicates an expected call of HeaderByHash. 
+func (mr *MockBackendMockRecorder) HeaderByHash(ctx, hash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeaderByHash", reflect.TypeOf((*MockBackend)(nil).HeaderByHash), ctx, hash) +} + +// HeaderByNumber mocks base method. +func (m *MockBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeaderByNumber", ctx, number) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeaderByNumber indicates an expected call of HeaderByNumber. +func (mr *MockBackendMockRecorder) HeaderByNumber(ctx, number any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeaderByNumber", reflect.TypeOf((*MockBackend)(nil).HeaderByNumber), ctx, number) +} + +// HeaderByNumberOrHash mocks base method. +func (m *MockBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeaderByNumberOrHash", ctx, blockNrOrHash) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeaderByNumberOrHash indicates an expected call of HeaderByNumberOrHash. +func (mr *MockBackendMockRecorder) HeaderByNumberOrHash(ctx, blockNrOrHash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeaderByNumberOrHash", reflect.TypeOf((*MockBackend)(nil).HeaderByNumberOrHash), ctx, blockNrOrHash) +} + +// HistoricalProofQueryWindow mocks base method. +func (m *MockBackend) HistoricalProofQueryWindow() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HistoricalProofQueryWindow") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// HistoricalProofQueryWindow indicates an expected call of HistoricalProofQueryWindow. 
+func (mr *MockBackendMockRecorder) HistoricalProofQueryWindow() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HistoricalProofQueryWindow", reflect.TypeOf((*MockBackend)(nil).HistoricalProofQueryWindow)) +} + +// IsArchive mocks base method. +func (m *MockBackend) IsArchive() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsArchive") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsArchive indicates an expected call of IsArchive. +func (mr *MockBackendMockRecorder) IsArchive() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsArchive", reflect.TypeOf((*MockBackend)(nil).IsArchive)) +} + +// LastAcceptedBlock mocks base method. +func (m *MockBackend) LastAcceptedBlock() *types.Block { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LastAcceptedBlock") + ret0, _ := ret[0].(*types.Block) + return ret0 +} + +// LastAcceptedBlock indicates an expected call of LastAcceptedBlock. +func (mr *MockBackendMockRecorder) LastAcceptedBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastAcceptedBlock", reflect.TypeOf((*MockBackend)(nil).LastAcceptedBlock)) +} + +// RPCEVMTimeout mocks base method. +func (m *MockBackend) RPCEVMTimeout() time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RPCEVMTimeout") + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// RPCEVMTimeout indicates an expected call of RPCEVMTimeout. +func (mr *MockBackendMockRecorder) RPCEVMTimeout() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RPCEVMTimeout", reflect.TypeOf((*MockBackend)(nil).RPCEVMTimeout)) +} + +// RPCGasCap mocks base method. +func (m *MockBackend) RPCGasCap() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RPCGasCap") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// RPCGasCap indicates an expected call of RPCGasCap. 
+func (mr *MockBackendMockRecorder) RPCGasCap() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RPCGasCap", reflect.TypeOf((*MockBackend)(nil).RPCGasCap)) +} + +// RPCTxFeeCap mocks base method. +func (m *MockBackend) RPCTxFeeCap() float64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RPCTxFeeCap") + ret0, _ := ret[0].(float64) + return ret0 +} + +// RPCTxFeeCap indicates an expected call of RPCTxFeeCap. +func (mr *MockBackendMockRecorder) RPCTxFeeCap() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RPCTxFeeCap", reflect.TypeOf((*MockBackend)(nil).RPCTxFeeCap)) +} + +// SendTx mocks base method. +func (m *MockBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendTx", ctx, signedTx) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendTx indicates an expected call of SendTx. +func (mr *MockBackendMockRecorder) SendTx(ctx, signedTx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendTx", reflect.TypeOf((*MockBackend)(nil).SendTx), ctx, signedTx) +} + +// ServiceFilter mocks base method. +func (m *MockBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ServiceFilter", ctx, session) +} + +// ServiceFilter indicates an expected call of ServiceFilter. +func (mr *MockBackendMockRecorder) ServiceFilter(ctx, session any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceFilter", reflect.TypeOf((*MockBackend)(nil).ServiceFilter), ctx, session) +} + +// StateAndHeaderByNumber mocks base method. 
+func (m *MockBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateAndHeaderByNumber", ctx, number) + ret0, _ := ret[0].(*state.StateDB) + ret1, _ := ret[1].(*types.Header) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// StateAndHeaderByNumber indicates an expected call of StateAndHeaderByNumber. +func (mr *MockBackendMockRecorder) StateAndHeaderByNumber(ctx, number any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAndHeaderByNumber", reflect.TypeOf((*MockBackend)(nil).StateAndHeaderByNumber), ctx, number) +} + +// StateAndHeaderByNumberOrHash mocks base method. +func (m *MockBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateAndHeaderByNumberOrHash", ctx, blockNrOrHash) + ret0, _ := ret[0].(*state.StateDB) + ret1, _ := ret[1].(*types.Header) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// StateAndHeaderByNumberOrHash indicates an expected call of StateAndHeaderByNumberOrHash. +func (mr *MockBackendMockRecorder) StateAndHeaderByNumberOrHash(ctx, blockNrOrHash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAndHeaderByNumberOrHash", reflect.TypeOf((*MockBackend)(nil).StateAndHeaderByNumberOrHash), ctx, blockNrOrHash) +} + +// Stats mocks base method. +func (m *MockBackend) Stats() (int, int) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stats") + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(int) + return ret0, ret1 +} + +// Stats indicates an expected call of Stats. 
+func (mr *MockBackendMockRecorder) Stats() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockBackend)(nil).Stats)) +} + +// SubscribeChainEvent mocks base method. +func (m *MockBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubscribeChainEvent", ch) + ret0, _ := ret[0].(event.Subscription) + return ret0 +} + +// SubscribeChainEvent indicates an expected call of SubscribeChainEvent. +func (mr *MockBackendMockRecorder) SubscribeChainEvent(ch any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeChainEvent", reflect.TypeOf((*MockBackend)(nil).SubscribeChainEvent), ch) +} + +// SubscribeChainHeadEvent mocks base method. +func (m *MockBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubscribeChainHeadEvent", ch) + ret0, _ := ret[0].(event.Subscription) + return ret0 +} + +// SubscribeChainHeadEvent indicates an expected call of SubscribeChainHeadEvent. +func (mr *MockBackendMockRecorder) SubscribeChainHeadEvent(ch any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeChainHeadEvent", reflect.TypeOf((*MockBackend)(nil).SubscribeChainHeadEvent), ch) +} + +// SubscribeChainSideEvent mocks base method. +func (m *MockBackend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubscribeChainSideEvent", ch) + ret0, _ := ret[0].(event.Subscription) + return ret0 +} + +// SubscribeChainSideEvent indicates an expected call of SubscribeChainSideEvent. 
+func (mr *MockBackendMockRecorder) SubscribeChainSideEvent(ch any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeChainSideEvent", reflect.TypeOf((*MockBackend)(nil).SubscribeChainSideEvent), ch) +} + +// SubscribeLogsEvent mocks base method. +func (m *MockBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubscribeLogsEvent", ch) + ret0, _ := ret[0].(event.Subscription) + return ret0 +} + +// SubscribeLogsEvent indicates an expected call of SubscribeLogsEvent. +func (mr *MockBackendMockRecorder) SubscribeLogsEvent(ch any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeLogsEvent", reflect.TypeOf((*MockBackend)(nil).SubscribeLogsEvent), ch) +} + +// SubscribeNewTxsEvent mocks base method. +func (m *MockBackend) SubscribeNewTxsEvent(arg0 chan<- core.NewTxsEvent) event.Subscription { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubscribeNewTxsEvent", arg0) + ret0, _ := ret[0].(event.Subscription) + return ret0 +} + +// SubscribeNewTxsEvent indicates an expected call of SubscribeNewTxsEvent. +func (mr *MockBackendMockRecorder) SubscribeNewTxsEvent(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeNewTxsEvent", reflect.TypeOf((*MockBackend)(nil).SubscribeNewTxsEvent), arg0) +} + +// SubscribePendingLogsEvent mocks base method. +func (m *MockBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubscribePendingLogsEvent", ch) + ret0, _ := ret[0].(event.Subscription) + return ret0 +} + +// SubscribePendingLogsEvent indicates an expected call of SubscribePendingLogsEvent. 
+func (mr *MockBackendMockRecorder) SubscribePendingLogsEvent(ch any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribePendingLogsEvent", reflect.TypeOf((*MockBackend)(nil).SubscribePendingLogsEvent), ch) +} + +// SubscribeRemovedLogsEvent mocks base method. +func (m *MockBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubscribeRemovedLogsEvent", ch) + ret0, _ := ret[0].(event.Subscription) + return ret0 +} + +// SubscribeRemovedLogsEvent indicates an expected call of SubscribeRemovedLogsEvent. +func (mr *MockBackendMockRecorder) SubscribeRemovedLogsEvent(ch any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeRemovedLogsEvent", reflect.TypeOf((*MockBackend)(nil).SubscribeRemovedLogsEvent), ch) +} + +// SuggestGasTipCap mocks base method. +func (m *MockBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SuggestGasTipCap", ctx) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SuggestGasTipCap indicates an expected call of SuggestGasTipCap. +func (mr *MockBackendMockRecorder) SuggestGasTipCap(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SuggestGasTipCap", reflect.TypeOf((*MockBackend)(nil).SuggestGasTipCap), ctx) +} + +// SuggestPrice mocks base method. +func (m *MockBackend) SuggestPrice(ctx context.Context) (*big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SuggestPrice", ctx) + ret0, _ := ret[0].(*big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SuggestPrice indicates an expected call of SuggestPrice. 
+func (mr *MockBackendMockRecorder) SuggestPrice(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SuggestPrice", reflect.TypeOf((*MockBackend)(nil).SuggestPrice), ctx) +} + +// TxPoolContent mocks base method. +func (m *MockBackend) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TxPoolContent") + ret0, _ := ret[0].(map[common.Address][]*types.Transaction) + ret1, _ := ret[1].(map[common.Address][]*types.Transaction) + return ret0, ret1 +} + +// TxPoolContent indicates an expected call of TxPoolContent. +func (mr *MockBackendMockRecorder) TxPoolContent() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TxPoolContent", reflect.TypeOf((*MockBackend)(nil).TxPoolContent)) +} + +// TxPoolContentFrom mocks base method. +func (m *MockBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TxPoolContentFrom", addr) + ret0, _ := ret[0].([]*types.Transaction) + ret1, _ := ret[1].([]*types.Transaction) + return ret0, ret1 +} + +// TxPoolContentFrom indicates an expected call of TxPoolContentFrom. +func (mr *MockBackendMockRecorder) TxPoolContentFrom(addr any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TxPoolContentFrom", reflect.TypeOf((*MockBackend)(nil).TxPoolContentFrom), addr) +} + +// UnprotectedAllowed mocks base method. +func (m *MockBackend) UnprotectedAllowed(tx *types.Transaction) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnprotectedAllowed", tx) + ret0, _ := ret[0].(bool) + return ret0 +} + +// UnprotectedAllowed indicates an expected call of UnprotectedAllowed. 
+func (mr *MockBackendMockRecorder) UnprotectedAllowed(tx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnprotectedAllowed", reflect.TypeOf((*MockBackend)(nil).UnprotectedAllowed), tx) +} diff --git a/libevm/options/options.go b/libevm/options/options.go new file mode 100644 index 0000000000..af7bc751a9 --- /dev/null +++ b/libevm/options/options.go @@ -0,0 +1,42 @@ +// Copyright 2024 the libevm authors. +// +// The libevm additions to go-ethereum are free software: you can redistribute +// them and/or modify them under the terms of the GNU Lesser General Public License +// as published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The libevm additions are distributed in the hope that they will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see +// . + +// Package options provides a generic mechanism for defining configuration of +// arbitrary types. +package options + +// An Option configures values of arbitrary type. +type Option[T any] interface { + Configure(*T) +} + +// As applies Options to a zero-value T, which it then returns. +func As[T any](opts ...Option[T]) *T { + var t T + for _, o := range opts { + o.Configure(&t) + } + return &t +} + +// A Func converts a function into an [Option], using itself as the Configure +// method. +type Func[T any] func(*T) + +var _ Option[struct{}] = Func[struct{}](nil) + +// Configure implements the [Option] interface. 
+func (f Func[T]) Configure(t *T) { f(t) } diff --git a/libevm/sync/sync.go b/libevm/sync/sync.go new file mode 100644 index 0000000000..e280f4bbe9 --- /dev/null +++ b/libevm/sync/sync.go @@ -0,0 +1,52 @@ +// Copyright 2024 the coreth authors. +// +// The libevm additions to go-ethereum are free software: you can redistribute +// them and/or modify them under the terms of the GNU Lesser General Public License +// as published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The libevm additions are distributed in the hope that they will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see +// . + +// Package sync extends the standard library's sync package. +package sync + +import "sync" + +// Aliases of stdlib sync's types to avoid having to import it alongside this +// package. +type ( + Cond = sync.Cond + Locker = sync.Locker + Map = sync.Map + Mutex = sync.Mutex + Once = sync.Once + RWMutex = sync.RWMutex + WaitGroup = sync.WaitGroup +) + +// A Pool is a type-safe wrapper around [sync.Pool]. +type Pool[T any] struct { + New func() T + pool sync.Pool + once Once +} + +// Get is equivalent to [sync.Pool.Get]. +func (p *Pool[T]) Get() T { + p.once.Do(func() { // Do() guarantees at least once, not just only once + p.pool.New = func() any { return p.New() } + }) + return p.pool.Get().(T) //nolint:forcetypeassert +} + +// Put is equivalent to [sync.Pool.Put]. 
+func (p *Pool[T]) Put(t T) { + p.pool.Put(t) +} diff --git a/metrics/FORK.md b/metrics/FORK.md deleted file mode 100644 index b19985bf56..0000000000 --- a/metrics/FORK.md +++ /dev/null @@ -1 +0,0 @@ -This repo has been forked from https://github.com/rcrowley/go-metrics at commit e181e09 diff --git a/metrics/LICENSE b/metrics/LICENSE deleted file mode 100644 index 363fa9ee77..0000000000 --- a/metrics/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright 2012 Richard Crowley. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE. - -The views and conclusions contained in the software and documentation -are those of the authors and should not be interpreted as representing -official policies, either expressed or implied, of Richard Crowley. 
diff --git a/metrics/README.md b/metrics/README.md deleted file mode 100644 index cf153c8093..0000000000 --- a/metrics/README.md +++ /dev/null @@ -1,102 +0,0 @@ -go-metrics -========== - -![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master) - -Go port of Coda Hale's Metrics library: . - -Documentation: . - -Usage ------ - -Create and update metrics: - -```go -c := metrics.NewCounter() -metrics.Register("foo", c) -c.Inc(47) - -g := metrics.NewGauge() -metrics.Register("bar", g) -g.Update(47) - -r := NewRegistry() -g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() }) - -s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028) -h := metrics.NewHistogram(s) -metrics.Register("baz", h) -h.Update(47) - -m := metrics.NewMeter() -metrics.Register("quux", m) -m.Mark(47) - -t := metrics.NewTimer() -metrics.Register("bang", t) -t.Time(func() {}) -t.Update(47) -``` - -Register() is not threadsafe. 
For threadsafe metric registration use -GetOrRegister: - -```go -t := metrics.GetOrRegisterTimer("account.create.latency", nil) -t.Time(func() {}) -t.Update(47) -``` - -**NOTE:** Be sure to unregister short-lived meters and timers otherwise they will -leak memory: - -```go -// Will call Stop() on the Meter to allow for garbage collection -metrics.Unregister("quux") -// Or similarly for a Timer that embeds a Meter -metrics.Unregister("bang") -``` - -Periodically log every metric in human-readable form to standard error: - -```go -go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds)) -``` - -Periodically log every metric in slightly-more-parseable form to syslog: - -```go -w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics") -go metrics.Syslog(metrics.DefaultRegistry, 60e9, w) -``` - -Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite): - -```go - -import "github.com/cyberdelia/go-metrics-graphite" - -addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003") -go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr) -``` - -Installation ------------- - -```sh -go get github.com/rcrowley/go-metrics -``` - -StatHat support additionally requires their Go client: - -```sh -go get github.com/stathat/go -``` - -Publishing Metrics ------------------- - -Clients are available for the following destinations: - -* Prometheus - https://github.com/deathowl/go-metrics-prometheus diff --git a/metrics/config.go b/metrics/config.go deleted file mode 100644 index a60d96e962..0000000000 --- a/metrics/config.go +++ /dev/null @@ -1,43 +0,0 @@ -// (c) 2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. 
-// -// Much love to the original authors for their work. -// ********** -// Copyright 2021 The go-ethereum Authors -// This file is part of go-ethereum. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package metrics - -// Config contains the configuration for the metric collection. -type Config struct { - Enabled bool `toml:",omitempty"` - EnabledExpensive bool `toml:",omitempty"` - HTTP string `toml:",omitempty"` - Port int `toml:",omitempty"` -} - -// DefaultConfig is the default config for metrics used in go-ethereum. -var DefaultConfig = Config{ - Enabled: false, - EnabledExpensive: false, - HTTP: "127.0.0.1", - Port: 6060, -} diff --git a/metrics/counter.go b/metrics/counter.go deleted file mode 100644 index dbe8e16a90..0000000000 --- a/metrics/counter.go +++ /dev/null @@ -1,112 +0,0 @@ -package metrics - -import ( - "sync/atomic" -) - -type CounterSnapshot interface { - Count() int64 -} - -// Counter hold an int64 value that can be incremented and decremented. -type Counter interface { - Clear() - Dec(int64) - Inc(int64) - Snapshot() CounterSnapshot -} - -// GetOrRegisterCounter returns an existing Counter or constructs and registers -// a new StandardCounter. 
-func GetOrRegisterCounter(name string, r Registry) Counter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewCounter).(Counter) -} - -// GetOrRegisterCounterForced returns an existing Counter or constructs and registers a -// new Counter no matter the global switch is enabled or not. -// Be sure to unregister the counter from the registry once it is of no use to -// allow for garbage collection. -func GetOrRegisterCounterForced(name string, r Registry) Counter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewCounterForced).(Counter) -} - -// NewCounter constructs a new StandardCounter. -func NewCounter() Counter { - if !Enabled { - return NilCounter{} - } - return new(StandardCounter) -} - -// NewCounterForced constructs a new StandardCounter and returns it no matter if -// the global switch is enabled or not. -func NewCounterForced() Counter { - return new(StandardCounter) -} - -// NewRegisteredCounter constructs and registers a new StandardCounter. -func NewRegisteredCounter(name string, r Registry) Counter { - c := NewCounter() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewRegisteredCounterForced constructs and registers a new StandardCounter -// and launches a goroutine no matter the global switch is enabled or not. -// Be sure to unregister the counter from the registry once it is of no use to -// allow for garbage collection. -func NewRegisteredCounterForced(name string, r Registry) Counter { - c := NewCounterForced() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// counterSnapshot is a read-only copy of another Counter. -type counterSnapshot int64 - -// Count returns the count at the time the snapshot was taken. -func (c counterSnapshot) Count() int64 { return int64(c) } - -// NilCounter is a no-op Counter. 
-type NilCounter struct{} - -func (NilCounter) Clear() {} -func (NilCounter) Dec(i int64) {} -func (NilCounter) Inc(i int64) {} -func (NilCounter) Snapshot() CounterSnapshot { return (*emptySnapshot)(nil) } - -// StandardCounter is the standard implementation of a Counter and uses the -// sync/atomic package to manage a single int64 value. -type StandardCounter atomic.Int64 - -// Clear sets the counter to zero. -func (c *StandardCounter) Clear() { - (*atomic.Int64)(c).Store(0) -} - -// Dec decrements the counter by the given amount. -func (c *StandardCounter) Dec(i int64) { - (*atomic.Int64)(c).Add(-i) -} - -// Inc increments the counter by the given amount. -func (c *StandardCounter) Inc(i int64) { - (*atomic.Int64)(c).Add(i) -} - -// Snapshot returns a read-only copy of the counter. -func (c *StandardCounter) Snapshot() CounterSnapshot { - return counterSnapshot((*atomic.Int64)(c).Load()) -} diff --git a/metrics/counter_float64.go b/metrics/counter_float64.go deleted file mode 100644 index 15c81494ef..0000000000 --- a/metrics/counter_float64.go +++ /dev/null @@ -1,126 +0,0 @@ -package metrics - -import ( - "math" - "sync/atomic" -) - -type CounterFloat64Snapshot interface { - Count() float64 -} - -// CounterFloat64 holds a float64 value that can be incremented and decremented. -type CounterFloat64 interface { - Clear() - Dec(float64) - Inc(float64) - Snapshot() CounterFloat64Snapshot -} - -// GetOrRegisterCounterFloat64 returns an existing CounterFloat64 or constructs and registers -// a new StandardCounterFloat64. -func GetOrRegisterCounterFloat64(name string, r Registry) CounterFloat64 { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewCounterFloat64).(CounterFloat64) -} - -// GetOrRegisterCounterFloat64Forced returns an existing CounterFloat64 or constructs and registers a -// new CounterFloat64 no matter the global switch is enabled or not. 
-// Be sure to unregister the counter from the registry once it is of no use to -// allow for garbage collection. -func GetOrRegisterCounterFloat64Forced(name string, r Registry) CounterFloat64 { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewCounterFloat64Forced).(CounterFloat64) -} - -// NewCounterFloat64 constructs a new StandardCounterFloat64. -func NewCounterFloat64() CounterFloat64 { - if !Enabled { - return NilCounterFloat64{} - } - return &StandardCounterFloat64{} -} - -// NewCounterFloat64Forced constructs a new StandardCounterFloat64 and returns it no matter if -// the global switch is enabled or not. -func NewCounterFloat64Forced() CounterFloat64 { - return &StandardCounterFloat64{} -} - -// NewRegisteredCounterFloat64 constructs and registers a new StandardCounterFloat64. -func NewRegisteredCounterFloat64(name string, r Registry) CounterFloat64 { - c := NewCounterFloat64() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewRegisteredCounterFloat64Forced constructs and registers a new StandardCounterFloat64 -// and launches a goroutine no matter the global switch is enabled or not. -// Be sure to unregister the counter from the registry once it is of no use to -// allow for garbage collection. -func NewRegisteredCounterFloat64Forced(name string, r Registry) CounterFloat64 { - c := NewCounterFloat64Forced() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// counterFloat64Snapshot is a read-only copy of another CounterFloat64. -type counterFloat64Snapshot float64 - -// Count returns the value at the time the snapshot was taken. 
-func (c counterFloat64Snapshot) Count() float64 { return float64(c) } - -type NilCounterFloat64 struct{} - -func (NilCounterFloat64) Clear() {} -func (NilCounterFloat64) Count() float64 { return 0.0 } -func (NilCounterFloat64) Dec(i float64) {} -func (NilCounterFloat64) Inc(i float64) {} -func (NilCounterFloat64) Snapshot() CounterFloat64Snapshot { return NilCounterFloat64{} } - -// StandardCounterFloat64 is the standard implementation of a CounterFloat64 and uses the -// atomic to manage a single float64 value. -type StandardCounterFloat64 struct { - floatBits atomic.Uint64 -} - -// Clear sets the counter to zero. -func (c *StandardCounterFloat64) Clear() { - c.floatBits.Store(0) -} - -// Dec decrements the counter by the given amount. -func (c *StandardCounterFloat64) Dec(v float64) { - atomicAddFloat(&c.floatBits, -v) -} - -// Inc increments the counter by the given amount. -func (c *StandardCounterFloat64) Inc(v float64) { - atomicAddFloat(&c.floatBits, v) -} - -// Snapshot returns a read-only copy of the counter. 
-func (c *StandardCounterFloat64) Snapshot() CounterFloat64Snapshot { - v := math.Float64frombits(c.floatBits.Load()) - return counterFloat64Snapshot(v) -} - -func atomicAddFloat(fbits *atomic.Uint64, v float64) { - for { - loadedBits := fbits.Load() - newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) - if fbits.CompareAndSwap(loadedBits, newBits) { - break - } - } -} diff --git a/metrics/counter_float_64_test.go b/metrics/counter_float_64_test.go deleted file mode 100644 index c21bd3307f..0000000000 --- a/metrics/counter_float_64_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package metrics - -import ( - "sync" - "testing" -) - -func BenchmarkCounterFloat64(b *testing.B) { - c := NewCounterFloat64() - b.ResetTimer() - for i := 0; i < b.N; i++ { - c.Inc(1.0) - } -} - -func BenchmarkCounterFloat64Parallel(b *testing.B) { - c := NewCounterFloat64() - b.ResetTimer() - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - for i := 0; i < b.N; i++ { - c.Inc(1.0) - } - wg.Done() - }() - } - wg.Wait() - if have, want := c.Snapshot().Count(), 10.0*float64(b.N); have != want { - b.Fatalf("have %f want %f", have, want) - } -} - -func TestCounterFloat64Clear(t *testing.T) { - c := NewCounterFloat64() - c.Inc(1.0) - c.Clear() - if count := c.Snapshot().Count(); count != 0 { - t.Errorf("c.Count(): 0 != %v\n", count) - } -} - -func TestCounterFloat64Dec1(t *testing.T) { - c := NewCounterFloat64() - c.Dec(1.0) - if count := c.Snapshot().Count(); count != -1.0 { - t.Errorf("c.Count(): -1.0 != %v\n", count) - } -} - -func TestCounterFloat64Dec2(t *testing.T) { - c := NewCounterFloat64() - c.Dec(2.0) - if count := c.Snapshot().Count(); count != -2.0 { - t.Errorf("c.Count(): -2.0 != %v\n", count) - } -} - -func TestCounterFloat64Inc1(t *testing.T) { - c := NewCounterFloat64() - c.Inc(1.0) - if count := c.Snapshot().Count(); count != 1.0 { - t.Errorf("c.Count(): 1.0 != %v\n", count) - } -} - -func TestCounterFloat64Inc2(t *testing.T) { - c := 
NewCounterFloat64() - c.Inc(2.0) - if count := c.Snapshot().Count(); count != 2.0 { - t.Errorf("c.Count(): 2.0 != %v\n", count) - } -} - -func TestCounterFloat64Snapshot(t *testing.T) { - c := NewCounterFloat64() - c.Inc(1.0) - snapshot := c.Snapshot() - c.Inc(1.0) - if count := snapshot.Count(); count != 1.0 { - t.Errorf("c.Count(): 1.0 != %v\n", count) - } -} - -func TestCounterFloat64Zero(t *testing.T) { - c := NewCounterFloat64() - if count := c.Snapshot().Count(); count != 0 { - t.Errorf("c.Count(): 0 != %v\n", count) - } -} - -func TestGetOrRegisterCounterFloat64(t *testing.T) { - r := NewRegistry() - NewRegisteredCounterFloat64("foo", r).Inc(47.0) - if c := GetOrRegisterCounterFloat64("foo", r).Snapshot(); c.Count() != 47.0 { - t.Fatal(c) - } -} diff --git a/metrics/counter_test.go b/metrics/counter_test.go deleted file mode 100644 index 1b15b23f21..0000000000 --- a/metrics/counter_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package metrics - -import "testing" - -func BenchmarkCounter(b *testing.B) { - c := NewCounter() - b.ResetTimer() - for i := 0; i < b.N; i++ { - c.Inc(1) - } -} - -func TestCounterClear(t *testing.T) { - c := NewCounter() - c.Inc(1) - c.Clear() - if count := c.Snapshot().Count(); count != 0 { - t.Errorf("c.Count(): 0 != %v\n", count) - } -} - -func TestCounterDec1(t *testing.T) { - c := NewCounter() - c.Dec(1) - if count := c.Snapshot().Count(); count != -1 { - t.Errorf("c.Count(): -1 != %v\n", count) - } -} - -func TestCounterDec2(t *testing.T) { - c := NewCounter() - c.Dec(2) - if count := c.Snapshot().Count(); count != -2 { - t.Errorf("c.Count(): -2 != %v\n", count) - } -} - -func TestCounterInc1(t *testing.T) { - c := NewCounter() - c.Inc(1) - if count := c.Snapshot().Count(); count != 1 { - t.Errorf("c.Count(): 1 != %v\n", count) - } -} - -func TestCounterInc2(t *testing.T) { - c := NewCounter() - c.Inc(2) - if count := c.Snapshot().Count(); count != 2 { - t.Errorf("c.Count(): 2 != %v\n", count) - } -} - -func TestCounterSnapshot(t 
*testing.T) { - c := NewCounter() - c.Inc(1) - snapshot := c.Snapshot() - c.Inc(1) - if count := snapshot.Count(); count != 1 { - t.Errorf("c.Count(): 1 != %v\n", count) - } -} - -func TestCounterZero(t *testing.T) { - c := NewCounter() - if count := c.Snapshot().Count(); count != 0 { - t.Errorf("c.Count(): 0 != %v\n", count) - } -} - -func TestGetOrRegisterCounter(t *testing.T) { - r := NewRegistry() - NewRegisteredCounter("foo", r).Inc(47) - if c := GetOrRegisterCounter("foo", r).Snapshot(); c.Count() != 47 { - t.Fatal(c) - } -} diff --git a/metrics/cpu.go b/metrics/cpu.go deleted file mode 100644 index 472a1a42d8..0000000000 --- a/metrics/cpu.go +++ /dev/null @@ -1,35 +0,0 @@ -// (c) 2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package metrics - -// CPUStats is the system and process CPU stats. -// All values are in seconds. 
-type CPUStats struct { - GlobalTime float64 // Time spent by the CPU working on all processes - GlobalWait float64 // Time spent by waiting on disk for all processes - LocalTime float64 // Time spent by the CPU working on this process -} diff --git a/metrics/cpu_disabled.go b/metrics/cpu_disabled.go deleted file mode 100644 index f2c3ead5db..0000000000 --- a/metrics/cpu_disabled.go +++ /dev/null @@ -1,34 +0,0 @@ -// (c) 2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -//go:build ios || js -// +build ios js - -package metrics - -// ReadCPUStats retrieves the current CPU stats. Internally this uses `gosigar`, -// which is not supported on the platforms in this file. -func ReadCPUStats(stats *CPUStats) {} diff --git a/metrics/cpu_enabled.go b/metrics/cpu_enabled.go deleted file mode 100644 index 34f450e1ab..0000000000 --- a/metrics/cpu_enabled.go +++ /dev/null @@ -1,54 +0,0 @@ -// (c) 2022, Ava Labs, Inc. 
-// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -//go:build !ios && !js -// +build !ios,!js - -package metrics - -import ( - "github.com/ava-labs/libevm/log" - "github.com/shirou/gopsutil/cpu" -) - -// ReadCPUStats retrieves the current CPU stats. 
-func ReadCPUStats(stats *CPUStats) { - // passing false to request all cpu times - timeStats, err := cpu.Times(false) - if err != nil { - log.Error("Could not read cpu stats", "err", err) - return - } - if len(timeStats) == 0 { - log.Error("Empty cpu stats") - return - } - // requesting all cpu times will always return an array with only one time stats entry - timeStat := timeStats[0] - stats.GlobalTime = timeStat.User + timeStat.Nice + timeStat.System - stats.GlobalWait = timeStat.Iowait - stats.LocalTime = getProcessCPUTime() -} diff --git a/metrics/cputime_nop.go b/metrics/cputime_nop.go deleted file mode 100644 index 275b983717..0000000000 --- a/metrics/cputime_nop.go +++ /dev/null @@ -1,36 +0,0 @@ -// (c) 2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -//go:build windows || js -// +build windows js - -package metrics - -// getProcessCPUTime returns 0 on Windows as there is no system call to resolve -// the actual process' CPU time. -func getProcessCPUTime() float64 { - return 0 -} diff --git a/metrics/cputime_unix.go b/metrics/cputime_unix.go deleted file mode 100644 index c02303bdce..0000000000 --- a/metrics/cputime_unix.go +++ /dev/null @@ -1,46 +0,0 @@ -// (c) 2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -//go:build !windows && !js -// +build !windows,!js - -package metrics - -import ( - syscall "golang.org/x/sys/unix" - - "github.com/ava-labs/libevm/log" -) - -// getProcessCPUTime retrieves the process' CPU time since program startup. 
-func getProcessCPUTime() float64 { - var usage syscall.Rusage - if err := syscall.Getrusage(syscall.RUSAGE_SELF, &usage); err != nil { - log.Warn("Failed to retrieve CPU time", "err", err) - return 0 - } - return float64(usage.Utime.Sec+usage.Stime.Sec) + float64(usage.Utime.Usec+usage.Stime.Usec)/1000000 //nolint:unconvert -} diff --git a/metrics/debug.go b/metrics/debug.go deleted file mode 100644 index de4a2739fe..0000000000 --- a/metrics/debug.go +++ /dev/null @@ -1,76 +0,0 @@ -package metrics - -import ( - "runtime/debug" - "time" -) - -var ( - debugMetrics struct { - GCStats struct { - LastGC Gauge - NumGC Gauge - Pause Histogram - //PauseQuantiles Histogram - PauseTotal Gauge - } - ReadGCStats Timer - } - gcStats debug.GCStats -) - -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called as a goroutine. -func CaptureDebugGCStats(r Registry, d time.Duration) { - for range time.Tick(d) { - CaptureDebugGCStatsOnce(r) - } -} - -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called in a background goroutine. -// Giving a registry which has not been given to RegisterDebugGCStats will -// panic. -// -// Be careful (but much less so) with this because debug.ReadGCStats calls -// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world -// operation, isn't something you want to be doing all the time. 
-func CaptureDebugGCStatsOnce(r Registry) { - lastGC := gcStats.LastGC - t := time.Now() - debug.ReadGCStats(&gcStats) - debugMetrics.ReadGCStats.UpdateSince(t) - - debugMetrics.GCStats.LastGC.Update(gcStats.LastGC.UnixNano()) - debugMetrics.GCStats.NumGC.Update(gcStats.NumGC) - if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) { - debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0])) - } - //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles) - debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) -} - -// Register metrics for the Go garbage collector statistics exported in -// debug.GCStats. The metrics are named by their fully-qualified Go symbols, -// i.e. debug.GCStats.PauseTotal. -func RegisterDebugGCStats(r Registry) { - debugMetrics.GCStats.LastGC = NewGauge() - debugMetrics.GCStats.NumGC = NewGauge() - debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) - //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) - debugMetrics.GCStats.PauseTotal = NewGauge() - debugMetrics.ReadGCStats = NewTimer() - - r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) - r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) - r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) - //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) - r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) - r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) -} - -// Allocate an initial slice for gcStats.Pause to avoid allocations during -// normal operation. 
-func init() { - gcStats.Pause = make([]time.Duration, 11) -} diff --git a/metrics/debug_test.go b/metrics/debug_test.go deleted file mode 100644 index 07eb867841..0000000000 --- a/metrics/debug_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package metrics - -import ( - "runtime" - "runtime/debug" - "testing" - "time" -) - -func BenchmarkDebugGCStats(b *testing.B) { - r := NewRegistry() - RegisterDebugGCStats(r) - b.ResetTimer() - for i := 0; i < b.N; i++ { - CaptureDebugGCStatsOnce(r) - } -} - -func TestDebugGCStatsBlocking(t *testing.T) { - if g := runtime.GOMAXPROCS(0); g < 2 { - t.Skipf("skipping TestDebugGCMemStatsBlocking with GOMAXPROCS=%d\n", g) - return - } - ch := make(chan int) - go testDebugGCStatsBlocking(ch) - var gcStats debug.GCStats - t0 := time.Now() - debug.ReadGCStats(&gcStats) - t1 := time.Now() - t.Log("i++ during debug.ReadGCStats:", <-ch) - go testDebugGCStatsBlocking(ch) - d := t1.Sub(t0) - t.Log(d) - time.Sleep(d) - t.Log("i++ during time.Sleep:", <-ch) -} - -func testDebugGCStatsBlocking(ch chan int) { - i := 0 - for { - select { - case ch <- i: - return - default: - i++ - } - } -} diff --git a/metrics/disk.go b/metrics/disk.go deleted file mode 100644 index 1fdd32a4d3..0000000000 --- a/metrics/disk.go +++ /dev/null @@ -1,35 +0,0 @@ -// (c) 2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package metrics - -// DiskStats is the per process disk io stats. -type DiskStats struct { - ReadCount int64 // Number of read operations executed - ReadBytes int64 // Total number of bytes read - WriteCount int64 // Number of write operations executed - WriteBytes int64 // Total number of byte written -} diff --git a/metrics/disk_linux.go b/metrics/disk_linux.go deleted file mode 100644 index 25341d748a..0000000000 --- a/metrics/disk_linux.go +++ /dev/null @@ -1,82 +0,0 @@ -// (c) 2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. 
If not, see . - -// Contains the Linux implementation of process disk IO counter retrieval. - -package metrics - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// ReadDiskStats retrieves the disk IO stats belonging to the current process. -func ReadDiskStats(stats *DiskStats) error { - // Open the process disk IO counter file - inf, err := os.Open(fmt.Sprintf("/proc/%d/io", os.Getpid())) - if err != nil { - return err - } - defer inf.Close() - in := bufio.NewReader(inf) - - // Iterate over the IO counter, and extract what we need - for { - // Read the next line and split to key and value - line, err := in.ReadString('\n') - if err != nil { - if err == io.EOF { - return nil - } - return err - } - parts := strings.Split(line, ":") - if len(parts) != 2 { - continue - } - key := strings.TrimSpace(parts[0]) - value, err := strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 64) - if err != nil { - return err - } - - // Update the counter based on the key - switch key { - case "syscr": - stats.ReadCount = value - case "syscw": - stats.WriteCount = value - case "rchar": - stats.ReadBytes = value - case "wchar": - stats.WriteBytes = value - } - } -} diff --git a/metrics/disk_nop.go b/metrics/disk_nop.go deleted file mode 100644 index b1d6ff9f5d..0000000000 --- a/metrics/disk_nop.go +++ /dev/null @@ -1,37 +0,0 @@ -// (c) 2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -//go:build !linux -// +build !linux - -package metrics - -import "errors" - -// ReadDiskStats retrieves the disk IO stats belonging to the current process. -func ReadDiskStats(stats *DiskStats) error { - return errors.New("not implemented") -} diff --git a/metrics/ewma.go b/metrics/ewma.go deleted file mode 100644 index 1d7a4f00cf..0000000000 --- a/metrics/ewma.go +++ /dev/null @@ -1,111 +0,0 @@ -package metrics - -import ( - "math" - "sync" - "sync/atomic" - "time" -) - -type EWMASnapshot interface { - Rate() float64 -} - -// EWMAs continuously calculate an exponentially-weighted moving average -// based on an outside source of clock ticks. -type EWMA interface { - Snapshot() EWMASnapshot - Tick() - Update(int64) -} - -// NewEWMA constructs a new EWMA with the given alpha. -func NewEWMA(alpha float64) EWMA { - return &StandardEWMA{alpha: alpha} -} - -// NewEWMA1 constructs a new EWMA for a one-minute moving average. -func NewEWMA1() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/1)) -} - -// NewEWMA5 constructs a new EWMA for a five-minute moving average. -func NewEWMA5() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/5)) -} - -// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. 
-func NewEWMA15() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/15)) -} - -// ewmaSnapshot is a read-only copy of another EWMA. -type ewmaSnapshot float64 - -// Rate returns the rate of events per second at the time the snapshot was -// taken. -func (a ewmaSnapshot) Rate() float64 { return float64(a) } - -// NilEWMA is a no-op EWMA. -type NilEWMA struct{} - -func (NilEWMA) Snapshot() EWMASnapshot { return (*emptySnapshot)(nil) } -func (NilEWMA) Tick() {} -func (NilEWMA) Update(n int64) {} - -// StandardEWMA is the standard implementation of an EWMA and tracks the number -// of uncounted events and processes them on each tick. It uses the -// sync/atomic package to manage uncounted events. -type StandardEWMA struct { - uncounted atomic.Int64 - alpha float64 - rate atomic.Uint64 - init atomic.Bool - mutex sync.Mutex -} - -// Snapshot returns a read-only copy of the EWMA. -func (a *StandardEWMA) Snapshot() EWMASnapshot { - r := math.Float64frombits(a.rate.Load()) * float64(time.Second) - return ewmaSnapshot(r) -} - -// Tick ticks the clock to update the moving average. It assumes it is called -// every five seconds. -func (a *StandardEWMA) Tick() { - // Optimization to avoid mutex locking in the hot-path. - if a.init.Load() { - a.updateRate(a.fetchInstantRate()) - return - } - // Slow-path: this is only needed on the first Tick() and preserves transactional updating - // of init and rate in the else block. The first conditional is needed below because - // a different thread could have set a.init = 1 between the time of the first atomic load and when - // the lock was acquired. - a.mutex.Lock() - if a.init.Load() { - // The fetchInstantRate() uses atomic loading, which is unnecessary in this critical section - // but again, this section is only invoked on the first successful Tick() operation. 
- a.updateRate(a.fetchInstantRate()) - } else { - a.init.Store(true) - a.rate.Store(math.Float64bits(a.fetchInstantRate())) - } - a.mutex.Unlock() -} - -func (a *StandardEWMA) fetchInstantRate() float64 { - count := a.uncounted.Swap(0) - return float64(count) / float64(5*time.Second) -} - -func (a *StandardEWMA) updateRate(instantRate float64) { - currentRate := math.Float64frombits(a.rate.Load()) - currentRate += a.alpha * (instantRate - currentRate) - a.rate.Store(math.Float64bits(currentRate)) -} - -// Update adds n uncounted events. -func (a *StandardEWMA) Update(n int64) { - a.uncounted.Add(n) -} diff --git a/metrics/ewma_test.go b/metrics/ewma_test.go deleted file mode 100644 index 9a91b43db8..0000000000 --- a/metrics/ewma_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package metrics - -import ( - "math" - "testing" -) - -const epsilon = 0.0000000000000001 - -func BenchmarkEWMA(b *testing.B) { - a := NewEWMA1() - b.ResetTimer() - for i := 0; i < b.N; i++ { - a.Update(1) - a.Tick() - } -} - -func BenchmarkEWMAParallel(b *testing.B) { - a := NewEWMA1() - b.ResetTimer() - - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - a.Update(1) - a.Tick() - } - }) -} - -func TestEWMA1(t *testing.T) { - a := NewEWMA1() - a.Update(3) - a.Tick() - for i, want := range []float64{0.6, - 0.22072766470286553, 0.08120116994196772, 0.029872241020718428, - 0.01098938333324054, 0.004042768199451294, 0.0014872513059998212, - 0.0005471291793327122, 0.00020127757674150815, 7.404588245200814e-05, - 2.7239957857491083e-05, 1.0021020474147462e-05, 3.6865274119969525e-06, - 1.3561976441886433e-06, 4.989172314621449e-07, 1.8354139230109722e-07, - } { - if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon { - t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate) - } - elapseMinute(a) - } -} - -func TestEWMA5(t *testing.T) { - a := NewEWMA5() - a.Update(3) - a.Tick() - for i, want := range []float64{ - 0.6, 0.49123845184678905, 0.4021920276213837, 0.32928698165641596, 
- 0.269597378470333, 0.2207276647028654, 0.18071652714732128, - 0.14795817836496392, 0.12113791079679326, 0.09917933293295193, - 0.08120116994196763, 0.06648189501740036, 0.05443077197364752, - 0.04456414692860035, 0.03648603757513079, 0.0298722410207183831020718428, - } { - if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon { - t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate) - } - elapseMinute(a) - } -} - -func TestEWMA15(t *testing.T) { - a := NewEWMA15() - a.Update(3) - a.Tick() - for i, want := range []float64{ - 0.6, 0.5613041910189706, 0.5251039914257684, 0.4912384518467888184678905, - 0.459557003018789, 0.4299187863442732, 0.4021920276213831, - 0.37625345116383313, 0.3519877317060185, 0.3292869816564153165641596, - 0.3080502714195546, 0.2881831806538789, 0.26959737847033216, - 0.2522102307052083, 0.23594443252115815, 0.2207276647028646247028654470286553, - } { - if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon { - t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate) - } - elapseMinute(a) - } -} - -func elapseMinute(a EWMA) { - for i := 0; i < 12; i++ { - a.Tick() - } -} diff --git a/metrics/gauge.go b/metrics/gauge.go deleted file mode 100644 index 5933df3107..0000000000 --- a/metrics/gauge.go +++ /dev/null @@ -1,98 +0,0 @@ -package metrics - -import "sync/atomic" - -// GaugeSnapshot contains a readonly int64. -type GaugeSnapshot interface { - Value() int64 -} - -// Gauge holds an int64 value that can be set arbitrarily. -type Gauge interface { - Snapshot() GaugeSnapshot - Update(int64) - UpdateIfGt(int64) - Dec(int64) - Inc(int64) -} - -// GetOrRegisterGauge returns an existing Gauge or constructs and registers a -// new StandardGauge. -func GetOrRegisterGauge(name string, r Registry) Gauge { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewGauge).(Gauge) -} - -// NewGauge constructs a new StandardGauge. 
-func NewGauge() Gauge { - if !Enabled { - return NilGauge{} - } - return &StandardGauge{} -} - -// NewRegisteredGauge constructs and registers a new StandardGauge. -func NewRegisteredGauge(name string, r Registry) Gauge { - c := NewGauge() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// gaugeSnapshot is a read-only copy of another Gauge. -type gaugeSnapshot int64 - -// Value returns the value at the time the snapshot was taken. -func (g gaugeSnapshot) Value() int64 { return int64(g) } - -// NilGauge is a no-op Gauge. -type NilGauge struct{} - -func (NilGauge) Snapshot() GaugeSnapshot { return (*emptySnapshot)(nil) } -func (NilGauge) Update(v int64) {} -func (NilGauge) UpdateIfGt(v int64) {} -func (NilGauge) Dec(i int64) {} -func (NilGauge) Inc(i int64) {} - -// StandardGauge is the standard implementation of a Gauge and uses the -// sync/atomic package to manage a single int64 value. -type StandardGauge struct { - value atomic.Int64 -} - -// Snapshot returns a read-only copy of the gauge. -func (g *StandardGauge) Snapshot() GaugeSnapshot { - return gaugeSnapshot(g.value.Load()) -} - -// Update updates the gauge's value. -func (g *StandardGauge) Update(v int64) { - g.value.Store(v) -} - -// Update updates the gauge's value if v is larger then the current value. -func (g *StandardGauge) UpdateIfGt(v int64) { - for { - exist := g.value.Load() - if exist >= v { - break - } - if g.value.CompareAndSwap(exist, v) { - break - } - } -} - -// Dec decrements the gauge's current value by the given amount. -func (g *StandardGauge) Dec(i int64) { - g.value.Add(-i) -} - -// Inc increments the gauge's current value by the given amount. 
-func (g *StandardGauge) Inc(i int64) { - g.value.Add(i) -} diff --git a/metrics/gauge_float64.go b/metrics/gauge_float64.go deleted file mode 100644 index c1c3c6b6e6..0000000000 --- a/metrics/gauge_float64.go +++ /dev/null @@ -1,73 +0,0 @@ -package metrics - -import ( - "math" - "sync/atomic" -) - -type GaugeFloat64Snapshot interface { - Value() float64 -} - -// GaugeFloat64 hold a float64 value that can be set arbitrarily. -type GaugeFloat64 interface { - Snapshot() GaugeFloat64Snapshot - Update(float64) -} - -// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a -// new StandardGaugeFloat64. -func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64) -} - -// NewGaugeFloat64 constructs a new StandardGaugeFloat64. -func NewGaugeFloat64() GaugeFloat64 { - if !Enabled { - return NilGaugeFloat64{} - } - return &StandardGaugeFloat64{} -} - -// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64. -func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 { - c := NewGaugeFloat64() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// gaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. -type gaugeFloat64Snapshot float64 - -// Value returns the value at the time the snapshot was taken. -func (g gaugeFloat64Snapshot) Value() float64 { return float64(g) } - -// NilGaugeFloat64 is a no-op Gauge. -type NilGaugeFloat64 struct{} - -func (NilGaugeFloat64) Snapshot() GaugeFloat64Snapshot { return NilGaugeFloat64{} } -func (NilGaugeFloat64) Update(v float64) {} -func (NilGaugeFloat64) Value() float64 { return 0.0 } - -// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses -// atomic to manage a single float64 value. 
-type StandardGaugeFloat64 struct { - floatBits atomic.Uint64 -} - -// Snapshot returns a read-only copy of the gauge. -func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64Snapshot { - v := math.Float64frombits(g.floatBits.Load()) - return gaugeFloat64Snapshot(v) -} - -// Update updates the gauge's value. -func (g *StandardGaugeFloat64) Update(v float64) { - g.floatBits.Store(math.Float64bits(v)) -} diff --git a/metrics/gauge_float64_test.go b/metrics/gauge_float64_test.go deleted file mode 100644 index 194a18821f..0000000000 --- a/metrics/gauge_float64_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package metrics - -import ( - "sync" - "testing" -) - -func BenchmarkGaugeFloat64(b *testing.B) { - g := NewGaugeFloat64() - b.ResetTimer() - for i := 0; i < b.N; i++ { - g.Update(float64(i)) - } -} - -func BenchmarkGaugeFloat64Parallel(b *testing.B) { - c := NewGaugeFloat64() - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - for i := 0; i < b.N; i++ { - c.Update(float64(i)) - } - wg.Done() - }() - } - wg.Wait() - if have, want := c.Snapshot().Value(), float64(b.N-1); have != want { - b.Fatalf("have %f want %f", have, want) - } -} - -func TestGaugeFloat64Snapshot(t *testing.T) { - g := NewGaugeFloat64() - g.Update(47.0) - snapshot := g.Snapshot() - g.Update(float64(0)) - if v := snapshot.Value(); v != 47.0 { - t.Errorf("g.Value(): 47.0 != %v\n", v) - } -} - -func TestGetOrRegisterGaugeFloat64(t *testing.T) { - r := NewRegistry() - NewRegisteredGaugeFloat64("foo", r).Update(47.0) - t.Logf("registry: %v", r) - if g := GetOrRegisterGaugeFloat64("foo", r).Snapshot(); g.Value() != 47.0 { - t.Fatal(g) - } -} diff --git a/metrics/gauge_info.go b/metrics/gauge_info.go deleted file mode 100644 index 0010edc324..0000000000 --- a/metrics/gauge_info.go +++ /dev/null @@ -1,84 +0,0 @@ -package metrics - -import ( - "encoding/json" - "sync" -) - -type GaugeInfoSnapshot interface { - Value() GaugeInfoValue -} - -// GaugeInfo holds a GaugeInfoValue value that 
can be set arbitrarily. -type GaugeInfo interface { - Update(GaugeInfoValue) - Snapshot() GaugeInfoSnapshot -} - -// GaugeInfoValue is a mapping of keys to values -type GaugeInfoValue map[string]string - -func (val GaugeInfoValue) String() string { - data, _ := json.Marshal(val) - return string(data) -} - -// GetOrRegisterGaugeInfo returns an existing GaugeInfo or constructs and registers a -// new StandardGaugeInfo. -func GetOrRegisterGaugeInfo(name string, r Registry) GaugeInfo { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewGaugeInfo()).(GaugeInfo) -} - -// NewGaugeInfo constructs a new StandardGaugeInfo. -func NewGaugeInfo() GaugeInfo { - if !Enabled { - return NilGaugeInfo{} - } - return &StandardGaugeInfo{ - value: GaugeInfoValue{}, - } -} - -// NewRegisteredGaugeInfo constructs and registers a new StandardGaugeInfo. -func NewRegisteredGaugeInfo(name string, r Registry) GaugeInfo { - c := NewGaugeInfo() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// gaugeInfoSnapshot is a read-only copy of another GaugeInfo. -type gaugeInfoSnapshot GaugeInfoValue - -// Value returns the value at the time the snapshot was taken. -func (g gaugeInfoSnapshot) Value() GaugeInfoValue { return GaugeInfoValue(g) } - -type NilGaugeInfo struct{} - -func (NilGaugeInfo) Snapshot() GaugeInfoSnapshot { return NilGaugeInfo{} } -func (NilGaugeInfo) Update(v GaugeInfoValue) {} -func (NilGaugeInfo) Value() GaugeInfoValue { return GaugeInfoValue{} } - -// StandardGaugeInfo is the standard implementation of a GaugeInfo and uses -// sync.Mutex to manage a single string value. -type StandardGaugeInfo struct { - mutex sync.Mutex - value GaugeInfoValue -} - -// Snapshot returns a read-only copy of the gauge. -func (g *StandardGaugeInfo) Snapshot() GaugeInfoSnapshot { - return gaugeInfoSnapshot(g.value) -} - -// Update updates the gauge's value. 
-func (g *StandardGaugeInfo) Update(v GaugeInfoValue) { - g.mutex.Lock() - defer g.mutex.Unlock() - g.value = v -} diff --git a/metrics/gauge_info_test.go b/metrics/gauge_info_test.go deleted file mode 100644 index 319afbf92e..0000000000 --- a/metrics/gauge_info_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package metrics - -import ( - "testing" -) - -func TestGaugeInfoJsonString(t *testing.T) { - g := NewGaugeInfo() - g.Update(GaugeInfoValue{ - "chain_id": "5", - "anotherKey": "any_string_value", - "third_key": "anything", - }, - ) - want := `{"anotherKey":"any_string_value","chain_id":"5","third_key":"anything"}` - - original := g.Snapshot() - g.Update(GaugeInfoValue{"value": "updated"}) - - if have := original.Value().String(); have != want { - t.Errorf("\nhave: %v\nwant: %v\n", have, want) - } - if have, want := g.Snapshot().Value().String(), `{"value":"updated"}`; have != want { - t.Errorf("\nhave: %v\nwant: %v\n", have, want) - } -} - -func TestGetOrRegisterGaugeInfo(t *testing.T) { - r := NewRegistry() - NewRegisteredGaugeInfo("foo", r).Update( - GaugeInfoValue{"chain_id": "5"}) - g := GetOrRegisterGaugeInfo("foo", r).Snapshot() - if have, want := g.Value().String(), `{"chain_id":"5"}`; have != want { - t.Errorf("have\n%v\nwant\n%v\n", have, want) - } -} diff --git a/metrics/gauge_test.go b/metrics/gauge_test.go deleted file mode 100644 index f2ba930bc4..0000000000 --- a/metrics/gauge_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package metrics - -import ( - "testing" -) - -func BenchmarkGauge(b *testing.B) { - g := NewGauge() - b.ResetTimer() - for i := 0; i < b.N; i++ { - g.Update(int64(i)) - } -} - -func TestGaugeSnapshot(t *testing.T) { - g := NewGauge() - g.Update(int64(47)) - snapshot := g.Snapshot() - g.Update(int64(0)) - if v := snapshot.Value(); v != 47 { - t.Errorf("g.Value(): 47 != %v\n", v) - } -} - -func TestGetOrRegisterGauge(t *testing.T) { - r := NewRegistry() - NewRegisteredGauge("foo", r).Update(47) - if g := GetOrRegisterGauge("foo", r); 
g.Snapshot().Value() != 47 { - t.Fatal(g) - } -} diff --git a/metrics/graphite.go b/metrics/graphite.go deleted file mode 100644 index aba752e0ed..0000000000 --- a/metrics/graphite.go +++ /dev/null @@ -1,117 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "strconv" - "strings" - "time" -) - -// GraphiteConfig provides a container with configuration parameters for -// the Graphite exporter -type GraphiteConfig struct { - Addr *net.TCPAddr // Network address to connect to - Registry Registry // Registry to be exported - FlushInterval time.Duration // Flush interval - DurationUnit time.Duration // Time conversion unit for durations - Prefix string // Prefix to be prepended to metric names - Percentiles []float64 // Percentiles to export from timers and histograms -} - -// Graphite is a blocking exporter function which reports metrics in r -// to a graphite server located at addr, flushing them every d duration -// and prepending metric names with prefix. -func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { - GraphiteWithConfig(GraphiteConfig{ - Addr: addr, - Registry: r, - FlushInterval: d, - DurationUnit: time.Nanosecond, - Prefix: prefix, - Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, - }) -} - -// GraphiteWithConfig is a blocking exporter function just like Graphite, -// but it takes a GraphiteConfig instead. -func GraphiteWithConfig(c GraphiteConfig) { - log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") - for range time.Tick(c.FlushInterval) { - if err := graphite(&c); nil != err { - log.Println(err) - } - } -} - -// GraphiteOnce performs a single submission to Graphite, returning a -// non-nil error on failed connections. This can be used in a loop -// similar to GraphiteWithConfig for custom error handling. 
-func GraphiteOnce(c GraphiteConfig) error { - log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") - return graphite(&c) -} - -func graphite(c *GraphiteConfig) error { - now := time.Now().Unix() - du := float64(c.DurationUnit) - conn, err := net.DialTCP("tcp", nil, c.Addr) - if nil != err { - return err - } - defer conn.Close() - w := bufio.NewWriter(conn) - c.Registry.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Snapshot().Count(), now) - case CounterFloat64: - fmt.Fprintf(w, "%s.%s.count %f %d\n", c.Prefix, name, metric.Snapshot().Count(), now) - case Gauge: - fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Snapshot().Value(), now) - case GaugeFloat64: - fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Snapshot().Value(), now) - case GaugeInfo: - fmt.Fprintf(w, "%s.%s.value %s %d\n", c.Prefix, name, metric.Snapshot().Value().String(), now) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) - } - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) - fmt.Fprintf(w, 
"%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) - } - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) - fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) - } - w.Flush() - }) - return nil -} diff --git a/metrics/graphite_test.go b/metrics/graphite_test.go deleted file mode 100644 index c797c781df..0000000000 --- a/metrics/graphite_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package metrics - -import ( - "net" - "time" -) - -func ExampleGraphite() { - addr, _ := net.ResolveTCPAddr("net", ":2003") - go Graphite(DefaultRegistry, 1*time.Second, "some.prefix", addr) -} - -func ExampleGraphiteWithConfig() { - addr, _ := net.ResolveTCPAddr("net", ":2003") - go GraphiteWithConfig(GraphiteConfig{ - Addr: addr, - Registry: DefaultRegistry, - FlushInterval: 1 * time.Second, - DurationUnit: time.Millisecond, - Percentiles: []float64{0.5, 0.75, 0.99, 0.999}, - }) -} diff --git a/metrics/healthcheck.go b/metrics/healthcheck.go 
deleted file mode 100644 index adcd15ab58..0000000000 --- a/metrics/healthcheck.go +++ /dev/null @@ -1,61 +0,0 @@ -package metrics - -// Healthcheck holds an error value describing an arbitrary up/down status. -type Healthcheck interface { - Check() - Error() error - Healthy() - Unhealthy(error) -} - -// NewHealthcheck constructs a new Healthcheck which will use the given -// function to update its status. -func NewHealthcheck(f func(Healthcheck)) Healthcheck { - if !Enabled { - return NilHealthcheck{} - } - return &StandardHealthcheck{nil, f} -} - -// NilHealthcheck is a no-op. -type NilHealthcheck struct{} - -// Check is a no-op. -func (NilHealthcheck) Check() {} - -// Error is a no-op. -func (NilHealthcheck) Error() error { return nil } - -// Healthy is a no-op. -func (NilHealthcheck) Healthy() {} - -// Unhealthy is a no-op. -func (NilHealthcheck) Unhealthy(error) {} - -// StandardHealthcheck is the standard implementation of a Healthcheck and -// stores the status and a function to call to update the status. -type StandardHealthcheck struct { - err error - f func(Healthcheck) -} - -// Check runs the healthcheck function to update the healthcheck's status. -func (h *StandardHealthcheck) Check() { - h.f(h) -} - -// Error returns the healthcheck's status, which will be nil if it is healthy. -func (h *StandardHealthcheck) Error() error { - return h.err -} - -// Healthy marks the healthcheck as healthy. -func (h *StandardHealthcheck) Healthy() { - h.err = nil -} - -// Unhealthy marks the healthcheck as unhealthy. The error is stored and -// may be retrieved by the Error method. 
-func (h *StandardHealthcheck) Unhealthy(err error) { - h.err = err -} diff --git a/metrics/histogram.go b/metrics/histogram.go deleted file mode 100644 index 10259a2463..0000000000 --- a/metrics/histogram.go +++ /dev/null @@ -1,73 +0,0 @@ -package metrics - -type HistogramSnapshot interface { - SampleSnapshot -} - -// Histogram calculates distribution statistics from a series of int64 values. -type Histogram interface { - Clear() - Update(int64) - Snapshot() HistogramSnapshot -} - -// GetOrRegisterHistogram returns an existing Histogram or constructs and -// registers a new StandardHistogram. -func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) -} - -// GetOrRegisterHistogramLazy returns an existing Histogram or constructs and -// registers a new StandardHistogram. -func GetOrRegisterHistogramLazy(name string, r Registry, s func() Sample) Histogram { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, func() Histogram { return NewHistogram(s()) }).(Histogram) -} - -// NewHistogram constructs a new StandardHistogram from a Sample. -func NewHistogram(s Sample) Histogram { - if !Enabled { - return NilHistogram{} - } - return &StandardHistogram{sample: s} -} - -// NewRegisteredHistogram constructs and registers a new StandardHistogram from -// a Sample. -func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { - c := NewHistogram(s) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NilHistogram is a no-op Histogram. -type NilHistogram struct{} - -func (NilHistogram) Clear() {} -func (NilHistogram) Snapshot() HistogramSnapshot { return (*emptySnapshot)(nil) } -func (NilHistogram) Update(v int64) {} - -// StandardHistogram is the standard implementation of a Histogram and uses a -// Sample to bound its memory use. 
-type StandardHistogram struct { - sample Sample -} - -// Clear clears the histogram and its sample. -func (h *StandardHistogram) Clear() { h.sample.Clear() } - -// Snapshot returns a read-only copy of the histogram. -func (h *StandardHistogram) Snapshot() HistogramSnapshot { - return h.sample.Snapshot() -} - -// Update samples a new value. -func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } diff --git a/metrics/histogram_test.go b/metrics/histogram_test.go deleted file mode 100644 index 22fc5468b0..0000000000 --- a/metrics/histogram_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package metrics - -import "testing" - -func BenchmarkHistogram(b *testing.B) { - h := NewHistogram(NewUniformSample(100)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - h.Update(int64(i)) - } -} - -func TestGetOrRegisterHistogram(t *testing.T) { - r := NewRegistry() - s := NewUniformSample(100) - NewRegisteredHistogram("foo", r, s).Update(47) - if h := GetOrRegisterHistogram("foo", r, s).Snapshot(); h.Count() != 1 { - t.Fatal(h) - } -} - -func TestHistogram10000(t *testing.T) { - h := NewHistogram(NewUniformSample(100000)) - for i := 1; i <= 10000; i++ { - h.Update(int64(i)) - } - testHistogram10000(t, h.Snapshot()) -} - -func TestHistogramEmpty(t *testing.T) { - h := NewHistogram(NewUniformSample(100)).Snapshot() - if count := h.Count(); count != 0 { - t.Errorf("h.Count(): 0 != %v\n", count) - } - if min := h.Min(); min != 0 { - t.Errorf("h.Min(): 0 != %v\n", min) - } - if max := h.Max(); max != 0 { - t.Errorf("h.Max(): 0 != %v\n", max) - } - if mean := h.Mean(); mean != 0.0 { - t.Errorf("h.Mean(): 0.0 != %v\n", mean) - } - if stdDev := h.StdDev(); stdDev != 0.0 { - t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev) - } - ps := h.Percentiles([]float64{0.5, 0.75, 0.99}) - if ps[0] != 0.0 { - t.Errorf("median: 0.0 != %v\n", ps[0]) - } - if ps[1] != 0.0 { - t.Errorf("75th percentile: 0.0 != %v\n", ps[1]) - } - if ps[2] != 0.0 { - t.Errorf("99th percentile: 0.0 != %v\n", ps[2]) - } -} - 
-func TestHistogramSnapshot(t *testing.T) { - h := NewHistogram(NewUniformSample(100000)) - for i := 1; i <= 10000; i++ { - h.Update(int64(i)) - } - snapshot := h.Snapshot() - h.Update(0) - testHistogram10000(t, snapshot) -} - -func testHistogram10000(t *testing.T, h HistogramSnapshot) { - if count := h.Count(); count != 10000 { - t.Errorf("h.Count(): 10000 != %v\n", count) - } - if min := h.Min(); min != 1 { - t.Errorf("h.Min(): 1 != %v\n", min) - } - if max := h.Max(); max != 10000 { - t.Errorf("h.Max(): 10000 != %v\n", max) - } - if mean := h.Mean(); mean != 5000.5 { - t.Errorf("h.Mean(): 5000.5 != %v\n", mean) - } - if stdDev := h.StdDev(); stdDev != 2886.751331514372 { - t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev) - } - ps := h.Percentiles([]float64{0.5, 0.75, 0.99}) - if ps[0] != 5000.5 { - t.Errorf("median: 5000.5 != %v\n", ps[0]) - } - if ps[1] != 7500.75 { - t.Errorf("75th percentile: 7500.75 != %v\n", ps[1]) - } - if ps[2] != 9900.99 { - t.Errorf("99th percentile: 9900.99 != %v\n", ps[2]) - } -} diff --git a/metrics/inactive.go b/metrics/inactive.go deleted file mode 100644 index 1f47f0210a..0000000000 --- a/metrics/inactive.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package metrics - -// compile-time checks that interfaces are implemented. -var ( - _ SampleSnapshot = (*emptySnapshot)(nil) - _ HistogramSnapshot = (*emptySnapshot)(nil) - _ CounterSnapshot = (*emptySnapshot)(nil) - _ GaugeSnapshot = (*emptySnapshot)(nil) - _ MeterSnapshot = (*emptySnapshot)(nil) - _ EWMASnapshot = (*emptySnapshot)(nil) - _ TimerSnapshot = (*emptySnapshot)(nil) -) - -type emptySnapshot struct{} - -func (*emptySnapshot) Count() int64 { return 0 } -func (*emptySnapshot) Max() int64 { return 0 } -func (*emptySnapshot) Mean() float64 { return 0.0 } -func (*emptySnapshot) Min() int64 { return 0 } -func (*emptySnapshot) Percentile(p float64) float64 { return 0.0 } -func (*emptySnapshot) Percentiles(ps []float64) []float64 { return make([]float64, len(ps)) } -func (*emptySnapshot) Size() int { return 0 } -func (*emptySnapshot) StdDev() float64 { return 0.0 } -func (*emptySnapshot) Sum() int64 { return 0 } -func (*emptySnapshot) Values() []int64 { return []int64{} } -func (*emptySnapshot) Variance() float64 { return 0.0 } -func (*emptySnapshot) Value() int64 { return 0 } -func (*emptySnapshot) Rate() float64 { return 0.0 } -func (*emptySnapshot) Rate1() float64 { return 0.0 } -func (*emptySnapshot) Rate5() float64 { return 0.0 } -func (*emptySnapshot) Rate15() float64 { return 0.0 } -func (*emptySnapshot) RateMean() float64 { return 0.0 } diff --git a/metrics/init_test.go b/metrics/init_test.go deleted file mode 100644 index 43401e833c..0000000000 --- a/metrics/init_test.go +++ /dev/null @@ -1,5 +0,0 @@ -package metrics - -func init() { - Enabled = true -} diff --git a/metrics/json.go b/metrics/json.go deleted file mode 100644 index 2087d8211e..0000000000 --- a/metrics/json.go +++ /dev/null @@ -1,31 +0,0 @@ -package metrics - -import ( - "encoding/json" - "io" - "time" -) - -// MarshalJSON returns a byte slice containing a JSON representation of all -// the metrics in the Registry. 
-func (r *StandardRegistry) MarshalJSON() ([]byte, error) { - return json.Marshal(r.GetAll()) -} - -// WriteJSON writes metrics from the given registry periodically to the -// specified io.Writer as JSON. -func WriteJSON(r Registry, d time.Duration, w io.Writer) { - for range time.Tick(d) { - WriteJSONOnce(r, w) - } -} - -// WriteJSONOnce writes metrics from the given registry to the specified -// io.Writer as JSON. -func WriteJSONOnce(r Registry, w io.Writer) { - json.NewEncoder(w).Encode(r) -} - -func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) { - return json.Marshal(p.GetAll()) -} diff --git a/metrics/json_test.go b/metrics/json_test.go deleted file mode 100644 index 811bc29f11..0000000000 --- a/metrics/json_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package metrics - -import ( - "bytes" - "encoding/json" - "testing" -) - -func TestRegistryMarshallJSON(t *testing.T) { - b := &bytes.Buffer{} - enc := json.NewEncoder(b) - r := NewRegistry() - r.Register("counter", NewCounter()) - enc.Encode(r) - if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" { - t.Fatal(s) - } -} - -func TestRegistryWriteJSONOnce(t *testing.T) { - r := NewRegistry() - r.Register("counter", NewCounter()) - b := &bytes.Buffer{} - WriteJSONOnce(r, b) - if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" { - t.Fail() - } -} diff --git a/metrics/log.go b/metrics/log.go deleted file mode 100644 index 3b9773faa7..0000000000 --- a/metrics/log.go +++ /dev/null @@ -1,86 +0,0 @@ -package metrics - -import ( - "time" -) - -type Logger interface { - Printf(format string, v ...interface{}) -} - -func Log(r Registry, freq time.Duration, l Logger) { - LogScaled(r, freq, time.Nanosecond, l) -} - -// Output each metric in the given registry periodically using the given -// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. 
-func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { - du := float64(scale) - duSuffix := scale.String()[1:] - - for range time.Tick(freq) { - r.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - l.Printf("counter %s\n", name) - l.Printf(" count: %9d\n", metric.Snapshot().Count()) - case CounterFloat64: - l.Printf("counter %s\n", name) - l.Printf(" count: %f\n", metric.Snapshot().Count()) - case Gauge: - l.Printf("gauge %s\n", name) - l.Printf(" value: %9d\n", metric.Snapshot().Value()) - case GaugeFloat64: - l.Printf("gauge %s\n", name) - l.Printf(" value: %f\n", metric.Snapshot().Value()) - case GaugeInfo: - l.Printf("gauge %s\n", name) - l.Printf(" value: %s\n", metric.Snapshot().Value()) - case Healthcheck: - metric.Check() - l.Printf("healthcheck %s\n", name) - l.Printf(" error: %v\n", metric.Error()) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - l.Printf("histogram %s\n", name) - l.Printf(" count: %9d\n", h.Count()) - l.Printf(" min: %9d\n", h.Min()) - l.Printf(" max: %9d\n", h.Max()) - l.Printf(" mean: %12.2f\n", h.Mean()) - l.Printf(" stddev: %12.2f\n", h.StdDev()) - l.Printf(" median: %12.2f\n", ps[0]) - l.Printf(" 75%%: %12.2f\n", ps[1]) - l.Printf(" 95%%: %12.2f\n", ps[2]) - l.Printf(" 99%%: %12.2f\n", ps[3]) - l.Printf(" 99.9%%: %12.2f\n", ps[4]) - case Meter: - m := metric.Snapshot() - l.Printf("meter %s\n", name) - l.Printf(" count: %9d\n", m.Count()) - l.Printf(" 1-min rate: %12.2f\n", m.Rate1()) - l.Printf(" 5-min rate: %12.2f\n", m.Rate5()) - l.Printf(" 15-min rate: %12.2f\n", m.Rate15()) - l.Printf(" mean rate: %12.2f\n", m.RateMean()) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - l.Printf("timer %s\n", name) - l.Printf(" count: %9d\n", t.Count()) - l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix) - l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, 
duSuffix) - l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix) - l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix) - l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix) - l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix) - l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix) - l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix) - l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix) - l.Printf(" 1-min rate: %12.2f\n", t.Rate1()) - l.Printf(" 5-min rate: %12.2f\n", t.Rate5()) - l.Printf(" 15-min rate: %12.2f\n", t.Rate15()) - l.Printf(" mean rate: %12.2f\n", t.RateMean()) - } - }) - } -} diff --git a/metrics/memory.md b/metrics/memory.md deleted file mode 100644 index 47454f54b6..0000000000 --- a/metrics/memory.md +++ /dev/null @@ -1,285 +0,0 @@ -Memory usage -============ - -(Highly unscientific.) - -Command used to gather static memory usage: - -```sh -grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status" -``` - -Program used to gather baseline memory usage: - -```go -package main - -import "time" - -func main() { - time.Sleep(600e9) -} -``` - -Baseline --------- - -``` -VmPeak: 42604 kB -VmSize: 42604 kB -VmLck: 0 kB -VmHWM: 1120 kB -VmRSS: 1120 kB -VmData: 35460 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 36 kB -VmSwap: 0 kB -``` - -Program used to gather metric memory usage (with other metrics being similar): - -```go -package main - -import ( - "fmt" - "metrics" - "time" -) - -func main() { - fmt.Sprintf("foo") - metrics.NewRegistry() - time.Sleep(600e9) -} -``` - -1000 counters registered ------------------------- - -``` -VmPeak: 44016 kB -VmSize: 44016 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.412 kB virtual, TODO 0.808 kB resident per counter.** - -100000 counters registered --------------------------- - -``` -VmPeak: 55024 kB -VmSize: 55024 kB -VmLck: 0 kB -VmHWM: 12440 kB -VmRSS: 12440 kB -VmData: 47876 kB 
-VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**0.1242 kB virtual, 0.1132 kB resident per counter.** - -1000 gauges registered ----------------------- - -``` -VmPeak: 44012 kB -VmSize: 44012 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.408 kB virtual, 0.808 kB resident per counter.** - -100000 gauges registered ------------------------- - -``` -VmPeak: 55020 kB -VmSize: 55020 kB -VmLck: 0 kB -VmHWM: 12432 kB -VmRSS: 12432 kB -VmData: 47876 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 60 kB -VmSwap: 0 kB -``` - -**0.12416 kB virtual, 0.11312 resident per gauge.** - -1000 histograms with a uniform sample size of 1028 --------------------------------------------------- - -``` -VmPeak: 72272 kB -VmSize: 72272 kB -VmLck: 0 kB -VmHWM: 16204 kB -VmRSS: 16204 kB -VmData: 65100 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 80 kB -VmSwap: 0 kB -``` - -**29.668 kB virtual, TODO 15.084 resident per histogram.** - -10000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 256912 kB -VmSize: 256912 kB -VmLck: 0 kB -VmHWM: 146204 kB -VmRSS: 146204 kB -VmData: 249740 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 448 kB -VmSwap: 0 kB -``` - -**21.4308 kB virtual, 14.5084 kB resident per histogram.** - -50000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 908112 kB -VmSize: 908112 kB -VmLck: 0 kB -VmHWM: 645832 kB -VmRSS: 645588 kB -VmData: 900940 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 1716 kB -VmSwap: 1544 kB -``` - -**17.31016 kB virtual, 12.88936 kB resident per histogram.** - -1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 -------------------------------------------------------------------------------------- - -``` 
-VmPeak: 62480 kB -VmSize: 62480 kB -VmLck: 0 kB -VmHWM: 11572 kB -VmRSS: 11572 kB -VmData: 55308 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**19.876 kB virtual, 10.452 kB resident per histogram.** - -10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 --------------------------------------------------------------------------------------- - -``` -VmPeak: 153296 kB -VmSize: 153296 kB -VmLck: 0 kB -VmHWM: 101176 kB -VmRSS: 101176 kB -VmData: 146124 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 240 kB -VmSwap: 0 kB -``` - -**11.0692 kB virtual, 10.0056 kB resident per histogram.** - -50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 --------------------------------------------------------------------------------------- - -``` -VmPeak: 557264 kB -VmSize: 557264 kB -VmLck: 0 kB -VmHWM: 501056 kB -VmRSS: 501056 kB -VmData: 550092 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 1032 kB -VmSwap: 0 kB -``` - -**10.2932 kB virtual, 9.99872 kB resident per histogram.** - -1000 meters ------------ - -``` -VmPeak: 74504 kB -VmSize: 74504 kB -VmLck: 0 kB -VmHWM: 24124 kB -VmRSS: 24124 kB -VmData: 67340 kB -VmStk: 136 kB -VmExe: 1040 kB -VmLib: 1848 kB -VmPTE: 92 kB -VmSwap: 0 kB -``` - -**31.9 kB virtual, 23.004 kB resident per meter.** - -10000 meters ------------- - -``` -VmPeak: 278920 kB -VmSize: 278920 kB -VmLck: 0 kB -VmHWM: 227300 kB -VmRSS: 227300 kB -VmData: 271756 kB -VmStk: 136 kB -VmExe: 1040 kB -VmLib: 1848 kB -VmPTE: 488 kB -VmSwap: 0 kB -``` - -**23.6316 kB virtual, 22.618 kB resident per meter.** diff --git a/metrics/meter.go b/metrics/meter.go deleted file mode 100644 index 22475ef6eb..0000000000 --- a/metrics/meter.go +++ /dev/null @@ -1,189 +0,0 @@ -package metrics - -import ( - "math" - "sync" - "sync/atomic" - "time" -) - -type MeterSnapshot interface { - Count() int64 - Rate1() float64 - Rate5() float64 - Rate15() float64 
- RateMean() float64 -} - -// Meters count events to produce exponentially-weighted moving average rates -// at one-, five-, and fifteen-minutes and a mean rate. -type Meter interface { - Mark(int64) - Snapshot() MeterSnapshot - Stop() -} - -// GetOrRegisterMeter returns an existing Meter or constructs and registers a -// new StandardMeter. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func GetOrRegisterMeter(name string, r Registry) Meter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewMeter).(Meter) -} - -// NewMeter constructs a new StandardMeter and launches a goroutine. -// Be sure to call Stop() once the meter is of no use to allow for garbage collection. -func NewMeter() Meter { - if !Enabled { - return NilMeter{} - } - m := newStandardMeter() - arbiter.Lock() - defer arbiter.Unlock() - arbiter.meters[m] = struct{}{} - if !arbiter.started { - arbiter.started = true - go arbiter.tick() - } - return m -} - -// NewInactiveMeter returns a meter but does not start any goroutines. This -// method is mainly intended for testing. -func NewInactiveMeter() Meter { - if !Enabled { - return NilMeter{} - } - m := newStandardMeter() - return m -} - -// NewRegisteredMeter constructs and registers a new StandardMeter -// and launches a goroutine. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func NewRegisteredMeter(name string, r Registry) Meter { - return GetOrRegisterMeter(name, r) -} - -// meterSnapshot is a read-only copy of the meter's internal values. -type meterSnapshot struct { - count int64 - rate1, rate5, rate15, rateMean float64 -} - -// Count returns the count of events at the time the snapshot was taken. -func (m *meterSnapshot) Count() int64 { return m.count } - -// Rate1 returns the one-minute moving average rate of events per second at the -// time the snapshot was taken. 
-func (m *meterSnapshot) Rate1() float64 { return m.rate1 } - -// Rate5 returns the five-minute moving average rate of events per second at -// the time the snapshot was taken. -func (m *meterSnapshot) Rate5() float64 { return m.rate5 } - -// Rate15 returns the fifteen-minute moving average rate of events per second -// at the time the snapshot was taken. -func (m *meterSnapshot) Rate15() float64 { return m.rate15 } - -// RateMean returns the meter's mean rate of events per second at the time the -// snapshot was taken. -func (m *meterSnapshot) RateMean() float64 { return m.rateMean } - -// NilMeter is a no-op Meter. -type NilMeter struct{} - -func (NilMeter) Count() int64 { return 0 } -func (NilMeter) Mark(n int64) {} -func (NilMeter) Snapshot() MeterSnapshot { return (*emptySnapshot)(nil) } -func (NilMeter) Stop() {} - -// StandardMeter is the standard implementation of a Meter. -type StandardMeter struct { - count atomic.Int64 - uncounted atomic.Int64 // not yet added to the EWMAs - rateMean atomic.Uint64 - - a1, a5, a15 EWMA - startTime time.Time - stopped atomic.Bool -} - -func newStandardMeter() *StandardMeter { - return &StandardMeter{ - a1: NewEWMA1(), - a5: NewEWMA5(), - a15: NewEWMA15(), - startTime: time.Now(), - } -} - -// Stop stops the meter, Mark() will be a no-op if you use it after being stopped. -func (m *StandardMeter) Stop() { - if stopped := m.stopped.Swap(true); !stopped { - arbiter.Lock() - delete(arbiter.meters, m) - arbiter.Unlock() - } -} - -// Mark records the occurrence of n events. -func (m *StandardMeter) Mark(n int64) { - m.uncounted.Add(n) -} - -// Snapshot returns a read-only copy of the meter. 
-func (m *StandardMeter) Snapshot() MeterSnapshot { - return &meterSnapshot{ - count: m.count.Load() + m.uncounted.Load(), - rate1: m.a1.Snapshot().Rate(), - rate5: m.a5.Snapshot().Rate(), - rate15: m.a15.Snapshot().Rate(), - rateMean: math.Float64frombits(m.rateMean.Load()), - } -} - -func (m *StandardMeter) tick() { - // Take the uncounted values, add to count - n := m.uncounted.Swap(0) - count := m.count.Add(n) - m.rateMean.Store(math.Float64bits(float64(count) / time.Since(m.startTime).Seconds())) - // Update the EWMA's internal state - m.a1.Update(n) - m.a5.Update(n) - m.a15.Update(n) - // And trigger them to calculate the rates - m.a1.Tick() - m.a5.Tick() - m.a15.Tick() -} - -// meterArbiter ticks meters every 5s from a single goroutine. -// meters are references in a set for future stopping. -type meterArbiter struct { - sync.RWMutex - started bool - meters map[*StandardMeter]struct{} - ticker *time.Ticker -} - -var arbiter = meterArbiter{ticker: time.NewTicker(5 * time.Second), meters: make(map[*StandardMeter]struct{})} - -// Ticks meters on the scheduled interval -func (ma *meterArbiter) tick() { - for range ma.ticker.C { - ma.tickMeters() - } -} - -func (ma *meterArbiter) tickMeters() { - ma.RLock() - defer ma.RUnlock() - for meter := range ma.meters { - meter.tick() - } -} diff --git a/metrics/meter_test.go b/metrics/meter_test.go deleted file mode 100644 index 019c4d765b..0000000000 --- a/metrics/meter_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package metrics - -import ( - "testing" - "time" -) - -func BenchmarkMeter(b *testing.B) { - m := NewMeter() - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Mark(1) - } -} -func TestMeter(t *testing.T) { - m := NewMeter() - m.Mark(47) - if v := m.Snapshot().Count(); v != 47 { - t.Fatalf("have %d want %d", v, 47) - } -} -func TestGetOrRegisterMeter(t *testing.T) { - r := NewRegistry() - NewRegisteredMeter("foo", r).Mark(47) - if m := GetOrRegisterMeter("foo", r).Snapshot(); m.Count() != 47 { - t.Fatal(m.Count()) - 
} -} - -func TestMeterDecay(t *testing.T) { - ma := meterArbiter{ - ticker: time.NewTicker(time.Millisecond), - meters: make(map[*StandardMeter]struct{}), - } - defer ma.ticker.Stop() - m := newStandardMeter() - ma.meters[m] = struct{}{} - m.Mark(1) - ma.tickMeters() - rateMean := m.Snapshot().RateMean() - time.Sleep(100 * time.Millisecond) - ma.tickMeters() - if m.Snapshot().RateMean() >= rateMean { - t.Error("m.RateMean() didn't decrease") - } -} - -func TestMeterNonzero(t *testing.T) { - m := NewMeter() - m.Mark(3) - if count := m.Snapshot().Count(); count != 3 { - t.Errorf("m.Count(): 3 != %v\n", count) - } -} - -func TestMeterStop(t *testing.T) { - l := len(arbiter.meters) - m := NewMeter() - if l+1 != len(arbiter.meters) { - t.Errorf("arbiter.meters: %d != %d\n", l+1, len(arbiter.meters)) - } - m.Stop() - if l != len(arbiter.meters) { - t.Errorf("arbiter.meters: %d != %d\n", l, len(arbiter.meters)) - } -} - -func TestMeterZero(t *testing.T) { - m := NewMeter().Snapshot() - if count := m.Count(); count != 0 { - t.Errorf("m.Count(): 0 != %v\n", count) - } -} - -func TestMeterRepeat(t *testing.T) { - m := NewMeter() - for i := 0; i < 101; i++ { - m.Mark(int64(i)) - } - if count := m.Snapshot().Count(); count != 5050 { - t.Errorf("m.Count(): 5050 != %v\n", count) - } - for i := 0; i < 101; i++ { - m.Mark(int64(i)) - } - if count := m.Snapshot().Count(); count != 10100 { - t.Errorf("m.Count(): 10100 != %v\n", count) - } -} diff --git a/metrics/metrics.go b/metrics/metrics.go deleted file mode 100644 index e01beef68e..0000000000 --- a/metrics/metrics.go +++ /dev/null @@ -1,18 +0,0 @@ -// Go port of Coda Hale's Metrics library -// -// -// -// Coda Hale's original work: -package metrics - -// Enabled is checked by the constructor functions for all of the -// standard metrics. If it is true, the metric returned is a stub. -// -// This global kill-switch helps quantify the observer effect and makes -// for less cluttered pprof profiles. 
-var Enabled = true - -// EnabledExpensive is a soft-flag meant for external packages to check if costly -// metrics gathering is allowed or not. The goal is to separate standard metrics -// for health monitoring and debug metrics that might impact runtime performance. -var EnabledExpensive = false diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go deleted file mode 100644 index 775b247185..0000000000 --- a/metrics/metrics_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package metrics - -import ( - "fmt" - "sync" - "testing" - "time" -) - -const FANOUT = 128 - -func BenchmarkMetrics(b *testing.B) { - r := NewRegistry() - c := NewRegisteredCounter("counter", r) - cf := NewRegisteredCounterFloat64("counterfloat64", r) - g := NewRegisteredGauge("gauge", r) - gf := NewRegisteredGaugeFloat64("gaugefloat64", r) - h := NewRegisteredHistogram("histogram", r, NewUniformSample(100)) - m := NewRegisteredMeter("meter", r) - t := NewRegisteredTimer("timer", r) - RegisterDebugGCStats(r) - b.ResetTimer() - ch := make(chan bool) - - wgD := &sync.WaitGroup{} - /* - wgD.Add(1) - go func() { - defer wgD.Done() - //log.Println("go CaptureDebugGCStats") - for { - select { - case <-ch: - //log.Println("done CaptureDebugGCStats") - return - default: - CaptureDebugGCStatsOnce(r) - } - } - }() - //*/ - - wgW := &sync.WaitGroup{} - /* - wgW.Add(1) - go func() { - defer wgW.Done() - //log.Println("go Write") - for { - select { - case <-ch: - //log.Println("done Write") - return - default: - WriteOnce(r, io.Discard) - } - } - }() - //*/ - - wg := &sync.WaitGroup{} - wg.Add(FANOUT) - for i := 0; i < FANOUT; i++ { - go func(i int) { - defer wg.Done() - //log.Println("go", i) - for i := 0; i < b.N; i++ { - c.Inc(1) - cf.Inc(1.0) - g.Update(int64(i)) - gf.Update(float64(i)) - h.Update(int64(i)) - m.Mark(1) - t.Update(1) - } - //log.Println("done", i) - }(i) - } - wg.Wait() - close(ch) - wgD.Wait() - wgW.Wait() -} - -func Example() { - c := NewCounter() - Register("money", c) - c.Inc(17) - - 
// Threadsafe registration - t := GetOrRegisterTimer("db.get.latency", nil) - t.Time(func() { time.Sleep(10 * time.Millisecond) }) - t.Update(1) - - fmt.Println(c.Snapshot().Count()) - fmt.Println(t.Snapshot().Min()) - // Output: 17 - // 1 -} diff --git a/metrics/opentsdb.go b/metrics/opentsdb.go deleted file mode 100644 index e81690f943..0000000000 --- a/metrics/opentsdb.go +++ /dev/null @@ -1,128 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "io" - "log" - "net" - "os" - "strings" - "time" -) - -var shortHostName = "" - -// OpenTSDBConfig provides a container with configuration parameters for -// the OpenTSDB exporter -type OpenTSDBConfig struct { - Addr *net.TCPAddr // Network address to connect to - Registry Registry // Registry to be exported - FlushInterval time.Duration // Flush interval - DurationUnit time.Duration // Time conversion unit for durations - Prefix string // Prefix to be prepended to metric names -} - -// OpenTSDB is a blocking exporter function which reports metrics in r -// to a TSDB server located at addr, flushing them every d duration -// and prepending metric names with prefix. -func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { - OpenTSDBWithConfig(OpenTSDBConfig{ - Addr: addr, - Registry: r, - FlushInterval: d, - DurationUnit: time.Nanosecond, - Prefix: prefix, - }) -} - -// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB, -// but it takes a OpenTSDBConfig instead. -func OpenTSDBWithConfig(c OpenTSDBConfig) { - for range time.Tick(c.FlushInterval) { - if err := openTSDB(&c); nil != err { - log.Println(err) - } - } -} - -func getShortHostname() string { - if shortHostName == "" { - host, _ := os.Hostname() - if index := strings.Index(host, "."); index > 0 { - shortHostName = host[:index] - } else { - shortHostName = host - } - } - return shortHostName -} - -// writeRegistry writes the registry-metrics on the opentsb format. 
-func (c *OpenTSDBConfig) writeRegistry(w io.Writer, now int64, shortHostname string) { - du := float64(c.DurationUnit) - - c.Registry.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname) - case CounterFloat64: - fmt.Fprintf(w, "put %s.%s.count %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname) - case Gauge: - fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname) - case GaugeFloat64: - fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname) - case GaugeInfo: - fmt.Fprintf(w, "put %s.%s.value %d %s host=%s\n", c.Prefix, name, now, metric.Snapshot().Value().String(), shortHostname) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname) - fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname) - fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname) - fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname) - fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname) - fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname) - fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname) - fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], 
shortHostname) - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname) - fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname) - fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname) - fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname) - fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname) - fmt.Fprintf(w, "put %s.%s.fifteen-minute %d 
%.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname) - } - }) -} - -func openTSDB(c *OpenTSDBConfig) error { - conn, err := net.DialTCP("tcp", nil, c.Addr) - if nil != err { - return err - } - defer conn.Close() - w := bufio.NewWriter(conn) - c.writeRegistry(w, time.Now().Unix(), getShortHostname()) - w.Flush() - return nil -} diff --git a/metrics/opentsdb_test.go b/metrics/opentsdb_test.go deleted file mode 100644 index d13973a588..0000000000 --- a/metrics/opentsdb_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package metrics - -import ( - "fmt" - "net" - "os" - "strings" - "testing" - "time" -) - -func ExampleOpenTSDB() { - addr, _ := net.ResolveTCPAddr("net", ":2003") - go OpenTSDB(DefaultRegistry, 1*time.Second, "some.prefix", addr) -} - -func ExampleOpenTSDBWithConfig() { - addr, _ := net.ResolveTCPAddr("net", ":2003") - go OpenTSDBWithConfig(OpenTSDBConfig{ - Addr: addr, - Registry: DefaultRegistry, - FlushInterval: 1 * time.Second, - DurationUnit: time.Millisecond, - }) -} - -func TestExampleOpenTSB(t *testing.T) { - r := NewOrderedRegistry() - NewRegisteredGaugeInfo("foo", r).Update(GaugeInfoValue{"chain_id": "5"}) - NewRegisteredGaugeFloat64("pi", r).Update(3.14) - NewRegisteredCounter("months", r).Inc(12) - NewRegisteredCounterFloat64("tau", r).Inc(1.57) - NewRegisteredMeter("elite", r).Mark(1337) - NewRegisteredTimer("second", r).Update(time.Second) - NewRegisteredCounterFloat64("tau", r).Inc(1.57) - NewRegisteredCounterFloat64("tau", r).Inc(1.57) - - w := new(strings.Builder) - (&OpenTSDBConfig{ - Registry: r, - DurationUnit: time.Millisecond, - Prefix: "pre", - }).writeRegistry(w, 978307200, "hal9000") - - wantB, err := os.ReadFile("./testdata/opentsb.want") - if err != nil { - t.Fatal(err) - } - want := strings.ReplaceAll(string(wantB), "\r\n", "\n") - if have := w.String(); have != want { - t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want) 
- t.Logf("have vs want:\n%v", findFirstDiffPos(have, want)) - } -} - -func findFirstDiffPos(a, b string) string { - yy := strings.Split(b, "\n") - for i, x := range strings.Split(a, "\n") { - if i >= len(yy) { - return fmt.Sprintf("have:%d: %s\nwant:%d: ", i, x, i) - } - if y := yy[i]; x != y { - return fmt.Sprintf("have:%d: %s\nwant:%d: %s", i, x, i, y) - } - } - return "" -} diff --git a/metrics/prometheus/interfaces.go b/metrics/prometheus/interfaces.go new file mode 100644 index 0000000000..b9484722dd --- /dev/null +++ b/metrics/prometheus/interfaces.go @@ -0,0 +1,12 @@ +// (c) 2025 Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. +package prometheus + +// Registry is a narrower interface of [prometheus.Registry] containing +// only the required functions for the [Gatherer]. +type Registry interface { + // Call the given function for each registered metric. + Each(func(string, any)) + // Get the metric by the given name or nil if none is registered. + Get(string) any +} diff --git a/metrics/prometheus/prometheus.go b/metrics/prometheus/prometheus.go index 1d79cfbad7..7cc4e68536 100644 --- a/metrics/prometheus/prometheus.go +++ b/metrics/prometheus/prometheus.go @@ -1,196 +1,204 @@ -// (c) 2021, Ava Labs, Inc. All rights reserved. +// (c) 2021-2025, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package prometheus import ( + "errors" + "fmt" "sort" "strings" - "github.com/ava-labs/coreth/metrics" - "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/libevm/metrics" + dto "github.com/prometheus/client_model/go" ) -var ( - pv = []float64{.5, .75, .95, .99, .999, .9999} - pvShortPercent = []float64{50, 95, 99} - pvShort = []float64{.50, .95, .99} -) +// Gatherer implements [prometheus.Gatherer] interface by +// gathering all metrics from the given Prometheus registry. 
+type Gatherer struct { + registry Registry +} + +var _ prometheus.Gatherer = (*Gatherer)(nil) -type gatherer struct { - reg metrics.Registry +// NewGatherer returns a [Gatherer] using the given registry. +func NewGatherer(registry Registry) *Gatherer { + return &Gatherer{ + registry: registry, + } } -func (g gatherer) Gather() ([]*dto.MetricFamily, error) { +// Gather gathers metrics from the registry and converts them to +// a slice of metric families. +func (g *Gatherer) Gather() (mfs []*dto.MetricFamily, err error) { // Gather and pre-sort the metrics to avoid random listings var names []string - g.reg.Each(func(name string, i interface{}) { + g.registry.Each(func(name string, i any) { names = append(names, name) }) sort.Strings(names) - mfs := make([]*dto.MetricFamily, 0, len(names)) + mfs = make([]*dto.MetricFamily, 0, len(names)) for _, name := range names { - mIntf := g.reg.Get(name) - name := strings.Replace(name, "/", "_", -1) - - switch m := mIntf.(type) { - case metrics.Counter: - val := m.Snapshot().Count() - valFloat := float64(val) - mfs = append(mfs, &dto.MetricFamily{ - Name: &name, - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{{ - Counter: &dto.Counter{ - Value: &valFloat, - }, - }}, - }) - case metrics.CounterFloat64: - val := m.Snapshot().Count() - mfs = append(mfs, &dto.MetricFamily{ - Name: &name, - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{{ - Counter: &dto.Counter{ - Value: &val, - }, - }}, - }) - case metrics.Gauge: - val := m.Snapshot().Value() - valFloat := float64(val) - mfs = append(mfs, &dto.MetricFamily{ - Name: &name, - Type: dto.MetricType_GAUGE.Enum(), - Metric: []*dto.Metric{{ - Gauge: &dto.Gauge{ - Value: &valFloat, - }, - }}, - }) - case metrics.GaugeFloat64: - val := m.Snapshot().Value() - mfs = append(mfs, &dto.MetricFamily{ - Name: &name, - Type: dto.MetricType_GAUGE.Enum(), - Metric: []*dto.Metric{{ - Gauge: &dto.Gauge{ - Value: &val, - }, - }}, - }) - case metrics.Histogram: - snapshot := 
m.Snapshot() - count := snapshot.Count() - countUint := uint64(count) - sum := snapshot.Sum() - sumFloat := float64(sum) - - ps := snapshot.Percentiles(pv) - qs := make([]*dto.Quantile, len(pv)) - for i := range ps { - v := pv[i] - s := ps[i] - qs[i] = &dto.Quantile{ - Quantile: &v, - Value: &s, - } + mf, err := metricFamily(g.registry, name) + if err != nil { + if errors.Is(err, errMetricSkip) { + continue } + return nil, err + } + mfs = append(mfs, mf) + } - mfs = append(mfs, &dto.MetricFamily{ - Name: &name, - Type: dto.MetricType_SUMMARY.Enum(), - Metric: []*dto.Metric{{ - Summary: &dto.Summary{ - SampleCount: &countUint, - SampleSum: &sumFloat, - Quantile: qs, - }, - }}, - }) - case metrics.Meter: - val := m.Snapshot().Count() - valFloat := float64(val) - mfs = append(mfs, &dto.MetricFamily{ - Name: &name, - Type: dto.MetricType_GAUGE.Enum(), - Metric: []*dto.Metric{{ - Gauge: &dto.Gauge{ - Value: &valFloat, - }, - }}, - }) - case metrics.Timer: - snapshot := m.Snapshot() - count := snapshot.Count() - countUint := uint64(count) - sum := snapshot.Sum() - sumFloat := float64(sum) - - ps := snapshot.Percentiles(pv) - qs := make([]*dto.Quantile, len(pv)) - for i := range ps { - v := pv[i] - s := ps[i] - qs[i] = &dto.Quantile{ - Quantile: &v, - Value: &s, - } - } + return mfs, nil +} - mfs = append(mfs, &dto.MetricFamily{ - Name: &name, - Type: dto.MetricType_SUMMARY.Enum(), - Metric: []*dto.Metric{{ - Summary: &dto.Summary{ - SampleCount: &countUint, - SampleSum: &sumFloat, - Quantile: qs, - }, - }}, - }) - case metrics.ResettingTimer: - snapshot := m.Snapshot() - - count := uint64(snapshot.Count()) - if count == 0 { - continue +var ( + errMetricSkip = errors.New("metric skipped") +) + +func ptrTo[T any](x T) *T { return &x } + +func metricFamily(registry Registry, name string) (mf *dto.MetricFamily, err error) { + metric := registry.Get(name) + name = strings.ReplaceAll(name, "/", "_") + + switch m := metric.(type) { + case metrics.NilCounter, 
metrics.NilCounterFloat64, metrics.NilEWMA, + metrics.NilGauge, metrics.NilGaugeFloat64, metrics.NilGaugeInfo, + metrics.NilHealthcheck, metrics.NilHistogram, metrics.NilMeter, + metrics.NilResettingTimer, metrics.NilSample, metrics.NilTimer: + return nil, fmt.Errorf("%w: %q metric is nil", errMetricSkip, name) + + case metrics.Counter: + return &dto.MetricFamily{ + Name: &name, + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{{ + Counter: &dto.Counter{ + Value: ptrTo(float64(m.Snapshot().Count())), + }, + }}, + }, nil + case metrics.CounterFloat64: + return &dto.MetricFamily{ + Name: &name, + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{{ + Counter: &dto.Counter{ + Value: ptrTo(m.Snapshot().Count()), + }, + }}, + }, nil + case metrics.Gauge: + return &dto.MetricFamily{ + Name: &name, + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{{ + Gauge: &dto.Gauge{ + Value: ptrTo(float64(m.Snapshot().Value())), + }, + }}, + }, nil + case metrics.GaugeFloat64: + return &dto.MetricFamily{ + Name: &name, + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{{ + Gauge: &dto.Gauge{ + Value: ptrTo(m.Snapshot().Value()), + }, + }}, + }, nil + case metrics.Histogram: + snapshot := m.Snapshot() + + quantiles := []float64{.5, .75, .95, .99, .999, .9999} + thresholds := snapshot.Percentiles(quantiles) + dtoQuantiles := make([]*dto.Quantile, len(quantiles)) + for i := range thresholds { + dtoQuantiles[i] = &dto.Quantile{ + Quantile: ptrTo(quantiles[i]), + Value: ptrTo(thresholds[i]), } + } - ps := snapshot.Percentiles(pvShortPercent) - qs := make([]*dto.Quantile, len(pv)) - for i := range pvShort { - v := pv[i] - s := ps[i] - qs[i] = &dto.Quantile{ - Quantile: &v, - Value: &s, - } + return &dto.MetricFamily{ + Name: &name, + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{{ + Summary: &dto.Summary{ + SampleCount: ptrTo(uint64(snapshot.Count())), //nolint:gosec + SampleSum: ptrTo(float64(snapshot.Sum())), + Quantile: 
dtoQuantiles, + }, + }}, + }, nil + case metrics.Meter: + return &dto.MetricFamily{ + Name: &name, + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{{ + Gauge: &dto.Gauge{ + Value: ptrTo(float64(m.Snapshot().Count())), + }, + }}, + }, nil + case metrics.Timer: + snapshot := m.Snapshot() + + quantiles := []float64{.5, .75, .95, .99, .999, .9999} + thresholds := snapshot.Percentiles(quantiles) + dtoQuantiles := make([]*dto.Quantile, len(quantiles)) + for i := range thresholds { + dtoQuantiles[i] = &dto.Quantile{ + Quantile: ptrTo(quantiles[i]), + Value: ptrTo(thresholds[i]), } + } - mfs = append(mfs, &dto.MetricFamily{ - Name: &name, - Type: dto.MetricType_SUMMARY.Enum(), - Metric: []*dto.Metric{{ - Summary: &dto.Summary{ - SampleCount: &count, - // TODO: do we need to specify SampleSum here? and if so - // what should that be? - Quantile: qs, - }, - }}, - }) + return &dto.MetricFamily{ + Name: &name, + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{{ + Summary: &dto.Summary{ + SampleCount: ptrTo(uint64(snapshot.Count())), //nolint:gosec + SampleSum: ptrTo(float64(snapshot.Sum())), + Quantile: dtoQuantiles, + }, + }}, + }, nil + case metrics.ResettingTimer: + snapshot := m.Snapshot() + if snapshot.Count() == 0 { + return nil, fmt.Errorf("%w: %q resetting timer metric count is zero", errMetricSkip, name) } - } - return mfs, nil -} + pvShortPercent := []float64{50, 95, 99} + thresholds := snapshot.Percentiles(pvShortPercent) + dtoQuantiles := make([]*dto.Quantile, len(pvShortPercent)) + for i := range pvShortPercent { + dtoQuantiles[i] = &dto.Quantile{ + Quantile: ptrTo(pvShortPercent[i]), + Value: ptrTo(thresholds[i]), + } + } -func Gatherer(reg metrics.Registry) prometheus.Gatherer { - return gatherer{reg: reg} + return &dto.MetricFamily{ + Name: &name, + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{{ + Summary: &dto.Summary{ + SampleCount: ptrTo(uint64(snapshot.Count())), //nolint:gosec + Quantile: dtoQuantiles, + }, + }}, + 
}, nil + + default: + return nil, fmt.Errorf("metric %q: type is not supported: %T", name, metric) + } } diff --git a/metrics/prometheus/prometheus_test.go b/metrics/prometheus/prometheus_test.go index 967e3f2602..5cce784a3f 100644 --- a/metrics/prometheus/prometheus_test.go +++ b/metrics/prometheus/prometheus_test.go @@ -1,4 +1,4 @@ -// (c) 2021, Ava Labs, Inc. All rights reserved. +// (c) 2021-2025, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package prometheus @@ -7,81 +7,91 @@ import ( "testing" "time" + "github.com/ava-labs/coreth/plugin/evm/testutils" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" - "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/libevm/metrics" ) -func TestGatherer(t *testing.T) { +func TestGatherer_Gather(t *testing.T) { + testutils.WithMetrics(t) + registry := metrics.NewRegistry() + register := func(t *testing.T, name string, collector any) { + t.Helper() + err := registry.Register(name, collector) + require.NoError(t, err) + } counter := metrics.NewCounter() counter.Inc(12345) + register(t, "test/counter", counter) - err := registry.Register("test/counter", counter) - assert.NoError(t, err) + counterFloat64 := metrics.NewCounterFloat64() + counterFloat64.Inc(1.1) + register(t, "test/counter_float64", counterFloat64) gauge := metrics.NewGauge() gauge.Update(23456) - - err = registry.Register("test/gauge", gauge) - assert.NoError(t, err) + register(t, "test/gauge", gauge) gaugeFloat64 := metrics.NewGaugeFloat64() gaugeFloat64.Update(34567.89) - - err = registry.Register("test/gauge_float64", gaugeFloat64) - assert.NoError(t, err) + register(t, "test/gauge_float64", gaugeFloat64) sample := metrics.NewUniformSample(1028) histogram := metrics.NewHistogram(sample) - - err = registry.Register("test/histogram", histogram) - assert.NoError(t, err) + register(t, "test/histogram", histogram) meter := metrics.NewMeter() - defer meter.Stop() + t.Cleanup(meter.Stop) 
meter.Mark(9999999) - - err = registry.Register("test/meter", meter) - assert.NoError(t, err) + register(t, "test/meter", meter) timer := metrics.NewTimer() - defer timer.Stop() + t.Cleanup(timer.Stop) timer.Update(20 * time.Millisecond) timer.Update(21 * time.Millisecond) timer.Update(22 * time.Millisecond) timer.Update(120 * time.Millisecond) timer.Update(23 * time.Millisecond) timer.Update(24 * time.Millisecond) - - err = registry.Register("test/timer", timer) - assert.NoError(t, err) + register(t, "test/timer", timer) resettingTimer := metrics.NewResettingTimer() - resettingTimer.Update(10 * time.Millisecond) - resettingTimer.Update(11 * time.Millisecond) - resettingTimer.Update(12 * time.Millisecond) - resettingTimer.Update(120 * time.Millisecond) - resettingTimer.Update(13 * time.Millisecond) - resettingTimer.Update(14 * time.Millisecond) - - err = registry.Register("test/resetting_timer", resettingTimer) - assert.NoError(t, err) - - err = registry.Register("test/resetting_timer_snapshot", resettingTimer.Snapshot()) - assert.NoError(t, err) + register(t, "test/resetting_timer", resettingTimer) + resettingTimer.Update(time.Second) // must be after register call emptyResettingTimer := metrics.NewResettingTimer() - - err = registry.Register("test/empty_resetting_timer", emptyResettingTimer) - assert.NoError(t, err) - - err = registry.Register("test/empty_resetting_timer_snapshot", emptyResettingTimer.Snapshot()) - assert.NoError(t, err) - - g := Gatherer(registry) - - _, err = g.Gather() - assert.NoError(t, err) + register(t, "test/empty_resetting_timer", emptyResettingTimer) + + emptyResettingTimer.Update(time.Second) // no effect because of snapshot below + register(t, "test/empty_resetting_timer_snapshot", emptyResettingTimer.Snapshot()) + + gatherer := NewGatherer(registry) + + families, err := gatherer.Gather() + require.NoError(t, err) + + familyStrings := make([]string, len(families)) + for i := range families { + familyStrings[i] = families[i].String() + 
} + want := []string{ + `name:"test_counter" type:COUNTER metric: > `, + `name:"test_counter_float64" type:COUNTER metric: > `, + `name:"test_gauge" type:GAUGE metric: > `, + `name:"test_gauge_float64" type:GAUGE metric: > `, + `name:"test_histogram" type:SUMMARY metric: quantile: quantile: quantile: quantile: quantile: > > `, + `name:"test_meter" type:GAUGE metric: > `, + `name:"test_resetting_timer" type:SUMMARY metric: quantile: quantile: > > `, + `name:"test_timer" type:SUMMARY metric: quantile: quantile: quantile: quantile: quantile: > > `, + } + assert.Equal(t, want, familyStrings) + + register(t, "unsupported", metrics.NewGaugeInfo()) + families, err = gatherer.Gather() + assert.EqualError(t, err, "metric \"unsupported\": type is not supported: *metrics.StandardGaugeInfo") + assert.Empty(t, families) } diff --git a/metrics/prometheus/testdata/prometheus.want b/metrics/prometheus/testdata/prometheus.want deleted file mode 100644 index 861c5f5cf0..0000000000 --- a/metrics/prometheus/testdata/prometheus.want +++ /dev/null @@ -1,70 +0,0 @@ -# TYPE system_cpu_schedlatency_count counter -system_cpu_schedlatency_count 5645 - -# TYPE system_cpu_schedlatency summary -system_cpu_schedlatency {quantile="0.5"} 0 -system_cpu_schedlatency {quantile="0.75"} 7168 -system_cpu_schedlatency {quantile="0.95"} 1.6777216e+07 -system_cpu_schedlatency {quantile="0.99"} 2.9360128e+07 -system_cpu_schedlatency {quantile="0.999"} 3.3554432e+07 -system_cpu_schedlatency {quantile="0.9999"} 3.3554432e+07 - -# TYPE system_memory_pauses_count counter -system_memory_pauses_count 14 - -# TYPE system_memory_pauses summary -system_memory_pauses {quantile="0.5"} 32768 -system_memory_pauses {quantile="0.75"} 57344 -system_memory_pauses {quantile="0.95"} 196608 -system_memory_pauses {quantile="0.99"} 196608 -system_memory_pauses {quantile="0.999"} 196608 -system_memory_pauses {quantile="0.9999"} 196608 - -# TYPE test_counter gauge -test_counter 12345 - -# TYPE test_counter_float64 gauge 
-test_counter_float64 54321.98 - -# TYPE test_gauge gauge -test_gauge 23456 - -# TYPE test_gauge_float64 gauge -test_gauge_float64 34567.89 - -# TYPE test_gauge_info gauge -test_gauge_info {arch="amd64", commit="7caa2d8163ae3132c1c2d6978c76610caee2d949", os="linux", protocol_versions="64 65 66", version="1.10.18-unstable"} 1 - -# TYPE test_histogram_count counter -test_histogram_count 3 - -# TYPE test_histogram summary -test_histogram {quantile="0.5"} 2 -test_histogram {quantile="0.75"} 3 -test_histogram {quantile="0.95"} 3 -test_histogram {quantile="0.99"} 3 -test_histogram {quantile="0.999"} 3 -test_histogram {quantile="0.9999"} 3 - -# TYPE test_meter gauge -test_meter 0 - -# TYPE test_resetting_timer_count counter -test_resetting_timer_count 6 - -# TYPE test_resetting_timer summary -test_resetting_timer {quantile="0.50"} 1.25e+07 -test_resetting_timer {quantile="0.95"} 1.2e+08 -test_resetting_timer {quantile="0.99"} 1.2e+08 - -# TYPE test_timer_count counter -test_timer_count 6 - -# TYPE test_timer summary -test_timer {quantile="0.5"} 2.25e+07 -test_timer {quantile="0.75"} 4.8e+07 -test_timer {quantile="0.95"} 1.2e+08 -test_timer {quantile="0.99"} 1.2e+08 -test_timer {quantile="0.999"} 1.2e+08 -test_timer {quantile="0.9999"} 1.2e+08 - diff --git a/metrics/registry.go b/metrics/registry.go deleted file mode 100644 index 8bfbc08042..0000000000 --- a/metrics/registry.go +++ /dev/null @@ -1,372 +0,0 @@ -package metrics - -import ( - "fmt" - "reflect" - "sort" - "strings" - "sync" -) - -// DuplicateMetric is the error returned by Registry.Register when a metric -// already exists. If you mean to Register that metric you must first -// Unregister the existing metric. -type DuplicateMetric string - -func (err DuplicateMetric) Error() string { - return fmt.Sprintf("duplicate metric: %s", string(err)) -} - -// A Registry holds references to a set of metrics by name and can iterate -// over them, calling callback functions provided by the user. 
-// -// This is an interface so as to encourage other structs to implement -// the Registry API as appropriate. -type Registry interface { - - // Call the given function for each registered metric. - Each(func(string, interface{})) - - // Get the metric by the given name or nil if none is registered. - Get(string) interface{} - - // GetAll metrics in the Registry. - GetAll() map[string]map[string]interface{} - - // Gets an existing metric or registers the given one. - // The interface can be the metric to register if not found in registry, - // or a function returning the metric for lazy instantiation. - GetOrRegister(string, interface{}) interface{} - - // Register the given metric under the given name. - Register(string, interface{}) error - - // Run all registered healthchecks. - RunHealthchecks() - - // Unregister the metric with the given name. - Unregister(string) -} - -type orderedRegistry struct { - StandardRegistry -} - -// Call the given function for each registered metric. -func (r *orderedRegistry) Each(f func(string, interface{})) { - var names []string - reg := r.registered() - for name := range reg { - names = append(names, name) - } - sort.Strings(names) - for _, name := range names { - f(name, reg[name]) - } -} - -// NewRegistry creates a new registry. -func NewRegistry() Registry { - return new(StandardRegistry) -} - -// NewOrderedRegistry creates a new ordered registry (for testing). -func NewOrderedRegistry() Registry { - return new(orderedRegistry) -} - -// The standard implementation of a Registry uses sync.map -// of names to metrics. -type StandardRegistry struct { - metrics sync.Map -} - -// Call the given function for each registered metric. -func (r *StandardRegistry) Each(f func(string, interface{})) { - for name, i := range r.registered() { - f(name, i) - } -} - -// Get the metric by the given name or nil if none is registered. 
-func (r *StandardRegistry) Get(name string) interface{} { - item, _ := r.metrics.Load(name) - return item -} - -// Gets an existing metric or creates and registers a new one. Threadsafe -// alternative to calling Get and Register on failure. -// The interface can be the metric to register if not found in registry, -// or a function returning the metric for lazy instantiation. -func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { - // fast path - cached, ok := r.metrics.Load(name) - if ok { - return cached - } - if v := reflect.ValueOf(i); v.Kind() == reflect.Func { - i = v.Call(nil)[0].Interface() - } - item, _, ok := r.loadOrRegister(name, i) - if !ok { - return i - } - return item -} - -// Register the given metric under the given name. Returns a DuplicateMetric -// if a metric by the given name is already registered. -func (r *StandardRegistry) Register(name string, i interface{}) error { - // fast path - _, ok := r.metrics.Load(name) - if ok { - return DuplicateMetric(name) - } - - if v := reflect.ValueOf(i); v.Kind() == reflect.Func { - i = v.Call(nil)[0].Interface() - } - _, loaded, _ := r.loadOrRegister(name, i) - if loaded { - return DuplicateMetric(name) - } - return nil -} - -// Run all registered healthchecks. 
-func (r *StandardRegistry) RunHealthchecks() { - r.metrics.Range(func(key, value any) bool { - if h, ok := value.(Healthcheck); ok { - h.Check() - } - return true - }) -} - -// GetAll metrics in the Registry -func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { - data := make(map[string]map[string]interface{}) - r.Each(func(name string, i interface{}) { - values := make(map[string]interface{}) - switch metric := i.(type) { - case Counter: - values["count"] = metric.Snapshot().Count() - case CounterFloat64: - values["count"] = metric.Snapshot().Count() - case Gauge: - values["value"] = metric.Snapshot().Value() - case GaugeFloat64: - values["value"] = metric.Snapshot().Value() - case Healthcheck: - values["error"] = nil - metric.Check() - if err := metric.Error(); nil != err { - values["error"] = metric.Error().Error() - } - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = h.Count() - values["min"] = h.Min() - values["max"] = h.Max() - values["mean"] = h.Mean() - values["stddev"] = h.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - case Meter: - m := metric.Snapshot() - values["count"] = m.Count() - values["1m.rate"] = m.Rate1() - values["5m.rate"] = m.Rate5() - values["15m.rate"] = m.Rate15() - values["mean.rate"] = m.RateMean() - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = t.Count() - values["min"] = t.Min() - values["max"] = t.Max() - values["mean"] = t.Mean() - values["stddev"] = t.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - values["1m.rate"] = t.Rate1() - values["5m.rate"] = t.Rate5() - values["15m.rate"] = t.Rate15() - values["mean.rate"] = t.RateMean() - } - data[name] = values - }) - return data -} - -// Unregister 
the metric with the given name. -func (r *StandardRegistry) Unregister(name string) { - r.stop(name) - r.metrics.LoadAndDelete(name) -} - -func (r *StandardRegistry) loadOrRegister(name string, i interface{}) (interface{}, bool, bool) { - switch i.(type) { - case Counter, CounterFloat64, Gauge, GaugeFloat64, GaugeInfo, Healthcheck, Histogram, Meter, Timer, ResettingTimer: - default: - return nil, false, false - } - item, loaded := r.metrics.LoadOrStore(name, i) - return item, loaded, true -} - -func (r *StandardRegistry) registered() map[string]interface{} { - metrics := make(map[string]interface{}) - r.metrics.Range(func(key, value any) bool { - metrics[key.(string)] = value - return true - }) - return metrics -} - -func (r *StandardRegistry) stop(name string) { - if i, ok := r.metrics.Load(name); ok { - if s, ok := i.(Stoppable); ok { - s.Stop() - } - } -} - -// Stoppable defines the metrics which has to be stopped. -type Stoppable interface { - Stop() -} - -type PrefixedRegistry struct { - underlying Registry - prefix string -} - -func NewPrefixedRegistry(prefix string) Registry { - return &PrefixedRegistry{ - underlying: NewRegistry(), - prefix: prefix, - } -} - -func NewPrefixedChildRegistry(parent Registry, prefix string) Registry { - return &PrefixedRegistry{ - underlying: parent, - prefix: prefix, - } -} - -// Call the given function for each registered metric. 
-func (r *PrefixedRegistry) Each(fn func(string, interface{})) { - wrappedFn := func(prefix string) func(string, interface{}) { - return func(name string, iface interface{}) { - if strings.HasPrefix(name, prefix) { - fn(name, iface) - } else { - return - } - } - } - - baseRegistry, prefix := findPrefix(r, "") - baseRegistry.Each(wrappedFn(prefix)) -} - -func findPrefix(registry Registry, prefix string) (Registry, string) { - switch r := registry.(type) { - case *PrefixedRegistry: - return findPrefix(r.underlying, r.prefix+prefix) - case *StandardRegistry: - return r, prefix - } - return nil, "" -} - -// Get the metric by the given name or nil if none is registered. -func (r *PrefixedRegistry) Get(name string) interface{} { - realName := r.prefix + name - return r.underlying.Get(realName) -} - -// Gets an existing metric or registers the given one. -// The interface can be the metric to register if not found in registry, -// or a function returning the metric for lazy instantiation. -func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} { - realName := r.prefix + name - return r.underlying.GetOrRegister(realName, metric) -} - -// Register the given metric under the given name. The name will be prefixed. -func (r *PrefixedRegistry) Register(name string, metric interface{}) error { - realName := r.prefix + name - return r.underlying.Register(realName, metric) -} - -// Run all registered healthchecks. -func (r *PrefixedRegistry) RunHealthchecks() { - r.underlying.RunHealthchecks() -} - -// GetAll metrics in the Registry -func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} { - return r.underlying.GetAll() -} - -// Unregister the metric with the given name. The name will be prefixed. 
-func (r *PrefixedRegistry) Unregister(name string) { - realName := r.prefix + name - r.underlying.Unregister(realName) -} - -var ( - DefaultRegistry = NewRegistry() - EphemeralRegistry = NewRegistry() - AccountingRegistry = NewRegistry() // registry used in swarm -) - -// Call the given function for each registered metric. -func Each(f func(string, interface{})) { - DefaultRegistry.Each(f) -} - -// Get the metric by the given name or nil if none is registered. -func Get(name string) interface{} { - return DefaultRegistry.Get(name) -} - -// Gets an existing metric or creates and registers a new one. Threadsafe -// alternative to calling Get and Register on failure. -func GetOrRegister(name string, i interface{}) interface{} { - return DefaultRegistry.GetOrRegister(name, i) -} - -// Register the given metric under the given name. Returns a DuplicateMetric -// if a metric by the given name is already registered. -func Register(name string, i interface{}) error { - return DefaultRegistry.Register(name, i) -} - -// Register the given metric under the given name. Panics if a metric by the -// given name is already registered. -func MustRegister(name string, i interface{}) { - if err := Register(name, i); err != nil { - panic(err) - } -} - -// Run all registered healthchecks. -func RunHealthchecks() { - DefaultRegistry.RunHealthchecks() -} - -// Unregister the metric with the given name. 
-func Unregister(name string) { - DefaultRegistry.Unregister(name) -} diff --git a/metrics/registry_test.go b/metrics/registry_test.go deleted file mode 100644 index 75012dd4ac..0000000000 --- a/metrics/registry_test.go +++ /dev/null @@ -1,335 +0,0 @@ -package metrics - -import ( - "sync" - "testing" -) - -func BenchmarkRegistry(b *testing.B) { - r := NewRegistry() - r.Register("foo", NewCounter()) - b.ResetTimer() - for i := 0; i < b.N; i++ { - r.Each(func(string, interface{}) {}) - } -} - -func BenchmarkRegistryGetOrRegisterParallel_8(b *testing.B) { - benchmarkRegistryGetOrRegisterParallel(b, 8) -} - -func BenchmarkRegistryGetOrRegisterParallel_32(b *testing.B) { - benchmarkRegistryGetOrRegisterParallel(b, 32) -} - -func benchmarkRegistryGetOrRegisterParallel(b *testing.B, amount int) { - r := NewRegistry() - b.ResetTimer() - var wg sync.WaitGroup - for i := 0; i < amount; i++ { - wg.Add(1) - go func() { - for i := 0; i < b.N; i++ { - r.GetOrRegister("foo", NewMeter) - } - wg.Done() - }() - } - wg.Wait() -} - -func TestRegistry(t *testing.T) { - r := NewRegistry() - r.Register("foo", NewCounter()) - i := 0 - r.Each(func(name string, iface interface{}) { - i++ - if name != "foo" { - t.Fatal(name) - } - if _, ok := iface.(Counter); !ok { - t.Fatal(iface) - } - }) - if i != 1 { - t.Fatal(i) - } - r.Unregister("foo") - i = 0 - r.Each(func(string, interface{}) { i++ }) - if i != 0 { - t.Fatal(i) - } -} - -func TestRegistryDuplicate(t *testing.T) { - r := NewRegistry() - if err := r.Register("foo", NewCounter()); nil != err { - t.Fatal(err) - } - if err := r.Register("foo", NewGauge()); nil == err { - t.Fatal(err) - } - i := 0 - r.Each(func(name string, iface interface{}) { - i++ - if _, ok := iface.(Counter); !ok { - t.Fatal(iface) - } - }) - if i != 1 { - t.Fatal(i) - } -} - -func TestRegistryGet(t *testing.T) { - r := NewRegistry() - r.Register("foo", NewCounter()) - if count := r.Get("foo").(Counter).Snapshot().Count(); count != 0 { - t.Fatal(count) - } - 
r.Get("foo").(Counter).Inc(1) - if count := r.Get("foo").(Counter).Snapshot().Count(); count != 1 { - t.Fatal(count) - } -} - -func TestRegistryGetOrRegister(t *testing.T) { - r := NewRegistry() - - // First metric wins with GetOrRegister - _ = r.GetOrRegister("foo", NewCounter()) - m := r.GetOrRegister("foo", NewGauge()) - if _, ok := m.(Counter); !ok { - t.Fatal(m) - } - - i := 0 - r.Each(func(name string, iface interface{}) { - i++ - if name != "foo" { - t.Fatal(name) - } - if _, ok := iface.(Counter); !ok { - t.Fatal(iface) - } - }) - if i != 1 { - t.Fatal(i) - } -} - -func TestRegistryGetOrRegisterWithLazyInstantiation(t *testing.T) { - r := NewRegistry() - - // First metric wins with GetOrRegister - _ = r.GetOrRegister("foo", NewCounter) - m := r.GetOrRegister("foo", NewGauge) - if _, ok := m.(Counter); !ok { - t.Fatal(m) - } - - i := 0 - r.Each(func(name string, iface interface{}) { - i++ - if name != "foo" { - t.Fatal(name) - } - if _, ok := iface.(Counter); !ok { - t.Fatal(iface) - } - }) - if i != 1 { - t.Fatal(i) - } -} - -func TestRegistryUnregister(t *testing.T) { - l := len(arbiter.meters) - r := NewRegistry() - r.Register("foo", NewCounter()) - r.Register("bar", NewMeter()) - r.Register("baz", NewTimer()) - if len(arbiter.meters) != l+2 { - t.Errorf("arbiter.meters: %d != %d\n", l+2, len(arbiter.meters)) - } - r.Unregister("foo") - r.Unregister("bar") - r.Unregister("baz") - if len(arbiter.meters) != l { - t.Errorf("arbiter.meters: %d != %d\n", l+2, len(arbiter.meters)) - } -} - -func TestPrefixedChildRegistryGetOrRegister(t *testing.T) { - r := NewRegistry() - pr := NewPrefixedChildRegistry(r, "prefix.") - - _ = pr.GetOrRegister("foo", NewCounter()) - - i := 0 - r.Each(func(name string, m interface{}) { - i++ - if name != "prefix.foo" { - t.Fatal(name) - } - }) - if i != 1 { - t.Fatal(i) - } -} - -func TestPrefixedRegistryGetOrRegister(t *testing.T) { - r := NewPrefixedRegistry("prefix.") - - _ = r.GetOrRegister("foo", NewCounter()) - - i := 0 - 
r.Each(func(name string, m interface{}) { - i++ - if name != "prefix.foo" { - t.Fatal(name) - } - }) - if i != 1 { - t.Fatal(i) - } -} - -func TestPrefixedRegistryRegister(t *testing.T) { - r := NewPrefixedRegistry("prefix.") - err := r.Register("foo", NewCounter()) - c := NewCounter() - Register("bar", c) - if err != nil { - t.Fatal(err.Error()) - } - - i := 0 - r.Each(func(name string, m interface{}) { - i++ - if name != "prefix.foo" { - t.Fatal(name) - } - }) - if i != 1 { - t.Fatal(i) - } -} - -func TestPrefixedRegistryUnregister(t *testing.T) { - r := NewPrefixedRegistry("prefix.") - - _ = r.Register("foo", NewCounter()) - - i := 0 - r.Each(func(name string, m interface{}) { - i++ - if name != "prefix.foo" { - t.Fatal(name) - } - }) - if i != 1 { - t.Fatal(i) - } - - r.Unregister("foo") - - i = 0 - r.Each(func(name string, m interface{}) { - i++ - }) - - if i != 0 { - t.Fatal(i) - } -} - -func TestPrefixedRegistryGet(t *testing.T) { - pr := NewPrefixedRegistry("prefix.") - name := "foo" - pr.Register(name, NewCounter()) - - fooCounter := pr.Get(name) - if fooCounter == nil { - t.Fatal(name) - } -} - -func TestPrefixedChildRegistryGet(t *testing.T) { - r := NewRegistry() - pr := NewPrefixedChildRegistry(r, "prefix.") - name := "foo" - pr.Register(name, NewCounter()) - fooCounter := pr.Get(name) - if fooCounter == nil { - t.Fatal(name) - } -} - -func TestChildPrefixedRegistryRegister(t *testing.T) { - r := NewPrefixedChildRegistry(DefaultRegistry, "prefix.") - err := r.Register("foo", NewCounter()) - c := NewCounter() - Register("bar", c) - if err != nil { - t.Fatal(err.Error()) - } - - i := 0 - r.Each(func(name string, m interface{}) { - i++ - if name != "prefix.foo" { - t.Fatal(name) - } - }) - if i != 1 { - t.Fatal(i) - } -} - -func TestChildPrefixedRegistryOfChildRegister(t *testing.T) { - r := NewPrefixedChildRegistry(NewRegistry(), "prefix.") - r2 := NewPrefixedChildRegistry(r, "prefix2.") - err := r.Register("foo2", NewCounter()) - if err != nil { - 
t.Fatal(err.Error()) - } - err = r2.Register("baz", NewCounter()) - if err != nil { - t.Fatal(err.Error()) - } - c := NewCounter() - Register("bars", c) - - i := 0 - r2.Each(func(name string, m interface{}) { - i++ - if name != "prefix.prefix2.baz" { - t.Fatal(name) - } - }) - if i != 1 { - t.Fatal(i) - } -} - -func TestWalkRegistries(t *testing.T) { - r := NewPrefixedChildRegistry(NewRegistry(), "prefix.") - r2 := NewPrefixedChildRegistry(r, "prefix2.") - err := r.Register("foo2", NewCounter()) - if err != nil { - t.Fatal(err.Error()) - } - err = r2.Register("baz", NewCounter()) - if err != nil { - t.Fatal(err.Error()) - } - c := NewCounter() - Register("bars", c) - - _, prefix := findPrefix(r2, "") - if prefix != "prefix.prefix2." { - t.Fatal(prefix) - } -} diff --git a/metrics/resetting_sample.go b/metrics/resetting_sample.go deleted file mode 100644 index c38ffcd3ec..0000000000 --- a/metrics/resetting_sample.go +++ /dev/null @@ -1,24 +0,0 @@ -package metrics - -// ResettingSample converts an ordinary sample into one that resets whenever its -// snapshot is retrieved. This will break for multi-monitor systems, but when only -// a single metric is being pushed out, this ensure that low-frequency events don't -// skew th charts indefinitely. -func ResettingSample(sample Sample) Sample { - return &resettingSample{ - Sample: sample, - } -} - -// resettingSample is a simple wrapper around a sample that resets it upon the -// snapshot retrieval. -type resettingSample struct { - Sample -} - -// Snapshot returns a read-only copy of the sample with the original reset. 
-func (rs *resettingSample) Snapshot() SampleSnapshot { - s := rs.Sample.Snapshot() - rs.Sample.Clear() - return s -} diff --git a/metrics/resetting_timer.go b/metrics/resetting_timer.go deleted file mode 100644 index 6802e3fcea..0000000000 --- a/metrics/resetting_timer.go +++ /dev/null @@ -1,171 +0,0 @@ -package metrics - -import ( - "sync" - "time" -) - -// Initial slice capacity for the values stored in a ResettingTimer -const InitialResettingTimerSliceCap = 10 - -type ResettingTimerSnapshot interface { - Count() int - Mean() float64 - Max() int64 - Min() int64 - Percentiles([]float64) []float64 -} - -// ResettingTimer is used for storing aggregated values for timers, which are reset on every flush interval. -type ResettingTimer interface { - Snapshot() ResettingTimerSnapshot - Time(func()) - Update(time.Duration) - UpdateSince(time.Time) -} - -// GetOrRegisterResettingTimer returns an existing ResettingTimer or constructs and registers a -// new StandardResettingTimer. -func GetOrRegisterResettingTimer(name string, r Registry) ResettingTimer { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewResettingTimer).(ResettingTimer) -} - -// NewRegisteredResettingTimer constructs and registers a new StandardResettingTimer. -func NewRegisteredResettingTimer(name string, r Registry) ResettingTimer { - c := NewResettingTimer() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewResettingTimer constructs a new StandardResettingTimer -func NewResettingTimer() ResettingTimer { - if !Enabled { - return NilResettingTimer{} - } - return &StandardResettingTimer{ - values: make([]int64, 0, InitialResettingTimerSliceCap), - } -} - -// NilResettingTimer is a no-op ResettingTimer. 
-type NilResettingTimer struct{} - -func (NilResettingTimer) Values() []int64 { return nil } -func (n NilResettingTimer) Snapshot() ResettingTimerSnapshot { return n } -func (NilResettingTimer) Time(f func()) { f() } -func (NilResettingTimer) Update(time.Duration) {} -func (NilResettingTimer) Percentiles([]float64) []float64 { return nil } -func (NilResettingTimer) Mean() float64 { return 0.0 } -func (NilResettingTimer) Max() int64 { return 0 } -func (NilResettingTimer) Min() int64 { return 0 } -func (NilResettingTimer) UpdateSince(time.Time) {} -func (NilResettingTimer) Count() int { return 0 } - -// StandardResettingTimer is the standard implementation of a ResettingTimer. -// and Meter. -type StandardResettingTimer struct { - values []int64 - sum int64 // sum is a running count of the total sum, used later to calculate mean - - mutex sync.Mutex -} - -// Snapshot resets the timer and returns a read-only copy of its contents. -func (t *StandardResettingTimer) Snapshot() ResettingTimerSnapshot { - t.mutex.Lock() - defer t.mutex.Unlock() - snapshot := &resettingTimerSnapshot{} - if len(t.values) > 0 { - snapshot.mean = float64(t.sum) / float64(len(t.values)) - snapshot.values = t.values - t.values = make([]int64, 0, InitialResettingTimerSliceCap) - } - t.sum = 0 - return snapshot -} - -// Record the duration of the execution of the given function. -func (t *StandardResettingTimer) Time(f func()) { - ts := time.Now() - f() - t.Update(time.Since(ts)) -} - -// Record the duration of an event. -func (t *StandardResettingTimer) Update(d time.Duration) { - t.mutex.Lock() - defer t.mutex.Unlock() - t.values = append(t.values, int64(d)) - t.sum += int64(d) -} - -// Record the duration of an event that started at a time and ends now. -func (t *StandardResettingTimer) UpdateSince(ts time.Time) { - t.Update(time.Since(ts)) -} - -// resettingTimerSnapshot is a point-in-time copy of another ResettingTimer. 
-type resettingTimerSnapshot struct { - values []int64 - mean float64 - max int64 - min int64 - thresholdBoundaries []float64 - calculated bool -} - -// Count return the length of the values from snapshot. -func (t *resettingTimerSnapshot) Count() int { - return len(t.values) -} - -// Percentiles returns the boundaries for the input percentiles. -// note: this method is not thread safe -func (t *resettingTimerSnapshot) Percentiles(percentiles []float64) []float64 { - t.calc(percentiles) - return t.thresholdBoundaries -} - -// Mean returns the mean of the snapshotted values -// note: this method is not thread safe -func (t *resettingTimerSnapshot) Mean() float64 { - if !t.calculated { - t.calc(nil) - } - - return t.mean -} - -// Max returns the max of the snapshotted values -// note: this method is not thread safe -func (t *resettingTimerSnapshot) Max() int64 { - if !t.calculated { - t.calc(nil) - } - return t.max -} - -// Min returns the min of the snapshotted values -// note: this method is not thread safe -func (t *resettingTimerSnapshot) Min() int64 { - if !t.calculated { - t.calc(nil) - } - return t.min -} - -func (t *resettingTimerSnapshot) calc(percentiles []float64) { - scores := CalculatePercentiles(t.values, percentiles) - t.thresholdBoundaries = scores - if len(t.values) == 0 { - return - } - t.min = t.values[0] - t.max = t.values[len(t.values)-1] -} diff --git a/metrics/resetting_timer_test.go b/metrics/resetting_timer_test.go deleted file mode 100644 index 4571fc8eb0..0000000000 --- a/metrics/resetting_timer_test.go +++ /dev/null @@ -1,197 +0,0 @@ -package metrics - -import ( - "testing" - "time" -) - -func TestResettingTimer(t *testing.T) { - tests := []struct { - values []int64 - start int - end int - wantP50 float64 - wantP95 float64 - wantP99 float64 - wantMean float64 - wantMin int64 - wantMax int64 - }{ - { - values: []int64{}, - start: 1, - end: 11, - wantP50: 5.5, wantP95: 10, wantP99: 10, - wantMin: 1, wantMax: 10, wantMean: 5.5, - }, - { - 
values: []int64{}, - start: 1, - end: 101, - wantP50: 50.5, wantP95: 95.94999999999999, wantP99: 99.99, - wantMin: 1, wantMax: 100, wantMean: 50.5, - }, - { - values: []int64{1}, - start: 0, - end: 0, - wantP50: 1, wantP95: 1, wantP99: 1, - wantMin: 1, wantMax: 1, wantMean: 1, - }, - { - values: []int64{0}, - start: 0, - end: 0, - wantP50: 0, wantP95: 0, wantP99: 0, - wantMin: 0, wantMax: 0, wantMean: 0, - }, - { - values: []int64{}, - start: 0, - end: 0, - wantP50: 0, wantP95: 0, wantP99: 0, - wantMin: 0, wantMax: 0, wantMean: 0, - }, - { - values: []int64{1, 10}, - start: 0, - end: 0, - wantP50: 5.5, wantP95: 10, wantP99: 10, - wantMin: 1, wantMax: 10, wantMean: 5.5, - }, - } - for i, tt := range tests { - timer := NewResettingTimer() - - for i := tt.start; i < tt.end; i++ { - tt.values = append(tt.values, int64(i)) - } - - for _, v := range tt.values { - timer.Update(time.Duration(v)) - } - snap := timer.Snapshot() - - ps := snap.Percentiles([]float64{0.50, 0.95, 0.99}) - - if have, want := snap.Min(), tt.wantMin; have != want { - t.Fatalf("%d: min: have %d, want %d", i, have, want) - } - if have, want := snap.Max(), tt.wantMax; have != want { - t.Fatalf("%d: max: have %d, want %d", i, have, want) - } - if have, want := snap.Mean(), tt.wantMean; have != want { - t.Fatalf("%d: mean: have %v, want %v", i, have, want) - } - if have, want := ps[0], tt.wantP50; have != want { - t.Errorf("%d: p50: have %v, want %v", i, have, want) - } - if have, want := ps[1], tt.wantP95; have != want { - t.Errorf("%d: p95: have %v, want %v", i, have, want) - } - if have, want := ps[2], tt.wantP99; have != want { - t.Errorf("%d: p99: have %v, want %v", i, have, want) - } - } -} - -func TestResettingTimerWithFivePercentiles(t *testing.T) { - tests := []struct { - values []int64 - start int - end int - wantP05 float64 - wantP20 float64 - wantP50 float64 - wantP95 float64 - wantP99 float64 - wantMean float64 - wantMin int64 - wantMax int64 - }{ - { - values: []int64{}, - start: 1, - end: 
11, - wantP05: 1, wantP20: 2.2, wantP50: 5.5, wantP95: 10, wantP99: 10, - wantMin: 1, wantMax: 10, wantMean: 5.5, - }, - { - values: []int64{}, - start: 1, - end: 101, - wantP05: 5.050000000000001, wantP20: 20.200000000000003, wantP50: 50.5, wantP95: 95.94999999999999, wantP99: 99.99, - wantMin: 1, wantMax: 100, wantMean: 50.5, - }, - { - values: []int64{1}, - start: 0, - end: 0, - wantP05: 1, wantP20: 1, wantP50: 1, wantP95: 1, wantP99: 1, - wantMin: 1, wantMax: 1, wantMean: 1, - }, - { - values: []int64{0}, - start: 0, - end: 0, - wantP05: 0, wantP20: 0, wantP50: 0, wantP95: 0, wantP99: 0, - wantMin: 0, wantMax: 0, wantMean: 0, - }, - { - values: []int64{}, - start: 0, - end: 0, - wantP05: 0, wantP20: 0, wantP50: 0, wantP95: 0, wantP99: 0, - wantMin: 0, wantMax: 0, wantMean: 0, - }, - { - values: []int64{1, 10}, - start: 0, - end: 0, - wantP05: 1, wantP20: 1, wantP50: 5.5, wantP95: 10, wantP99: 10, - wantMin: 1, wantMax: 10, wantMean: 5.5, - }, - } - for ind, tt := range tests { - timer := NewResettingTimer() - - for i := tt.start; i < tt.end; i++ { - tt.values = append(tt.values, int64(i)) - } - - for _, v := range tt.values { - timer.Update(time.Duration(v)) - } - - snap := timer.Snapshot() - - ps := snap.Percentiles([]float64{0.05, 0.20, 0.50, 0.95, 0.99}) - - if tt.wantMin != snap.Min() { - t.Errorf("%d: min: got %d, want %d", ind, snap.Min(), tt.wantMin) - } - - if tt.wantMax != snap.Max() { - t.Errorf("%d: max: got %d, want %d", ind, snap.Max(), tt.wantMax) - } - - if tt.wantMean != snap.Mean() { - t.Errorf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean) - } - if tt.wantP05 != ps[0] { - t.Errorf("%d: p05: got %v, want %v", ind, ps[0], tt.wantP05) - } - if tt.wantP20 != ps[1] { - t.Errorf("%d: p20: got %v, want %v", ind, ps[1], tt.wantP20) - } - if tt.wantP50 != ps[2] { - t.Errorf("%d: p50: got %v, want %v", ind, ps[2], tt.wantP50) - } - if tt.wantP95 != ps[3] { - t.Errorf("%d: p95: got %v, want %v", ind, ps[3], tt.wantP95) - } - if 
tt.wantP99 != ps[4] { - t.Errorf("%d: p99: got %v, want %v", ind, ps[4], tt.wantP99) - } - } -} diff --git a/metrics/sample.go b/metrics/sample.go deleted file mode 100644 index bb81e105cf..0000000000 --- a/metrics/sample.go +++ /dev/null @@ -1,446 +0,0 @@ -package metrics - -import ( - "math" - "math/rand" - "sync" - "time" - - "golang.org/x/exp/slices" -) - -const rescaleThreshold = time.Hour - -type SampleSnapshot interface { - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Size() int - StdDev() float64 - Sum() int64 - Variance() float64 -} - -// Samples maintain a statistically-significant selection of values from -// a stream. -type Sample interface { - Snapshot() SampleSnapshot - Clear() - Update(int64) -} - -// ExpDecaySample is an exponentially-decaying sample using a forward-decaying -// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time -// Decay Model for Streaming Systems". -// -// -type ExpDecaySample struct { - alpha float64 - count int64 - mutex sync.Mutex - reservoirSize int - t0, t1 time.Time - values *expDecaySampleHeap - rand *rand.Rand -} - -// NewExpDecaySample constructs a new exponentially-decaying sample with the -// given reservoir size and alpha. -func NewExpDecaySample(reservoirSize int, alpha float64) Sample { - if !Enabled { - return NilSample{} - } - s := &ExpDecaySample{ - alpha: alpha, - reservoirSize: reservoirSize, - t0: time.Now(), - values: newExpDecaySampleHeap(reservoirSize), - } - s.t1 = s.t0.Add(rescaleThreshold) - return s -} - -// SetRand sets the random source (useful in tests) -func (s *ExpDecaySample) SetRand(prng *rand.Rand) Sample { - s.rand = prng - return s -} - -// Clear clears all samples. -func (s *ExpDecaySample) Clear() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count = 0 - s.t0 = time.Now() - s.t1 = s.t0.Add(rescaleThreshold) - s.values.Clear() -} - -// Snapshot returns a read-only copy of the sample. 
-func (s *ExpDecaySample) Snapshot() SampleSnapshot { - s.mutex.Lock() - defer s.mutex.Unlock() - var ( - samples = s.values.Values() - values = make([]int64, len(samples)) - max int64 = math.MinInt64 - min int64 = math.MaxInt64 - sum int64 - ) - for i, item := range samples { - v := item.v - values[i] = v - sum += v - if v > max { - max = v - } - if v < min { - min = v - } - } - return newSampleSnapshotPrecalculated(s.count, values, min, max, sum) -} - -// Update samples a new value. -func (s *ExpDecaySample) Update(v int64) { - s.update(time.Now(), v) -} - -// update samples a new value at a particular timestamp. This is a method all -// its own to facilitate testing. -func (s *ExpDecaySample) update(t time.Time, v int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count++ - if s.values.Size() == s.reservoirSize { - s.values.Pop() - } - var f64 float64 - if s.rand != nil { - f64 = s.rand.Float64() - } else { - f64 = rand.Float64() - } - s.values.Push(expDecaySample{ - k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / f64, - v: v, - }) - if t.After(s.t1) { - values := s.values.Values() - t0 := s.t0 - s.values.Clear() - s.t0 = t - s.t1 = s.t0.Add(rescaleThreshold) - for _, v := range values { - v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds()) - s.values.Push(v) - } - } -} - -// NilSample is a no-op Sample. -type NilSample struct{} - -func (NilSample) Clear() {} -func (NilSample) Snapshot() SampleSnapshot { return (*emptySnapshot)(nil) } -func (NilSample) Update(v int64) {} - -// SamplePercentile returns an arbitrary percentile of the slice of int64. -func SamplePercentile(values []int64, p float64) float64 { - return CalculatePercentiles(values, []float64{p})[0] -} - -// CalculatePercentiles returns a slice of arbitrary percentiles of the slice of -// int64. This method returns interpolated results, so e.g if there are only two -// values, [0, 10], a 50% percentile will land between them. 
-// -// Note: As a side-effect, this method will also sort the slice of values. -// Note2: The input format for percentiles is NOT percent! To express 50%, use 0.5, not 50. -func CalculatePercentiles(values []int64, ps []float64) []float64 { - scores := make([]float64, len(ps)) - size := len(values) - if size == 0 { - return scores - } - slices.Sort(values) - for i, p := range ps { - pos := p * float64(size+1) - - if pos < 1.0 { - scores[i] = float64(values[0]) - } else if pos >= float64(size) { - scores[i] = float64(values[size-1]) - } else { - lower := float64(values[int(pos)-1]) - upper := float64(values[int(pos)]) - scores[i] = lower + (pos-math.Floor(pos))*(upper-lower) - } - } - return scores -} - -// sampleSnapshot is a read-only copy of another Sample. -type sampleSnapshot struct { - count int64 - values []int64 - - max int64 - min int64 - mean float64 - sum int64 - variance float64 -} - -// newSampleSnapshotPrecalculated creates a read-only sampleSnapShot, using -// precalculated sums to avoid iterating the values -func newSampleSnapshotPrecalculated(count int64, values []int64, min, max, sum int64) *sampleSnapshot { - if len(values) == 0 { - return &sampleSnapshot{ - count: count, - values: values, - } - } - return &sampleSnapshot{ - count: count, - values: values, - max: max, - min: min, - mean: float64(sum) / float64(len(values)), - sum: sum, - } -} - -// newSampleSnapshot creates a read-only sampleSnapShot, and calculates some -// numbers. -func newSampleSnapshot(count int64, values []int64) *sampleSnapshot { - var ( - max int64 = math.MinInt64 - min int64 = math.MaxInt64 - sum int64 - ) - for _, v := range values { - sum += v - if v > max { - max = v - } - if v < min { - min = v - } - } - return newSampleSnapshotPrecalculated(count, values, min, max, sum) -} - -// Count returns the count of inputs at the time the snapshot was taken. 
-func (s *sampleSnapshot) Count() int64 { return s.count } - -// Max returns the maximal value at the time the snapshot was taken. -func (s *sampleSnapshot) Max() int64 { return s.max } - -// Mean returns the mean value at the time the snapshot was taken. -func (s *sampleSnapshot) Mean() float64 { return s.mean } - -// Min returns the minimal value at the time the snapshot was taken. -func (s *sampleSnapshot) Min() int64 { return s.min } - -// Percentile returns an arbitrary percentile of values at the time the -// snapshot was taken. -func (s *sampleSnapshot) Percentile(p float64) float64 { - return SamplePercentile(s.values, p) -} - -// Percentiles returns a slice of arbitrary percentiles of values at the time -// the snapshot was taken. -func (s *sampleSnapshot) Percentiles(ps []float64) []float64 { - return CalculatePercentiles(s.values, ps) -} - -// Size returns the size of the sample at the time the snapshot was taken. -func (s *sampleSnapshot) Size() int { return len(s.values) } - -// Snapshot returns the snapshot. -func (s *sampleSnapshot) Snapshot() SampleSnapshot { return s } - -// StdDev returns the standard deviation of values at the time the snapshot was -// taken. -func (s *sampleSnapshot) StdDev() float64 { - if s.variance == 0.0 { - s.variance = SampleVariance(s.mean, s.values) - } - return math.Sqrt(s.variance) -} - -// Sum returns the sum of values at the time the snapshot was taken. -func (s *sampleSnapshot) Sum() int64 { return s.sum } - -// Values returns a copy of the values in the sample. -func (s *sampleSnapshot) Values() []int64 { - values := make([]int64, len(s.values)) - copy(values, s.values) - return values -} - -// Variance returns the variance of values at the time the snapshot was taken. -func (s *sampleSnapshot) Variance() float64 { - if s.variance == 0.0 { - s.variance = SampleVariance(s.mean, s.values) - } - return s.variance -} - -// SampleVariance returns the variance of the slice of int64. 
-func SampleVariance(mean float64, values []int64) float64 { - if len(values) == 0 { - return 0.0 - } - var sum float64 - for _, v := range values { - d := float64(v) - mean - sum += d * d - } - return sum / float64(len(values)) -} - -// A uniform sample using Vitter's Algorithm R. -// -// -type UniformSample struct { - count int64 - mutex sync.Mutex - reservoirSize int - values []int64 - rand *rand.Rand -} - -// NewUniformSample constructs a new uniform sample with the given reservoir -// size. -func NewUniformSample(reservoirSize int) Sample { - if !Enabled { - return NilSample{} - } - return &UniformSample{ - reservoirSize: reservoirSize, - values: make([]int64, 0, reservoirSize), - } -} - -// SetRand sets the random source (useful in tests) -func (s *UniformSample) SetRand(prng *rand.Rand) Sample { - s.rand = prng - return s -} - -// Clear clears all samples. -func (s *UniformSample) Clear() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count = 0 - s.values = make([]int64, 0, s.reservoirSize) -} - -// Snapshot returns a read-only copy of the sample. -func (s *UniformSample) Snapshot() SampleSnapshot { - s.mutex.Lock() - values := make([]int64, len(s.values)) - copy(values, s.values) - count := s.count - s.mutex.Unlock() - return newSampleSnapshot(count, values) -} - -// Update samples a new value. -func (s *UniformSample) Update(v int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count++ - if len(s.values) < s.reservoirSize { - s.values = append(s.values, v) - } else { - var r int64 - if s.rand != nil { - r = s.rand.Int63n(s.count) - } else { - r = rand.Int63n(s.count) - } - if r < int64(len(s.values)) { - s.values[int(r)] = v - } - } -} - -// expDecaySample represents an individual sample in a heap. -type expDecaySample struct { - k float64 - v int64 -} - -func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap { - return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)} -} - -// expDecaySampleHeap is a min-heap of expDecaySamples. 
-// The internal implementation is copied from the standard library's container/heap -type expDecaySampleHeap struct { - s []expDecaySample -} - -func (h *expDecaySampleHeap) Clear() { - h.s = h.s[:0] -} - -func (h *expDecaySampleHeap) Push(s expDecaySample) { - n := len(h.s) - h.s = h.s[0 : n+1] - h.s[n] = s - h.up(n) -} - -func (h *expDecaySampleHeap) Pop() expDecaySample { - n := len(h.s) - 1 - h.s[0], h.s[n] = h.s[n], h.s[0] - h.down(0, n) - - n = len(h.s) - s := h.s[n-1] - h.s = h.s[0 : n-1] - return s -} - -func (h *expDecaySampleHeap) Size() int { - return len(h.s) -} - -func (h *expDecaySampleHeap) Values() []expDecaySample { - return h.s -} - -func (h *expDecaySampleHeap) up(j int) { - for { - i := (j - 1) / 2 // parent - if i == j || !(h.s[j].k < h.s[i].k) { - break - } - h.s[i], h.s[j] = h.s[j], h.s[i] - j = i - } -} - -func (h *expDecaySampleHeap) down(i, n int) { - for { - j1 := 2*i + 1 - if j1 >= n || j1 < 0 { // j1 < 0 after int overflow - break - } - j := j1 // left child - if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) { - j = j2 // = 2*i + 2 // right child - } - if !(h.s[j].k < h.s[i].k) { - break - } - h.s[i], h.s[j] = h.s[j], h.s[i] - i = j - } -} diff --git a/metrics/sample_test.go b/metrics/sample_test.go deleted file mode 100644 index 7967357055..0000000000 --- a/metrics/sample_test.go +++ /dev/null @@ -1,360 +0,0 @@ -package metrics - -import ( - "math" - "math/rand" - "runtime" - "testing" - "time" -) - -const epsilonPercentile = .00000000001 - -// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively -// expensive computations like Variance, the cost of copying the Sample, as -// approximated by a make and copy, is much greater than the cost of the -// computation for small samples and only slightly less for large samples. 
-func BenchmarkCompute1000(b *testing.B) { - s := make([]int64, 1000) - var sum int64 - for i := 0; i < len(s); i++ { - s[i] = int64(i) - sum += int64(i) - } - mean := float64(sum) / float64(len(s)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - SampleVariance(mean, s) - } -} -func BenchmarkCompute1000000(b *testing.B) { - s := make([]int64, 1000000) - var sum int64 - for i := 0; i < len(s); i++ { - s[i] = int64(i) - sum += int64(i) - } - mean := float64(sum) / float64(len(s)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - SampleVariance(mean, s) - } -} -func BenchmarkCopy1000(b *testing.B) { - s := make([]int64, 1000) - for i := 0; i < len(s); i++ { - s[i] = int64(i) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - sCopy := make([]int64, len(s)) - copy(sCopy, s) - } -} -func BenchmarkCopy1000000(b *testing.B) { - s := make([]int64, 1000000) - for i := 0; i < len(s); i++ { - s[i] = int64(i) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - sCopy := make([]int64, len(s)) - copy(sCopy, s) - } -} - -func BenchmarkExpDecaySample257(b *testing.B) { - benchmarkSample(b, NewExpDecaySample(257, 0.015)) -} - -func BenchmarkExpDecaySample514(b *testing.B) { - benchmarkSample(b, NewExpDecaySample(514, 0.015)) -} - -func BenchmarkExpDecaySample1028(b *testing.B) { - benchmarkSample(b, NewExpDecaySample(1028, 0.015)) -} - -func BenchmarkUniformSample257(b *testing.B) { - benchmarkSample(b, NewUniformSample(257)) -} - -func BenchmarkUniformSample514(b *testing.B) { - benchmarkSample(b, NewUniformSample(514)) -} - -func BenchmarkUniformSample1028(b *testing.B) { - benchmarkSample(b, NewUniformSample(1028)) -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func TestExpDecaySample(t *testing.T) { - for _, tc := range []struct { - reservoirSize int - alpha float64 - updates int - }{ - {100, 0.99, 10}, - {1000, 0.01, 100}, - {100, 0.99, 1000}, - } { - sample := NewExpDecaySample(tc.reservoirSize, tc.alpha) - for i := 0; i < tc.updates; i++ { - 
sample.Update(int64(i)) - } - snap := sample.Snapshot() - if have, want := int(snap.Count()), tc.updates; have != want { - t.Errorf("have %d want %d", have, want) - } - if have, want := snap.Size(), min(tc.updates, tc.reservoirSize); have != want { - t.Errorf("have %d want %d", have, want) - } - values := snap.(*sampleSnapshot).values - if have, want := len(values), min(tc.updates, tc.reservoirSize); have != want { - t.Errorf("have %d want %d", have, want) - } - for _, v := range values { - if v > int64(tc.updates) || v < 0 { - t.Errorf("out of range [0, %d): %v", tc.updates, v) - } - } - } -} - -// This test makes sure that the sample's priority is not amplified by using -// nanosecond duration since start rather than second duration since start. -// The priority becomes +Inf quickly after starting if this is done, -// effectively freezing the set of samples until a rescale step happens. -func TestExpDecaySampleNanosecondRegression(t *testing.T) { - sw := NewExpDecaySample(100, 0.99) - for i := 0; i < 100; i++ { - sw.Update(10) - } - time.Sleep(1 * time.Millisecond) - for i := 0; i < 100; i++ { - sw.Update(20) - } - s := sw.Snapshot() - v := s.(*sampleSnapshot).values - avg := float64(0) - for i := 0; i < len(v); i++ { - avg += float64(v[i]) - } - avg /= float64(len(v)) - if avg > 16 || avg < 14 { - t.Errorf("out of range [14, 16]: %v\n", avg) - } -} - -func TestExpDecaySampleRescale(t *testing.T) { - s := NewExpDecaySample(2, 0.001).(*ExpDecaySample) - s.update(time.Now(), 1) - s.update(time.Now().Add(time.Hour+time.Microsecond), 1) - for _, v := range s.values.Values() { - if v.k == 0.0 { - t.Fatal("v.k == 0.0") - } - } -} - -func TestExpDecaySampleSnapshot(t *testing.T) { - now := time.Now() - s := NewExpDecaySample(100, 0.99).(*ExpDecaySample).SetRand(rand.New(rand.NewSource(1))) - for i := 1; i <= 10000; i++ { - s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i)) - } - snapshot := s.Snapshot() - s.Update(1) - testExpDecaySampleStatistics(t, 
snapshot) -} - -func TestExpDecaySampleStatistics(t *testing.T) { - now := time.Now() - s := NewExpDecaySample(100, 0.99).(*ExpDecaySample).SetRand(rand.New(rand.NewSource(1))) - for i := 1; i <= 10000; i++ { - s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i)) - } - testExpDecaySampleStatistics(t, s.Snapshot()) -} - -func TestUniformSample(t *testing.T) { - sw := NewUniformSample(100) - for i := 0; i < 1000; i++ { - sw.Update(int64(i)) - } - s := sw.Snapshot() - if size := s.Count(); size != 1000 { - t.Errorf("s.Count(): 1000 != %v\n", size) - } - if size := s.Size(); size != 100 { - t.Errorf("s.Size(): 100 != %v\n", size) - } - values := s.(*sampleSnapshot).values - - if l := len(values); l != 100 { - t.Errorf("len(s.Values()): 100 != %v\n", l) - } - for _, v := range values { - if v > 1000 || v < 0 { - t.Errorf("out of range [0, 100): %v\n", v) - } - } -} - -func TestUniformSampleIncludesTail(t *testing.T) { - sw := NewUniformSample(100) - max := 100 - for i := 0; i < max; i++ { - sw.Update(int64(i)) - } - s := sw.Snapshot() - v := s.(*sampleSnapshot).values - sum := 0 - exp := (max - 1) * max / 2 - for i := 0; i < len(v); i++ { - sum += int(v[i]) - } - if exp != sum { - t.Errorf("sum: %v != %v\n", exp, sum) - } -} - -func TestUniformSampleSnapshot(t *testing.T) { - s := NewUniformSample(100).(*UniformSample).SetRand(rand.New(rand.NewSource(1))) - for i := 1; i <= 10000; i++ { - s.Update(int64(i)) - } - snapshot := s.Snapshot() - s.Update(1) - testUniformSampleStatistics(t, snapshot) -} - -func TestUniformSampleStatistics(t *testing.T) { - s := NewUniformSample(100).(*UniformSample).SetRand(rand.New(rand.NewSource(1))) - for i := 1; i <= 10000; i++ { - s.Update(int64(i)) - } - testUniformSampleStatistics(t, s.Snapshot()) -} - -func benchmarkSample(b *testing.B, s Sample) { - var memStats runtime.MemStats - runtime.ReadMemStats(&memStats) - pauseTotalNs := memStats.PauseTotalNs - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.Update(1) - } - 
b.StopTimer() - runtime.GC() - runtime.ReadMemStats(&memStats) - b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N) -} - -func testExpDecaySampleStatistics(t *testing.T, s SampleSnapshot) { - if count := s.Count(); count != 10000 { - t.Errorf("s.Count(): 10000 != %v\n", count) - } - if min := s.Min(); min != 107 { - t.Errorf("s.Min(): 107 != %v\n", min) - } - if max := s.Max(); max != 10000 { - t.Errorf("s.Max(): 10000 != %v\n", max) - } - if mean := s.Mean(); mean != 4965.98 { - t.Errorf("s.Mean(): 4965.98 != %v\n", mean) - } - if stdDev := s.StdDev(); stdDev != 2959.825156930727 { - t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev) - } - ps := s.Percentiles([]float64{0.5, 0.75, 0.99}) - if ps[0] != 4615 { - t.Errorf("median: 4615 != %v\n", ps[0]) - } - if ps[1] != 7672 { - t.Errorf("75th percentile: 7672 != %v\n", ps[1]) - } - if ps[2] != 9998.99 { - t.Errorf("99th percentile: 9998.99 != %v\n", ps[2]) - } -} - -func testUniformSampleStatistics(t *testing.T, s SampleSnapshot) { - if count := s.Count(); count != 10000 { - t.Errorf("s.Count(): 10000 != %v\n", count) - } - if min := s.Min(); min != 37 { - t.Errorf("s.Min(): 37 != %v\n", min) - } - if max := s.Max(); max != 9989 { - t.Errorf("s.Max(): 9989 != %v\n", max) - } - if mean := s.Mean(); mean != 4748.14 { - t.Errorf("s.Mean(): 4748.14 != %v\n", mean) - } - if stdDev := s.StdDev(); stdDev != 2826.684117548333 { - t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev) - } - ps := s.Percentiles([]float64{0.5, 0.75, 0.99}) - if ps[0] != 4599 { - t.Errorf("median: 4599 != %v\n", ps[0]) - } - if ps[1] != 7380.5 { - t.Errorf("75th percentile: 7380.5 != %v\n", ps[1]) - } - if math.Abs(9986.429999999998-ps[2]) > epsilonPercentile { - t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2]) - } -} - -// TestUniformSampleConcurrentUpdateCount would expose data race problems with -// concurrent Update and Count calls on Sample when test is called with -race -// argument -func 
TestUniformSampleConcurrentUpdateCount(t *testing.T) { - if testing.Short() { - t.Skip("skipping in short mode") - } - s := NewUniformSample(100) - for i := 0; i < 100; i++ { - s.Update(int64(i)) - } - quit := make(chan struct{}) - go func() { - t := time.NewTicker(10 * time.Millisecond) - defer t.Stop() - for { - select { - case <-t.C: - s.Update(rand.Int63()) - case <-quit: - t.Stop() - return - } - } - }() - for i := 0; i < 1000; i++ { - s.Snapshot().Count() - time.Sleep(5 * time.Millisecond) - } - quit <- struct{}{} -} - -func BenchmarkCalculatePercentiles(b *testing.B) { - pss := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999} - var vals []int64 - for i := 0; i < 1000; i++ { - vals = append(vals, int64(rand.Int31())) - } - v := make([]int64, len(vals)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - copy(v, vals) - _ = CalculatePercentiles(v, pss) - } -} diff --git a/metrics/syslog.go b/metrics/syslog.go deleted file mode 100644 index fd856d6973..0000000000 --- a/metrics/syslog.go +++ /dev/null @@ -1,83 +0,0 @@ -//go:build !windows -// +build !windows - -package metrics - -import ( - "fmt" - "log/syslog" - "time" -) - -// Output each metric in the given registry to syslog periodically using -// the given syslogger. 
-func Syslog(r Registry, d time.Duration, w *syslog.Writer) { - for range time.Tick(d) { - r.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Snapshot().Count())) - case CounterFloat64: - w.Info(fmt.Sprintf("counter %s: count: %f", name, metric.Snapshot().Count())) - case Gauge: - w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Snapshot().Value())) - case GaugeFloat64: - w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Snapshot().Value())) - case GaugeInfo: - w.Info(fmt.Sprintf("gauge %s: value: %s", name, metric.Snapshot().Value())) - case Healthcheck: - metric.Check() - w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error())) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - w.Info(fmt.Sprintf( - "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f", - name, - h.Count(), - h.Min(), - h.Max(), - h.Mean(), - h.StdDev(), - ps[0], - ps[1], - ps[2], - ps[3], - ps[4], - )) - case Meter: - m := metric.Snapshot() - w.Info(fmt.Sprintf( - "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f", - name, - m.Count(), - m.Rate1(), - m.Rate5(), - m.Rate15(), - m.RateMean(), - )) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - w.Info(fmt.Sprintf( - "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f", - name, - t.Count(), - t.Min(), - t.Max(), - t.Mean(), - t.StdDev(), - ps[0], - ps[1], - ps[2], - ps[3], - ps[4], - t.Rate1(), - t.Rate5(), - t.Rate15(), - t.RateMean(), - )) - } - }) - } -} diff --git a/metrics/testdata/opentsb.want b/metrics/testdata/opentsb.want deleted file mode 100644 index 43fe1b2ac2..0000000000 --- 
a/metrics/testdata/opentsb.want +++ /dev/null @@ -1,23 +0,0 @@ -put pre.elite.count 978307200 1337 host=hal9000 -put pre.elite.one-minute 978307200 0.00 host=hal9000 -put pre.elite.five-minute 978307200 0.00 host=hal9000 -put pre.elite.fifteen-minute 978307200 0.00 host=hal9000 -put pre.elite.mean 978307200 0.00 host=hal9000 -put pre.foo.value 978307200 {"chain_id":"5"} host=hal9000 -put pre.months.count 978307200 12 host=hal9000 -put pre.pi.value 978307200 3.140000 host=hal9000 -put pre.second.count 978307200 1 host=hal9000 -put pre.second.min 978307200 1000 host=hal9000 -put pre.second.max 978307200 1000 host=hal9000 -put pre.second.mean 978307200 1000.00 host=hal9000 -put pre.second.std-dev 978307200 0.00 host=hal9000 -put pre.second.50-percentile 978307200 1000.00 host=hal9000 -put pre.second.75-percentile 978307200 1000.00 host=hal9000 -put pre.second.95-percentile 978307200 1000.00 host=hal9000 -put pre.second.99-percentile 978307200 1000.00 host=hal9000 -put pre.second.999-percentile 978307200 1000.00 host=hal9000 -put pre.second.one-minute 978307200 0.00 host=hal9000 -put pre.second.five-minute 978307200 0.00 host=hal9000 -put pre.second.fifteen-minute 978307200 0.00 host=hal9000 -put pre.second.mean-rate 978307200 0.00 host=hal9000 -put pre.tau.count 978307200 1.570000 host=hal9000 diff --git a/metrics/timer.go b/metrics/timer.go deleted file mode 100644 index bb8def82fb..0000000000 --- a/metrics/timer.go +++ /dev/null @@ -1,182 +0,0 @@ -package metrics - -import ( - "sync" - "time" -) - -type TimerSnapshot interface { - HistogramSnapshot - MeterSnapshot -} - -// Timers capture the duration and rate of events. -type Timer interface { - Snapshot() TimerSnapshot - Stop() - Time(func()) - UpdateSince(time.Time) - Update(time.Duration) -} - -// GetOrRegisterTimer returns an existing Timer or constructs and registers a -// new StandardTimer. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. 
-func GetOrRegisterTimer(name string, r Registry) Timer { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewTimer).(Timer) -} - -// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. -// Be sure to call Stop() once the timer is of no use to allow for garbage collection. -func NewCustomTimer(h Histogram, m Meter) Timer { - if !Enabled { - return NilTimer{} - } - return &StandardTimer{ - histogram: h, - meter: m, - } -} - -// NewRegisteredTimer constructs and registers a new StandardTimer. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func NewRegisteredTimer(name string, r Registry) Timer { - c := NewTimer() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewTimer constructs a new StandardTimer using an exponentially-decaying -// sample with the same reservoir size and alpha as UNIX load averages. -// Be sure to call Stop() once the timer is of no use to allow for garbage collection. -func NewTimer() Timer { - if !Enabled { - return NilTimer{} - } - return &StandardTimer{ - histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), - meter: NewMeter(), - } -} - -// NilTimer is a no-op Timer. -type NilTimer struct{} - -func (NilTimer) Snapshot() TimerSnapshot { return (*emptySnapshot)(nil) } -func (NilTimer) Stop() {} -func (NilTimer) Time(f func()) { f() } -func (NilTimer) Update(time.Duration) {} -func (NilTimer) UpdateSince(time.Time) {} - -// StandardTimer is the standard implementation of a Timer and uses a Histogram -// and Meter. -type StandardTimer struct { - histogram Histogram - meter Meter - mutex sync.Mutex -} - -// Snapshot returns a read-only copy of the timer. -func (t *StandardTimer) Snapshot() TimerSnapshot { - t.mutex.Lock() - defer t.mutex.Unlock() - return &timerSnapshot{ - histogram: t.histogram.Snapshot(), - meter: t.meter.Snapshot(), - } -} - -// Stop stops the meter. 
-func (t *StandardTimer) Stop() { - t.meter.Stop() -} - -// Record the duration of the execution of the given function. -func (t *StandardTimer) Time(f func()) { - ts := time.Now() - f() - t.Update(time.Since(ts)) -} - -// Record the duration of an event, in nanoseconds. -func (t *StandardTimer) Update(d time.Duration) { - t.mutex.Lock() - defer t.mutex.Unlock() - t.histogram.Update(d.Nanoseconds()) - t.meter.Mark(1) -} - -// Record the duration of an event that started at a time and ends now. -// The record uses nanoseconds. -func (t *StandardTimer) UpdateSince(ts time.Time) { - t.Update(time.Since(ts)) -} - -// timerSnapshot is a read-only copy of another Timer. -type timerSnapshot struct { - histogram HistogramSnapshot - meter MeterSnapshot -} - -// Count returns the number of events recorded at the time the snapshot was -// taken. -func (t *timerSnapshot) Count() int64 { return t.histogram.Count() } - -// Max returns the maximum value at the time the snapshot was taken. -func (t *timerSnapshot) Max() int64 { return t.histogram.Max() } - -// Size returns the size of the sample at the time the snapshot was taken. -func (t *timerSnapshot) Size() int { return t.histogram.Size() } - -// Mean returns the mean value at the time the snapshot was taken. -func (t *timerSnapshot) Mean() float64 { return t.histogram.Mean() } - -// Min returns the minimum value at the time the snapshot was taken. -func (t *timerSnapshot) Min() int64 { return t.histogram.Min() } - -// Percentile returns an arbitrary percentile of sampled values at the time the -// snapshot was taken. -func (t *timerSnapshot) Percentile(p float64) float64 { - return t.histogram.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of sampled values at -// the time the snapshot was taken. 
-func (t *timerSnapshot) Percentiles(ps []float64) []float64 { - return t.histogram.Percentiles(ps) -} - -// Rate1 returns the one-minute moving average rate of events per second at the -// time the snapshot was taken. -func (t *timerSnapshot) Rate1() float64 { return t.meter.Rate1() } - -// Rate5 returns the five-minute moving average rate of events per second at -// the time the snapshot was taken. -func (t *timerSnapshot) Rate5() float64 { return t.meter.Rate5() } - -// Rate15 returns the fifteen-minute moving average rate of events per second -// at the time the snapshot was taken. -func (t *timerSnapshot) Rate15() float64 { return t.meter.Rate15() } - -// RateMean returns the meter's mean rate of events per second at the time the -// snapshot was taken. -func (t *timerSnapshot) RateMean() float64 { return t.meter.RateMean() } - -// StdDev returns the standard deviation of the values at the time the snapshot -// was taken. -func (t *timerSnapshot) StdDev() float64 { return t.histogram.StdDev() } - -// Sum returns the sum at the time the snapshot was taken. -func (t *timerSnapshot) Sum() int64 { return t.histogram.Sum() } - -// Variance returns the variance of the values at the time the snapshot was -// taken. 
-func (t *timerSnapshot) Variance() float64 { return t.histogram.Variance() } diff --git a/metrics/timer_test.go b/metrics/timer_test.go deleted file mode 100644 index f10de16c9c..0000000000 --- a/metrics/timer_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package metrics - -import ( - "fmt" - "math" - "testing" - "time" -) - -func BenchmarkTimer(b *testing.B) { - tm := NewTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - tm.Update(1) - } -} - -func TestGetOrRegisterTimer(t *testing.T) { - r := NewRegistry() - NewRegisteredTimer("foo", r).Update(47) - if tm := GetOrRegisterTimer("foo", r).Snapshot(); tm.Count() != 1 { - t.Fatal(tm) - } -} - -func TestTimerExtremes(t *testing.T) { - tm := NewTimer() - tm.Update(math.MaxInt64) - tm.Update(0) - if stdDev := tm.Snapshot().StdDev(); stdDev != 4.611686018427388e+18 { - t.Errorf("tm.StdDev(): 4.611686018427388e+18 != %v\n", stdDev) - } -} - -func TestTimerStop(t *testing.T) { - l := len(arbiter.meters) - tm := NewTimer() - if l+1 != len(arbiter.meters) { - t.Errorf("arbiter.meters: %d != %d\n", l+1, len(arbiter.meters)) - } - tm.Stop() - if l != len(arbiter.meters) { - t.Errorf("arbiter.meters: %d != %d\n", l, len(arbiter.meters)) - } -} - -func TestTimerFunc(t *testing.T) { - var ( - tm = NewTimer() - testStart = time.Now() - actualTime time.Duration - ) - tm.Time(func() { - time.Sleep(50 * time.Millisecond) - actualTime = time.Since(testStart) - }) - var ( - drift = time.Millisecond * 2 - measured = time.Duration(tm.Snapshot().Max()) - ceil = actualTime + drift - floor = actualTime - drift - ) - if measured > ceil || measured < floor { - t.Errorf("tm.Max(): %v > %v || %v > %v\n", measured, ceil, measured, floor) - } -} - -func TestTimerZero(t *testing.T) { - tm := NewTimer().Snapshot() - if count := tm.Count(); count != 0 { - t.Errorf("tm.Count(): 0 != %v\n", count) - } - if min := tm.Min(); min != 0 { - t.Errorf("tm.Min(): 0 != %v\n", min) - } - if max := tm.Max(); max != 0 { - t.Errorf("tm.Max(): 0 != %v\n", max) - } 
- if mean := tm.Mean(); mean != 0.0 { - t.Errorf("tm.Mean(): 0.0 != %v\n", mean) - } - if stdDev := tm.StdDev(); stdDev != 0.0 { - t.Errorf("tm.StdDev(): 0.0 != %v\n", stdDev) - } - ps := tm.Percentiles([]float64{0.5, 0.75, 0.99}) - if ps[0] != 0.0 { - t.Errorf("median: 0.0 != %v\n", ps[0]) - } - if ps[1] != 0.0 { - t.Errorf("75th percentile: 0.0 != %v\n", ps[1]) - } - if ps[2] != 0.0 { - t.Errorf("99th percentile: 0.0 != %v\n", ps[2]) - } - if rate1 := tm.Rate1(); rate1 != 0.0 { - t.Errorf("tm.Rate1(): 0.0 != %v\n", rate1) - } - if rate5 := tm.Rate5(); rate5 != 0.0 { - t.Errorf("tm.Rate5(): 0.0 != %v\n", rate5) - } - if rate15 := tm.Rate15(); rate15 != 0.0 { - t.Errorf("tm.Rate15(): 0.0 != %v\n", rate15) - } - if rateMean := tm.RateMean(); rateMean != 0.0 { - t.Errorf("tm.RateMean(): 0.0 != %v\n", rateMean) - } -} - -func ExampleGetOrRegisterTimer() { - m := "account.create.latency" - t := GetOrRegisterTimer(m, nil) - t.Update(47) - fmt.Println(t.Snapshot().Max()) // Output: 47 -} diff --git a/metrics/validate.sh b/metrics/validate.sh deleted file mode 100755 index 0d8ba28df3..0000000000 --- a/metrics/validate.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -set -e - -# check there are no formatting issues -GOFMT_LINES=$(gofmt -l . | wc -l | xargs) -test "$GOFMT_LINES" -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues" - -# run the tests for the root package -go test -race . diff --git a/metrics/writer.go b/metrics/writer.go deleted file mode 100644 index 098da45c27..0000000000 --- a/metrics/writer.go +++ /dev/null @@ -1,100 +0,0 @@ -package metrics - -import ( - "fmt" - "io" - "strings" - "time" - - "golang.org/x/exp/slices" -) - -// Write sorts writes each metric in the given registry periodically to the -// given io.Writer. -func Write(r Registry, d time.Duration, w io.Writer) { - for range time.Tick(d) { - WriteOnce(r, w) - } -} - -// WriteOnce sorts and writes metrics in the given registry to the given -// io.Writer. 
-func WriteOnce(r Registry, w io.Writer) { - var namedMetrics []namedMetric - r.Each(func(name string, i interface{}) { - namedMetrics = append(namedMetrics, namedMetric{name, i}) - }) - slices.SortFunc(namedMetrics, namedMetric.cmp) - for _, namedMetric := range namedMetrics { - switch metric := namedMetric.m.(type) { - case Counter: - fmt.Fprintf(w, "counter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", metric.Snapshot().Count()) - case CounterFloat64: - fmt.Fprintf(w, "counter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %f\n", metric.Snapshot().Count()) - case Gauge: - fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %9d\n", metric.Snapshot().Value()) - case GaugeFloat64: - fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %f\n", metric.Snapshot().Value()) - case GaugeInfo: - fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %s\n", metric.Snapshot().Value().String()) - case Healthcheck: - metric.Check() - fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name) - fmt.Fprintf(w, " error: %v\n", metric.Error()) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "histogram %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", h.Count()) - fmt.Fprintf(w, " min: %9d\n", h.Min()) - fmt.Fprintf(w, " max: %9d\n", h.Max()) - fmt.Fprintf(w, " mean: %12.2f\n", h.Mean()) - fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev()) - fmt.Fprintf(w, " median: %12.2f\n", ps[0]) - fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) - fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) - fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) - fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "meter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", m.Count()) - fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1()) - fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5()) - fmt.Fprintf(w, " 15-min rate: %12.2f\n", 
m.Rate15()) - fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean()) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "timer %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", t.Count()) - fmt.Fprintf(w, " min: %9d\n", t.Min()) - fmt.Fprintf(w, " max: %9d\n", t.Max()) - fmt.Fprintf(w, " mean: %12.2f\n", t.Mean()) - fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev()) - fmt.Fprintf(w, " median: %12.2f\n", ps[0]) - fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) - fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) - fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) - fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) - fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1()) - fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5()) - fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15()) - fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean()) - } - } -} - -type namedMetric struct { - name string - m interface{} -} - -func (m namedMetric) cmp(other namedMetric) int { - return strings.Compare(m.name, other.name) -} diff --git a/metrics/writer_test.go b/metrics/writer_test.go deleted file mode 100644 index 8376bf8975..0000000000 --- a/metrics/writer_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package metrics - -import ( - "testing" - - "golang.org/x/exp/slices" -) - -func TestMetricsSorting(t *testing.T) { - var namedMetrics = []namedMetric{ - {name: "zzz"}, - {name: "bbb"}, - {name: "fff"}, - {name: "ggg"}, - } - - slices.SortFunc(namedMetrics, namedMetric.cmp) - for i, name := range []string{"bbb", "fff", "ggg", "zzz"} { - if namedMetrics[i].name != name { - t.Fail() - } - } -} diff --git a/miner/ordering_ext.go b/miner/ordering_ext.go deleted file mode 100644 index 31ec24024d..0000000000 --- a/miner/ordering_ext.go +++ /dev/null @@ -1,15 +0,0 @@ -package miner - -import ( - "math/big" - - "github.com/ava-labs/coreth/core/txpool" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/libevm/common" -) - -type TransactionsByPriceAndNonce = 
transactionsByPriceAndNonce - -func NewTransactionsByPriceAndNonce(signer types.Signer, txs map[common.Address][]*txpool.LazyTransaction, baseFee *big.Int) *TransactionsByPriceAndNonce { - return newTransactionsByPriceAndNonce(signer, txs, baseFee) -} diff --git a/miner/worker.go b/miner/worker.go index cc3af7cabd..3d2953df8e 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -165,14 +165,16 @@ func (w *worker) commitNewWork(predicateContext *precompileconfig.PredicateConte Time: timestamp, } - // Set BaseFee and Extra data field if we are post ApricotPhase3 - if chainExtra.IsApricotPhase3(timestamp) { - var err error - header.Extra, header.BaseFee, err = dummy.CalcBaseFee(w.chainConfig, parent, timestamp) - if err != nil { - return nil, fmt.Errorf("failed to calculate new base fee: %w", err) - } + var err error + header.Extra, err = dummy.CalcExtraPrefix(w.chainConfig, parent, timestamp) + if err != nil { + return nil, fmt.Errorf("failed to calculate new extra prefix: %w", err) } + header.BaseFee, err = dummy.CalcBaseFee(w.chainConfig, parent, timestamp) + if err != nil { + return nil, fmt.Errorf("failed to calculate new base fee: %w", err) + } + // Apply EIP-4844, EIP-4788. 
if w.chainConfig.IsCancun(header.Number, header.Time) { var excessBlobGas uint64 diff --git a/params/avalanche_params.go b/params/avalanche_params.go index 93b66eb99a..f5b41a5c02 100644 --- a/params/avalanche_params.go +++ b/params/avalanche_params.go @@ -6,7 +6,7 @@ package params import ( "math/big" - "github.com/ava-labs/coreth/predicate" + "github.com/ava-labs/avalanchego/utils/wrappers" ) // Minimum Gas Price @@ -20,20 +20,19 @@ const ( CortinaGasLimit uint64 = 15_000_000 ApricotPhase3MinBaseFee int64 = 75 * GWei - ApricotPhase3MaxBaseFee int64 = 225 * GWei + ApricotPhase3MaxBaseFee = 225 * GWei ApricotPhase3InitialBaseFee int64 = 225 * GWei - ApricotPhase3TargetGas uint64 = 10_000_000 - ApricotPhase4MinBaseFee int64 = 25 * GWei - ApricotPhase4MaxBaseFee int64 = 1_000 * GWei - ApricotPhase4BaseFeeChangeDenominator uint64 = 12 - ApricotPhase5TargetGas uint64 = 15_000_000 + ApricotPhase3TargetGas = 10_000_000 + ApricotPhase3BaseFeeChangeDenominator = 12 + ApricotPhase5TargetGas = 15_000_000 ApricotPhase5BaseFeeChangeDenominator uint64 = 36 EtnaMinBaseFee int64 = GWei - // DynamicFeeExtraDataSize is defined in the predicate package to avoid a circular dependency. - // After Durango, the extra data past the dynamic fee rollup window represents predicate results. - DynamicFeeExtraDataSize = predicate.DynamicFeeExtraDataSize - RollupWindow uint64 = 10 + RollupWindow = 10 // in seconds + DynamicFeeExtraDataSize = wrappers.LongLen * RollupWindow + + // The base cost to charge per atomic transaction. Added in Apricot Phase 5. 
+ AtomicTxBaseCost uint64 = 10_000 ) // The atomic gas limit specifies the maximum amount of gas that can be consumed by the atomic diff --git a/params/config.go b/params/config.go index e44e41e287..7a783b9231 100644 --- a/params/config.go +++ b/params/config.go @@ -54,8 +54,8 @@ var ( &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -91,8 +91,8 @@ var ( &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -124,8 +124,8 @@ var ( &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -157,8 +157,8 @@ var ( &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -191,8 +191,8 @@ var ( &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -226,8 +226,8 @@ var ( &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -261,8 +261,8 @@ var ( &ChainConfig{ ChainID: 
big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -296,8 +296,8 @@ var ( &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -331,8 +331,8 @@ var ( &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -366,8 +366,8 @@ var ( &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -401,8 +401,8 @@ var ( &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -436,8 +436,8 @@ var ( &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -471,8 +471,8 @@ var ( &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -507,8 +507,8 @@ var ( &ChainConfig{ ChainID: big.NewInt(1), HomesteadBlock: 
big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -540,6 +540,44 @@ var ( }, }) + TestFUpgradeChainConfig = WithExtra( + &ChainConfig{ + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ShanghaiTime: utils.NewUint64(0), + CancunTime: utils.NewUint64(0), + }, + &extras.ChainConfig{ + AvalancheContext: extras.AvalancheContext{SnowCtx: utils.TestSnowContext()}, + NetworkUpgrades: extras.NetworkUpgrades{ + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), + ApricotPhasePre6BlockTimestamp: utils.NewUint64(0), + ApricotPhase6BlockTimestamp: utils.NewUint64(0), + ApricotPhasePost6BlockTimestamp: utils.NewUint64(0), + BanffBlockTimestamp: utils.NewUint64(0), + CortinaBlockTimestamp: utils.NewUint64(0), + DurangoBlockTimestamp: utils.NewUint64(0), + EtnaTimestamp: utils.NewUint64(0), + FUpgradeTimestamp: utils.NewUint64(0), + }, + }) + TestRules = TestChainConfig.Rules(new(big.Int), IsMergeTODO, 0) ) diff --git a/params/config_extra.go b/params/config_extra.go index 17cee401e0..6efd6c4cdc 100644 --- a/params/config_extra.go +++ b/params/config_extra.go @@ -25,11 +25,20 @@ type ConfigCompatError = extras.ConfigCompatError // SetEthUpgrades enables Etheruem network upgrades using the same time as // the Avalanche network upgrade that enables them. 
-// -// TODO: Prior to Cancun, Avalanche upgrades are referenced inline in the -// code in place of their Ethereum counterparts. The original Ethereum names -// should be restored for maintainability. func SetEthUpgrades(c *ChainConfig) { + // Set Ethereum block upgrades to initially activated as they were already activated on launch. + c.HomesteadBlock = big.NewInt(0) + c.DAOForkBlock = big.NewInt(0) + c.DAOForkSupport = true + c.EIP150Block = big.NewInt(0) + c.EIP155Block = big.NewInt(0) + c.EIP158Block = big.NewInt(0) + c.ByzantiumBlock = big.NewInt(0) + c.ConstantinopleBlock = big.NewInt(0) + c.PetersburgBlock = big.NewInt(0) + c.IstanbulBlock = big.NewInt(0) + c.MuirGlacierBlock = big.NewInt(0) + if c.ChainID != nil && AvalancheFujiChainID.Cmp(c.ChainID) == 0 { c.BerlinBlock = big.NewInt(184985) // https://testnet.snowtrace.io/block/184985?chainid=43113, AP2 activation block c.LondonBlock = big.NewInt(805078) // https://testnet.snowtrace.io/block/805078?chainid=43113, AP3 activation block @@ -147,25 +156,3 @@ func ToWithUpgradesJSON(c *ChainConfig) *ChainConfigWithUpgradesJSON { UpgradeConfig: GetExtra(c).UpgradeConfig, } } - -func GetChainConfig(agoUpgrade upgrade.Config, chainID *big.Int) *ChainConfig { - return WithExtra( - &ChainConfig{ - ChainID: chainID, - HomesteadBlock: big.NewInt(0), - DAOForkBlock: big.NewInt(0), - DAOForkSupport: true, - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - }, - &extras.ChainConfig{ - NetworkUpgrades: extras.GetNetworkUpgrades(agoUpgrade), - }, - ) -} diff --git a/params/extras/network_upgrades.go b/params/extras/network_upgrades.go index 6488469d23..bc42194fce 100644 --- a/params/extras/network_upgrades.go +++ b/params/extras/network_upgrades.go @@ -41,6 +41,9 @@ type NetworkUpgrades struct { // Note: EIP-4844 
BlobTxs are not enabled in the mempool and blocks are not // allowed to contain them. For details see https://github.com/avalanche-foundation/ACPs/pull/131 EtnaTimestamp *uint64 `json:"etnaTimestamp,omitempty"` + // FUpgrade is a placeholder for the next upgrade. + // (nil = no fork, 0 = already activated) + FUpgradeTimestamp *uint64 `json:"fUpgradeTimestamp,omitempty"` } func (n *NetworkUpgrades) Equal(other *NetworkUpgrades) bool { @@ -84,6 +87,9 @@ func (n *NetworkUpgrades) checkNetworkUpgradesCompatible(newcfg *NetworkUpgrades if isForkTimestampIncompatible(n.EtnaTimestamp, newcfg.EtnaTimestamp, time) { return newTimestampCompatError("Etna fork block timestamp", n.EtnaTimestamp, newcfg.EtnaTimestamp) } + if isForkTimestampIncompatible(n.FUpgradeTimestamp, newcfg.FUpgradeTimestamp, time) { + return newTimestampCompatError("F-Upgrade fork block timestamp", n.FUpgradeTimestamp, newcfg.FUpgradeTimestamp) + } return nil } @@ -102,6 +108,7 @@ func (n *NetworkUpgrades) forkOrder() []fork { {name: "cortinaBlockTimestamp", timestamp: n.CortinaBlockTimestamp}, {name: "durangoBlockTimestamp", timestamp: n.DurangoBlockTimestamp}, {name: "etnaTimestamp", timestamp: n.EtnaTimestamp}, + {name: "fUpgradeTimestamp", timestamp: n.FUpgradeTimestamp}, } } @@ -177,6 +184,12 @@ func (n NetworkUpgrades) IsEtna(time uint64) bool { return isTimestampForked(n.EtnaTimestamp, time) } +// IsFUpgrade returns whether [time] represents a block +// with a timestamp after the F upgrade time. 
+func (n *NetworkUpgrades) IsFUpgrade(time uint64) bool { + return isTimestampForked(n.FUpgradeTimestamp, time) +} + func (n NetworkUpgrades) Description() string { var banner string banner += fmt.Sprintf(" - Apricot Phase 1 Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.3.0)\n", ptrToString(n.ApricotPhase1BlockTimestamp)) @@ -190,7 +203,8 @@ func (n NetworkUpgrades) Description() string { banner += fmt.Sprintf(" - Banff Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0)\n", ptrToString(n.BanffBlockTimestamp)) banner += fmt.Sprintf(" - Cortina Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0)\n", ptrToString(n.CortinaBlockTimestamp)) banner += fmt.Sprintf(" - Durango Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0)\n", ptrToString(n.DurangoBlockTimestamp)) - banner += fmt.Sprintf(" - Etna Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.12.0)\n", ptrToString(n.EtnaTimestamp)) + banner += fmt.Sprintf(" - Etna Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.12.0)\n", ptrToString(n.EtnaTimestamp)) + banner += fmt.Sprintf(" - F-Upgrade Timestamp: @%-10v (Unscheduled)\n", ptrToString(n.FUpgradeTimestamp)) return banner } @@ -208,6 +222,7 @@ func GetNetworkUpgrades(agoUpgrade upgrade.Config) NetworkUpgrades { CortinaBlockTimestamp: utils.TimeToNewUint64(agoUpgrade.CortinaTime), DurangoBlockTimestamp: utils.TimeToNewUint64(agoUpgrade.DurangoTime), EtnaTimestamp: utils.TimeToNewUint64(agoUpgrade.EtnaTime), + FUpgradeTimestamp: utils.TimeToNewUint64(agoUpgrade.FUpgradeTime), } } @@ -218,6 +233,7 @@ type AvalancheRules struct { IsCortina bool IsDurango bool IsEtna bool + IsFUpgrade bool } func (n *NetworkUpgrades) GetAvalancheRules(timestamp uint64) AvalancheRules { @@ -234,5 +250,6 @@ func (n *NetworkUpgrades) GetAvalancheRules(timestamp uint64) AvalancheRules { IsCortina: n.IsCortina(timestamp), 
IsDurango: n.IsDurango(timestamp), IsEtna: n.IsEtna(timestamp), + IsFUpgrade: n.IsFUpgrade(timestamp), } } diff --git a/peer/peer_tracker.go b/peer/peer_tracker.go index bc06e9b267..c529dc1e32 100644 --- a/peer/peer_tracker.go +++ b/peer/peer_tracker.go @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/libevm/log" - "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/libevm/metrics" ) const ( diff --git a/peer/stats/stats.go b/peer/stats/stats.go index 165537bae7..5a3c2918f4 100644 --- a/peer/stats/stats.go +++ b/peer/stats/stats.go @@ -6,7 +6,7 @@ package stats import ( "time" - "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/libevm/metrics" ) // RequestHandlerStats provides the interface for metrics for app requests. diff --git a/plugin/evm/acp176/acp176.go b/plugin/evm/acp176/acp176.go new file mode 100644 index 0000000000..88b582749f --- /dev/null +++ b/plugin/evm/acp176/acp176.go @@ -0,0 +1,182 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// ACP176 implements the fee logic specified here: +// https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/176-dynamic-evm-gas-limit-and-price-discovery-updates/README.md +package acp176 + +import ( + "fmt" + "math" + "math/big" + "sort" + + "github.com/ava-labs/avalanchego/vms/components/gas" + "github.com/holiman/uint256" + + safemath "github.com/ava-labs/avalanchego/utils/math" +) + +const ( + MinTargetPerSecond = 1_000_000 // P + TargetConversion = MaxTargetChangeRate * MaxTargetExcessDiff // D + MaxTargetExcessDiff = 1 << 15 // Q + MinGasPrice = 1 // M + + TimeToFillCapacity = 10 // in seconds + TargetToMax = 2 // multiplier to convert from target per second to max per second + TargetToPriceUpdateConversion = 43 // 43s ~= 30s * ln(2) which makes the price double at most every ~30 seconds + MaxTargetChangeRate = 1024 // Controls the rate that the target can change per block. 
+ + targetToMaxCapacity = TargetToMax * TimeToFillCapacity + maxTargetExcess = 1_024_950_627 // TargetConversion * ln(MaxUint64 / MinTargetPerSecond) + 1 +) + +// State represents the current state of the gas pricing and constraints. +type State struct { + Gas gas.State + TargetExcess gas.Gas // q +} + +// Target returns the target gas consumed per second, `T`. +// +// Target = MinTargetPerSecond * e^(TargetExcess / TargetConversion) +func (s *State) Target() gas.Gas { + return gas.Gas(gas.CalculatePrice( + MinTargetPerSecond, + s.TargetExcess, + TargetConversion, + )) +} + +// MaxCapacity returns the maximum possible accrued gas capacity, `C`. +func (s *State) MaxCapacity() gas.Gas { + targetPerSecond := s.Target() + maxCapacity, err := safemath.Mul(targetToMaxCapacity, targetPerSecond) + if err != nil { + maxCapacity = math.MaxUint64 + } + return maxCapacity +} + +// GasPrice returns the current required fee per gas. +// +// GasPrice = MinGasPrice * e^(Excess / (Target() * TargetToPriceUpdateConversion)) +func (s *State) GasPrice() gas.Price { + target := s.Target() + priceUpdateConversion, err := safemath.Mul(TargetToPriceUpdateConversion, target) // K + if err != nil { + priceUpdateConversion = math.MaxUint64 + } + + return gas.CalculatePrice(MinGasPrice, s.Gas.Excess, priceUpdateConversion) +} + +// AdvanceTime increases the gas capacity and decreases the gas excess based on +// the elapsed seconds. +func (s *State) AdvanceTime(seconds uint64) { + targetPerSecond := s.Target() + maxPerSecond, err := safemath.Mul(TargetToMax, targetPerSecond) // R + if err != nil { + maxPerSecond = math.MaxUint64 + } + maxCapacity, err := safemath.Mul(TimeToFillCapacity, maxPerSecond) // C + if err != nil { + maxCapacity = math.MaxUint64 + } + s.Gas = s.Gas.AdvanceTime( + maxCapacity, + maxPerSecond, + targetPerSecond, + seconds, + ) +} + +// ConsumeGas decreases the gas capacity and increases the gas excess by +// gasUsed + extraGasUsed. 
If the gas capacity is insufficient, an error is +// returned. +func (s *State) ConsumeGas( + gasUsed uint64, + extraGasUsed *big.Int, +) error { + newGas, err := s.Gas.ConsumeGas(gas.Gas(gasUsed)) + if err != nil { + return err + } + + if extraGasUsed == nil { + s.Gas = newGas + return nil + } + if !extraGasUsed.IsUint64() { + return fmt.Errorf("%w: extraGasUsed (%d) exceeds MaxUint64", + gas.ErrInsufficientCapacity, + extraGasUsed, + ) + } + newGas, err = newGas.ConsumeGas(gas.Gas(extraGasUsed.Uint64())) + if err != nil { + return err + } + + s.Gas = newGas + return nil +} + +// UpdateTargetExcess updates the targetExcess to be as close as possible to the +// desiredTargetExcess without exceeding the maximum targetExcess change. +func (s *State) UpdateTargetExcess(desiredTargetExcess gas.Gas) { + previousTargetPerSecond := s.Target() + s.TargetExcess = targetExcess(s.TargetExcess, desiredTargetExcess) + newTargetPerSecond := s.Target() + s.Gas.Excess = scaleExcess( + s.Gas.Excess, + newTargetPerSecond, + previousTargetPerSecond, + ) +} + +// DesiredTargetExcess calculates the optimal desiredTargetExcess given the +// desired target. +// +// This could be solved directly by calculating D * ln(desiredTarget / P) using +// floating point math. However, it introduces inaccuracies. So, we use a binary +// search to find the closest integer solution. +func DesiredTargetExcess(desiredTarget gas.Gas) gas.Gas { + return gas.Gas(sort.Search(maxTargetExcess, func(targetExcessGuess int) bool { + state := State{ + TargetExcess: gas.Gas(targetExcessGuess), + } + return state.Target() >= desiredTarget + })) +} + +// targetExcess calculates the optimal new targetExcess for a block proposer to +// include given the current and desired excess values. 
+func targetExcess(excess, desired gas.Gas) gas.Gas { + change := safemath.AbsDiff(excess, desired) + change = min(change, MaxTargetExcessDiff) + if excess < desired { + return excess + change + } + return excess - change +} + +// scaleExcess scales the excess during gas target modifications to keep the +// price constant. +func scaleExcess( + excess, + newTargetPerSecond, + previousTargetPerSecond gas.Gas, +) gas.Gas { + var bigExcess uint256.Int + bigExcess.SetUint64(uint64(excess)) + + var bigTarget uint256.Int + bigTarget.SetUint64(uint64(newTargetPerSecond)) + bigExcess.Mul(&bigExcess, &bigTarget) + + bigTarget.SetUint64(uint64(previousTargetPerSecond)) + bigExcess.Div(&bigExcess, &bigTarget) + return gas.Gas(bigExcess.Uint64()) +} diff --git a/plugin/evm/acp176/acp176_test.go b/plugin/evm/acp176/acp176_test.go new file mode 100644 index 0000000000..c0d513c4f7 --- /dev/null +++ b/plugin/evm/acp176/acp176_test.go @@ -0,0 +1,704 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package acp176 + +import ( + "math" + "math/big" + "testing" + + "github.com/ava-labs/avalanchego/vms/components/gas" + "github.com/ava-labs/libevm/common" + "github.com/stretchr/testify/require" +) + +const nAVAX = 1_000_000_000 + +var ( + readerTests = []struct { + name string + state State + skipTestDesiredTargetExcess bool + target gas.Gas + maxCapacity gas.Gas + gasPrice gas.Price + }{ + { + name: "zero", + state: State{ + Gas: gas.State{ + Excess: 0, + }, + TargetExcess: 0, + }, + target: MinTargetPerSecond, + maxCapacity: targetToMaxCapacity * MinTargetPerSecond, + gasPrice: MinGasPrice, + }, + { + name: "almost_excess_change", + state: State{ + Gas: gas.State{ + Excess: 29_805_331, // MinTargetPerSecond * ln(2) * TargetToPriceUpdateConversion + }, + TargetExcess: 33, // Largest excess that doesn't increase the target + }, + skipTestDesiredTargetExcess: true, + target: MinTargetPerSecond, + maxCapacity: targetToMaxCapacity * MinTargetPerSecond, + gasPrice: 2 * MinGasPrice, + }, + { + name: "small_excess_change", + state: State{ + Gas: gas.State{ + Excess: 29_805_362, // (MinTargetPerSecond + 1) * ln(2) * TargetToPriceUpdateConversion + }, + TargetExcess: 34, // Smallest excess that increases the target + }, + target: MinTargetPerSecond + 1, + maxCapacity: targetToMaxCapacity * (MinTargetPerSecond + 1), + gasPrice: 2 * MinGasPrice, + }, + { + name: "max_initial_excess_change", + state: State{ + Gas: gas.State{ + Excess: 47_286_485, // (MinTargetPerSecond + 977) * ln(3) * TargetToPriceUpdateConversion + }, + TargetExcess: MaxTargetExcessDiff, + }, + skipTestDesiredTargetExcess: true, + target: MinTargetPerSecond + 977, + maxCapacity: targetToMaxCapacity * (MinTargetPerSecond + 977), + gasPrice: 3 * MinGasPrice, + }, + { + name: "current_target", + state: State{ + Gas: gas.State{ + Excess: 1_336_650_647, // 1_500_000 * ln(nAVAX) * TargetToPriceUpdateConversion + }, + TargetExcess: 13_605_152, // 2^25 * ln(1.5) + }, + target: 1_500_000, + maxCapacity: 
targetToMaxCapacity * 1_500_000, + gasPrice: (nAVAX + 7) * MinGasPrice, // +7 due to approximation + }, + { + name: "3m_target", + state: State{ + Gas: gas.State{ + Excess: 3_267_368_247, // 3_000_000 * ln(100*nAVAX) * TargetToPriceUpdateConversion + }, + TargetExcess: 36_863_312, // 2^25 * ln(3) + }, + target: 3_000_000, + maxCapacity: targetToMaxCapacity * 3_000_000, + gasPrice: (100*nAVAX + 31) * MinGasPrice, // +31 due to approximation + }, + { + name: "6m_target", + state: State{ + Gas: gas.State{ + Excess: 6_534_736_494, // 6_000_000 * ln(100*nAVAX) * TargetToPriceUpdateConversion + }, + TargetExcess: 60_121_472, // 2^25 * ln(6) + }, + target: 6_000_000, + maxCapacity: targetToMaxCapacity * 6_000_000, + gasPrice: (100*nAVAX + 31) * MinGasPrice, // +31 due to approximation + }, + { + name: "10m_target", + state: State{ + Gas: gas.State{ + Excess: 10_891_227_490, // 10_000_000 * ln(100*nAVAX) * TargetToPriceUpdateConversion + }, + TargetExcess: 77_261_935, // 2^25 * ln(10) + }, + target: 10_000_000, + maxCapacity: targetToMaxCapacity * 10_000_000, + gasPrice: (100*nAVAX + 31) * MinGasPrice, // +31 due to approximation + }, + { + name: "100m_target", + state: State{ + Gas: gas.State{ + Excess: 108_912_274_899, // 100_000_000 * ln(100*nAVAX) * TargetToPriceUpdateConversion + }, + TargetExcess: 154_523_870, // 2^25 * ln(100) + }, + target: 100_000_000, + maxCapacity: targetToMaxCapacity * 100_000_000, + gasPrice: (100*nAVAX + 8) * MinGasPrice, // +8 due to approximation + }, + { + name: "low_1b_target", + state: State{ + Gas: gas.State{ + Excess: 1_089_122_722_848, // (1_000_000_000 - 24) * ln(100*nAVAX) * TargetToPriceUpdateConversion + }, + TargetExcess: 231_785_804, // 2^25 * ln(1000) + }, + target: 1_000_000_000 - 24, + maxCapacity: targetToMaxCapacity * (1_000_000_000 - 24), + gasPrice: (100*nAVAX + 1) * MinGasPrice, // +1 due to approximation + }, + { + name: "high_1b_target", + state: State{ + Gas: gas.State{ + Excess: 1_089_122_755_521, // (1_000_000_000 + 
6) * ln(100*nAVAX) * TargetToPriceUpdateConversion + }, + TargetExcess: 231_785_805, // 2^25 * ln(1000) + 1 + }, + target: 1_000_000_000 + 6, + maxCapacity: targetToMaxCapacity * (1_000_000_000 + 6), + gasPrice: (100 * nAVAX) * MinGasPrice, + }, + { + name: "largest_max_capacity", + state: State{ + Gas: gas.State{ + Excess: math.MaxUint64, + }, + TargetExcess: 924430531, // 2^25 * ln(MaxUint64 / MinMaxCapacity) + }, + target: 922_337_190_378_117_171, + maxCapacity: 18_446_743_807_562_343_420, + gasPrice: 2 * MinGasPrice, + }, + { + name: "largest_int64_target", + state: State{ + Gas: gas.State{ + Excess: math.MaxUint64, + }, + TargetExcess: 1_001_692_466, // 2^25 * ln(MaxInt64 / MinTargetPerSecond) + }, + target: 9_223_371_923_824_614_091, + maxCapacity: math.MaxUint64, + gasPrice: 2 * MinGasPrice, + }, + { + name: "second_largest_uint64_target", + state: State{ + Gas: gas.State{ + Excess: math.MaxUint64, + }, + TargetExcess: 1_024_950_626, // 2^25 * ln(MaxUint64 / MinTargetPerSecond) + }, + target: 18_446_743_882_783_898_031, + maxCapacity: math.MaxUint64, + gasPrice: 2 * MinGasPrice, + }, + { + name: "largest_uint64_target", + state: State{ + Gas: gas.State{ + Excess: math.MaxUint64, + }, + TargetExcess: 1_024_950_627, // 2^25 * ln(MaxUint64 / MinTargetPerSecond) + 1 + }, + target: math.MaxUint64, + maxCapacity: math.MaxUint64, + gasPrice: 2 * MinGasPrice, + }, + { + name: "largest_excess", + state: State{ + Gas: gas.State{ + Excess: math.MaxUint64, + }, + TargetExcess: math.MaxUint64, + }, + skipTestDesiredTargetExcess: true, + target: math.MaxUint64, + maxCapacity: math.MaxUint64, + gasPrice: 2 * MinGasPrice, + }, + } + advanceTimeTests = []struct { + name string + initial State + seconds uint64 + expected State + }{ + { + name: "0_seconds", + initial: State{ + Gas: gas.State{ + Capacity: 0, + Excess: 2_000_000, + }, + // Set target to 1.5M per second + TargetExcess: 13_605_152, // 2^25 * ln(1.5) + }, + seconds: 0, + expected: State{ + Gas: gas.State{ + 
Capacity: 0, + Excess: 2_000_000, + }, + TargetExcess: 13_605_152, // unmodified + }, + }, + { + name: "1_seconds", + initial: State{ + Gas: gas.State{ + Capacity: 0, + Excess: 2_000_000, + }, + // Set target to 1.5M per second + TargetExcess: 13_605_152, // 2^25 * ln(1.5) + }, + seconds: 1, + expected: State{ + Gas: gas.State{ + Capacity: 3_000_000, + Excess: 500_000, + }, + TargetExcess: 13_605_152, // unmodified + }, + }, + { + name: "5_seconds", + initial: State{ + Gas: gas.State{ + Capacity: 0, + Excess: 15_000_000, + }, + // Set target to 1.5M per second + TargetExcess: 13_605_152, // 2^25 * ln(1.5) + }, + seconds: 5, + expected: State{ + Gas: gas.State{ + Capacity: 15_000_000, + Excess: 7_500_000, + }, + TargetExcess: 13_605_152, // unmodified + }, + }, + { + name: "0_seconds_over_capacity", + initial: State{ + Gas: gas.State{ + Capacity: 31_000_000, // Could happen if the targetExcess was modified + Excess: 2_000_000, + }, + // Set capacity to 30M + TargetExcess: 13_605_152, // 2^25 * ln(1.5) + }, + seconds: 0, + expected: State{ + Gas: gas.State{ + Capacity: 30_000_000, // capped at 30M + Excess: 2_000_000, // unmodified + }, + TargetExcess: 13_605_152, // unmodified + }, + }, + { + name: "hit_max_capacity_boundary", + initial: State{ + Gas: gas.State{ + Capacity: 0, // Could happen if the targetExcess was modified + Excess: math.MaxUint64, + }, + // Set MaxCapacity to MaxUint64 + TargetExcess: 924_430_532, // 2^25 * ln(MaxUint64 / MinMaxCapacity) + }, + seconds: 1, + expected: State{ + Gas: gas.State{ + Capacity: 1_844_674_435_731_815_790, // greater than MaxUint64/10 + Excess: math.MaxUint64 - 922_337_217_865_907_895, // MaxUint64 - capacity / TargetToMax + }, + TargetExcess: 924_430_532, // unmodified + }, + }, + { + name: "hit_max_rate_boundary", + initial: State{ + Gas: gas.State{ + Capacity: 0, // Could happen if the targetExcess was modified + Excess: math.MaxUint64, + }, + // Set MaxPerSecond to MaxUint64 + TargetExcess: 1_001_692_467, // 2^25 * 
ln(MaxUint64 / MinMaxPerSecond) + }, + seconds: 1, + expected: State{ + Gas: gas.State{ + Capacity: math.MaxUint64, // greater than MaxUint64/10 + Excess: 9_223_371_875_007_030_354, // less than MaxUint64/2 + }, + TargetExcess: 1_001_692_467, // unmodified + }, + }, + } + consumeGasTests = []struct { + name string + initial State + gasUsed uint64 + extraGasUsed *big.Int + expectedErr error + expected State + }{ + { + name: "no_gas_used", + initial: State{ + Gas: gas.State{ + Capacity: 1_000_000, + Excess: 2_000_000, + }, + }, + gasUsed: 0, + extraGasUsed: nil, + expected: State{ + Gas: gas.State{ + Capacity: 1_000_000, + Excess: 2_000_000, + }, + }, + }, + { + name: "some_gas_used", + initial: State{ + Gas: gas.State{ + Capacity: 1_000_000, + Excess: 2_000_000, + }, + }, + gasUsed: 100_000, + extraGasUsed: nil, + expected: State{ + Gas: gas.State{ + Capacity: 900_000, + Excess: 2_100_000, + }, + }, + }, + { + name: "some_extra_gas_used", + initial: State{ + Gas: gas.State{ + Capacity: 1_000_000, + Excess: 2_000_000, + }, + }, + gasUsed: 0, + extraGasUsed: big.NewInt(100_000), + expected: State{ + Gas: gas.State{ + Capacity: 900_000, + Excess: 2_100_000, + }, + }, + }, + { + name: "both_gas_used", + initial: State{ + Gas: gas.State{ + Capacity: 1_000_000, + Excess: 2_000_000, + }, + }, + gasUsed: 10_000, + extraGasUsed: big.NewInt(100_000), + expected: State{ + Gas: gas.State{ + Capacity: 890_000, + Excess: 2_110_000, + }, + }, + }, + { + name: "gas_used_capacity_exceeded", + initial: State{ + Gas: gas.State{ + Capacity: 1_000_000, + Excess: 2_000_000, + }, + }, + gasUsed: 1_000_001, + extraGasUsed: nil, + expectedErr: gas.ErrInsufficientCapacity, + expected: State{ + Gas: gas.State{ + Capacity: 1_000_000, + Excess: 2_000_000, + }, + }, + }, + { + name: "massive_extra_gas_used", + initial: State{ + Gas: gas.State{ + Capacity: 1_000_000, + Excess: 2_000_000, + }, + }, + gasUsed: 0, + extraGasUsed: new(big.Int).Lsh(common.Big1, 64), + expectedErr: 
gas.ErrInsufficientCapacity, + expected: State{ + Gas: gas.State{ + Capacity: 1_000_000, + Excess: 2_000_000, + }, + }, + }, + { + name: "extra_gas_used_capacity_exceeded", + initial: State{ + Gas: gas.State{ + Capacity: 1_000_000, + Excess: 2_000_000, + }, + }, + gasUsed: 0, + extraGasUsed: big.NewInt(1_000_001), + expectedErr: gas.ErrInsufficientCapacity, + expected: State{ + Gas: gas.State{ + Capacity: 1_000_000, + Excess: 2_000_000, + }, + }, + }, + } + updateTargetExcessTests = []struct { + name string + initial State + desiredTargetExcess gas.Gas + expected State + }{ + { + name: "no_change", + initial: State{ + Gas: gas.State{ + Excess: 2_000_000, + }, + TargetExcess: 0, + }, + desiredTargetExcess: 0, + expected: State{ + Gas: gas.State{ + Excess: 2_000_000, + }, + TargetExcess: 0, + }, + }, + { + name: "max_increase", + initial: State{ + Gas: gas.State{ + Excess: 2_000_000, + }, + TargetExcess: 0, + }, + desiredTargetExcess: MaxTargetExcessDiff + 1, + expected: State{ + Gas: gas.State{ + Excess: 2_001_954, // 2M * NewTarget / OldTarget + }, + TargetExcess: MaxTargetExcessDiff, // capped + }, + }, + { + name: "inverse_max_increase", + initial: State{ + Gas: gas.State{ + Excess: 2_001_954, + }, + TargetExcess: MaxTargetExcessDiff, + }, + desiredTargetExcess: 0, + expected: State{ + Gas: gas.State{ + Excess: 2_000_000, // inverse of max_increase + }, + TargetExcess: 0, + }, + }, + { + name: "max_decrease", + initial: State{ + Gas: gas.State{ + Excess: 2_000_000_000, + }, + TargetExcess: 2 * MaxTargetExcessDiff, + }, + desiredTargetExcess: 0, + expected: State{ + Gas: gas.State{ + Excess: 1_998_047_816, // 2M * NewTarget / OldTarget + }, + TargetExcess: MaxTargetExcessDiff, + }, + }, + { + name: "inverse_max_decrease", + initial: State{ + Gas: gas.State{ + Excess: 1_998_047_816, + }, + TargetExcess: MaxTargetExcessDiff, + }, + desiredTargetExcess: 2 * MaxTargetExcessDiff, + expected: State{ + Gas: gas.State{ + Excess: 1_999_999_999, // inverse of max_decrease 
-1 due to rounding error + }, + TargetExcess: 2 * MaxTargetExcessDiff, + }, + }, + } +) + +func TestTarget(t *testing.T) { + for _, test := range readerTests { + t.Run(test.name, func(t *testing.T) { + require.Equal(t, test.target, test.state.Target()) + }) + } +} + +func BenchmarkTarget(b *testing.B) { + for _, test := range readerTests { + b.Run(test.name, func(b *testing.B) { + for range b.N { + test.state.Target() + } + }) + } +} + +func TestMaxCapacity(t *testing.T) { + for _, test := range readerTests { + t.Run(test.name, func(t *testing.T) { + require.Equal(t, test.maxCapacity, test.state.MaxCapacity()) + }) + } +} + +func BenchmarkMaxCapacity(b *testing.B) { + for _, test := range readerTests { + b.Run(test.name, func(b *testing.B) { + for range b.N { + test.state.MaxCapacity() + } + }) + } +} + +func TestGasPrice(t *testing.T) { + for _, test := range readerTests { + t.Run(test.name, func(t *testing.T) { + require.Equal(t, test.gasPrice, test.state.GasPrice()) + }) + } +} + +func BenchmarkGasPrice(b *testing.B) { + for _, test := range readerTests { + b.Run(test.name, func(b *testing.B) { + for range b.N { + test.state.GasPrice() + } + }) + } +} + +func TestAdvanceTime(t *testing.T) { + for _, test := range advanceTimeTests { + t.Run(test.name, func(t *testing.T) { + initial := test.initial + initial.AdvanceTime(test.seconds) + require.Equal(t, test.expected, initial) + }) + } +} + +func BenchmarkAdvanceTime(b *testing.B) { + for _, test := range advanceTimeTests { + b.Run(test.name, func(b *testing.B) { + for range b.N { + initial := test.initial + initial.AdvanceTime(test.seconds) + } + }) + } +} + +func TestConsumeGas(t *testing.T) { + for _, test := range consumeGasTests { + t.Run(test.name, func(t *testing.T) { + initial := test.initial + err := initial.ConsumeGas(test.gasUsed, test.extraGasUsed) + require.ErrorIs(t, err, test.expectedErr) + require.Equal(t, test.expected, initial) + }) + } +} + +func BenchmarkConsumeGas(b *testing.B) { + for _, test 
:= range consumeGasTests { + b.Run(test.name, func(b *testing.B) { + for range b.N { + initial := test.initial + _ = initial.ConsumeGas(test.gasUsed, test.extraGasUsed) + } + }) + } +} + +func TestUpdateTargetExcess(t *testing.T) { + for _, test := range updateTargetExcessTests { + t.Run(test.name, func(t *testing.T) { + initial := test.initial + initial.UpdateTargetExcess(test.desiredTargetExcess) + require.Equal(t, test.expected, initial) + }) + } +} + +func BenchmarkUpdateTargetExcess(b *testing.B) { + for _, test := range updateTargetExcessTests { + b.Run(test.name, func(b *testing.B) { + for range b.N { + initial := test.initial + initial.UpdateTargetExcess(test.desiredTargetExcess) + } + }) + } +} + +func TestDesiredTargetExcess(t *testing.T) { + for _, test := range readerTests { + if test.skipTestDesiredTargetExcess { + continue + } + t.Run(test.name, func(t *testing.T) { + require.Equal(t, test.state.TargetExcess, DesiredTargetExcess(test.target)) + }) + } +} + +func BenchmarkDesiredTargetExcess(b *testing.B) { + for _, test := range readerTests { + if test.skipTestDesiredTargetExcess { + continue + } + b.Run(test.name, func(b *testing.B) { + for range b.N { + DesiredTargetExcess(test.target) + } + }) + } +} diff --git a/plugin/evm/ap4/cost.go b/plugin/evm/ap4/cost.go new file mode 100644 index 0000000000..46296196fc --- /dev/null +++ b/plugin/evm/ap4/cost.go @@ -0,0 +1,88 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// AP4 implements the block gas cost logic activated by the Apricot Phase 4 +// upgrade. +package ap4 + +import ( + "math" + + safemath "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/coreth/params" +) + +const ( + // MinBlockGasCost is the minimum block gas cost. + MinBlockGasCost = 0 + // MaxBlockGasCost is the maximum block gas cost. If the block gas cost + // would exceed this value, the block gas cost is set to this value. 
+ MaxBlockGasCost = 1_000_000 + // TargetBlockRate is the target amount of time in seconds between blocks. + // If blocks are produced faster than this rate, the block gas cost is + // increased. If blocks are produced slower than this rate, the block gas + // cost is decreased. + TargetBlockRate = 2 + + // BlockGasCostStep is the rate at which the block gas cost changes per + // second. + // + // This value was modified by the Apricot Phase 5 upgrade. + BlockGasCostStep = 50_000 + + // MinBaseFee is the minimum base fee that is allowed after Apricot Phase 3 + // upgrade. + // + // This value modifies the previously used `ap3.MinBaseFee`. + // + // This value was modified in Etna. + MinBaseFee = 25 * params.GWei + + // MaxBaseFee is the maximum base fee that is allowed after Apricot Phase 3 + // upgrade. + // + // This value modifies the previously used `ap3.MaxBaseFee`. + MaxBaseFee = 1_000 * params.GWei +) + +// BlockGasCost calculates the required block gas cost. +// +// cost = parentCost + step * ([TargetBlockRate] - timeElapsed) +// +// The returned cost is clamped to [[MinBlockGasCost], [MaxBlockGasCost]]. +func BlockGasCost( + parentCost uint64, + step uint64, + timeElapsed uint64, +) uint64 { + deviation := safemath.AbsDiff(TargetBlockRate, timeElapsed) + change, err := safemath.Mul(step, deviation) + if err != nil { + change = math.MaxUint64 + } + + var ( + op = safemath.Add[uint64] + defaultCost uint64 = MaxBlockGasCost + ) + if timeElapsed > TargetBlockRate { + op = safemath.Sub + defaultCost = MinBlockGasCost + } + + cost, err := op(parentCost, change) + if err != nil { + cost = defaultCost + } + + switch { + case cost < MinBlockGasCost: + // This is technically dead code because [MinBlockGasCost] is 0, but it + // makes the code more clear. 
+ return MinBlockGasCost + case cost > MaxBlockGasCost: + return MaxBlockGasCost + default: + return cost + } +} diff --git a/plugin/evm/ap4/cost_test.go b/plugin/evm/ap4/cost_test.go new file mode 100644 index 0000000000..87e8edece1 --- /dev/null +++ b/plugin/evm/ap4/cost_test.go @@ -0,0 +1,77 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ap4 + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBlockGasCost(t *testing.T) { + tests := []struct { + name string + parentCost uint64 + step uint64 + timeElapsed uint64 + want uint64 + }{ + { + name: "timeElapsed_under_target", + parentCost: 500, + step: 100, + timeElapsed: 0, + want: 500 + 100*TargetBlockRate, + }, + { + name: "timeElapsed_at_target", + parentCost: 3, + step: 100, + timeElapsed: TargetBlockRate, + want: 3, + }, + { + name: "timeElapsed_over_target", + parentCost: 500, + step: 100, + timeElapsed: 2 * TargetBlockRate, + want: 500 - 100*TargetBlockRate, + }, + { + name: "change_overflow", + parentCost: 500, + step: math.MaxUint64, + timeElapsed: 0, + want: MaxBlockGasCost, + }, + { + name: "cost_overflow", + parentCost: math.MaxUint64, + step: 1, + timeElapsed: 0, + want: MaxBlockGasCost, + }, + { + name: "clamp_to_max", + parentCost: MaxBlockGasCost, + step: 100, + timeElapsed: TargetBlockRate - 1, + want: MaxBlockGasCost, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require.Equal( + t, + test.want, + BlockGasCost( + test.parentCost, + test.step, + test.timeElapsed, + ), + ) + }) + } +} diff --git a/plugin/evm/api.go b/plugin/evm/api.go index d9a36a787f..3ed9ed52ee 100644 --- a/plugin/evm/api.go +++ b/plugin/evm/api.go @@ -19,7 +19,6 @@ import ( "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/client" "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/common/hexutil" "github.com/ava-labs/libevm/log" ) @@ -33,10 +32,9 @@ 
const ( ) var ( - errNoAddresses = errors.New("no addresses provided") - errNoSourceChain = errors.New("no source chain provided") - errNilTxID = errors.New("nil transaction ID") - errMissingPrivateKey = errors.New("argument 'privateKey' not given") + errNoAddresses = errors.New("no addresses provided") + errNoSourceChain = errors.New("no source chain provided") + errNilTxID = errors.New("nil transaction ID") initialBaseFee = big.NewInt(params.ApricotPhase3InitialBaseFee) ) @@ -71,17 +69,6 @@ func (api *SnowmanAPI) IssueBlock(ctx context.Context) error { // AvaxAPI offers Avalanche network related API methods type AvaxAPI struct{ vm *VM } -// parseAssetID parses an assetID string into an ID -func (service *AvaxAPI) parseAssetID(assetID string) (ids.ID, error) { - if assetID == "" { - return ids.ID{}, fmt.Errorf("assetID is required") - } else if assetID == "AVAX" { - return service.vm.ctx.AVAXAssetID, nil - } else { - return ids.FromString(assetID) - } -} - type VersionReply struct { Version string `json:"version"` } @@ -92,198 +79,6 @@ func (service *AvaxAPI) Version(r *http.Request, _ *struct{}, reply *VersionRepl return nil } -// ExportKey returns a private key from the provided user -func (service *AvaxAPI) ExportKey(r *http.Request, args *client.ExportKeyArgs, reply *client.ExportKeyReply) error { - log.Info("EVM: ExportKey called") - - address, err := client.ParseEthAddress(args.Address) - if err != nil { - return fmt.Errorf("couldn't parse %s to address: %s", args.Address, err) - } - - service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() - - db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) - if err != nil { - return fmt.Errorf("problem retrieving user '%s': %w", args.Username, err) - } - defer db.Close() - - user := user{db: db} - reply.PrivateKey, err = user.getKey(address) - if err != nil { - return fmt.Errorf("problem retrieving private key: %w", err) - } - reply.PrivateKeyHex = 
hexutil.Encode(reply.PrivateKey.Bytes()) - return nil -} - -// ImportKey adds a private key to the provided user -func (service *AvaxAPI) ImportKey(r *http.Request, args *client.ImportKeyArgs, reply *api.JSONAddress) error { - log.Info("EVM: ImportKey called", "username", args.Username) - - if args.PrivateKey == nil { - return errMissingPrivateKey - } - - reply.Address = args.PrivateKey.EthAddress().Hex() - - service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() - - db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) - if err != nil { - return fmt.Errorf("problem retrieving data: %w", err) - } - defer db.Close() - - user := user{db: db} - if err := user.putAddress(args.PrivateKey); err != nil { - return fmt.Errorf("problem saving key %w", err) - } - return nil -} - -// ImportAVAX is a deprecated name for Import. -func (service *AvaxAPI) ImportAVAX(_ *http.Request, args *client.ImportArgs, response *api.JSONTxID) error { - return service.Import(nil, args, response) -} - -// Import issues a transaction to import AVAX from the X-chain. The AVAX -// must have already been exported from the X-Chain. 
-func (service *AvaxAPI) Import(_ *http.Request, args *client.ImportArgs, response *api.JSONTxID) error { - log.Info("EVM: ImportAVAX called") - - chainID, err := service.vm.ctx.BCLookup.Lookup(args.SourceChain) - if err != nil { - return fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) - } - - service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() - - // Get the user's info - db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) - if err != nil { - return fmt.Errorf("couldn't get user '%s': %w", args.Username, err) - } - defer db.Close() - - user := user{db: db} - privKeys, err := user.getKeys() - if err != nil { // Get keys - return fmt.Errorf("couldn't get keys controlled by the user: %w", err) - } - - var baseFee *big.Int - if args.BaseFee == nil { - // Get the base fee to use - baseFee, err = service.vm.estimateBaseFee(context.Background()) - if err != nil { - return err - } - } else { - baseFee = args.BaseFee.ToInt() - } - - tx, err := service.vm.newImportTx(chainID, args.To, baseFee, privKeys) - if err != nil { - return err - } - - response.TxID = tx.ID() - if err := service.vm.mempool.AddLocalTx(tx); err != nil { - return err - } - service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) - return nil -} - -// ExportAVAX exports AVAX from the C-Chain to the X-Chain -// It must be imported on the X-Chain to complete the transfer -func (service *AvaxAPI) ExportAVAX(_ *http.Request, args *client.ExportAVAXArgs, response *api.JSONTxID) error { - return service.Export(nil, &client.ExportArgs{ - ExportAVAXArgs: *args, - AssetID: service.vm.ctx.AVAXAssetID.String(), - }, response) -} - -// Export exports an asset from the C-Chain to the X-Chain -// It must be imported on the X-Chain to complete the transfer -func (service *AvaxAPI) Export(_ *http.Request, args *client.ExportArgs, response *api.JSONTxID) error { - log.Info("EVM: Export called") - - assetID, err := service.parseAssetID(args.AssetID) - if 
err != nil { - return err - } - - if args.Amount == 0 { - return errors.New("argument 'amount' must be > 0") - } - - // Get the chainID and parse the to address - chainID, to, err := service.vm.ParseAddress(args.To) - if err != nil { - chainID, err = service.vm.ctx.BCLookup.Lookup(args.TargetChain) - if err != nil { - return err - } - to, err = ids.ShortFromString(args.To) - if err != nil { - return err - } - } - - service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() - - // Get this user's data - db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) - if err != nil { - return fmt.Errorf("problem retrieving user '%s': %w", args.Username, err) - } - defer db.Close() - - user := user{db: db} - privKeys, err := user.getKeys() - if err != nil { - return fmt.Errorf("couldn't get addresses controlled by the user: %w", err) - } - - var baseFee *big.Int - if args.BaseFee == nil { - // Get the base fee to use - baseFee, err = service.vm.estimateBaseFee(context.Background()) - if err != nil { - return err - } - } else { - baseFee = args.BaseFee.ToInt() - } - - // Create the transaction - tx, err := service.vm.newExportTx( - assetID, // AssetID - uint64(args.Amount), // Amount - chainID, // ID of the chain to send the funds to - to, // Address - baseFee, - privKeys, // Private keys - ) - if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) - } - - response.TxID = tx.ID() - if err := service.vm.mempool.AddLocalTx(tx); err != nil { - return err - } - service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) - return nil -} - // GetUTXOs gets all utxos for passed in addresses func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply *api.GetUTXOsReply) error { log.Info("EVM: GetUTXOs called", "Addresses", args.Addresses) diff --git a/plugin/evm/atomic/mempool.go b/plugin/evm/atomic/mempool.go index 30b5f511c6..4754b070c9 100644 --- a/plugin/evm/atomic/mempool.go +++ b/plugin/evm/atomic/mempool.go @@ 
-14,9 +14,9 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/libevm/log" + "github.com/ava-labs/libevm/metrics" ) const ( diff --git a/plugin/evm/atomic_backend.go b/plugin/evm/atomic_backend.go index ecbfb6c2b6..35388b0689 100644 --- a/plugin/evm/atomic_backend.go +++ b/plugin/evm/atomic_backend.go @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/plugin/evm/atomic" - syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/log" ) @@ -57,10 +56,6 @@ type AtomicBackend interface { // will not have been executed on shared memory. MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight uint64) error - // Syncer creates and returns a new Syncer object that can be used to sync the - // state of the atomic trie from peers - Syncer(client syncclient.LeafClient, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (Syncer, error) - // SetLastAccepted is used after state-sync to reset the last accepted block. 
SetLastAccepted(lastAcceptedHash common.Hash) @@ -356,12 +351,6 @@ func (a *atomicBackend) MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight return database.PutUInt64(a.metadataDB, appliedSharedMemoryCursorKey, previousLastAcceptedHeight+1) } -// Syncer creates and returns a new Syncer object that can be used to sync the -// state of the atomic trie from peers -func (a *atomicBackend) Syncer(client syncclient.LeafClient, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (Syncer, error) { - return newAtomicSyncer(client, a, targetRoot, targetHeight, requestSize) -} - func (a *atomicBackend) GetVerifiedAtomicState(blockHash common.Hash) (AtomicState, error) { if state, ok := a.verifiedRoots[blockHash]; ok { return state, nil diff --git a/plugin/evm/atomic_syncer.go b/plugin/evm/atomic_syncer.go index 2da188d47a..7c25da5539 100644 --- a/plugin/evm/atomic_syncer.go +++ b/plugin/evm/atomic_syncer.go @@ -50,8 +50,7 @@ func addZeroes(height uint64) []byte { return packer.Bytes } -func newAtomicSyncer(client syncclient.LeafClient, atomicBackend *atomicBackend, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (*atomicSyncer, error) { - atomicTrie := atomicBackend.AtomicTrie() +func newAtomicSyncer(client syncclient.LeafClient, vdb *versiondb.Database, atomicTrie AtomicTrie, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (*atomicSyncer, error) { lastCommittedRoot, lastCommit := atomicTrie.LastCommitted() trie, err := atomicTrie.OpenTrie(lastCommittedRoot) if err != nil { @@ -59,7 +58,7 @@ func newAtomicSyncer(client syncclient.LeafClient, atomicBackend *atomicBackend, } atomicSyncer := &atomicSyncer{ - db: atomicBackend.db, + db: vdb, atomicTrie: atomicTrie, trie: trie, targetRoot: targetRoot, diff --git a/plugin/evm/atomic_syncer_test.go b/plugin/evm/atomic_syncer_test.go index a3562c91ab..3da14280a1 100644 --- a/plugin/evm/atomic_syncer_test.go +++ b/plugin/evm/atomic_syncer_test.go @@ -65,7 +65,7 @@ func 
testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight // next trie. for i, checkpoint := range checkpoints { // Create syncer targeting the current [syncTrie]. - syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, config.DefaultStateSyncRequestSize) + syncer, err := newAtomicSyncer(mockClient, clientDB, atomicBackend.AtomicTrie(), targetRoot, targetHeight, config.DefaultStateSyncRequestSize) if err != nil { t.Fatal(err) } @@ -92,7 +92,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight } // Create syncer targeting the current [targetRoot]. - syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, config.DefaultStateSyncRequestSize) + syncer, err := newAtomicSyncer(mockClient, clientDB, atomicBackend.AtomicTrie(), targetRoot, targetHeight, config.DefaultStateSyncRequestSize) if err != nil { t.Fatal(err) } diff --git a/plugin/evm/atomic_trie.go b/plugin/evm/atomic_trie.go index 7966c79b72..ea961191ee 100644 --- a/plugin/evm/atomic_trie.go +++ b/plugin/evm/atomic_trie.go @@ -14,7 +14,6 @@ import ( "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/plugin/evm/atomic" @@ -33,8 +32,7 @@ const ( atomicKeyLength = wrappers.LongLen + common.HashLength sharedMemoryApplyBatchSize = 10_000 // specifies the number of atomic operations to batch progress updates - atomicTrieTipBufferSize = 1 // No need to support a buffer of previously accepted tries for the atomic trie - atomicTrieMemoryCap = 64 * units.MiB + atomicTrieMemoryCap = 64 * units.MiB ) var ( @@ -126,7 +124,6 @@ type atomicTrie struct { lastAcceptedRoot common.Hash // most recent trie root passed to accept trie or the root of the atomic trie on intialization. 
codec codec.Manager memoryCap common.StorageSize - tipBuffer *core.BoundedBuffer[common.Hash] } // newAtomicTrie returns a new instance of a atomicTrie with a configurable commitHeightInterval, used in testing. @@ -169,7 +166,6 @@ func newAtomicTrie( codec: codec, lastCommittedRoot: root, lastCommittedHeight: height, - tipBuffer: core.NewBoundedBuffer(atomicTrieTipBufferSize, trieDB.Dereference), memoryCap: atomicTrieMemoryCap, // Initialize lastAcceptedRoot to the last committed root. // If there were further blocks processed (ahead of the commit interval), @@ -354,12 +350,6 @@ func (a *atomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) { hasCommitted = true } - // Attempt to dereference roots at least [tipBufferSize] old - // - // Note: It is safe to dereference roots that have been committed to disk - // (they are no-ops). - a.tipBuffer.Insert(root) - // Commit this root if we have reached the [commitInterval]. if height%a.commitInterval == 0 { if err := a.commit(height, root); err != nil { @@ -368,6 +358,13 @@ func (a *atomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) { hasCommitted = true } + // The following dereferences the previously inserted root, if any. + // This one can be dereferenced whether it has been: + // - committed, in which case the dereference is a no-op + // - not committed, in which case the current root we are inserting contains + // references to all the relevant data from the previous root, so the previous + // root can be dereferenced.
+ a.trieDB.Dereference(a.lastAcceptedRoot) a.lastAcceptedRoot = root return hasCommitted, nil } diff --git a/plugin/evm/atomic_trie_test.go b/plugin/evm/atomic_trie_test.go index 071cbfd5c8..7387374b9b 100644 --- a/plugin/evm/atomic_trie_test.go +++ b/plugin/evm/atomic_trie_test.go @@ -5,23 +5,29 @@ package evm import ( "encoding/binary" + "encoding/hex" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/leveldb" "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/rlp" + "github.com/ava-labs/libevm/trie/trienode" ) const testCommitInterval = 100 @@ -591,6 +597,170 @@ func TestApplyToSharedMemory(t *testing.T) { } } +func TestAtomicTrie_AcceptTrie(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + lastAcceptedRoot common.Hash + lastCommittedRoot common.Hash + lastCommittedHeight uint64 + commitInterval uint64 + height uint64 + root common.Hash + wantHasCommitted bool + wantLastCommittedHeight uint64 + wantLastCommittedRoot common.Hash + wantLastAcceptedRoot common.Hash + wantTipBufferRoot common.Hash + wantMetadataDBKVs map[string]string // hex to hex + }{ + "no_committing": { + lastAcceptedRoot: types.EmptyRootHash, + lastCommittedRoot: common.Hash{2}, + lastCommittedHeight: 100, + commitInterval: 10, + height: 105, + root: common.Hash{3}, + wantLastCommittedHeight: 100, + wantLastCommittedRoot: common.Hash{2}, + 
wantLastAcceptedRoot: common.Hash{3}, + wantTipBufferRoot: common.Hash{3}, + wantMetadataDBKVs: map[string]string{ + "0000000000000064": hex.EncodeToString(common.Hash{2}.Bytes()), // height 100 + hex.EncodeToString(lastCommittedKey): "0000000000000064", // height 100 + }, + }, + "no_committing_with_previous_root": { + lastAcceptedRoot: common.Hash{1}, + lastCommittedRoot: common.Hash{2}, + lastCommittedHeight: 100, + commitInterval: 10, + height: 105, + root: common.Hash{3}, + wantLastCommittedHeight: 100, + wantLastCommittedRoot: common.Hash{2}, + wantLastAcceptedRoot: common.Hash{3}, + wantTipBufferRoot: common.Hash{3}, + wantMetadataDBKVs: map[string]string{ + "0000000000000064": hex.EncodeToString(common.Hash{2}.Bytes()), // height 100 + hex.EncodeToString(lastCommittedKey): "0000000000000064", // height 100 + }, + }, + "commit_all_up_to_height_without_height": { + lastAcceptedRoot: types.EmptyRootHash, + lastCommittedRoot: common.Hash{2}, + lastCommittedHeight: 60, + commitInterval: 10, + height: 105, + root: common.Hash{3}, + wantHasCommitted: true, + wantLastCommittedHeight: 100, + wantLastCommittedRoot: types.EmptyRootHash, + wantLastAcceptedRoot: common.Hash{3}, + wantTipBufferRoot: common.Hash{3}, + wantMetadataDBKVs: map[string]string{ + "000000000000003c": hex.EncodeToString(common.Hash{2}.Bytes()), // height 60 + "0000000000000046": hex.EncodeToString(types.EmptyRootHash[:]), // height 70 + "0000000000000050": hex.EncodeToString(types.EmptyRootHash[:]), // height 80 + "000000000000005a": hex.EncodeToString(types.EmptyRootHash[:]), // height 90 + "0000000000000064": hex.EncodeToString(types.EmptyRootHash[:]), // height 100 + hex.EncodeToString(lastCommittedKey): "0000000000000064", // height 100 + }, + }, + "commit_root": { + lastAcceptedRoot: types.EmptyRootHash, + lastCommittedRoot: common.Hash{2}, + lastCommittedHeight: 100, + commitInterval: 10, + height: 110, + root: common.Hash{3}, + wantHasCommitted: true, + wantLastCommittedHeight: 110, + 
wantLastCommittedRoot: common.Hash{3}, + wantLastAcceptedRoot: common.Hash{3}, + wantTipBufferRoot: common.Hash{3}, + wantMetadataDBKVs: map[string]string{ + "0000000000000064": hex.EncodeToString(common.Hash{2}.Bytes()), // height 100 + "000000000000006e": hex.EncodeToString(common.Hash{3}.Bytes()), // height 110 + hex.EncodeToString(lastCommittedKey): "000000000000006e", // height 110 + }, + }, + "commit_root_with_previous_root": { + lastAcceptedRoot: common.Hash{1}, + lastCommittedRoot: common.Hash{2}, + lastCommittedHeight: 100, + commitInterval: 10, + height: 110, + root: common.Hash{3}, + wantHasCommitted: true, + wantLastCommittedHeight: 110, + wantLastCommittedRoot: common.Hash{3}, + wantLastAcceptedRoot: common.Hash{3}, + wantTipBufferRoot: common.Hash{3}, + wantMetadataDBKVs: map[string]string{ + "0000000000000064": hex.EncodeToString(common.Hash{2}.Bytes()), // height 100 + "000000000000006e": hex.EncodeToString(common.Hash{3}.Bytes()), // height 110 + hex.EncodeToString(lastCommittedKey): "000000000000006e", // height 110 + }, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + versionDB := versiondb.New(memdb.New()) + atomicTrieDB := prefixdb.New(atomicTrieDBPrefix, versionDB) + metadataDB := prefixdb.New(atomicTrieMetaDBPrefix, versionDB) + const lastAcceptedHeight = 0 // no effect + atomicTrie, err := newAtomicTrie(atomicTrieDB, metadataDB, atomic.TestTxCodec, + lastAcceptedHeight, testCase.commitInterval) + require.NoError(t, err) + atomicTrie.lastAcceptedRoot = testCase.lastAcceptedRoot + if testCase.lastAcceptedRoot != types.EmptyRootHash { + // Generate trie node test blob + encoder := rlp.NewEncoderBuffer(nil) + offset := encoder.List() + encoder.WriteBytes([]byte{1}) // key + encoder.WriteBytes(make([]byte, 32)) // value + encoder.ListEnd(offset) + testBlob := encoder.ToBytes() + err := encoder.Flush() + require.NoError(t, err) + + nodeSet := trienode.NewNodeSet(testCase.lastAcceptedRoot) + 
nodeSet.AddNode([]byte("any"), trienode.New(testCase.lastAcceptedRoot, testBlob)) // dirty node + err = atomicTrie.InsertTrie(nodeSet, testCase.lastAcceptedRoot) + require.NoError(t, err) + + _, storageSize, _ := atomicTrie.trieDB.Size() + require.NotZero(t, storageSize, "there should be a dirty node taking up storage space") + } + atomicTrie.updateLastCommitted(testCase.lastCommittedRoot, testCase.lastCommittedHeight) + + hasCommitted, err := atomicTrie.AcceptTrie(testCase.height, testCase.root) + require.NoError(t, err) + + assert.Equal(t, testCase.wantHasCommitted, hasCommitted) + assert.Equal(t, testCase.wantLastCommittedHeight, atomicTrie.lastCommittedHeight) + assert.Equal(t, testCase.wantLastCommittedRoot, atomicTrie.lastCommittedRoot) + assert.Equal(t, testCase.wantLastAcceptedRoot, atomicTrie.lastAcceptedRoot) + + // Check dereferencing previous dirty root inserted occurred + _, storageSize, _ := atomicTrie.trieDB.Size() + assert.Zerof(t, storageSize, "storage size should be zero after accepting the trie due to the dirty nodes dereferencing but is %s", storageSize) + + for wantKeyHex, wantValueHex := range testCase.wantMetadataDBKVs { + wantKey, err := hex.DecodeString(wantKeyHex) + require.NoError(t, err) + value, err := metadataDB.Get(wantKey) + assert.NoErrorf(t, err, "getting key %s from metadata database", wantKeyHex) + assert.Equalf(t, wantValueHex, hex.EncodeToString(value), "value for key %s", wantKeyHex) + } + }) + } +} + func BenchmarkAtomicTrieInit(b *testing.B) { db := versiondb.New(memdb.New()) codec := atomic.TestTxCodec diff --git a/plugin/evm/block_verification.go b/plugin/evm/block_verification.go index 9b6eeaae65..5373cbef4f 100644 --- a/plugin/evm/block_verification.go +++ b/plugin/evm/block_verification.go @@ -15,6 +15,7 @@ import ( "github.com/ava-labs/coreth/constants" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/header" "github.com/ava-labs/libevm/trie" ) @@
-121,37 +122,9 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { } } - // Check that the size of the header's Extra data field is correct for [rules]. - headerExtraDataSize := len(ethHeader.Extra) - switch { - case rulesExtra.IsDurango: - if headerExtraDataSize < params.DynamicFeeExtraDataSize { - return fmt.Errorf( - "expected header ExtraData to be len >= %d but got %d", - params.DynamicFeeExtraDataSize, len(ethHeader.Extra), - ) - } - case rulesExtra.IsApricotPhase3: - if headerExtraDataSize != params.DynamicFeeExtraDataSize { - return fmt.Errorf( - "expected header ExtraData to be len %d but got %d", - params.DynamicFeeExtraDataSize, headerExtraDataSize, - ) - } - case rulesExtra.IsApricotPhase1: - if headerExtraDataSize != 0 { - return fmt.Errorf( - "expected header ExtraData to be 0 but got %d", - headerExtraDataSize, - ) - } - default: - if uint64(headerExtraDataSize) > params.MaximumExtraDataSize { - return fmt.Errorf( - "expected header ExtraData to be <= %d but got %d", - params.MaximumExtraDataSize, headerExtraDataSize, - ) - } + // Verify the extra data is well-formed. 
+ if err := header.VerifyExtra(rulesExtra.AvalancheRules, ethHeader.Extra); err != nil { + return err } if b.ethBlock.Version() != 0 { diff --git a/plugin/evm/client/client.go b/plugin/evm/client/client.go index cb7b9194db..b507342405 100644 --- a/plugin/evm/client/client.go +++ b/plugin/evm/client/client.go @@ -8,13 +8,10 @@ import ( "errors" "fmt" - "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/common/hexutil" "golang.org/x/exp/slog" "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" @@ -34,11 +31,6 @@ type Client interface { GetAtomicTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (atomic.Status, error) GetAtomicTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) GetAtomicUTXOs(ctx context.Context, addrs []ids.ShortID, sourceChain string, limit uint32, startAddress ids.ShortID, startUTXOID ids.ID, options ...rpc.Option) ([][]byte, ids.ShortID, ids.ID, error) - ExportKey(ctx context.Context, userPass api.UserPass, addr common.Address, options ...rpc.Option) (*secp256k1.PrivateKey, string, error) - ImportKey(ctx context.Context, userPass api.UserPass, privateKey *secp256k1.PrivateKey, options ...rpc.Option) (common.Address, error) - Import(ctx context.Context, userPass api.UserPass, to common.Address, sourceChain string, options ...rpc.Option) (ids.ID, error) - ExportAVAX(ctx context.Context, userPass api.UserPass, amount uint64, to ids.ShortID, targetChain string, options ...rpc.Option) (ids.ID, error) - Export(ctx context.Context, userPass api.UserPass, amount uint64, to ids.ShortID, targetChain string, assetID string, options ...rpc.Option) (ids.ID, error) StartCPUProfiler(ctx context.Context, options ...rpc.Option) error StopCPUProfiler(ctx context.Context, options 
...rpc.Option) error MemoryProfile(ctx context.Context, options ...rpc.Option) error @@ -143,139 +135,6 @@ func (c *client) GetAtomicUTXOs(ctx context.Context, addrs []ids.ShortID, source return utxos, endAddr, endUTXOID, err } -// ExportKeyArgs are arguments for ExportKey -type ExportKeyArgs struct { - api.UserPass - Address string `json:"address"` -} - -// ExportKeyReply is the response for ExportKey -type ExportKeyReply struct { - // The decrypted PrivateKey for the Address provided in the arguments - PrivateKey *secp256k1.PrivateKey `json:"privateKey"` - PrivateKeyHex string `json:"privateKeyHex"` -} - -// ExportKey returns the private key corresponding to [addr] controlled by [user] -// in both Avalanche standard format and hex format -func (c *client) ExportKey(ctx context.Context, user api.UserPass, addr common.Address, options ...rpc.Option) (*secp256k1.PrivateKey, string, error) { - res := &ExportKeyReply{} - err := c.requester.SendRequest(ctx, "avax.exportKey", &ExportKeyArgs{ - UserPass: user, - Address: addr.Hex(), - }, res, options...) - return res.PrivateKey, res.PrivateKeyHex, err -} - -// ImportKeyArgs are arguments for ImportKey -type ImportKeyArgs struct { - api.UserPass - PrivateKey *secp256k1.PrivateKey `json:"privateKey"` -} - -// ImportKey imports [privateKey] to [user] -func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *secp256k1.PrivateKey, options ...rpc.Option) (common.Address, error) { - res := &api.JSONAddress{} - err := c.requester.SendRequest(ctx, "avax.importKey", &ImportKeyArgs{ - UserPass: user, - PrivateKey: privateKey, - }, res, options...) 
- if err != nil { - return common.Address{}, err - } - return ParseEthAddress(res.Address) -} - -// ImportArgs are arguments for passing into Import requests -type ImportArgs struct { - api.UserPass - - // Fee that should be used when creating the tx - BaseFee *hexutil.Big `json:"baseFee"` - - // Chain the funds are coming from - SourceChain string `json:"sourceChain"` - - // The address that will receive the imported funds - To common.Address `json:"to"` -} - -// Import sends an import transaction to import funds from [sourceChain] and -// returns the ID of the newly created transaction -func (c *client) Import(ctx context.Context, user api.UserPass, to common.Address, sourceChain string, options ...rpc.Option) (ids.ID, error) { - res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "avax.import", &ImportArgs{ - UserPass: user, - To: to, - SourceChain: sourceChain, - }, res, options...) - return res.TxID, err -} - -// ExportAVAX sends AVAX from this chain to the address specified by [to]. -// Returns the ID of the newly created atomic transaction -func (c *client) ExportAVAX( - ctx context.Context, - user api.UserPass, - amount uint64, - to ids.ShortID, - targetChain string, - options ...rpc.Option, -) (ids.ID, error) { - return c.Export(ctx, user, amount, to, targetChain, "AVAX", options...) -} - -// ExportAVAXArgs are the arguments to ExportAVAX -type ExportAVAXArgs struct { - api.UserPass - - // Fee that should be used when creating the tx - BaseFee *hexutil.Big `json:"baseFee"` - - // Amount of asset to send - Amount json.Uint64 `json:"amount"` - - // Chain the funds are going to. Optional. Used if To address does not - // include the chainID. - TargetChain string `json:"targetChain"` - - // ID of the address that will receive the AVAX. This address may include - // the chainID, which is used to determine what the destination chain is. 
- To string `json:"to"` -} - -// ExportArgs are the arguments to Export -type ExportArgs struct { - ExportAVAXArgs - // AssetID of the tokens - AssetID string `json:"assetID"` -} - -// Export sends an asset from this chain to the P/C-Chain. -// After this tx is accepted, the AVAX must be imported to the P/C-chain with an importTx. -// Returns the ID of the newly created atomic transaction -func (c *client) Export( - ctx context.Context, - user api.UserPass, - amount uint64, - to ids.ShortID, - targetChain string, - assetID string, - options ...rpc.Option, -) (ids.ID, error) { - res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "avax.export", &ExportArgs{ - ExportAVAXArgs: ExportAVAXArgs{ - UserPass: user, - Amount: json.Uint64(amount), - TargetChain: targetChain, - To: to.String(), - }, - AssetID: assetID, - }, res, options...) - return res.TxID, err -} - func (c *client) StartCPUProfiler(ctx context.Context, options ...rpc.Option) error { return c.adminRequester.SendRequest(ctx, "admin.startCPUProfiler", struct{}{}, &api.EmptyReply{}, options...) 
} diff --git a/plugin/evm/client/client_interface_test.go b/plugin/evm/client/client_interface_test.go deleted file mode 100644 index 332bb8bcf4..0000000000 --- a/plugin/evm/client/client_interface_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package client - -import ( - "reflect" - "testing" -) - -func TestInterfaceStructOneToOne(t *testing.T) { - // checks struct provides at least the methods signatures in the interface - var _ Client = (*client)(nil) - // checks interface and struct have the same number of methods - clientType := reflect.TypeOf(&client{}) - ClientType := reflect.TypeOf((*Client)(nil)).Elem() - if clientType.NumMethod() != ClientType.NumMethod() { - t.Fatalf("no 1 to 1 compliance between struct methods (%v) and interface methods (%v)", clientType.NumMethod(), ClientType.NumMethod()) - } -} diff --git a/plugin/evm/config/config.go b/plugin/evm/config/config.go index 864c990911..b373b4f292 100644 --- a/plugin/evm/config/config.go +++ b/plugin/evm/config/config.go @@ -59,6 +59,9 @@ const ( // - state sync time: ~6 hrs. defaultStateSyncMinBlocks = 300_000 DefaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request + + estimatedBlockAcceptPeriod = 2 * time.Second + defaultHistoricalProofQueryWindow = uint64(24 * time.Hour / estimatedBlockAcceptPeriod) ) var ( @@ -124,6 +127,10 @@ type Config struct { PopulateMissingTriesParallelism int `json:"populate-missing-tries-parallelism"` // Number of concurrent readers to use when re-populating missing tries on startup. PruneWarpDB bool `json:"prune-warp-db-enabled"` // Determines if the warpDB should be cleared on startup + // HistoricalProofQueryWindow is, when running in archive mode only, the number of blocks before the + // last accepted block to be accepted for proof state queries. 
+ HistoricalProofQueryWindow uint64 `json:"historical-proof-query-window,omitempty"` + // Metric Settings MetricsExpensiveEnabled bool `json:"metrics-expensive-enabled"` // Debug-level metrics that might impact runtime performance @@ -288,6 +295,7 @@ func (c *Config) SetDefaults(txPoolConfig TxPoolConfig) { c.StateSyncRequestSize = DefaultStateSyncRequestSize c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes c.AcceptedCacheSize = defaultAcceptedCacheSize + c.HistoricalProofQueryWindow = defaultHistoricalProofQueryWindow } func (d *Duration) UnmarshalJSON(data []byte) (err error) { diff --git a/plugin/evm/gossip.go b/plugin/evm/gossip.go index 1197562865..2f0bfb1821 100644 --- a/plugin/evm/gossip.go +++ b/plugin/evm/gossip.go @@ -49,7 +49,7 @@ func newTxGossipHandler[T gossip.Gossipable]( maxMessageSize int, throttlingPeriod time.Duration, throttlingLimit int, - validators *p2p.Validators, + validators p2p.ValidatorSet, ) txGossipHandler { // push gossip messages can be handled from any peer handler := gossip.NewHandler( diff --git a/plugin/evm/header/block_gas_cost.go b/plugin/evm/header/block_gas_cost.go new file mode 100644 index 0000000000..ad13e7e8df --- /dev/null +++ b/plugin/evm/header/block_gas_cost.go @@ -0,0 +1,68 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package header + +import ( + "math/big" + + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params/extras" + "github.com/ava-labs/coreth/plugin/evm/ap4" +) + +// ApricotPhase5BlockGasCostStep is the rate at which the block gas cost changes +// per second as of the Apricot Phase 5 upgrade. +// +// This value modifies the previously used [ap4.BlockGasCostStep]. +const ApricotPhase5BlockGasCostStep = 200_000 + +// BlockGasCost calculates the required block gas cost based on the parent +// header and the timestamp of the new block. 
+func BlockGasCost( + config *extras.ChainConfig, + parent *types.Header, + timestamp uint64, +) uint64 { + step := uint64(ap4.BlockGasCostStep) + if config.IsApricotPhase5(timestamp) { + step = ApricotPhase5BlockGasCostStep + } + // Treat an invalid parent/current time combination as 0 elapsed time. + // + // TODO: Does it even make sense to handle this? The timestamp should be + // verified to ensure this never happens. + var timeElapsed uint64 + if parent.Time <= timestamp { + timeElapsed = timestamp - parent.Time + } + return BlockGasCostWithStep( + parent.BlockGasCost, + step, + timeElapsed, + ) +} + +// BlockGasCostWithStep calculates the required block gas cost based on the +// parent cost and the time difference between the parent block and new block. +// +// This is a helper function that allows the caller to manually specify the step +// value to use. +func BlockGasCostWithStep( + parentCost *big.Int, + step uint64, + timeElapsed uint64, +) uint64 { + // Handle AP3/AP4 boundary by returning the minimum value as the boundary. + if parentCost == nil { + return ap4.MinBlockGasCost + } + + // [ap4.MaxBlockGasCost] is <= MaxUint64, so we know that parentCost is + // always going to be a valid uint64. + return ap4.BlockGasCost( + parentCost.Uint64(), + step, + timeElapsed, + ) +} diff --git a/plugin/evm/header/block_gas_cost_test.go b/plugin/evm/header/block_gas_cost_test.go new file mode 100644 index 0000000000..4c39ca0019 --- /dev/null +++ b/plugin/evm/header/block_gas_cost_test.go @@ -0,0 +1,161 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package header + +import ( + "math/big" + "testing" + + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params/extras" + "github.com/ava-labs/coreth/plugin/evm/ap4" + "github.com/ava-labs/coreth/utils" + "github.com/stretchr/testify/assert" +) + +func TestBlockGasCost(t *testing.T) { + tests := []struct { + name string + apricotPhase5BlockTimestamp *uint64 + parentTime uint64 + parentCost *big.Int + timestamp uint64 + expected uint64 + }{ + { + name: "normal_ap4", + parentTime: 10, + parentCost: big.NewInt(ap4.MaxBlockGasCost), + timestamp: 10 + ap4.TargetBlockRate + 1, + expected: ap4.MaxBlockGasCost - ap4.BlockGasCostStep, + }, + { + name: "normal_ap5", + apricotPhase5BlockTimestamp: utils.NewUint64(0), + parentTime: 10, + parentCost: big.NewInt(ap4.MaxBlockGasCost), + timestamp: 10 + ap4.TargetBlockRate + 1, + expected: ap4.MaxBlockGasCost - ApricotPhase5BlockGasCostStep, + }, + { + name: "negative_time_elapsed", + parentTime: 10, + parentCost: big.NewInt(ap4.MinBlockGasCost), + timestamp: 9, + expected: ap4.MinBlockGasCost + ap4.BlockGasCostStep*ap4.TargetBlockRate, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + config := &extras.ChainConfig{ + NetworkUpgrades: extras.NetworkUpgrades{ + ApricotPhase5BlockTimestamp: test.apricotPhase5BlockTimestamp, + }, + } + parent := &types.Header{ + Time: test.parentTime, + BlockGasCost: test.parentCost, + } + + assert.Equal(t, test.expected, BlockGasCost( + config, + parent, + test.timestamp, + )) + }) + } +} + +func TestBlockGasCostWithStep(t *testing.T) { + tests := []struct { + name string + parentCost *big.Int + timeElapsed uint64 + expected uint64 + }{ + { + name: "nil_parentCost", + parentCost: nil, + timeElapsed: 0, + expected: ap4.MinBlockGasCost, + }, + { + name: "timeElapsed_0", + parentCost: big.NewInt(0), + timeElapsed: 0, + expected: ap4.TargetBlockRate * ap4.BlockGasCostStep, + }, + { + name: "timeElapsed_1", + parentCost: big.NewInt(0), + 
timeElapsed: 1, + expected: (ap4.TargetBlockRate - 1) * ap4.BlockGasCostStep, + }, + { + name: "timeElapsed_0_with_parentCost", + parentCost: big.NewInt(50_000), + timeElapsed: 0, + expected: 50_000 + ap4.TargetBlockRate*ap4.BlockGasCostStep, + }, + { + name: "timeElapsed_0_with_max_parentCost", + parentCost: big.NewInt(ap4.MaxBlockGasCost), + timeElapsed: 0, + expected: ap4.MaxBlockGasCost, + }, + { + name: "timeElapsed_1_with_max_parentCost", + parentCost: big.NewInt(ap4.MaxBlockGasCost), + timeElapsed: 1, + expected: ap4.MaxBlockGasCost, + }, + { + name: "timeElapsed_at_target", + parentCost: big.NewInt(900_000), + timeElapsed: ap4.TargetBlockRate, + expected: 900_000, + }, + { + name: "timeElapsed_over_target_3", + parentCost: big.NewInt(ap4.MaxBlockGasCost), + timeElapsed: 3, + expected: ap4.MaxBlockGasCost - (3-ap4.TargetBlockRate)*ap4.BlockGasCostStep, + }, + { + name: "timeElapsed_over_target_10", + parentCost: big.NewInt(ap4.MaxBlockGasCost), + timeElapsed: 10, + expected: ap4.MaxBlockGasCost - (10-ap4.TargetBlockRate)*ap4.BlockGasCostStep, + }, + { + name: "timeElapsed_over_target_20", + parentCost: big.NewInt(ap4.MaxBlockGasCost), + timeElapsed: 20, + expected: ap4.MaxBlockGasCost - (20-ap4.TargetBlockRate)*ap4.BlockGasCostStep, + }, + { + name: "timeElapsed_over_target_22", + parentCost: big.NewInt(ap4.MaxBlockGasCost), + timeElapsed: 22, + expected: ap4.MaxBlockGasCost - (22-ap4.TargetBlockRate)*ap4.BlockGasCostStep, + }, + { + name: "timeElapsed_large_clamped_to_0", + parentCost: big.NewInt(ap4.MaxBlockGasCost), + timeElapsed: 23, + expected: 0, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + assert.Equal(t, test.expected, BlockGasCostWithStep( + test.parentCost, + ap4.BlockGasCostStep, + test.timeElapsed, + )) + }) + } +} diff --git a/plugin/evm/header/extra.go b/plugin/evm/header/extra.go new file mode 100644 index 0000000000..ee86919457 --- /dev/null +++ b/plugin/evm/header/extra.go @@ -0,0 +1,58 @@ +// (c) 2025, 
Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package header + +import ( + "errors" + "fmt" + + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/params/extras" +) + +var errInvalidExtraLength = errors.New("invalid header.Extra length") + +// VerifyExtra verifies that the header's Extra field is correctly formatted for +// [rules]. +func VerifyExtra(rules extras.AvalancheRules, extra []byte) error { + extraLen := len(extra) + switch { + case rules.IsDurango: + if extraLen < params.DynamicFeeExtraDataSize { + return fmt.Errorf( + "%w: expected >= %d but got %d", + errInvalidExtraLength, + params.DynamicFeeExtraDataSize, + extraLen, + ) + } + case rules.IsApricotPhase3: + if extraLen != params.DynamicFeeExtraDataSize { + return fmt.Errorf( + "%w: expected %d but got %d", + errInvalidExtraLength, + params.DynamicFeeExtraDataSize, + extraLen, + ) + } + case rules.IsApricotPhase1: + if extraLen != 0 { + return fmt.Errorf( + "%w: expected 0 but got %d", + errInvalidExtraLength, + extraLen, + ) + } + default: + if uint64(extraLen) > params.MaximumExtraDataSize { + return fmt.Errorf( + "%w: expected <= %d but got %d", + errInvalidExtraLength, + params.MaximumExtraDataSize, + extraLen, + ) + } + } + return nil +} diff --git a/plugin/evm/header/extra_test.go b/plugin/evm/header/extra_test.go new file mode 100644 index 0000000000..a2c7224f9b --- /dev/null +++ b/plugin/evm/header/extra_test.go @@ -0,0 +1,104 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package header + +import ( + "testing" + + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/params/extras" + "github.com/stretchr/testify/require" +) + +func TestVerifyExtra(t *testing.T) { + tests := []struct { + name string + rules extras.AvalancheRules + extra []byte + expected error + }{ + { + name: "initial_valid", + rules: extras.AvalancheRules{}, + extra: make([]byte, params.MaximumExtraDataSize), + expected: nil, + }, + { + name: "initial_invalid", + rules: extras.AvalancheRules{}, + extra: make([]byte, params.MaximumExtraDataSize+1), + expected: errInvalidExtraLength, + }, + { + name: "ap1_valid", + rules: extras.AvalancheRules{ + IsApricotPhase1: true, + }, + extra: nil, + expected: nil, + }, + { + name: "ap1_invalid", + rules: extras.AvalancheRules{ + IsApricotPhase1: true, + }, + extra: make([]byte, 1), + expected: errInvalidExtraLength, + }, + { + name: "ap3_valid", + rules: extras.AvalancheRules{ + IsApricotPhase3: true, + }, + extra: make([]byte, params.DynamicFeeExtraDataSize), + expected: nil, + }, + { + name: "ap3_invalid_less", + rules: extras.AvalancheRules{ + IsApricotPhase3: true, + }, + extra: make([]byte, params.DynamicFeeExtraDataSize-1), + expected: errInvalidExtraLength, + }, + { + name: "ap3_invalid_more", + rules: extras.AvalancheRules{ + IsApricotPhase3: true, + }, + extra: make([]byte, params.DynamicFeeExtraDataSize+1), + expected: errInvalidExtraLength, + }, + { + name: "durango_valid_min", + rules: extras.AvalancheRules{ + IsDurango: true, + }, + extra: make([]byte, params.DynamicFeeExtraDataSize), + expected: nil, + }, + { + name: "durango_valid_extra", + rules: extras.AvalancheRules{ + IsDurango: true, + }, + extra: make([]byte, params.DynamicFeeExtraDataSize+1), + expected: nil, + }, + { + name: "durango_invalid", + rules: extras.AvalancheRules{ + IsDurango: true, + }, + extra: make([]byte, params.DynamicFeeExtraDataSize-1), + expected: errInvalidExtraLength, + }, + } + for _, test := range tests { + 
t.Run(test.name, func(t *testing.T) { + err := VerifyExtra(test.rules, test.extra) + require.ErrorIs(t, err, test.expected) + }) + } +} diff --git a/plugin/evm/network_handler.go b/plugin/evm/network_handler.go index 15dca8ce38..b207bc2ca1 100644 --- a/plugin/evm/network_handler.go +++ b/plugin/evm/network_handler.go @@ -8,13 +8,13 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/plugin/evm/message" syncHandlers "github.com/ava-labs/coreth/sync/handlers" syncStats "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ava-labs/coreth/warp" warpHandlers "github.com/ava-labs/coreth/warp/handlers" "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/metrics" "github.com/ava-labs/libevm/triedb" ) diff --git a/plugin/evm/syncervm_client.go b/plugin/evm/syncervm_client.go index 2807b6b0da..e6efd34a49 100644 --- a/plugin/evm/syncervm_client.go +++ b/plugin/evm/syncervm_client.go @@ -276,7 +276,14 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common func (client *stateSyncerClient) syncAtomicTrie(ctx context.Context) error { log.Info("atomic tx: sync starting", "root", client.syncSummary.AtomicRoot) - atomicSyncer, err := client.atomicBackend.Syncer(client.client, client.syncSummary.AtomicRoot, client.syncSummary.BlockNumber, client.stateSyncRequestSize) + atomicSyncer, err := newAtomicSyncer( + client.client, + client.db, + client.atomicBackend.AtomicTrie(), + client.syncSummary.AtomicRoot, + client.syncSummary.BlockNumber, + client.stateSyncRequestSize, + ) if err != nil { return err } diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 65aff9b080..931a123f03 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/api/metrics" 
avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" avalanchedatabase "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" @@ -32,7 +33,6 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/database" @@ -87,11 +87,6 @@ func TestStateSyncFromScratchExceedParent(t *testing.T) { func TestStateSyncToggleEnabledToDisabled(t *testing.T) { rand.Seed(1) - // Hack: registering metrics uses global variables, so we need to disable metrics here so that we can initialize the VM twice. - metrics.Enabled = false - defer func() { - metrics.Enabled = true - }() var lock sync.Mutex reqCount := 0 @@ -141,7 +136,8 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { go vmSetup.serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) return nil } - // Disable metrics to prevent duplicate registerer + // Reset metrics to allow re-initialization + vmSetup.syncerVM.ctx.Metrics = metrics.NewPrefixGatherer() stateSyncDisabledConfigJSON := `{"state-sync-enabled":false}` if err := syncDisabledVM.Initialize( context.Background(), @@ -206,6 +202,8 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { `{"state-sync-enabled":true, "state-sync-min-blocks":%d}`, test.stateSyncMinBlocks, ) + // Reset metrics to allow re-initialization + vmSetup.syncerVM.ctx.Metrics = metrics.NewPrefixGatherer() if err := syncReEnabledVM.Initialize( context.Background(), vmSetup.syncerVM.ctx, diff --git a/plugin/evm/testutils/metrics.go b/plugin/evm/testutils/metrics.go new file mode 100644 index 0000000000..afb2ea7f43 --- /dev/null +++ b/plugin/evm/testutils/metrics.go @@ -0,0 +1,20 @@ +package testutils + +import ( + "testing" + + "github.com/ava-labs/libevm/metrics" +) + +// WithMetrics 
enables go-ethereum metrics globally for the test. +// If the [metrics.Enabled] is already true, nothing is done. +// Otherwise, it is set to true and is reverted to false when the test finishes. +func WithMetrics(t *testing.T) { + if metrics.Enabled { + return + } + metrics.Enabled = true + t.Cleanup(func() { + metrics.Enabled = false + }) +} diff --git a/plugin/evm/user.go b/plugin/evm/user.go deleted file mode 100644 index 4f004db867..0000000000 --- a/plugin/evm/user.go +++ /dev/null @@ -1,137 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "errors" - - "github.com/ava-labs/avalanchego/database/encdb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/libevm/common" -) - -// Key in the database whose corresponding value is the list of -// addresses this user controls -var addressesKey = ids.Empty[:] - -var ( - errDBNil = errors.New("db uninitialized") - errKeyNil = errors.New("key uninitialized") -) - -type user struct { - // This user's database, acquired from the keystore - db *encdb.Database -} - -// Get the addresses controlled by this user -func (u *user) getAddresses() ([]common.Address, error) { - if u.db == nil { - return nil, errDBNil - } - - // If user has no addresses, return empty list - hasAddresses, err := u.db.Has(addressesKey) - if err != nil { - return nil, err - } - if !hasAddresses { - return nil, nil - } - - // User has addresses. Get them. 
- bytes, err := u.db.Get(addressesKey) - if err != nil { - return nil, err - } - addresses := []common.Address{} - if _, err := atomic.Codec.Unmarshal(bytes, &addresses); err != nil { - return nil, err - } - return addresses, nil -} - -// controlsAddress returns true iff this user controls the given address -func (u *user) controlsAddress(address common.Address) (bool, error) { - if u.db == nil { - return false, errDBNil - //} else if address.IsZero() { - // return false, errEmptyAddress - } - return u.db.Has(address.Bytes()) -} - -// putAddress persists that this user controls address controlled by [privKey] -func (u *user) putAddress(privKey *secp256k1.PrivateKey) error { - if privKey == nil { - return errKeyNil - } - - address := privKey.EthAddress() // address the privKey controls - controlsAddress, err := u.controlsAddress(address) - if err != nil { - return err - } - if controlsAddress { // user already controls this address. Do nothing. - return nil - } - - if err := u.db.Put(address.Bytes(), privKey.Bytes()); err != nil { // Address --> private key - return err - } - - addresses := make([]common.Address, 0) // Add address to list of addresses user controls - userHasAddresses, err := u.db.Has(addressesKey) - if err != nil { - return err - } - if userHasAddresses { // Get addresses this user already controls, if they exist - if addresses, err = u.getAddresses(); err != nil { - return err - } - } - addresses = append(addresses, address) - bytes, err := atomic.Codec.Marshal(atomic.CodecVersion, addresses) - if err != nil { - return err - } - if err := u.db.Put(addressesKey, bytes); err != nil { - return err - } - return nil -} - -// Key returns the private key that controls the given address -func (u *user) getKey(address common.Address) (*secp256k1.PrivateKey, error) { - if u.db == nil { - return nil, errDBNil - //} else if address.IsZero() { - // return nil, errEmptyAddress - } - - bytes, err := u.db.Get(address.Bytes()) - if err != nil { - return nil, err - 
} - return secp256k1.ToPrivateKey(bytes) -} - -// Return all private keys controlled by this user -func (u *user) getKeys() ([]*secp256k1.PrivateKey, error) { - addrs, err := u.getAddresses() - if err != nil { - return nil, err - } - keys := make([]*secp256k1.PrivateKey, len(addrs)) - for i, addr := range addrs { - key, err := u.getKey(addr) - if err != nil { - return nil, err - } - keys[i] = key - } - return keys, nil -} diff --git a/plugin/evm/validators.go b/plugin/evm/validators.go new file mode 100644 index 0000000000..44c5560267 --- /dev/null +++ b/plugin/evm/validators.go @@ -0,0 +1,19 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +type validatorSet struct { + set set.Set[ids.NodeID] +} + +func (v *validatorSet) Has(ctx context.Context, nodeID ids.NodeID) bool { + return v.set.Contains(nodeID) +} diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 99374fc3ab..2ec5e97ca9 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -34,8 +34,7 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" "github.com/ava-labs/coreth/eth/ethconfig" - "github.com/ava-labs/coreth/metrics" - corethPrometheus "github.com/ava-labs/coreth/metrics/prometheus" + corethprometheus "github.com/ava-labs/coreth/metrics/prometheus" "github.com/ava-labs/coreth/miner" "github.com/ava-labs/coreth/node" "github.com/ava-labs/coreth/params" @@ -46,6 +45,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/triedb/hashdb" "github.com/ava-labs/coreth/utils" + "github.com/ava-labs/libevm/metrics" "github.com/ava-labs/libevm/triedb" warpcontract "github.com/ava-labs/coreth/precompile/contracts/warp" @@ -411,25 +411,18 @@ func (vm *VM) Initialize( } var extDataHashes map[common.Hash]common.Hash - var chainID *big.Int // Set the chain config for 
mainnet/fuji chain IDs switch chainCtx.NetworkID { case avalanchegoConstants.MainnetID: - chainID = params.AvalancheMainnetChainID extDataHashes = mainnetExtDataHashes case avalanchegoConstants.FujiID: - chainID = params.AvalancheFujiChainID extDataHashes = fujiExtDataHashes - case avalanchegoConstants.LocalID: - chainID = params.AvalancheLocalChainID - default: - chainID = g.Config.ChainID } // if the chainCtx.NetworkUpgrades is not empty, set the chain config // normally it should not be empty, but some tests may not set it if chainCtx.NetworkUpgrades != (upgrade.Config{}) { - g.Config = params.GetChainConfig(chainCtx.NetworkUpgrades, new(big.Int).Set(chainID)) + params.GetExtra(g.Config).NetworkUpgrades = extras.GetNetworkUpgrades(chainCtx.NetworkUpgrades) } // If the Durango is activated, activate the Warp Precompile at the same time @@ -497,6 +490,7 @@ func (vm *VM) Initialize( vm.ethConfig.SnapshotDelayInit = vm.stateSyncEnabled(lastAcceptedHeight) vm.ethConfig.SnapshotWait = vm.config.SnapshotWait vm.ethConfig.SnapshotVerify = vm.config.SnapshotVerify + vm.ethConfig.HistoricalProofQueryWindow = vm.config.HistoricalProofQueryWindow vm.ethConfig.OfflinePruning = vm.config.OfflinePruning vm.ethConfig.OfflinePruningBloomFilterSize = vm.config.OfflinePruningBloomFilterSize vm.ethConfig.OfflinePruningDataDirectory = vm.config.OfflinePruningDataDirectory @@ -632,13 +626,9 @@ func (vm *VM) Initialize( } func (vm *VM) initializeMetrics() error { + metrics.Enabled = true vm.sdkMetrics = prometheus.NewRegistry() - // If metrics are enabled, register the default metrics registry - if !metrics.Enabled { - return nil - } - - gatherer := corethPrometheus.Gatherer(metrics.DefaultRegistry) + gatherer := corethprometheus.NewGatherer(metrics.DefaultRegistry) if err := vm.ctx.Metrics.Register(ethMetricsPrefix, gatherer); err != nil { return err } @@ -1124,6 +1114,11 @@ func (vm *VM) initBlockBuilding() error { vm.builder = vm.NewBlockBuilder(vm.toEngine) 
vm.builder.awaitSubmittedTxs() + var p2pValidators p2p.ValidatorSet = &validatorSet{} + if vm.config.PullGossipFrequency.Duration > 0 { + p2pValidators = vm.p2pValidators + } + if vm.ethTxGossipHandler == nil { vm.ethTxGossipHandler = newTxGossipHandler[*GossipEthTx]( vm.ctx.Log, @@ -1133,7 +1128,7 @@ func (vm *VM) initBlockBuilding() error { txGossipTargetMessageSize, txGossipThrottlingPeriod, txGossipThrottlingLimit, - vm.p2pValidators, + p2pValidators, ) } @@ -1150,7 +1145,7 @@ func (vm *VM) initBlockBuilding() error { txGossipTargetMessageSize, txGossipThrottlingPeriod, txGossipThrottlingLimit, - vm.p2pValidators, + p2pValidators, ) } @@ -1175,15 +1170,20 @@ func (vm *VM) initBlockBuilding() error { } } - vm.shutdownWg.Add(2) - go func() { - gossip.Every(ctx, vm.ctx.Log, ethTxPushGossiper, vm.config.PushGossipFrequency.Duration) - vm.shutdownWg.Done() - }() - go func() { - gossip.Every(ctx, vm.ctx.Log, vm.ethTxPullGossiper, vm.config.PullGossipFrequency.Duration) - vm.shutdownWg.Done() - }() + if vm.config.PushGossipFrequency.Duration > 0 { + vm.shutdownWg.Add(1) + go func() { + gossip.Every(ctx, vm.ctx.Log, ethTxPushGossiper, vm.config.PushGossipFrequency.Duration) + vm.shutdownWg.Done() + }() + } + if vm.config.PullGossipFrequency.Duration > 0 { + vm.shutdownWg.Add(1) + go func() { + gossip.Every(ctx, vm.ctx.Log, vm.ethTxPullGossiper, vm.config.PullGossipFrequency.Duration) + vm.shutdownWg.Done() + }() + } if vm.atomicTxPullGossiper == nil { atomicTxPullGossiper := gossip.NewPullGossiper[*atomic.GossipAtomicTx]( @@ -1202,15 +1202,20 @@ func (vm *VM) initBlockBuilding() error { } } - vm.shutdownWg.Add(2) - go func() { - gossip.Every(ctx, vm.ctx.Log, vm.atomicTxPushGossiper, vm.config.PushGossipFrequency.Duration) - vm.shutdownWg.Done() - }() - go func() { - gossip.Every(ctx, vm.ctx.Log, vm.atomicTxPullGossiper, vm.config.PullGossipFrequency.Duration) - vm.shutdownWg.Done() - }() + if vm.config.PushGossipFrequency.Duration > 0 { + vm.shutdownWg.Add(1) + go 
func() { + gossip.Every(ctx, vm.ctx.Log, vm.atomicTxPushGossiper, vm.config.PushGossipFrequency.Duration) + vm.shutdownWg.Done() + }() + } + if vm.config.PullGossipFrequency.Duration > 0 { + vm.shutdownWg.Add(1) + go func() { + gossip.Every(ctx, vm.ctx.Log, vm.atomicTxPullGossiper, vm.config.PullGossipFrequency.Duration) + vm.shutdownWg.Done() + }() + } return nil } @@ -1580,7 +1585,7 @@ func (vm *VM) verifyTxAtTip(tx *atomic.Tx) error { var nextBaseFee *big.Int timestamp := uint64(vm.clock.Time().Unix()) if vm.chainConfigExtra().IsApricotPhase3(timestamp) { - _, nextBaseFee, err = dummy.EstimateNextBaseFee(vm.chainConfig, parentHeader, timestamp) + nextBaseFee, err = dummy.EstimateNextBaseFee(vm.chainConfig, parentHeader, timestamp) if err != nil { // Return extremely detailed error since CalcBaseFee should never encounter an issue here return fmt.Errorf("failed to calculate base fee with parent timestamp (%d), parent ExtraData: (0x%x), and current timestamp (%d): %w", parentHeader.Time, parentHeader.Extra, timestamp, err) @@ -1740,23 +1745,6 @@ func (vm *VM) startContinuousProfiler() { <-vm.shutdownChan } -func (vm *VM) estimateBaseFee(ctx context.Context) (*big.Int, error) { - // Get the base fee to use - baseFee, err := vm.eth.APIBackend.EstimateBaseFee(ctx) - if err != nil { - return nil, err - } - if baseFee == nil { - baseFee = initialBaseFee - } else { - // give some breathing room - baseFee.Mul(baseFee, big.NewInt(11)) - baseFee.Div(baseFee, big.NewInt(10)) - } - - return baseFee, nil -} - // readLastAccepted reads the last accepted hash from [acceptedBlockDB] and returns the // last accepted block hash and height by reading directly from [vm.chaindb] instead of relying // on [chain]. 
diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 5f56e69c87..4451f966eb 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -23,7 +23,6 @@ import ( "github.com/ava-labs/coreth/constants" "github.com/ava-labs/coreth/eth/filters" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/utils" @@ -32,7 +31,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/api/keystore" + "github.com/ava-labs/avalanchego/api/metrics" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" @@ -42,11 +41,10 @@ import ( "github.com/ava-labs/avalanchego/snow/validators/validatorstest" "github.com/ava-labs/avalanchego/upgrade" "github.com/ava-labs/avalanchego/utils/cb58" - "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" @@ -77,8 +75,6 @@ var ( testEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] testShortIDAddrs []ids.ShortID testAvaxAssetID = ids.ID{1, 2, 3} - username = "Johns" - password = "CjasdjhiPeirbSenfeI13" // #nosec G101 genesisJSON = func(cfg *params.ChainConfig) string { g := new(core.Genesis) @@ -129,7 +125,8 @@ var ( genesisJSONCortina = genesisJSON(params.TestCortinaChainConfig) genesisJSONDurango = genesisJSON(params.TestDurangoChainConfig) genesisJSONEtna = genesisJSON(params.TestEtnaChainConfig) - genesisJSONLatest = 
genesisJSONEtna + genesisJSONFUpgrade = genesisJSON(params.TestFUpgradeChainConfig) + genesisJSONLatest = genesisJSONFUpgrade genesisJSONCancun = genesisJSON(activateCancun(params.TestChainConfig)) @@ -222,7 +219,7 @@ func NewContext() *snow.Context { return subnetID, nil }, } - blsSecretKey, err := bls.NewSigner() + blsSecretKey, err := localsigner.New() if err != nil { panic(err) } @@ -258,12 +255,6 @@ func setupGenesis( // The caller of this function is responsible for unlocking. ctx.Lock.Lock() - userKeystore := keystore.New(logging.NoLog{}, memdb.New()) - if err := userKeystore.CreateUser(username, password); err != nil { - t.Fatal(err) - } - ctx.Keystore = userKeystore.NewBlockchainKeyStore(ctx.ChainID) - issuer := make(chan commonEng.Message, 1) prefixedDB := prefixdb.New([]byte{1}, baseDB) return ctx, prefixedDB, genesisBytes, issuer, atomicMemory @@ -383,6 +374,12 @@ func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON stri return issuer, vm, db, sharedMemory, sender } +// resetMetrics resets the vm avalanchego metrics, and allows +// for the VM to be re-initialized in tests. +func resetMetrics(vm *VM) { + vm.ctx.Metrics = metrics.NewPrefixGatherer() +} + func TestVMConfig(t *testing.T) { txFeeCap := float64(11) enabledEthAPIs := []string{"debug"} @@ -3777,10 +3774,6 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { } func TestSkipChainConfigCheckCompatible(t *testing.T) { - // Hack: registering metrics uses global variables, so we need to disable metrics here so that we can initialize the VM twice. 
- metrics.Enabled = false - defer func() { metrics.Enabled = true }() - importAmount := uint64(50000000) issuer, vm, dbManager, _, appSender := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase1, "", "", map[ids.ShortID]uint64{ testShortIDAddrs[0]: importAmount, @@ -3809,10 +3802,14 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { genesisWithUpgradeBytes, err := json.Marshal(genesisWithUpgrade) require.NoError(t, err) + resetMetrics(vm) + // this will not be allowed err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, appSender) require.ErrorContains(t, err, "mismatching ApricotPhase2 fork block timestamp in database") + resetMetrics(vm) + // try again with skip-upgrade-check config := []byte(`{"skip-upgrade-check": true}`) err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, config, issuer, []*commonEng.Fx{}, appSender) diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index d746931e06..de9c7281fb 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -22,6 +22,7 @@ import ( avagoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/chain" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" @@ -266,13 +267,13 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned defer logsSub.Unsubscribe() nodeID1 := ids.GenerateTestNodeID() - blsSecretKey1, err := bls.NewSigner() + blsSecretKey1, err := localsigner.New() require.NoError(err) blsPublicKey1 := blsSecretKey1.PublicKey() blsSignature1 := blsSecretKey1.Sign(unsignedMessage.Bytes()) nodeID2 := ids.GenerateTestNodeID() - blsSecretKey2, err 
:= bls.NewSigner() + blsSecretKey2, err := localsigner.New() require.NoError(err) blsPublicKey2 := blsSecretKey2.PublicKey() blsSignature2 := blsSecretKey2.Sign(unsignedMessage.Bytes()) @@ -529,7 +530,7 @@ func testReceiveWarpMessage( weight uint64 } newSigner := func(networkID ids.ID, weight uint64) signer { - secret, err := bls.NewSigner() + secret, err := localsigner.New() require.NoError(err) return signer{ networkID: networkID, diff --git a/precompile/contracts/warp/README.md b/precompile/contracts/warp/README.md index 73ca165224..5a3b79a486 100644 --- a/precompile/contracts/warp/README.md +++ b/precompile/contracts/warp/README.md @@ -1,6 +1,6 @@ # Integrating Avalanche Warp Messaging into the EVM -Avalanche Warp Messaging offers a basic primitive to enable Cross-Subnet communication on the Avalanche Network. +Avalanche Warp Messaging offers a basic primitive to enable Cross-L1 communication on the Avalanche Network. It is intended to allow communication between arbitrary Custom Virtual Machines (including, but not limited to Subnet-EVM and Coreth). @@ -8,7 +8,7 @@ It is intended to allow communication between arbitrary Custom Virtual Machines Avalanche Warp Messaging uses BLS Multi-Signatures with Public-Key Aggregation where every Avalanche validator registers a public key alongside its NodeID on the Avalanche P-Chain. -Every node tracking a Subnet has read access to the Avalanche P-Chain. This provides weighted sets of BLS Public Keys that correspond to the validator sets of each Subnet on the Avalanche Network. Avalanche Warp Messaging provides a basic primitive for signing and verifying messages between Subnets: the receiving network can verify whether an aggregation of signatures from a set of source Subnet validators represents a threshold of stake large enough for the receiving network to process the message. +Every node tracking an Avalanche L1 has read access to the Avalanche P-Chain. 
This provides weighted sets of BLS Public Keys that correspond to the validator sets of each L1 on the Avalanche Network. Avalanche Warp Messaging provides a basic primitive for signing and verifying messages between L1s: the receiving network can verify whether an aggregation of signatures from a set of source L1 validators represents a threshold of stake large enough for the receiving network to process the message. For more details on Avalanche Warp Messaging, see the AvalancheGo [Warp README](https://docs.avax.network/build/cross-chain/awm/deep-dive). @@ -77,27 +77,25 @@ Since the predicate is encoded into the [Transaction Access List](https://eips.e Therefore, we use the [Predicate Utils](https://github.com/ava-labs/coreth/blob/master/predicate/Predicate.md) package to encode the actual byte slice of size N into the access list. -### Performance Optimization: C-Chain to Subnet +### Performance Optimization: C-Chain to Avalanche L1 -To support C-Chain to Subnet communication, or more generally Primary Network to Subnet communication, we special case the C-Chain for two reasons: +For communication between the C-Chain and an L1, as well as broader interactions between the Primary Network and Avalanche L1s, we have implemented special handling for the C-Chain. -1. Every Subnet validator validates the C-Chain -2. The Primary Network has the largest possible number of validators +The Primary Network has a large validator set, which creates a unique challenge for Avalanche Warp messages. To reach the required stake threshold, numerous signatures would need to be collected and verifying messages from the Primary Network would be computationally costly. However, we have developed a more efficient solution. 
-Since the Primary Network has the largest possible number of validators for any Subnet on Avalanche, it would also be the most expensive Subnet to receive and verify Avalanche Warp Messages from as it reaching a threshold of stake on the primary network would require many signatures. Luckily, we can do something much smarter. +When an Avalanche L1 receives a message from a blockchain on the Primary Network, we use the validator set of the receiving L1 instead of the entire network when validating the message. Note this is NOT possible if an L1 does not validate the Primary Network, in which case the Warp precompile must be configured with `requirePrimaryNetworkSigners`. -When a Subnet receives a message from a blockchain on the Primary Network, we use the validator set of the receiving Subnet instead of the entire network when validating the message. This means that the C-Chain sending a message can be the exact same as Subnet to Subnet communication. +Sending messages from the C-Chain remains unchanged. +However, when L1 XYZ receives a message from the C-Chain, it changes the semantics to the following: -However, when Subnet B receives a message from the C-Chain, it changes the semantics to the following: +1. Read the `SourceChainID` of the signed message (C-Chain) +2. Look up the `SubnetID` that validates C-Chain: Primary Network +3. Look up the validator set of L1 XYZ (instead of the Primary Network) and the registered BLS Public Keys of L1 XYZ at the P-Chain height specified by the ProposerVM header +4. Continue Warp Message verification using the validator set of L1 XYZ instead of the Primary Network -1. Read the SourceChainID of the signed message (C-Chain) -2. Look up the SubnetID that validates C-Chain: Primary Network -3. Look up the validator set of Subnet B (instead of the Primary Network) and the registered BLS Public Keys of Subnet B at the P-Chain height specified by the ProposerVM header -4. 
Continue Warp Message verification using the validator set of Subnet B instead of the Primary Network +This means that C-Chain to L1 communication only requires a threshold of stake on the receiving L1 to sign the message instead of a threshold of stake for the entire Primary Network. -This means that C-Chain to Subnet communication only requires a threshold of stake on the receiving subnet to sign the message instead of a threshold of stake for the entire Primary Network. - -This assumes that the security of Subnet B already depends on the validators of Subnet B to behave virtuously. Therefore, requiring a threshold of stake from the receiving Subnet's validator set instead of the whole Primary Network does not meaningfully change security of the receiving Subnet. +This assumes that the security of L1 XYZ already depends on the validators of L1 XYZ to behave virtuously. Therefore, requiring a threshold of stake from the receiving L1's validator set instead of the whole Primary Network does not meaningfully change security of the receiving L1. Note: this special case is ONLY applied during Warp Message verification. The message sent by the Primary Network will still contain the Avalanche C-Chain's blockchainID as the sourceChainID and signatures will be served by querying the C-Chain directly. @@ -107,7 +105,7 @@ Note: this special case is ONLY applied during Warp Message verification. The me Avalanche Warp Messaging depends on the Avalanche P-Chain state at the P-Chain height specified by the ProposerVM block header. -Verifying a message requires looking up the validator set of the source subnet on the P-Chain. To support this, Avalanche Warp Messaging uses the ProposerVM header, which includes the P-Chain height it was issued at as the canonical point to lookup the source subnet's validator set. +Verifying a message requires looking up the validator set of the source L1 on the P-Chain. 
To support this, Avalanche Warp Messaging uses the ProposerVM header, which includes the P-Chain height it was issued at as the canonical point to lookup the source L1's validator set. This means verifying the Warp Message and therefore the state transition on a block depends on state that is external to the blockchain itself: the P-Chain. diff --git a/precompile/contracts/warp/config.go b/precompile/contracts/warp/config.go index 52966a72b5..495eeb62e2 100644 --- a/precompile/contracts/warp/config.go +++ b/precompile/contracts/warp/config.go @@ -31,16 +31,17 @@ var ( ) var ( - errOverflowSignersGasCost = errors.New("overflow calculating warp signers gas cost") - errInvalidPredicateBytes = errors.New("cannot unpack predicate bytes") - errInvalidWarpMsg = errors.New("cannot unpack warp message") - errCannotParseWarpMsg = errors.New("cannot parse warp message") - errInvalidWarpMsgPayload = errors.New("cannot unpack warp message payload") - errInvalidAddressedPayload = errors.New("cannot unpack addressed payload") - errInvalidBlockHashPayload = errors.New("cannot unpack block hash payload") - errCannotGetNumSigners = errors.New("cannot fetch num signers from warp message") - errWarpCannotBeActivated = errors.New("warp cannot be activated before Durango") - errFailedVerification = errors.New("cannot verify warp signature") + errOverflowSignersGasCost = errors.New("overflow calculating warp signers gas cost") + errInvalidPredicateBytes = errors.New("cannot unpack predicate bytes") + errInvalidWarpMsg = errors.New("cannot unpack warp message") + errCannotParseWarpMsg = errors.New("cannot parse warp message") + errInvalidWarpMsgPayload = errors.New("cannot unpack warp message payload") + errInvalidAddressedPayload = errors.New("cannot unpack addressed payload") + errInvalidBlockHashPayload = errors.New("cannot unpack block hash payload") + errCannotGetNumSigners = errors.New("cannot fetch num signers from warp message") + errWarpCannotBeActivated = errors.New("warp cannot 
be activated before Durango") + errFailedVerification = errors.New("cannot verify warp signature") + errCannotRetrieveValidatorSet = errors.New("cannot retrieve validator set") ) // Config implements the precompileconfig.Config interface and @@ -208,16 +209,25 @@ func (c *Config) VerifyPredicate(predicateContext *precompileconfig.PredicateCon warpMsg.SourceChainID, c.RequirePrimaryNetworkSigners, ) - err = warpMsg.Signature.Verify( + + validatorSet, err := warp.GetCanonicalValidatorSetFromChainID( context.Background(), - &warpMsg.UnsignedMessage, - predicateContext.SnowCtx.NetworkID, state, predicateContext.ProposerVMBlockCtx.PChainHeight, + warpMsg.UnsignedMessage.SourceChainID, + ) + if err != nil { + log.Debug("failed to retrieve canonical validator set", "msgID", warpMsg.ID(), "err", err) + return fmt.Errorf("%w: %w", errCannotRetrieveValidatorSet, err) + } + + err = warpMsg.Signature.Verify( + &warpMsg.UnsignedMessage, + predicateContext.SnowCtx.NetworkID, + validatorSet, quorumNumerator, WarpQuorumDenominator, ) - if err != nil { log.Debug("failed to verify warp signature", "msgID", warpMsg.ID(), "err", err) return fmt.Errorf("%w: %w", errFailedVerification, err) diff --git a/precompile/contracts/warp/contract_test.go b/precompile/contracts/warp/contract_test.go index 9ba15a8be9..f6d38cbb89 100644 --- a/precompile/contracts/warp/contract_test.go +++ b/precompile/contracts/warp/contract_test.go @@ -12,6 +12,7 @@ import ( agoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/warp" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/coreth/core/extstate" "github.com/ava-labs/coreth/precompile/contract" @@ -95,7 +96,7 @@ func TestSendWarpMessage(t *testing.T) { sendWarpMessagePayload, ) require.NoError(t, err) - unsignedWarpMessage, err := warp.NewUnsignedMessage( + 
unsignedWarpMessage, err := avalancheWarp.NewUnsignedMessage( defaultSnowCtx.NetworkID, blockchainID, sendWarpMessageAddressedPayload.Bytes(), @@ -746,7 +747,7 @@ func TestPackEvents(t *testing.T) { ) require.NoError(t, err) - unsignedWarpMessage, err := warp.NewUnsignedMessage( + unsignedWarpMessage, err := avalancheWarp.NewUnsignedMessage( networkID, sourceChainID, addressedPayload.Bytes(), diff --git a/precompile/contracts/warp/predicate_test.go b/precompile/contracts/warp/predicate_test.go index a5fe8265b7..17d0740796 100644 --- a/precompile/contracts/warp/predicate_test.go +++ b/precompile/contracts/warp/predicate_test.go @@ -17,6 +17,7 @@ import ( agoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" "github.com/ava-labs/avalanchego/utils/set" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" @@ -111,7 +112,7 @@ func (v *testValidator) Compare(o *testValidator) int { } func newTestValidator() *testValidator { - sk, err := bls.NewSigner() + sk, err := localsigner.New() if err != nil { panic(err) } diff --git a/precompile/contracts/warp/signature_verification_test.go b/precompile/contracts/warp/signature_verification_test.go index d52f0a0f89..90ee808f91 100644 --- a/precompile/contracts/warp/signature_verification_test.go +++ b/precompile/contracts/warp/signature_verification_test.go @@ -19,12 +19,13 @@ import ( ) type signatureTest struct { - name string - stateF func(*gomock.Controller) validators.State - quorumNum uint64 - quorumDen uint64 - msgF func(*require.Assertions) *avalancheWarp.Message - err error + name string + stateF func(*gomock.Controller) validators.State + quorumNum uint64 + quorumDen uint64 + msgF func(*require.Assertions) *avalancheWarp.Message + verifyErr error + canonicalErr error } // This test 
copies the test coverage from https://github.com/ava-labs/avalanchego/blob/0117ab96/vms/platformvm/warp/signature_test.go#L137. @@ -55,7 +56,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: errTest, + canonicalErr: errTest, }, { name: "can't get validator set", @@ -82,7 +83,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: errTest, + canonicalErr: errTest, }, { name: "weight overflow", @@ -122,7 +123,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrWeightOverflow, + canonicalErr: avalancheWarp.ErrWeightOverflow, }, { name: "invalid bit set index", @@ -152,7 +153,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrInvalidBitSet, + verifyErr: avalancheWarp.ErrInvalidBitSet, }, { name: "unknown index", @@ -185,7 +186,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrUnknownValidator, + verifyErr: avalancheWarp.ErrUnknownValidator, }, { name: "insufficient weight", @@ -229,7 +230,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrInsufficientWeight, + verifyErr: avalancheWarp.ErrInsufficientWeight, }, { name: "can't parse sig", @@ -263,7 +264,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrParseSignature, + verifyErr: avalancheWarp.ErrParseSignature, }, { name: "no validators", @@ -298,7 +299,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: bls.ErrNoPublicKeys, + verifyErr: bls.ErrNoPublicKeys, }, { name: "invalid signature (substitute)", @@ -342,7 +343,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrInvalidSignature, + verifyErr: avalancheWarp.ErrInvalidSignature, }, { name: 
"invalid signature (missing one)", @@ -382,7 +383,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrInvalidSignature, + verifyErr: avalancheWarp.ErrInvalidSignature, }, { name: "invalid signature (extra one)", @@ -427,7 +428,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrInvalidSignature, + verifyErr: avalancheWarp.ErrInvalidSignature, }, { name: "valid signature", @@ -471,7 +472,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: nil, + verifyErr: nil, }, { name: "valid signature (boundary)", @@ -515,7 +516,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: nil, + verifyErr: nil, }, { name: "valid signature (missing key)", @@ -576,7 +577,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: nil, + verifyErr: nil, }, { name: "valid signature (duplicate key)", @@ -635,7 +636,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: nil, + verifyErr: nil, }, } @@ -648,16 +649,24 @@ func TestSignatureVerification(t *testing.T) { msg := tt.msgF(require) pChainState := tt.stateF(ctrl) - err := msg.Signature.Verify( + validatorSet, err := avalancheWarp.GetCanonicalValidatorSetFromChainID( context.Background(), - &msg.UnsignedMessage, - networkID, pChainState, pChainHeight, + msg.UnsignedMessage.SourceChainID, + ) + require.ErrorIs(err, tt.canonicalErr) + if err != nil { + return + } + err = msg.Signature.Verify( + &msg.UnsignedMessage, + networkID, + validatorSet, tt.quorumNum, tt.quorumDen, ) - require.ErrorIs(err, tt.err) + require.ErrorIs(err, tt.verifyErr) }) } } diff --git a/rpc/handler.go b/rpc/handler.go index 427af29cd7..e589b0292a 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -38,8 +38,8 @@ import ( "sync" "time" - "github.com/ava-labs/coreth/metrics" 
"github.com/ava-labs/libevm/log" + "github.com/ava-labs/libevm/metrics" "golang.org/x/time/rate" ) diff --git a/rpc/http.go b/rpc/http.go index b8670a9df8..8b18ed3895 100644 --- a/rpc/http.go +++ b/rpc/http.go @@ -250,7 +250,7 @@ func (hc *httpConn) doRequest(ctx context.Context, msg interface{}) (io.ReadClos if _, err := buf.ReadFrom(resp.Body); err == nil { body = buf.Bytes() } - + resp.Body.Close() return nil, HTTPError{ Status: resp.Status, StatusCode: resp.StatusCode, diff --git a/rpc/metrics.go b/rpc/metrics.go index 889b48fcdc..a4469545d7 100644 --- a/rpc/metrics.go +++ b/rpc/metrics.go @@ -30,7 +30,7 @@ import ( "fmt" "time" - "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/libevm/metrics" ) var ( diff --git a/scripts/eth-allowed-packages.txt b/scripts/eth-allowed-packages.txt index 0f200f77f5..98fb94b34c 100644 --- a/scripts/eth-allowed-packages.txt +++ b/scripts/eth-allowed-packages.txt @@ -28,6 +28,7 @@ "github.com/ava-labs/libevm/libevm/legacy" "github.com/ava-labs/libevm/libevm/stateconf" "github.com/ava-labs/libevm/log" +"github.com/ava-labs/libevm/metrics" "github.com/ava-labs/libevm/params" "github.com/ava-labs/libevm/rlp" "github.com/ava-labs/libevm/trie" diff --git a/scripts/lint_allowed_eth_imports.sh b/scripts/lint_allowed_eth_imports.sh index fcbfe7c6f4..fa17b2e65f 100755 --- a/scripts/lint_allowed_eth_imports.sh +++ b/scripts/lint_allowed_eth_imports.sh @@ -11,7 +11,9 @@ set -o pipefail # 4. Print out the difference between the search results and the list of specified allowed package imports from libevm. libevm_regexp='"github.com/ava-labs/libevm/.*"' allow_named_imports='eth\w\+ "' -extra_imports=$(grep -r --include='*.go' --exclude=mocks.go "${libevm_regexp}" -h | grep -v "${allow_named_imports}" | grep -o "${libevm_regexp}" | sort -u | comm -23 - ./scripts/eth-allowed-packages.txt) +extra_imports=$(find . -type f \( -name "*.go" \) ! 
-path "./core/main_test.go" \ + -exec sh -c 'grep "$0" -h "$2" | grep -v "$1" | grep -o "$0"' "${libevm_regexp}" "${allow_named_imports}" {} \; | \ + sort -u | comm -23 - ./scripts/eth-allowed-packages.txt) if [ -n "${extra_imports}" ]; then echo "new ethereum imports should be added to ./scripts/eth-allowed-packages.txt to prevent accidental imports:" echo "${extra_imports}" diff --git a/scripts/versions.sh b/scripts/versions.sh index 5eed0c55ef..0df617e7e3 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -6,4 +6,4 @@ set -euo pipefail # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'32f58b4f'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'daac8c8b'} diff --git a/sync/client/stats/stats.go b/sync/client/stats/stats.go index 14af154423..2be86a1d16 100644 --- a/sync/client/stats/stats.go +++ b/sync/client/stats/stats.go @@ -7,8 +7,8 @@ import ( "fmt" "time" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/libevm/metrics" ) var ( diff --git a/sync/handlers/stats/stats.go b/sync/handlers/stats/stats.go index 9dd04c4ea0..0d1171b82b 100644 --- a/sync/handlers/stats/stats.go +++ b/sync/handlers/stats/stats.go @@ -6,7 +6,7 @@ package stats import ( "time" - "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/libevm/metrics" ) // HandlerStats reports prometheus metrics for the state sync handlers diff --git a/sync/statesync/trie_sync_stats.go b/sync/statesync/trie_sync_stats.go index 3bf124d55d..b0e80041f1 100644 --- a/sync/statesync/trie_sync_stats.go +++ b/sync/statesync/trie_sync_stats.go @@ -10,9 +10,9 @@ import ( utils_math "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/log" + "github.com/ava-labs/libevm/metrics" ) const ( diff --git a/sync/statesync/trie_sync_stats_test.go 
b/sync/statesync/trie_sync_stats_test.go index c23cfec2a2..e2c1b0c340 100644 --- a/sync/statesync/trie_sync_stats_test.go +++ b/sync/statesync/trie_sync_stats_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/libevm/metrics" "github.com/stretchr/testify/require" ) diff --git a/triedb/hashdb/database.go b/triedb/hashdb/database.go index 56beaa53ac..1a2e975074 100644 --- a/triedb/hashdb/database.go +++ b/triedb/hashdb/database.go @@ -35,11 +35,11 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/utils" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/ethdb" "github.com/ava-labs/libevm/log" + "github.com/ava-labs/libevm/metrics" "github.com/ava-labs/libevm/rlp" "github.com/ava-labs/libevm/trie" "github.com/ava-labs/libevm/trie/trienode" diff --git a/triedb/pathdb/metrics.go b/triedb/pathdb/metrics.go index 8e3bb412d5..21038ecfc6 100644 --- a/triedb/pathdb/metrics.go +++ b/triedb/pathdb/metrics.go @@ -26,7 +26,7 @@ package pathdb -import "github.com/ava-labs/coreth/metrics" +import "github.com/ava-labs/libevm/metrics" // nolint: unused var ( diff --git a/utils/metered_cache.go b/utils/metered_cache.go index 148239dfc9..ee1807e36f 100644 --- a/utils/metered_cache.go +++ b/utils/metered_cache.go @@ -9,7 +9,7 @@ import ( "time" "github.com/VictoriaMetrics/fastcache" - "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/libevm/metrics" ) // MeteredCache wraps *fastcache.Cache and periodically pulls stats from it. diff --git a/utils/numbers.go b/utils/numbers.go index a6be2341fb..e9d4df0fa0 100644 --- a/utils/numbers.go +++ b/utils/numbers.go @@ -4,6 +4,7 @@ package utils import ( + "math/big" "time" ) @@ -27,3 +28,20 @@ func Uint64PtrEqual(x, y *uint64) bool { } return *x == *y } + +// BigEqual returns true if a is equal to b. If a and b are nil, it returns +// true. 
+func BigEqual(a, b *big.Int) bool { + if a == nil || b == nil { + return a == b + } + return a.Cmp(b) == 0 +} + +// BigEqualUint64 returns true if a is equal to b. If a is nil or not a uint64, +// it returns false. +func BigEqualUint64(a *big.Int, b uint64) bool { + return a != nil && + a.IsUint64() && + a.Uint64() == b +} diff --git a/utils/numbers_test.go b/utils/numbers_test.go new file mode 100644 index 0000000000..c664f6b2e5 --- /dev/null +++ b/utils/numbers_test.go @@ -0,0 +1,93 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package utils + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBigEqual(t *testing.T) { + tests := []struct { + name string + a *big.Int + b *big.Int + want bool + }{ + { + name: "nil_nil", + a: nil, + b: nil, + want: true, + }, + { + name: "0_nil", + a: big.NewInt(0), + b: nil, + want: false, + }, + { + name: "0_1", + a: big.NewInt(0), + b: big.NewInt(1), + want: false, + }, + { + name: "1_1", + a: big.NewInt(1), + b: big.NewInt(1), + want: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + assert := assert.New(t) + + assert.Equal(test.want, BigEqual(test.a, test.b)) + assert.Equal(test.want, BigEqual(test.b, test.a)) + }) + } +} + +func TestBigEqualUint64(t *testing.T) { + tests := []struct { + name string + a *big.Int + b uint64 + want bool + }{ + { + name: "nil", + a: nil, + b: 0, + want: false, + }, + { + name: "not_uint64", + a: big.NewInt(-1), + b: 0, + want: false, + }, + { + name: "equal", + a: big.NewInt(1), + b: 1, + want: true, + }, + { + name: "not_equal", + a: big.NewInt(1), + b: 2, + want: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := BigEqualUint64(test.a, test.b) + assert.Equal(t, test.want, got) + }) + } +} diff --git a/utils/snow.go b/utils/snow.go index 36b9b7b7fb..51498ef141 100644 --- a/utils/snow.go +++ b/utils/snow.go @@ -13,7 
+13,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/snow/validators/validatorstest" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/platformvm/warp" ) @@ -25,7 +25,7 @@ var ( ) func TestSnowContext() *snow.Context { - sk, err := bls.NewSigner() + sk, err := localsigner.New() if err != nil { panic(err) } diff --git a/warp/aggregator/aggregator_test.go b/warp/aggregator/aggregator_test.go index 055d3edfa8..397e39553d 100644 --- a/warp/aggregator/aggregator_test.go +++ b/warp/aggregator/aggregator_test.go @@ -14,11 +14,12 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" ) func newValidator(t testing.TB, weight uint64) (bls.Signer, *avalancheWarp.Validator) { - sk, err := bls.NewSigner() + sk, err := localsigner.New() require.NoError(t, err) pk := sk.PublicKey() return sk, &avalancheWarp.Validator{ @@ -51,7 +52,7 @@ func TestAggregateSignatures(t *testing.T) { vdr2: sig2, vdr3: sig3, } - nonVdrSk, err := bls.NewSigner() + nonVdrSk, err := localsigner.New() require.NoError(t, err) nonVdrSig := nonVdrSk.Sign(unsignedMsg.Bytes()) vdrs := []*avalancheWarp.Validator{ diff --git a/warp/backend_test.go b/warp/backend_test.go index cd7aa1ea76..9b3de02cd5 100644 --- a/warp/backend_test.go +++ b/warp/backend_test.go @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" avalancheWarp 
"github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/coreth/warp/warptest" @@ -41,7 +41,7 @@ func init() { func TestAddAndGetValidMessage(t *testing.T) { db := memdb.New() - sk, err := bls.NewSigner() + sk, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} @@ -64,7 +64,7 @@ func TestAddAndGetValidMessage(t *testing.T) { func TestAddAndGetUnknownMessage(t *testing.T) { db := memdb.New() - sk, err := bls.NewSigner() + sk, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} @@ -83,7 +83,7 @@ func TestGetBlockSignature(t *testing.T) { blockClient := warptest.MakeBlockClient(blkID) db := memdb.New() - sk, err := bls.NewSigner() + sk, err := localsigner.New() require.NoError(err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} @@ -108,7 +108,7 @@ func TestGetBlockSignature(t *testing.T) { func TestZeroSizedCache(t *testing.T) { db := memdb.New() - sk, err := bls.NewSigner() + sk, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) @@ -136,7 +136,7 @@ func TestOffChainMessages(t *testing.T) { check func(require *require.Assertions, b Backend) err error } - sk, err := bls.NewSigner() + sk, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index 77a4af087e..5b30d7bc55 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -11,9 +11,11 @@ import ( 
"github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/plugin/evm/testutils" "github.com/ava-labs/coreth/utils" "github.com/ava-labs/coreth/warp" "github.com/ava-labs/coreth/warp/warptest" @@ -21,9 +23,11 @@ import ( ) func TestMessageSignatureHandler(t *testing.T) { + testutils.WithMetrics(t) + database := memdb.New() snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSigner() + blsSecretKey, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) @@ -125,9 +129,11 @@ func TestMessageSignatureHandler(t *testing.T) { } func TestBlockSignatureHandler(t *testing.T) { + testutils.WithMetrics(t) + database := memdb.New() snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSigner() + blsSecretKey, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) diff --git a/warp/handlers/stats.go b/warp/handlers/stats.go index ef3f31ae9a..6a56d54076 100644 --- a/warp/handlers/stats.go +++ b/warp/handlers/stats.go @@ -6,7 +6,7 @@ package handlers import ( "time" - "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/libevm/metrics" ) type handlerStats struct { diff --git a/warp/service.go b/warp/service.go index 7e4f11c98f..6b16354a70 100644 --- a/warp/service.go +++ b/warp/service.go @@ -112,22 +112,22 @@ func (a *API) aggregateSignatures(ctx context.Context, unsignedMessage *warp.Uns } state := warpValidators.NewState(a.state, a.sourceSubnetID, a.sourceChainID, a.requirePrimaryNetworkSigners()) - validators, totalWeight, err := 
warp.GetCanonicalValidatorSet(ctx, state, pChainHeight, subnetID) + validatorSet, err := warp.GetCanonicalValidatorSetFromSubnetID(ctx, state, pChainHeight, subnetID) if err != nil { return nil, fmt.Errorf("failed to get validator set: %w", err) } - if len(validators) == 0 { + if len(validatorSet.Validators) == 0 { return nil, fmt.Errorf("%w (SubnetID: %s, Height: %d)", errNoValidators, subnetID, pChainHeight) } log.Debug("Fetching signature", "sourceSubnetID", subnetID, "height", pChainHeight, - "numValidators", len(validators), - "totalWeight", totalWeight, + "numValidators", len(validatorSet.Validators), + "totalWeight", validatorSet.TotalWeight, ) - agg := aggregator.New(aggregator.NewSignatureGetter(a.client), validators, totalWeight) + agg := aggregator.New(aggregator.NewSignatureGetter(a.client), validatorSet.Validators, validatorSet.TotalWeight) signatureResult, err := agg.AggregateSignatures(ctx, unsignedMessage, quorumNum) if err != nil { return nil, err diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index a58726aa0f..718fd78dd1 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -14,9 +14,10 @@ import ( "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/coreth/plugin/evm/testutils" "github.com/ava-labs/coreth/utils" "github.com/ava-labs/coreth/warp/warptest" "github.com/stretchr/testify/require" @@ -24,9 +25,11 @@ import ( ) func TestAddressedCallSignatures(t *testing.T) { + testutils.WithMetrics(t) + database := memdb.New() snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSigner() + blsSecretKey, err := 
localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) @@ -138,9 +141,11 @@ func TestAddressedCallSignatures(t *testing.T) { } func TestBlockSignatures(t *testing.T) { + testutils.WithMetrics(t) + database := memdb.New() snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSigner() + blsSecretKey, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) diff --git a/warp/verifier_stats.go b/warp/verifier_stats.go index 980d464429..6a47c31397 100644 --- a/warp/verifier_stats.go +++ b/warp/verifier_stats.go @@ -4,7 +4,7 @@ package warp import ( - "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/libevm/metrics" ) type verifierStats struct {