perf: add more metrics to perf
will@2012 committed Jan 16, 2024
1 parent 52645c2 commit cc119b9
Showing 8 changed files with 90 additions and 0 deletions.
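Almost every hunk below follows the same instrumentation pattern: register a metrics.Timer once in a package-level var block, capture time.Now() at the top of the hot function, and let a deferred UpdateSince record the elapsed wall time on every return path (the per-transaction timer in core/state_processor.go is the one loop-scoped exception, noted after that file's diff). A minimal sketch of the pattern, with illustrative identifier names that are not part of the commit:

package example

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

// exampleOpTimer is registered on the default metrics registry when the package initialises.
var exampleOpTimer = metrics.NewRegisteredTimer("perf/example/op/time", nil)

func exampleOp() {
	start := time.Now()
	// The deferred update runs on both error and success paths, so every call
	// contributes one sample to the timer's histogram.
	defer exampleOpTimer.UpdateSince(start)
	// ... the operation being measured ...
}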
7 changes: 7 additions & 0 deletions core/state/snapshot/difflayer.go
@@ -27,6 +27,7 @@ import (
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
bloomfilter "github.com/holiman/bloomfilter/v2"
)
@@ -77,6 +78,10 @@ var (
bloomStorageHasherOffset = 0
)

var (
perfGetSnapshotDiffLayerAccountTimer = metrics.NewRegisteredTimer("perf/get/snapshot/diff/layer/account/time", nil)
)

func init() {
// Init the bloom offsets in the range [0:24] (requires 8 bytes)
bloomDestructHasherOffset = rand.Intn(25)
@@ -273,6 +278,8 @@ func (dl *diffLayer) Stale() bool {
// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
start := time.Now()
defer perfGetSnapshotDiffLayerAccountTimer.UpdateSince(start)
data, err := dl.AccountRLP(hash)
if err != nil {
return nil, err
8 changes: 8 additions & 0 deletions core/state/snapshot/disklayer.go
@@ -19,15 +19,21 @@ package snapshot
import (
"bytes"
"sync"
"time"

"github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)

var (
perfGetSnapshotDiskLayerAccountTimer = metrics.NewRegisteredTimer("perf/get/snapshot/disk/layer/account/time", nil)
)

// diskLayer is a low level persistent snapshot built on top of a key-value store.
type diskLayer struct {
diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot
@@ -66,6 +72,8 @@ func (dl *diskLayer) Stale() bool {
// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
func (dl *diskLayer) Account(hash common.Hash) (*Account, error) {
start := time.Now()
defer perfGetSnapshotDiskLayerAccountTimer.UpdateSince(start)
data, err := dl.AccountRLP(hash)
if err != nil {
return nil, err
15 changes: 15 additions & 0 deletions core/state/statedb.go
@@ -40,6 +40,13 @@ import (

const defaultNumOfSlots = 100

var (
perfGetStateObjectTimer = metrics.NewRegisteredTimer("perf/get/state/object/time", nil)
perfIntermediateRootTimer = metrics.NewRegisteredTimer("perf/intermediate/root/time", nil)
perfStateDBCommitTimer = metrics.NewRegisteredTimer("perf/state/db/commit/time", nil)
perfStateDBFinaliseTimer = metrics.NewRegisteredTimer("perf/state/db/finalise/time", nil)
)

type revision struct {
id int
journalIndex int
@@ -563,6 +570,8 @@ func (s *StateDB) deleteStateObject(obj *stateObject) {
// the object is not found or was deleted in this execution context. If you need
// to differentiate between non-existent/just-deleted, use getDeletedStateObject.
func (s *StateDB) getStateObject(addr common.Address) *stateObject {
start := time.Now()
defer perfGetStateObjectTimer.UpdateSince(start)
if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted {
return obj
}
@@ -866,6 +875,8 @@ func (s *StateDB) GetRefund() uint64 {
// the journal as well as the refunds. Finalise, however, will not push any updates
// into the tries just yet. Only IntermediateRoot or Commit will do that.
func (s *StateDB) Finalise(deleteEmptyObjects bool) {
start := time.Now()
defer perfStateDBFinaliseTimer.UpdateSince(start)
addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties))
for addr := range s.journal.dirties {
obj, exist := s.stateObjects[addr]
@@ -915,6 +926,8 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// It is called in between transactions to get the root hash that
// goes into transaction receipts.
func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
start := time.Now()
defer perfIntermediateRootTimer.UpdateSince(start)
// Finalise all the dirty storage states and write them into the tries
s.Finalise(deleteEmptyObjects)

@@ -992,6 +1005,8 @@ func (s *StateDB) clearJournalAndRefund() {

// Commit writes the state to the underlying in-memory trie database.
func (s *StateDB) Commit(deleteEmptyObjects bool, postCommitFuncs ...func() error) (common.Hash, error) {
start := time.Now()
defer perfStateDBCommitTimer.UpdateSince(start)
// Short circuit in case any database failure occurred earlier.
if s.dbErr != nil {
return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
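Note that the StateDB timers overlap rather than add up: IntermediateRoot calls Finalise as its first step (visible in the hunk above), so perfIntermediateRootTimer already includes whatever perfStateDBFinaliseTimer records for that call. A hedged sketch of how the overlap could be inspected at runtime, assuming the timers are reachable through metrics.DefaultRegistry and expose the usual go-metrics Snapshot/Mean methods (nothing below is defined by the commit itself):

// Illustrative only: report what share of the average IntermediateRoot call
// is spent inside Finalise, using the metric names registered above.
func finaliseShareOfRoot() float64 {
	rootTimer, okRoot := metrics.DefaultRegistry.Get("perf/intermediate/root/time").(metrics.Timer)
	finaliseTimer, okFin := metrics.DefaultRegistry.Get("perf/state/db/finalise/time").(metrics.Timer)
	if !okRoot || !okFin {
		return 0
	}
	rootMean := rootTimer.Snapshot().Mean() // mean latency in nanoseconds
	if rootMean == 0 {
		return 0
	}
	return finaliseTimer.Snapshot().Mean() / rootMean
}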
8 changes: 8 additions & 0 deletions core/state_processor.go
@@ -19,6 +19,7 @@ package core
import (
"fmt"
"math/big"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
@@ -27,9 +28,14 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
)

var (
perfProcessTxTime = metrics.NewRegisteredTimer("perf/process/tx/time", nil)
)

// StateProcessor is a basic Processor, which takes care of transitioning
// state from one point to another.
//
@@ -77,6 +83,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg)
// Iterate over and process the individual transactions
for i, tx := range block.Transactions() {
start := time.Now()
msg, err := TransactionToMessage(tx, types.MakeSigner(p.config, header.Number), header.BaseFee)
if err != nil {
return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err)
@@ -88,6 +95,7 @@
}
receipts = append(receipts, receipt)
allLogs = append(allLogs, receipt.Logs...)
perfProcessTxTime.UpdateSince(start)
}
// Fail if Shanghai not enabled and len(withdrawals) is non-zero.
withdrawals := block.Withdrawals()
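Unlike the other call sites in this commit, the per-transaction timer in Process is not deferred: start is reset at the top of each loop iteration and perfProcessTxTime.UpdateSince(start) fires only after the receipt has been appended, so each successful transaction contributes one sample while failed ones return early and are never recorded. A defer would not behave the same way here, because deferred calls run when the surrounding function returns, not at the end of each iteration. A minimal sketch of the loop-scoped pattern, with applyTx standing in for the real message application:

// Per-iteration timing must update the timer explicitly inside the loop body.
func processAll(txs []*types.Transaction) error {
	for i, tx := range txs {
		start := time.Now()
		if err := applyTx(i, tx); err != nil {
			return err // failed transactions never reach the UpdateSince below
		}
		perfProcessTxTime.UpdateSince(start) // one sample per successful transaction
	}
	return nil
}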
13 changes: 13 additions & 0 deletions ethdb/leveldb/leveldb.go
@@ -56,6 +56,13 @@ const (
metricsGatheringInterval = 3 * time.Second
)

var (
// timers for performance (perf) metrics
perfDBGetTimer = metrics.NewRegisteredTimer("perf/db/get/time", nil)
perfDBPutTimer = metrics.NewRegisteredTimer("perf/db/put/time", nil)
perfDBBatchWriteTimer = metrics.NewRegisteredTimer("perf/db/batch/write/time", nil)
)

// Database is a persistent key-value store. Apart from basic data storage
// functionality it also supports batch writes and iterating over the keyspace in
// binary-alphabetical order.
@@ -189,6 +196,8 @@ func (db *Database) Has(key []byte) (bool, error) {

// Get retrieves the given key if it's present in the key-value store.
func (db *Database) Get(key []byte) ([]byte, error) {
start := time.Now()
defer perfDBGetTimer.UpdateSince(start)
dat, err := db.db.Get(key, nil)
if err != nil {
return nil, err
@@ -198,6 +207,8 @@ func (db *Database) Get(key []byte) ([]byte, error) {

// Put inserts the given value into the key-value store.
func (db *Database) Put(key []byte, value []byte) error {
start := time.Now()
defer perfDBPutTimer.UpdateSince(start)
return db.db.Put(key, value, nil)
}

@@ -497,6 +508,8 @@ func (b *batch) ValueSize() int {

// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
start := time.Now()
defer perfDBBatchWriteTimer.UpdateSince(start)
return b.db.Write(b.b, nil)
}

13 changes: 13 additions & 0 deletions ethdb/pebble/pebble.go
@@ -49,6 +49,13 @@ const (
metricsGatheringInterval = 3 * time.Second
)

var (
// timers for performance (perf) metrics
perfDBGetTimer = metrics.NewRegisteredTimer("perf/db/get/time", nil)
perfDBPutTimer = metrics.NewRegisteredTimer("perf/db/put/time", nil)
perfDBBatchWriteTimer = metrics.NewRegisteredTimer("perf/db/batch/write/time", nil)
)

// Database is a persistent key-value store based on the pebble storage engine.
// Apart from basic data storage functionality it also supports batch writes and
// iterating over the keyspace in binary-alphabetical order.
@@ -247,6 +254,8 @@ func (d *Database) Has(key []byte) (bool, error) {

// Get retrieves the given key if it's present in the key-value store.
func (d *Database) Get(key []byte) ([]byte, error) {
start := time.Now()
defer perfDBGetTimer.UpdateSince(start)
dat, closer, err := d.db.Get(key)
if err != nil {
return nil, err
@@ -259,6 +268,8 @@ func (d *Database) Get(key []byte) ([]byte, error) {

// Put inserts the given value into the key-value store.
func (d *Database) Put(key []byte, value []byte) error {
start := time.Now()
defer perfDBPutTimer.UpdateSince(start)
return d.db.Set(key, value, pebble.NoSync)
}

@@ -502,6 +513,8 @@ func (b *batch) ValueSize() int {

// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
start := time.Now()
defer perfDBBatchWriteTimer.UpdateSince(start)
return b.b.Commit(pebble.NoSync)
}

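One thing to double-check: the leveldb and pebble backends register their timers under identical names (perf/db/get/time, perf/db/put/time, perf/db/batch/write/time). Only one backend is active for a given node, but if both packages are linked into the binary, both var blocks run at init, and with go-metrics style registries a second Register call for an existing name is typically dropped, which could leave the backend that initialises second holding a timer that never appears in reports. If that is a concern, sharing the metric through GetOrRegisterTimer is one defensive option; a sketch, assuming that helper exists in the metrics package with these semantics:

// Both backends would then resolve the same registered timer instance instead of
// one of them silently losing the name to the other at package-init time.
var (
	perfDBGetTimer        = metrics.GetOrRegisterTimer("perf/db/get/time", nil)
	perfDBPutTimer        = metrics.GetOrRegisterTimer("perf/db/put/time", nil)
	perfDBBatchWriteTimer = metrics.GetOrRegisterTimer("perf/db/batch/write/time", nil)
)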
8 changes: 8 additions & 0 deletions trie/database.go
@@ -59,6 +59,10 @@ var (
memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

var (
perfTrieDBGetTimer = metrics.NewRegisteredTimer("perf/trie/db/get/time", nil)
)

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
@@ -344,6 +348,8 @@ func (db *Database) insert(hash common.Hash, size int, node node) {
// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
func (db *Database) node(hash common.Hash) node {
start := time.Now()
defer perfTrieDBGetTimer.UpdateSince(start)
// Retrieve the node from the clean cache if available
if db.cleans != nil {
if enc := db.cleans.Get(nil, hash[:]); enc != nil {
@@ -385,6 +391,8 @@ func (db *Database) node(hash common.Hash) node {
// Node retrieves an encoded cached trie node from memory. If it cannot be found
// cached, the method queries the persistent database for the content.
func (db *Database) Node(hash common.Hash) ([]byte, error) {
start := time.Now()
defer perfTrieDBGetTimer.UpdateSince(start)
// It doesn't make sense to retrieve the metaroot
if hash == (common.Hash{}) {
return nil, errors.New("not found")
18 changes: 18 additions & 0 deletions trie/trie.go
@@ -21,10 +21,18 @@ import (
"bytes"
"errors"
"fmt"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)

var (
perfTrieGetTimer = metrics.NewRegisteredTimer("perf/trie/get/time", nil)
perfTrieReaderGetTimer = metrics.NewRegisteredTimer("perf/trie/reader/get/time", nil)
perfTrieReaderTotalTimer = metrics.NewRegisteredTimer("perf/trie/reader/total/time", nil)
)

// Trie is a Merkle Patricia Trie. Use New to create a trie that sits on
@@ -119,6 +127,8 @@ func (t *Trie) Get(key []byte) []byte {
// The value bytes must not be modified by the caller.
// If a node was not found in the database, a MissingNodeError is returned.
func (t *Trie) TryGet(key []byte) ([]byte, error) {
start := time.Now()
defer perfTrieGetTimer.UpdateSince(start)
value, newroot, didResolve, err := t.tryGet(t.root, keybytesToHex(key), 0)
if err == nil && didResolve {
t.root = newroot
@@ -151,7 +161,9 @@ func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode
}
return value, n, didResolve, err
case hashNode:
start := time.Now()
child, err := t.resolveAndTrack(n, key[:pos])
perfTrieReaderGetTimer.UpdateSince(start)
if err != nil {
return nil, n, true, err
}
@@ -165,6 +177,8 @@
// TryGetNode attempts to retrieve a trie node by compact-encoded path. It is not
// possible to use keybyte-encoding as the path might contain odd nibbles.
func (t *Trie) TryGetNode(path []byte) ([]byte, int, error) {
start := time.Now()
defer perfTrieGetTimer.UpdateSince(start)
item, newroot, resolved, err := t.tryGetNode(t.root, compactToHex(path), 0)
if err != nil {
return nil, resolved, err
@@ -227,7 +241,9 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new
return item, n, resolved, err

case hashNode:
start := time.Now()
child, err := t.resolveAndTrack(n, path[:pos])
perfTrieReaderGetTimer.UpdateSince(start)
if err != nil {
return nil, n, 1, err
}
@@ -536,6 +552,8 @@ func (t *Trie) resolve(n node, prefix []byte) (node, error) {
// node's original value. The rlp-encoded blob is preferred to be loaded from
// database because it's easy to decode node while complex to encode node to blob.
func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) {
start := time.Now()
defer perfTrieReaderTotalTimer.UpdateSince(start)
blob, err := t.reader.nodeBlob(prefix, common.BytesToHash(n))
if err != nil {
return nil, err
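The three trie timers nest as well: perfTrieGetTimer wraps the whole TryGet/TryGetNode call, perfTrieReaderGetTimer wraps each resolveAndTrack call made while walking the trie, and perfTrieReaderTotalTimer is updated inside resolveAndTrack itself, so the reader timers measure a portion of the get time rather than independent work. A sketch of turning those samples into a cumulative share, under the assumption that the timers sit on metrics.DefaultRegistry and expose Count and Mean through their snapshots (metric names come from the diff, the helper itself is hypothetical):

// Illustrative only: estimate how much of the cumulative TryGet/TryGetNode time
// was spent resolving nodes through the trie reader, using count * mean latency.
func trieReaderShare() float64 {
	totalNanos := func(name string) float64 {
		t, ok := metrics.DefaultRegistry.Get(name).(metrics.Timer)
		if !ok {
			return 0
		}
		s := t.Snapshot()
		return float64(s.Count()) * s.Mean() // approximate total nanoseconds recorded
	}
	getTotal := totalNanos("perf/trie/get/time")
	if getTotal == 0 {
		return 0
	}
	return totalNanos("perf/trie/reader/get/time") / getTotal
}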
