Skip to content

Commit

Permalink
Merge pull request #5395 from onflow/leo/add-checkpoint-size-metrics
Browse files Browse the repository at this point in the history
Add checkpoint size metrics
  • Loading branch information
zhangchiqing authored Feb 20, 2024
2 parents 2ff69ae + 88adf05 commit a260653
Show file tree
Hide file tree
Showing 24 changed files with 110 additions and 29 deletions.
5 changes: 5 additions & 0 deletions admin/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,11 @@ curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"
curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "stop-at-height", "data": { "height": 1111, "crash": false }}'
```

### Trigger checkpoint creation on execution
```
curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "trigger-checkpoint"}'
```

### Add/Remove/Get address to rate limit a payer from adding transactions to collection nodes' mempool
```
curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "ingest-tx-rate-limit", "data": { "command": "add", "addresses": "a08d349e8037d6e5,e6765c6113547fb7" }}'
Expand Down
2 changes: 1 addition & 1 deletion cmd/bootstrap/run/execution_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ func GenerateExecutionState(
return flow.DummyStateCommitment, err
}

compactor, err := complete.NewCompactor(ledgerStorage, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := complete.NewCompactor(ledgerStorage, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metricsCollector)
if err != nil {
return flow.DummyStateCommitment, err
}
Expand Down
1 change: 1 addition & 0 deletions cmd/execution_builder.go
Original file line number Diff line number Diff line change
Expand Up @@ -881,6 +881,7 @@ func (exeNode *ExecutionNode) LoadExecutionStateLedgerWALCompactor(
exeNode.exeConf.checkpointDistance,
exeNode.exeConf.checkpointsToKeep,
exeNode.toTriggerCheckpoint, // compactor will listen to the signal from admin tool for force triggering checkpointing
exeNode.collector,
)
}

Expand Down
2 changes: 1 addition & 1 deletion cmd/util/cmd/checkpoint-collect-stats/cmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ func run(*cobra.Command, []string) {
if err != nil {
log.Fatal().Err(err).Msg("cannot create ledger from write-a-head logs and checkpoints")
}
compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), complete.DefaultCacheSize, math.MaxInt, 1, atomic.NewBool(false))
compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), complete.DefaultCacheSize, math.MaxInt, 1, atomic.NewBool(false), &metrics.NoopCollector{})
if err != nil {
log.Fatal().Err(err).Msg("cannot create compactor")
}
Expand Down
2 changes: 1 addition & 1 deletion cmd/util/cmd/exec-data-json-export/ledger_exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ func ExportLedger(ledgerPath string, targetstate string, outputPath string) erro
return fmt.Errorf("cannot create ledger from write-a-head logs and checkpoints: %w", err)
}

compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), complete.DefaultCacheSize, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), complete.DefaultCacheSize, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), &metrics.NoopCollector{})
if err != nil {
return fmt.Errorf("cannot create compactor: %w", err)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ func extractExecutionState(

log.Info().Msg("init compactor")

compactor, err := complete.NewCompactor(led, diskWal, log, complete.DefaultCacheSize, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := complete.NewCompactor(led, diskWal, log, complete.DefaultCacheSize, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), &metrics.NoopCollector{})
if err != nil {
return fmt.Errorf("cannot create compactor: %w", err)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ func TestExtractExecutionState(t *testing.T) {
require.NoError(t, err)
f, err := complete.NewLedger(diskWal, size*10, metr, zerolog.Nop(), complete.DefaultPathFinderVersion)
require.NoError(t, err)
compactor, err := complete.NewCompactor(f, diskWal, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := complete.NewCompactor(f, diskWal, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false), &metrics.NoopCollector{})
require.NoError(t, err)
<-compactor.Ready()

Expand Down Expand Up @@ -166,7 +166,7 @@ func TestExtractExecutionState(t *testing.T) {
checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
checkpointsToKeep = 1
)
compactor, err := complete.NewCompactor(storage, diskWal, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := complete.NewCompactor(storage, diskWal, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false), &metrics.NoopCollector{})
require.NoError(t, err)

<-compactor.Ready()
Expand Down
2 changes: 0 additions & 2 deletions engine/execution/ingestion/engine.go
Original file line number Diff line number Diff line change
Expand Up @@ -499,8 +499,6 @@ func (e *Engine) onBlockExecuted(
e.metrics.ExecutionStorageStateCommitment(int64(len(finalState)))
e.metrics.ExecutionLastExecutedBlockHeight(executed.Block.Header.Height)

// e.checkStateSyncStop(executed.Block.Header.Height)

missingCollections := make(map[*entity.ExecutableBlock][]*flow.CollectionGuarantee)
err := e.mempool.Run(
func(
Expand Down
2 changes: 1 addition & 1 deletion engine/testutil/nodes.go
Original file line number Diff line number Diff line change
Expand Up @@ -573,7 +573,7 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ide
ls, err := completeLedger.NewLedger(diskWal, capacity, metricsCollector, node.Log.With().Str("compontent", "ledger").Logger(), completeLedger.DefaultPathFinderVersion)
require.NoError(t, err)

compactor, err := completeLedger.NewCompactor(ls, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := completeLedger.NewCompactor(ls, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metricsCollector)
require.NoError(t, err)

<-compactor.Ready() // Need to start compactor here because BootstrapLedger() updates ledger state.
Expand Down
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,7 @@ require (
require (
github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593
github.com/coreos/go-semver v0.3.0
github.com/docker/go-units v0.5.0
github.com/go-playground/validator/v10 v10.14.1
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/gorilla/websocket v1.5.0
Expand Down Expand Up @@ -160,7 +161,6 @@ require (
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect
github.com/dgraph-io/ristretto v0.1.0 // indirect
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/elastic/gosigar v0.14.2 // indirect
github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
Expand Down
15 changes: 13 additions & 2 deletions ledger/complete/compactor.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ import (
"github.com/onflow/flow-go/ledger"
"github.com/onflow/flow-go/ledger/complete/mtrie/trie"
realWAL "github.com/onflow/flow-go/ledger/complete/wal"
"github.com/onflow/flow-go/module"
"github.com/onflow/flow-go/module/lifecycle"
"github.com/onflow/flow-go/module/observable"
)
Expand Down Expand Up @@ -57,6 +58,7 @@ type Compactor struct {
stopCh chan chan struct{}
trieUpdateCh <-chan *WALTrieUpdate
triggerCheckpointOnNextSegmentFinish *atomic.Bool // to trigger checkpoint manually
metrics module.WALMetrics
}

// NewCompactor creates new Compactor which writes WAL record and triggers
Expand All @@ -76,6 +78,7 @@ func NewCompactor(
checkpointDistance uint,
checkpointsToKeep uint,
triggerCheckpointOnNextSegmentFinish *atomic.Bool,
metrics module.WALMetrics,
) (*Compactor, error) {
if checkpointDistance < 1 {
checkpointDistance = 1
Expand Down Expand Up @@ -114,6 +117,7 @@ func NewCompactor(
checkpointDistance: checkpointDistance,
checkpointsToKeep: checkpointsToKeep,
triggerCheckpointOnNextSegmentFinish: triggerCheckpointOnNextSegmentFinish,
metrics: metrics,
}, nil
}

Expand Down Expand Up @@ -288,7 +292,7 @@ Loop:
// Since this function is only for checkpointing, Compactor isn't affected by returned error.
func (c *Compactor) checkpoint(ctx context.Context, tries []*trie.MTrie, checkpointNum int) error {

err := createCheckpoint(c.checkpointer, c.logger, tries, checkpointNum)
err := createCheckpoint(c.checkpointer, c.logger, tries, checkpointNum, c.metrics)
if err != nil {
return &createCheckpointError{num: checkpointNum, err: err}
}
Expand Down Expand Up @@ -325,7 +329,7 @@ func (c *Compactor) checkpoint(ctx context.Context, tries []*trie.MTrie, checkpo
// createCheckpoint creates checkpoint with given checkpointNum and tries.
// Errors indicate that checkpoint file can't be created.
// Caller should handle returned errors by retrying checkpointing when appropriate.
func createCheckpoint(checkpointer *realWAL.Checkpointer, logger zerolog.Logger, tries []*trie.MTrie, checkpointNum int) error {
func createCheckpoint(checkpointer *realWAL.Checkpointer, logger zerolog.Logger, tries []*trie.MTrie, checkpointNum int, metrics module.WALMetrics) error {

logger.Info().Msgf("serializing checkpoint %d with %v tries", checkpointNum, len(tries))

Expand All @@ -337,6 +341,13 @@ func createCheckpoint(checkpointer *realWAL.Checkpointer, logger zerolog.Logger,
return fmt.Errorf("error serializing checkpoint (%d): %w", checkpointNum, err)
}

size, err := realWAL.ReadCheckpointFileSize(checkpointer.Dir(), fileName)
if err != nil {
return fmt.Errorf("error reading checkpoint file size (%d): %w", checkpointNum, err)
}

metrics.ExecutionCheckpointSize(size)

duration := time.Since(startTime)
logger.Info().Float64("total_time_s", duration.Seconds()).Msgf("created checkpoint %d", checkpointNum)

Expand Down
10 changes: 5 additions & 5 deletions ledger/complete/compactor_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ func TestCompactorCreation(t *testing.T) {
// WAL segments are 32kB, so here we generate 2 keys 64kB each, times `size`
// so we should get at least `size` segments

compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
require.NoError(t, err)

co := CompactorObserver{fromBound: 8, done: make(chan struct{})}
Expand Down Expand Up @@ -316,7 +316,7 @@ func TestCompactorSkipCheckpointing(t *testing.T) {
// WAL segments are 32kB, so here we generate 2 keys 64kB each, times `size`
// so we should get at least `size` segments

compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
require.NoError(t, err)

co := CompactorObserver{fromBound: 8, done: make(chan struct{})}
Expand Down Expand Up @@ -442,7 +442,7 @@ func TestCompactorAccuracy(t *testing.T) {
l, err := NewLedger(wal, forestCapacity, metricsCollector, zerolog.Logger{}, DefaultPathFinderVersion)
require.NoError(t, err)

compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
require.NoError(t, err)

fromBound := lastCheckpointNum + (size / 2)
Expand Down Expand Up @@ -552,7 +552,7 @@ func TestCompactorTriggeredByAdminTool(t *testing.T) {
l, err := NewLedger(wal, forestCapacity, metricsCollector, unittest.LoggerWithName("ledger"), DefaultPathFinderVersion)
require.NoError(t, err)

compactor, err := NewCompactor(l, wal, unittest.LoggerWithName("compactor"), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(true))
compactor, err := NewCompactor(l, wal, unittest.LoggerWithName("compactor"), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(true), metrics.NewNoopCollector())
require.NoError(t, err)

fmt.Println("should stop as soon as segment 5 is generated, which should trigger checkpoint 5 to be created")
Expand Down Expand Up @@ -656,7 +656,7 @@ func TestCompactorConcurrency(t *testing.T) {
l, err := NewLedger(wal, forestCapacity, metricsCollector, zerolog.Logger{}, DefaultPathFinderVersion)
require.NoError(t, err)

compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
require.NoError(t, err)

fromBound := lastCheckpointNum + (size / 2 * numGoroutine)
Expand Down
10 changes: 5 additions & 5 deletions ledger/complete/ledger_benchmark_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ func benchmarkStorage(steps int, b *testing.B) {
led, err := complete.NewLedger(diskWal, steps+1, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
require.NoError(b, err)

compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), uint(steps+1), checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), uint(steps+1), checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
require.NoError(b, err)

<-compactor.Ready()
Expand Down Expand Up @@ -160,7 +160,7 @@ func BenchmarkTrieUpdate(b *testing.B) {
led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
require.NoError(b, err)

compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
require.NoError(b, err)

<-compactor.Ready()
Expand Down Expand Up @@ -212,7 +212,7 @@ func BenchmarkTrieRead(b *testing.B) {
led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
require.NoError(b, err)

compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
require.NoError(b, err)

<-compactor.Ready()
Expand Down Expand Up @@ -273,7 +273,7 @@ func BenchmarkLedgerGetOneValue(b *testing.B) {
led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
require.NoError(b, err)

compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
require.NoError(b, err)

<-compactor.Ready()
Expand Down Expand Up @@ -351,7 +351,7 @@ func BenchmarkTrieProve(b *testing.B) {
led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
require.NoError(b, err)

compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
require.NoError(b, err)

<-compactor.Ready()
Expand Down
8 changes: 4 additions & 4 deletions ledger/complete/ledger_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -514,7 +514,7 @@ func Test_WAL(t *testing.T) {
led, err := complete.NewLedger(diskWal, size, metricsCollector, logger, complete.DefaultPathFinderVersion)
require.NoError(t, err)

compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), size, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), size, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
require.NoError(t, err)

<-compactor.Ready()
Expand Down Expand Up @@ -551,7 +551,7 @@ func Test_WAL(t *testing.T) {
led2, err := complete.NewLedger(diskWal2, size+10, metricsCollector, logger, complete.DefaultPathFinderVersion)
require.NoError(t, err)

compactor2, err := complete.NewCompactor(led2, diskWal2, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor2, err := complete.NewCompactor(led2, diskWal2, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
require.NoError(t, err)

<-compactor2.Ready()
Expand Down Expand Up @@ -613,7 +613,7 @@ func TestLedgerFunctionality(t *testing.T) {
require.NoError(t, err)
led, err := complete.NewLedger(diskWal, activeTries, metricsCollector, logger, complete.DefaultPathFinderVersion)
assert.NoError(t, err)
compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), uint(activeTries), checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), uint(activeTries), checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
require.NoError(t, err)
<-compactor.Ready()

Expand Down Expand Up @@ -730,7 +730,7 @@ func TestWALUpdateFailuresBubbleUp(t *testing.T) {
led, err := complete.NewLedger(w, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
require.NoError(t, err)

compactor, err := complete.NewCompactor(led, w, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
compactor, err := complete.NewCompactor(led, w, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
require.NoError(t, err)

<-compactor.Ready()
Expand Down
2 changes: 1 addition & 1 deletion ledger/complete/mtrie/trie/trie.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ func (mt *MTrie) AllocatedRegCount() uint64 {
return mt.regCount
}

// AllocatedRegSize returns the size of allocated registers in the trie.
// AllocatedRegSize returns the size (number of bytes) of allocated registers in the trie.
// Concurrency safe (as Tries are immutable structures by convention)
func (mt *MTrie) AllocatedRegSize() uint64 {
return mt.regSize
Expand Down
28 changes: 28 additions & 0 deletions ledger/complete/wal/checkpoint_v6_reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,34 @@ func OpenAndReadCheckpointV6(dir string, fileName string, logger zerolog.Logger)
return triesToReturn, errToReturn
}

// ReadCheckpointFileSize returns the total size of the checkpoint file.
// A checkpoint is stored as multiple files on disk (header, sub-trie parts,
// and top-trie part); the sizes of all of them are summed.
// Returns an error if any of the constituent files cannot be stat'ed.
func ReadCheckpointFileSize(dir string, fileName string) (uint64, error) {
	var total uint64
	for _, p := range allFilePaths(dir, fileName) {
		info, err := os.Stat(p)
		if err != nil {
			return 0, fmt.Errorf("could not get file info for %v: %w", p, err)
		}
		// Size() is int64; checkpoint part files are never negative-sized,
		// so the uint64 conversion is safe.
		total += uint64(info.Size())
	}
	return total, nil
}

// allFilePaths returns the paths of every file that makes up the checkpoint
// with the given name: the header file, one file per sub-trie, and the
// top-trie file, in that order.
func allFilePaths(dir string, fileName string) []string {
	// capacity: header + subtrieCount sub-trie files + top-trie file
	all := make([]string, 0, subtrieCount+2)

	all = append(all, filePathCheckpointHeader(dir, fileName))

	for index := 0; index < subtrieCount; index++ {
		subTriePath, _, _ := filePathSubTries(dir, fileName, index)
		all = append(all, subTriePath)
	}

	topTriePath, _ := filePathTopTries(dir, fileName)
	return append(all, topTriePath)
}

// filePathCheckpointHeader returns the location of the checkpoint header
// file, which lives directly under dir with the checkpoint's file name.
// Uses path (slash-separated) rather than filepath, matching the other
// checkpoint path helpers in this file.
func filePathCheckpointHeader(dir string, fileName string) string {
	headerPath := path.Join(dir, fileName)
	return headerPath
}
Expand Down
3 changes: 3 additions & 0 deletions ledger/complete/wal/checkpoint_v6_writer.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
"path"
"path/filepath"

"github.com/docker/go-units"
"github.com/hashicorp/go-multierror"
"github.com/rs/zerolog"

Expand Down Expand Up @@ -79,8 +80,10 @@ func storeCheckpointV6(
lg.Info().
Str("first_hash", first.RootHash().String()).
Uint64("first_reg_count", first.AllocatedRegCount()).
Str("first_reg_size", units.BytesSize(float64(first.AllocatedRegSize()))).
Str("last_hash", last.RootHash().String()).
Uint64("last_reg_count", last.AllocatedRegCount()).
Str("last_reg_size", units.BytesSize(float64(last.AllocatedRegSize()))).
Msg("storing checkpoint")

// make sure a checkpoint file with same name doesn't exist
Expand Down
Loading

0 comments on commit a260653

Please sign in to comment.