@@ -30,6 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/rlp"
 	bloomfilter "github.com/holiman/bloomfilter/v2"
+	"golang.org/x/exp/maps"
 )
 
 var (
@@ -73,23 +74,14 @@ var (
 	// bloom key for an account/slot. This is randomized at init(), so that the
 	// global population of nodes do not all display the exact same behaviour with
 	// regards to bloom content
-	bloomDestructHasherOffset = 0
-	bloomAccountHasherOffset  = 0
-	bloomStorageHasherOffset  = 0
+	bloomAccountHasherOffset = 0
+	bloomStorageHasherOffset = 0
 )
 
 func init() {
 	// Init the bloom offsets in the range [0:24] (requires 8 bytes)
-	bloomDestructHasherOffset = rand.Intn(25)
 	bloomAccountHasherOffset = rand.Intn(25)
 	bloomStorageHasherOffset = rand.Intn(25)
-
-	// The destruct and account blooms must be different, as the storage slots
-	// will check for destruction too for every bloom miss. It should not collide
-	// with modified accounts.
-	for bloomAccountHasherOffset == bloomDestructHasherOffset {
-		bloomAccountHasherOffset = rand.Intn(25)
-	}
 }
 
 // diffLayer represents a collection of modifications made to a state snapshot
@@ -106,29 +98,16 @@ type diffLayer struct {
 	root  common.Hash // Root hash to which this snapshot diff belongs to
 	stale atomic.Bool // Signals that the layer became stale (state progressed)
 
-	// destructSet is a very special helper marker. If an account is marked as
-	// deleted, then it's recorded in this set. However it's allowed that an account
-	// is included here but still available in other sets(e.g. storageData). The
-	// reason is the diff layer includes all the changes in a *block*. It can
-	// happen that in the tx_1, account A is self-destructed while in the tx_2
-	// it's recreated. But we still need this marker to indicate the "old" A is
-	// deleted, all data in other set belongs to the "new" A.
-	destructSet map[common.Hash]struct{}               // Keyed markers for deleted (and potentially) recreated accounts
-	accountList []common.Hash                          // List of account for iteration. If it exists, it's sorted, otherwise it's nil
 	accountData map[common.Hash][]byte                 // Keyed accounts for direct retrieval (nil means deleted)
-	storageList map[common.Hash][]common.Hash          // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
 	storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)
+	accountList []common.Hash                          // List of account for iteration. If it exists, it's sorted, otherwise it's nil
+	storageList map[common.Hash][]common.Hash          // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
 
 	diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer
 
 	lock sync.RWMutex
 }
 
-// destructBloomHash is used to convert a destruct event into a 64 bit mini hash.
-func destructBloomHash(h common.Hash) uint64 {
-	return binary.BigEndian.Uint64(h[bloomDestructHasherOffset : bloomDestructHasherOffset+8])
-}
-
 // accountBloomHash is used to convert an account hash into a 64 bit mini hash.
 func accountBloomHash(h common.Hash) uint64 {
 	return binary.BigEndian.Uint64(h[bloomAccountHasherOffset : bloomAccountHasherOffset+8])
@@ -142,12 +121,11 @@ func storageBloomHash(h0, h1 common.Hash) uint64 {
 
 // newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
 // level persistent database or a hierarchical diff already.
-func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+func newDiffLayer(parent snapshot, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
 	// Create the new layer with some pre-allocated data segments
 	dl := &diffLayer{
 		parent:      parent,
 		root:        root,
-		destructSet: destructs,
 		accountData: accounts,
 		storageData: storage,
 		storageList: make(map[common.Hash][]common.Hash),
@@ -161,10 +139,7 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
 		panic("unknown parent type")
 	}
 	// Sanity check that accounts or storage slots are never nil
-	for accountHash, blob := range accounts {
-		if blob == nil {
-			panic(fmt.Sprintf("account %#x nil", accountHash))
-		}
+	for _, blob := range accounts {
 		// Determine memory size and track the dirty writes
 		dl.memory += uint64(common.HashLength + len(blob))
 		snapshotDirtyAccountWriteMeter.Mark(int64(len(blob)))
@@ -179,7 +154,6 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
 			snapshotDirtyStorageWriteMeter.Mark(int64(len(data)))
 		}
 	}
-	dl.memory += uint64(len(destructs) * common.HashLength)
 	return dl
 }
 
@@ -204,10 +178,6 @@ func (dl *diffLayer) rebloom(origin *diskLayer) {
 	} else {
 		dl.diffed, _ = bloomfilter.New(uint64(bloomSize), uint64(bloomFuncs))
 	}
-	// Iterate over all the accounts and storage slots and index them
-	for hash := range dl.destructSet {
-		dl.diffed.AddHash(destructBloomHash(hash))
-	}
 	for hash := range dl.accountData {
 		dl.diffed.AddHash(accountBloomHash(hash))
 	}
@@ -274,11 +244,8 @@ func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
 	}
 	// Check the bloom filter first whether there's even a point in reaching into
 	// all the maps in all the layers below
-	hit := dl.diffed.ContainsHash(accountBloomHash(hash))
-	if !hit {
-		hit = dl.diffed.ContainsHash(destructBloomHash(hash))
-	}
 	var origin *diskLayer
+	hit := dl.diffed.ContainsHash(accountBloomHash(hash))
 	if !hit {
 		origin = dl.origin // extract origin while holding the lock
 	}
@@ -310,18 +277,14 @@ func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
 	if data, ok := dl.accountData[hash]; ok {
 		snapshotDirtyAccountHitMeter.Mark(1)
 		snapshotDirtyAccountHitDepthHist.Update(int64(depth))
-		snapshotDirtyAccountReadMeter.Mark(int64(len(data)))
+		if n := len(data); n > 0 {
+			snapshotDirtyAccountReadMeter.Mark(int64(n))
+		} else {
+			snapshotDirtyAccountInexMeter.Mark(1)
+		}
 		snapshotBloomAccountTrueHitMeter.Mark(1)
 		return data, nil
 	}
-	// If the account is known locally, but deleted, return it
-	if _, ok := dl.destructSet[hash]; ok {
-		snapshotDirtyAccountHitMeter.Mark(1)
-		snapshotDirtyAccountHitDepthHist.Update(int64(depth))
-		snapshotDirtyAccountInexMeter.Mark(1)
-		snapshotBloomAccountTrueHitMeter.Mark(1)
-		return nil, nil
-	}
 	// Account unknown to this diff, resolve from parent
 	if diff, ok := dl.parent.(*diffLayer); ok {
 		return diff.accountRLP(hash, depth+1)
@@ -345,11 +308,8 @@ func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
 		dl.lock.RUnlock()
 		return nil, ErrSnapshotStale
 	}
-	hit := dl.diffed.ContainsHash(storageBloomHash(accountHash, storageHash))
-	if !hit {
-		hit = dl.diffed.ContainsHash(destructBloomHash(accountHash))
-	}
 	var origin *diskLayer
+	hit := dl.diffed.ContainsHash(storageBloomHash(accountHash, storageHash))
 	if !hit {
 		origin = dl.origin // extract origin while holding the lock
 	}
@@ -391,14 +351,6 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
 			return data, nil
 		}
 	}
-	// If the account is known locally, but deleted, return an empty slot
-	if _, ok := dl.destructSet[accountHash]; ok {
-		snapshotDirtyStorageHitMeter.Mark(1)
-		snapshotDirtyStorageHitDepthHist.Update(int64(depth))
-		snapshotDirtyStorageInexMeter.Mark(1)
-		snapshotBloomStorageTrueHitMeter.Mark(1)
-		return nil, nil
-	}
 	// Storage slot unknown to this diff, resolve from parent
 	if diff, ok := dl.parent.(*diffLayer); ok {
 		return diff.storage(accountHash, storageHash, depth+1)
@@ -410,8 +362,8 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
 
 // Update creates a new layer on top of the existing snapshot diff tree with
 // the specified data items.
-func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
-	return newDiffLayer(dl, blockRoot, destructs, accounts, storage)
+func (dl *diffLayer) Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+	return newDiffLayer(dl, blockRoot, accounts, storage)
 }
 
 // flatten pushes all data from this point downwards, flattening everything into
@@ -436,12 +388,6 @@ func (dl *diffLayer) flatten() snapshot {
 	if parent.stale.Swap(true) {
 		panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo
 	}
-	// Overwrite all the updated accounts blindly, merge the sorted list
-	for hash := range dl.destructSet {
-		parent.destructSet[hash] = struct{}{}
-		delete(parent.accountData, hash)
-		delete(parent.storageData, hash)
-	}
 	for hash, data := range dl.accountData {
 		parent.accountData[hash] = data
 	}
@@ -453,17 +399,13 @@ func (dl *diffLayer) flatten() snapshot {
 			continue
 		}
 		// Storage exists in both parent and child, merge the slots
-		comboData := parent.storageData[accountHash]
-		for storageHash, data := range storage {
-			comboData[storageHash] = data
-		}
+		maps.Copy(parent.storageData[accountHash], storage)
 	}
 	// Return the combo parent
 	return &diffLayer{
 		parent:      parent.parent,
		origin:      parent.origin,
 		root:        dl.root,
-		destructSet: parent.destructSet,
 		accountData: parent.accountData,
 		storageData: parent.storageData,
 		storageList: make(map[common.Hash][]common.Hash),
@@ -489,15 +431,7 @@ func (dl *diffLayer) AccountList() []common.Hash {
 	dl.lock.Lock()
 	defer dl.lock.Unlock()
 
-	dl.accountList = make([]common.Hash, 0, len(dl.destructSet)+len(dl.accountData))
-	for hash := range dl.accountData {
-		dl.accountList = append(dl.accountList, hash)
-	}
-	for hash := range dl.destructSet {
-		if _, ok := dl.accountData[hash]; !ok {
-			dl.accountList = append(dl.accountList, hash)
-		}
-	}
+	dl.accountList = maps.Keys(dl.accountData)
 	slices.SortFunc(dl.accountList, common.Hash.Cmp)
 	dl.memory += uint64(len(dl.accountList) * common.HashLength)
 	return dl.accountList
@@ -512,32 +446,27 @@ func (dl *diffLayer) AccountList() []common.Hash {
 // not empty but the flag is true.
 //
 // Note, the returned slice is not a copy, so do not modify it.
-func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool) {
+func (dl *diffLayer) StorageList(accountHash common.Hash) []common.Hash {
 	dl.lock.RLock()
-	_, destructed := dl.destructSet[accountHash]
 	if _, ok := dl.storageData[accountHash]; !ok {
 		// Account not tracked by this layer
 		dl.lock.RUnlock()
-		return nil, destructed
+		return nil
 	}
 	// If an old list already exists, return it
 	if list, exist := dl.storageList[accountHash]; exist {
 		dl.lock.RUnlock()
-		return list, destructed // the cached list can't be nil
+		return list // the cached list can't be nil
 	}
 	dl.lock.RUnlock()
 
 	// No old sorted account list exists, generate a new one
 	dl.lock.Lock()
 	defer dl.lock.Unlock()
 
-	storageMap := dl.storageData[accountHash]
-	storageList := make([]common.Hash, 0, len(storageMap))
-	for k := range storageMap {
-		storageList = append(storageList, k)
-	}
+	storageList := maps.Keys(dl.storageData[accountHash])
 	slices.SortFunc(storageList, common.Hash.Cmp)
 	dl.storageList[accountHash] = storageList
 	dl.memory += uint64(len(dl.storageList)*common.HashLength + common.HashLength)
-	return storageList, destructed
+	return storageList
 }
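
Side note, not part of the patch: with destructSet gone, an account self-destruct is expressed purely through the existing "nil means deleted" convention on accountData/storageData. The sketch below is a hypothetical caller of the new Update signature; the hashes and blobs are made up, and it assumes code living in the same snapshot package.

```go
// Hypothetical sketch only: demonstrates the new Update signature without the
// destructs argument. Deletion is signalled by a nil blob / nil slot value.
func exampleUpdate(parent *diffLayer, blockRoot common.Hash) *diffLayer {
	deletedAccount := common.HexToHash("0x01") // made-up hash of an account destroyed in this block
	updatedAccount := common.HexToHash("0x02") // made-up hash of an account modified in this block

	accounts := map[common.Hash][]byte{
		deletedAccount: nil,          // nil blob: account deleted
		updatedAccount: []byte{0xc0}, // non-nil RLP blob: account created/updated
	}
	storage := map[common.Hash]map[common.Hash][]byte{
		updatedAccount: {
			common.HexToHash("0x03"): []byte{0x01}, // a nil value here would mark the slot as deleted
		},
	}
	return parent.Update(blockRoot, accounts, storage)
}
```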