
Commit baf53a4

reduces number of iterations when recovering Merkle shreds (#4474)

Changing the iterator type returned from shred::merkle::recover to Iterator<Item = Result<Shred, Error>> instead of Iterator<Item = Shred> will save us one traversal over the vector of shreds.
1 parent 3904356 commit baf53a4
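
For context, a minimal standalone sketch of the pattern this commit adopts (Shard, Error, and the fix-up step are illustrative stand-ins, not the crate's API): rather than fixing up every shred in an eager loop and then handing the caller an iterator that walks the vector a second time, the fix-up is folded into the returned iterator, so a single traversal does all the work and per-item failures surface as Err items.

    // Hypothetical stand-ins for Shred and shred::Error, for illustration only.
    struct Shard(u32);
    struct Error;

    // Before: one eager pass over the vector, then the caller iterates again.
    fn recover_eager(mut items: Vec<Shard>) -> Result<impl Iterator<Item = Shard>, Error> {
        for item in items.iter_mut() {
            item.0 += 1; // stand-in for setting the Merkle proof
        }
        Ok(items.into_iter())
    }

    // After: the fix-up runs lazily inside the returned iterator, so the
    // caller's single traversal does all the work.
    fn recover_lazy(items: Vec<Shard>) -> Result<impl Iterator<Item = Result<Shard, Error>>, Error> {
        Ok(items.into_iter().map(|mut item| {
            item.0 += 1; // stand-in for setting the Merkle proof
            Ok(item)
        }))
    }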

File tree

ledger/src/shred.rs
ledger/src/shred/merkle.rs

2 files changed: 23 additions & 13 deletions

ledger/src/shred.rs

Lines changed: 4 additions & 4 deletions
@@ -1140,15 +1140,15 @@ pub(crate) fn recover(
             // The same signature also verifies for recovered shreds because
             // when reconstructing the Merkle tree for the erasure batch, we
             // will obtain the same Merkle root.
-            Ok(merkle::recover(shreds, reed_solomon_cache)?
+            merkle::recover(shreds, reed_solomon_cache)?
                 .map(|shred| {
-                    let shred = Shred::from(shred);
+                    let shred = Shred::from(shred?);
                     debug_assert!(get_slot_leader(shred.slot())
                         .map(|pubkey| shred.verify(&pubkey))
                         .unwrap_or_default());
-                    shred
+                    Ok(shred)
                 })
-                .collect())
+                .collect()
         }
     }
 }
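
Note the outer Ok(...) disappears: collect() on an iterator of Results can target Result<Vec<_>, _> directly, short-circuiting at the first Err. A minimal standard-library sketch of that behavior (the string parsing is just a placeholder for per-shred recovery):

    fn main() {
        // All items Ok: collect produces Ok(Vec<_>).
        let ok: Result<Vec<i32>, String> = ["1", "2", "3"]
            .iter()
            .map(|s| s.parse::<i32>().map_err(|e| e.to_string()))
            .collect();
        assert_eq!(ok, Ok(vec![1, 2, 3]));

        // Any Err: collect stops and returns the first error.
        let err: Result<Vec<i32>, String> = ["1", "x", "3"]
            .iter()
            .map(|s| s.parse::<i32>().map_err(|e| e.to_string()))
            .collect();
        assert!(err.is_err());
    }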

ledger/src/shred/merkle.rs

Lines changed: 19 additions & 9 deletions
@@ -734,7 +734,7 @@ fn make_merkle_proof(
 pub(super) fn recover(
     mut shreds: Vec<Shred>,
     reed_solomon_cache: &ReedSolomonCache,
-) -> Result<impl Iterator<Item = Shred>, Error> {
+) -> Result<impl Iterator<Item = Result<Shred, Error>>, Error> {
     // Sort shreds by their erasure shard index.
     // In particular this places all data shreds before coding shreds.
     let is_sorted = |(a, b)| cmp_shred_erasure_shard_index(a, b).is_le();
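
The new signature nests two Results: the outer one reports failures detected before any shred is yielded (e.g., an invalid Merkle root for the whole batch), while the inner one reports per-shred failures lazily, during iteration. A hedged sketch of how a caller handles the two levels, using a hypothetical recover_like function in place of the real recover:

    fn recover_like(fail_batch: bool) -> Result<impl Iterator<Item = Result<u32, String>>, String> {
        if fail_batch {
            // Batch-level failure: surfaced before any item is produced.
            return Err("invalid merkle root".to_string());
        }
        // Per-item results are produced lazily as the caller iterates.
        Ok((0..3u32).map(|n| if n == 2 { Err("bad shred".to_string()) } else { Ok(n) }))
    }

    fn main() -> Result<(), String> {
        for item in recover_like(false)? {
            // `?` above handled the batch-level Result; this match
            // handles each per-item Result.
            match item {
                Ok(n) => println!("recovered {n}"),
                Err(e) => println!("skipping: {e}"),
            }
        }
        Ok(())
    }
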
@@ -904,28 +904,31 @@
     if tree.last() != Some(&merkle_root) {
         return Err(Error::InvalidMerkleRoot);
     }
-    for (index, (shred, mask)) in shreds.iter_mut().zip(&mask).enumerate() {
+    let set_merkle_proof = move |(index, (mut shred, mask)): (_, (Shred, _))| {
         let proof = make_merkle_proof(index, num_shards, &tree);
-        if *mask {
+        if mask {
             if shred.merkle_proof()?.map(Some).ne(proof.map(Result::ok)) {
                 return Err(Error::InvalidMerkleProof);
             }
+            Ok(None)
         } else {
             shred.set_merkle_proof(proof)?;
             // Already sanitized after reconstruct.
             debug_assert_matches!(shred.sanitize(), Ok(()));
             // Assert that shred payload is fully populated.
             debug_assert_eq!(shred, {
                 let shred = shred.payload().clone();
-                &Shred::from_payload(shred).unwrap()
+                Shred::from_payload(shred).unwrap()
             });
+            Ok(Some(shred))
         }
-    }
+    };
     Ok(shreds
         .into_iter()
         .zip(mask)
-        .filter(|(_, mask)| !mask)
-        .map(|(shred, _)| shred))
+        .enumerate()
+        .map(set_merkle_proof)
+        .filter_map(Result::transpose))
 }
 
 // Compares shreds of the same erasure batch by their erasure shard index
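
The closure now returns Result<Option<Shred>, Error>: Ok(None) for shreds that were already present and only had their proof verified, Ok(Some(shred)) for newly recovered shreds, and Err(..) on a bad proof. Result::transpose flips each Result<Option<Shred>, Error> into Option<Result<Shred, Error>>, which is exactly the shape filter_map wants: Ok(None) items are dropped, everything else passes through. A small self-contained sketch of that mapping (u32 and String stand in for Shred and Error):

    fn main() {
        let items: Vec<Result<Option<u32>, String>> = vec![
            Ok(Some(1)),                  // recovered: kept as Ok(1)
            Ok(None),                     // already present: filtered out
            Err("bad proof".to_string()), // failure: kept as Err(..)
        ];
        let out: Vec<Result<u32, String>> =
            items.into_iter().filter_map(Result::transpose).collect();
        assert_eq!(out, vec![Ok(1), Err("bad proof".to_string())]);
    }
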
@@ -1698,7 +1701,10 @@ mod test {
             );
             continue;
         }
-        let recovered_shreds: Vec<_> = recover(shreds, reed_solomon_cache).unwrap().collect();
+        let recovered_shreds: Vec<_> = recover(shreds, reed_solomon_cache)
+            .unwrap()
+            .map(Result::unwrap)
+            .collect();
         assert_eq!(size + recovered_shreds.len(), num_shreds);
         assert_eq!(recovered_shreds.len(), removed_shreds.len());
         removed_shreds.sort_by(|a, b| {
@@ -2018,7 +2024,11 @@ mod test {
             })
             .group_by(|shred| shred.common_header().fec_set_index)
             .into_iter()
-            .flat_map(|(_, shreds)| recover(shreds.collect(), reed_solomon_cache).unwrap())
+            .flat_map(|(_, shreds)| {
+                recover(shreds.collect(), reed_solomon_cache)
+                    .unwrap()
+                    .map(Result::unwrap)
+            })
             .collect();
         assert_eq!(recovered_data_shreds.len(), data_shreds.len());
         for (shred, other) in recovered_data_shreds.into_iter().zip(data_shreds) {
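
Since each yielded element is now a Result, the tests unwrap items as they go. Outside the crate, the same adaptation looks like this trivial sketch (an equivalent alternative would be collecting into Result<Vec<_>, _> and unwrapping once):

    fn main() {
        let recovered: Vec<Result<u32, String>> = vec![Ok(1), Ok(2)];
        // Before the change a plain .collect() sufficed; now each item is
        // unwrapped first, panicking in the test if recovery ever fails.
        let values: Vec<u32> = recovered.into_iter().map(Result::unwrap).collect();
        assert_eq!(values, vec![1, 2]);
    }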
