package run

import (
	"testing"

	"github.com/onflow/cadence"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"pgregory.net/rapid"

	"github.com/onflow/flow-go/cmd/bootstrap/utils"
	"github.com/onflow/flow-go/cmd/util/cmd/common"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/model/flow/filter"
	"github.com/onflow/flow-go/utils/unittest"
)

// TestGenerateRecoverTxArgsWithDKG_ExcludeIncludeParticipants tests that GenerateRecoverTxArgsWithDKG produces the expected
// arguments for the recover epoch transaction when participants are excluded from, or included in, the recovery epoch.
// The test uses property-based testing (rapid) to generate random combinations of participants to exclude and include,
// and checks that the generated arguments match the expected output.
// It assumes that included nodes are not part of the protocol state and that excluded nodes are part of the protocol state.
// It also verifies that the DKG index map contains all consensus nodes, regardless of the exclusion and inclusion filters.
func TestGenerateRecoverTxArgsWithDKG_ExcludeIncludeParticipants(testifyT *testing.T) {
	utils.RunWithSporkBootstrapDir(testifyT, func(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) {
		log := unittest.Logger()
		internalNodes, err := common.ReadFullInternalNodeInfos(log, internalPrivDir, configPath)
		require.NoError(testifyT, err)
		partnerNodes, err := common.ReadFullPartnerNodeInfos(log, partnerWeights, partnerDir)
		require.NoError(testifyT, err)

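		// combine internal and partner nodes into a single identity list, used to build the root snapshot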
		allNodeIds := make(flow.IdentityList, 0)
		for _, node := range append(internalNodes, partnerNodes...) {
			allNodeIds = append(allNodeIds, node.Identity())
		}

		rootSnapshot := unittest.RootSnapshotFixture(allNodeIds)
		allIdentities, err := rootSnapshot.Identities(filter.Any)
		require.NoError(testifyT, err)
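
		// property-based check: on each iteration, rapid draws random counts of nodes to include and
		// exclude, and we verify the generated transaction arguments for that combination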
		rapid.Check(testifyT, func(t *rapid.T) {
			numberOfNodesToInclude := rapid.IntRange(0, 3).Draw(t, "nodes-to-include")
			numberOfNodesToExclude := rapid.UintRange(0, 3).Draw(t, "nodes-to-exclude")

			// we deliberately omit collection nodes from the exclusion list, since there is a dedicated
			// check that a valid cluster of collection nodes must remain after exclusion
			excludedNodes, err := allIdentities.Filter(
				filter.Not(filter.HasRole[flow.Identity](flow.RoleCollection))).Sample(numberOfNodesToExclude)
			require.NoError(t, err)
			excludeNodeIds := excludedNodes.NodeIDs()
			// an eligible participant is a current-epoch participant with a weight greater than zero
			// that has not been explicitly excluded
			eligibleEpochIdentities := allIdentities.Filter(filter.And(
				filter.IsValidCurrentEpochParticipant,
				filter.HasWeightGreaterThanZero[flow.Identity],
				filter.Not(filter.HasNodeID[flow.Identity](excludeNodeIds...))))

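			// the expected participant set is the union of the eligible current-epoch identities and the
			// explicitly included node IDs, which are random identifiers not present in the protocol state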
			expectedNodeIds := make(map[cadence.String]struct{})
			includeNodeIds := unittest.IdentifierListFixture(numberOfNodesToInclude)
			for _, nodeID := range eligibleEpochIdentities.NodeIDs().Union(includeNodeIds) {
				expectedNodeIds[cadence.String(nodeID.String())] = struct{}{}
			}

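			// the DKG index map must contain every consensus node from the current epoch commit, regardless
			// of the exclude/include filters, so we expect it to match the commit's index map verbatim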
			epochProtocolState, err := rootSnapshot.EpochProtocolState()
			require.NoError(t, err)
			currentEpochCommit := epochProtocolState.EpochCommit()
			expectedDKGIndexMap := make(map[cadence.String]cadence.Int)
			for nodeID, index := range currentEpochCommit.DKGIndexMap {
				expectedDKGIndexMap[cadence.String(nodeID.String())] = cadence.NewInt(index)
			}

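			// generate the recover epoch transaction arguments using the current epoch's DKG information
			// and the drawn exclusion and inclusion lists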
			args, err := GenerateRecoverTxArgsWithDKG(
				log,
				internalNodes,
				2, // number of collection clusters
				currentEpochCommit.Counter+1,
				flow.Localnet,
				100,   // staking auction length, in views
				4000,  // recovery epoch length, in views
				60*60, // recovery epoch duration, in seconds
				false, // unsafe overwrite
				currentEpochCommit.DKGIndexMap,
				currentEpochCommit.DKGParticipantKeys,
				currentEpochCommit.DKGGroupKey,
				excludeNodeIds,
				includeNodeIds,
				rootSnapshot,
			)
			require.NoError(t, err)

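			// the argument positions asserted below (10: DKG index map, 11: node IDs) follow the
			// argument ordering produced by GenerateRecoverTxArgsWithDKG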
			// dkg index map
			dkgIndexMapArgPairs := args[10].(cadence.Dictionary).Pairs
			assert.Equal(t, len(expectedDKGIndexMap), len(dkgIndexMapArgPairs))
			for _, pair := range dkgIndexMapArgPairs {
				expectedIndex, ok := expectedDKGIndexMap[pair.Key.(cadence.String)]
				require.True(t, ok)
				require.Equal(t, expectedIndex, pair.Value.(cadence.Int))
			}
			// node ids
			nodeIDsArgValues := args[11].(cadence.Array).Values
			assert.Equal(t, len(expectedNodeIds), len(nodeIDsArgValues))
			for _, nodeID := range nodeIDsArgValues {
				_, ok := expectedNodeIds[nodeID.(cadence.String)]
				require.True(t, ok)
			}
		})
	})
}