From bbd8566f958d1fe1e2a432fcbf1328fd9f2b075f Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Wed, 5 Feb 2025 12:35:14 +0000 Subject: [PATCH] feat: vote keyed leader schedule --- Cargo.lock | 1 + ledger/Cargo.toml | 1 + ledger/src/leader_schedule.rs | 186 +++++++++++++++++++++++++--- ledger/src/leader_schedule_utils.rs | 59 +++++---- runtime/src/bank.rs | 30 +++++ sdk/feature-set/src/lib.rs | 5 + 6 files changed, 242 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e75d3225706aef..d33368207f8d66 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7980,6 +7980,7 @@ dependencies = [ "solana-feature-set", "solana-frozen-abi", "solana-frozen-abi-macro", + "solana-ledger", "solana-logger", "solana-measure", "solana-metrics", diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 836a81fc75b29d..0ff6a62deab0d4 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -94,6 +94,7 @@ features = ["lz4"] bs58 = { workspace = true } criterion = { workspace = true } solana-account-decoder = { workspace = true } +solana-ledger = { path = ".", features = ["dev-context-only-utils"] } solana-logger = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } spl-pod = { workspace = true } diff --git a/ledger/src/leader_schedule.rs b/ledger/src/leader_schedule.rs index 04f7817e7ce0c2..370440f29f7bc1 100644 --- a/ledger/src/leader_schedule.rs +++ b/ledger/src/leader_schedule.rs @@ -4,6 +4,7 @@ use { rand_chacha::{rand_core::SeedableRng, ChaChaRng}, solana_pubkey::Pubkey, solana_sdk::clock::Epoch, + solana_vote::vote_account::VoteAccountsHashMap, std::{collections::HashMap, convert::identity, ops::Index, sync::Arc}, }; @@ -14,14 +15,113 @@ pub struct FixedSchedule { } /// Stake-weighted leader schedule for one epoch. 
-#[derive(Debug, Default, PartialEq, Eq, Clone)] -pub struct LeaderSchedule { +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct LeaderSchedule(LeaderScheduleVariants); + +#[cfg(feature = "dev-context-only-utils")] +impl Default for LeaderSchedule { + fn default() -> Self { + Self( + LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule( + ValidatorIdentityKeyedLeaderSchedule { + slot_leaders: vec![], + index: HashMap::new(), + }, + ), + ) + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +struct VoteAccountKeyedLeaderSchedule { + slot_leader_vote_account_addresses: Vec, + // cached leader schedule keyed by validator identities created by mapping + // vote account addresses to the validator identity designated at the time + // of leader schedule generation. This is used to avoid the need to look up + // the validator identity address for each slot. + validator_identity_keyed_leader_schedule: ValidatorIdentityKeyedLeaderSchedule, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +struct ValidatorIdentityKeyedLeaderSchedule { slot_leaders: Vec, // Inverted index from pubkeys to indices where they are the leader. index: HashMap>>, } +#[derive(Debug, PartialEq, Eq, Clone)] +enum LeaderScheduleVariants { + // Latest leader schedule algorithm which designates a specific vote account + // to each slot so that the runtime can load vote state (e.g. commission and + // fee collector accounts) for a given slot + VoteAccountKeyedLeaderSchedule(VoteAccountKeyedLeaderSchedule), + // Old leader schedule algorithm which designates a specific validator + // identity to each slot. Since multiple vote accounts can be associated + // with a single validator identity, it's not possible to use this to load + // vote state for a given slot. + ValidatorIdentityKeyedLeaderSchedule(ValidatorIdentityKeyedLeaderSchedule), +} + impl LeaderSchedule { + // Note: passing in zero vote accounts will cause a panic. 
+ pub fn new_keyed_by_vote_account( + vote_accounts_map: &VoteAccountsHashMap, + epoch: Epoch, + len: u64, + repeat: u64, + ) -> Self { + let keyed_stakes: Vec<_> = vote_accounts_map + .iter() + .map(|(vote_pubkey, (stake, _account))| (vote_pubkey, *stake)) + .collect(); + let slot_leader_vote_account_addresses = + Self::stake_weighted_slot_leaders(keyed_stakes, epoch, len, repeat); + + let validator_identity_keyed_leader_schedule = { + struct SlotLeaderInfo<'a> { + vote_account_address: &'a Pubkey, + validator_identity_address: &'a Pubkey, + } + + let default_pubkey = Pubkey::default(); + let mut current_slot_leader_info = SlotLeaderInfo { + vote_account_address: &default_pubkey, + validator_identity_address: &default_pubkey, + }; + + let slot_leaders: Vec = slot_leader_vote_account_addresses + .iter() + .map(|vote_account_address| { + if vote_account_address != current_slot_leader_info.vote_account_address { + let validator_identity_address = vote_accounts_map + .get(vote_account_address) + .unwrap() + .1 + .node_pubkey(); + current_slot_leader_info = SlotLeaderInfo { + vote_account_address, + validator_identity_address, + }; + } + *current_slot_leader_info.validator_identity_address + }) + .collect(); + + let index = Self::index_from_slot_leaders(&slot_leaders); + ValidatorIdentityKeyedLeaderSchedule { + slot_leaders, + index, + } + }; + + Self(LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule( + VoteAccountKeyedLeaderSchedule { + slot_leader_vote_account_addresses, + validator_identity_keyed_leader_schedule, + }, + )) + } + // Note: passing in zero stakers will cause a panic. 
pub fn new_keyed_by_validator_identity( epoch_staked_nodes: &HashMap, @@ -37,6 +137,17 @@ impl LeaderSchedule { Self::new_from_schedule(slot_leaders) } + pub fn new_from_schedule(slot_leaders: Vec) -> Self { + Self( + LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule( + ValidatorIdentityKeyedLeaderSchedule { + index: Self::index_from_slot_leaders(&slot_leaders), + slot_leaders, + }, + ), + ) + } + // Note: passing in zero stakers will cause a panic. fn stake_weighted_slot_leaders( mut keyed_stakes: Vec<(&Pubkey, u64)>, @@ -61,13 +172,6 @@ impl LeaderSchedule { .collect() } - pub fn new_from_schedule(slot_leaders: Vec) -> Self { - Self { - index: Self::index_from_slot_leaders(&slot_leaders), - slot_leaders, - } - } - fn index_from_slot_leaders(slot_leaders: &[Pubkey]) -> HashMap>> { slot_leaders .iter() @@ -79,12 +183,54 @@ impl LeaderSchedule { .collect() } + pub fn is_keyed_by_vote_account(&self) -> bool { + matches!( + self.0, + LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(_) + ) + } + + /// Get the vote account address for the given epoch slot index. This is + /// guaranteed to be Some if the leader schedule is keyed by vote account + /// and the slot index is within the range of the leader schedule. 
+ pub fn get_vote_account_address_for_slot_index( + &self, + epoch_slot_index: usize, + ) -> Option<&Pubkey> { + match &self.0 { + LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(schedule) => schedule + .slot_leader_vote_account_addresses + .get(epoch_slot_index), + LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule(_) => None, + } + } + pub fn get_slot_leaders(&self) -> &[Pubkey] { - &self.slot_leaders + match self.0 { + LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(ref schedule) => { + &schedule + .validator_identity_keyed_leader_schedule + .slot_leaders + } + LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule(ref schedule) => { + &schedule.slot_leaders + } + } } pub fn num_slots(&self) -> usize { - self.slot_leaders.len() + self.get_slot_leaders().len() + } + + fn index(&self) -> &HashMap>> { + match &self.0 { + LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(schedule) => { + &schedule.validator_identity_keyed_leader_schedule.index + } + LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule(schedule) => { + &schedule.index + } + } } /// 'offset' is an index into the leader schedule. The function returns an @@ -94,8 +240,8 @@ impl LeaderSchedule { pubkey: &Pubkey, offset: usize, // Starting index. 
) -> impl Iterator { - let index = self.index.get(pubkey).cloned().unwrap_or_default(); - let num_slots = self.slot_leaders.len(); + let index = self.index().get(pubkey).cloned().unwrap_or_default(); + let num_slots = self.num_slots(); let size = index.len(); #[allow(clippy::reversed_empty_ranges)] let range = if index.is_empty() { @@ -119,7 +265,7 @@ impl Index for LeaderSchedule { type Output = Pubkey; fn index(&self, index: u64) -> &Pubkey { let index = index as usize; - &self.slot_leaders[index % self.slot_leaders.len()] + &self.get_slot_leaders()[index % self.num_slots()] } } @@ -185,7 +331,7 @@ mod tests { LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, repeat); assert_eq!(leader_schedule.num_slots() as u64, len); let mut leader_node = Pubkey::default(); - for (i, node) in leader_schedule.slot_leaders.iter().enumerate() { + for (i, node) in leader_schedule.get_slot_leaders().iter().enumerate() { if i % repeat as usize == 0 { leader_node = *node; } else { @@ -203,12 +349,14 @@ mod tests { let epoch = 0; let len = 8; // What the schedule looks like without any repeats - let leaders1 = - LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, 1).slot_leaders; + let leaders1 = LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, 1) + .get_slot_leaders() + .to_vec(); // What the schedule looks like with repeats - let leaders2 = - LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, 2).slot_leaders; + let leaders2 = LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, 2) + .get_slot_leaders() + .to_vec(); assert_eq!(leaders1.len(), leaders2.len()); let leaders1_expected = vec![ diff --git a/ledger/src/leader_schedule_utils.rs b/ledger/src/leader_schedule_utils.rs index 2b4212a49156db..f52b9aceaedb67 100644 --- a/ledger/src/leader_schedule_utils.rs +++ b/ledger/src/leader_schedule_utils.rs @@ -10,14 +10,26 @@ use { /// Return the leader schedule for the given epoch. 
pub fn leader_schedule(epoch: Epoch, bank: &Bank) -> Option { - bank.epoch_staked_nodes(epoch).map(|stakes| { - LeaderSchedule::new_keyed_by_validator_identity( - &stakes, - epoch, - bank.get_slots_in_epoch(epoch), - NUM_CONSECUTIVE_LEADER_SLOTS, - ) - }) + let use_new_leader_schedule = bank.should_use_vote_address_leader_schedule(epoch); + if use_new_leader_schedule { + bank.epoch_vote_accounts(epoch).map(|vote_accounts_map| { + LeaderSchedule::new_keyed_by_vote_account( + vote_accounts_map, + epoch, + bank.get_slots_in_epoch(epoch), + NUM_CONSECUTIVE_LEADER_SLOTS, + ) + }) + } else { + bank.epoch_staked_nodes(epoch).map(|stakes| { + LeaderSchedule::new_keyed_by_validator_identity( + &stakes, + epoch, + bank.get_slots_in_epoch(epoch), + NUM_CONSECUTIVE_LEADER_SLOTS, + ) + }) + } } /// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot @@ -64,27 +76,32 @@ mod tests { super::*, solana_runtime::genesis_utils::{ bootstrap_validator_stake_lamports, create_genesis_config_with_leader, + deactivate_features, }, + test_case::test_case, }; - #[test] - fn test_leader_schedule_via_bank() { + #[test_case(true; "vote keyed leader schedule")] + #[test_case(false; "identity keyed leader schedule")] + fn test_leader_schedule_via_bank(use_vote_keyed_leader_schedule: bool) { let pubkey = solana_pubkey::new_rand(); - let genesis_config = + let mut genesis_config = create_genesis_config_with_leader(0, &pubkey, bootstrap_validator_stake_lamports()) .genesis_config; + + if !use_vote_keyed_leader_schedule { + deactivate_features( + &mut genesis_config, + &vec![solana_feature_set::enable_vote_address_leader_schedule::id()], + ); + } + let bank = Bank::new_for_tests(&genesis_config); + let leader_schedule = leader_schedule(0, &bank).unwrap(); - let pubkeys_and_stakes: HashMap<_, _> = bank - .current_epoch_staked_nodes() - .iter() - .map(|(pubkey, stake)| (*pubkey, *stake)) - .collect(); - let leader_schedule = 
LeaderSchedule::new_keyed_by_validator_identity( &pubkeys_and_stakes, 0, genesis_config.epoch_schedule.slots_per_epoch, NUM_CONSECUTIVE_LEADER_SLOTS, + assert_eq!( leader_schedule.is_keyed_by_vote_account(), use_vote_keyed_leader_schedule ); assert_eq!(leader_schedule[0], pubkey); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index b8e70f5a9bfb94..8df1251b71ee78 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -6067,6 +6067,36 @@ impl Bank { self.epoch_schedule().get_leader_schedule_epoch(slot) } + /// Returns whether the specified epoch should use the new vote account + /// keyed leader schedule + pub fn should_use_vote_address_leader_schedule(&self, epoch: Epoch) -> bool { + const NEW_LEADER_SCHEDULE_EPOCH_DELAY: u64 = 1; + + // Cannot determine if an epoch should use the new leader schedule if + // the epoch is too far in the future because we won't know if the feature + // will have been activated or not. + assert!(self.epoch.saturating_add(NEW_LEADER_SCHEDULE_EPOCH_DELAY) >= epoch); + + self.feature_set + .activated_slot(&solana_feature_set::enable_vote_address_leader_schedule::id()) + .map(|activation_slot| { + let activation_epoch = self.epoch_schedule().get_epoch(activation_slot); + let effective_epoch = if activation_epoch == 0 { + // If the feature was activated at genesis, then the new + // leader schedule should be used immediately + activation_epoch + } else { + // Otherwise, the new leader schedule should be used + // starting in the next epoch after activation since the + // leader schedule for the current epoch would have already + // been calculated in the previous epoch + activation_epoch.wrapping_add(NEW_LEADER_SCHEDULE_EPOCH_DELAY) + }; + epoch >= effective_epoch + }) + .unwrap_or_default() + } + /// a bank-level cache of vote accounts and stake delegation info fn update_stakes_cache( &self, diff --git a/sdk/feature-set/src/lib.rs b/sdk/feature-set/src/lib.rs index bf064e709f29fc..1892a6acbaca1f 100644 ---
a/sdk/feature-set/src/lib.rs +++ b/sdk/feature-set/src/lib.rs @@ -924,6 +924,10 @@ pub mod reserve_minimal_cus_for_builtin_instructions { solana_pubkey::declare_id!("C9oAhLxDBm3ssWtJx1yBGzPY55r2rArHmN1pbQn6HogH"); } +pub mod enable_vote_address_leader_schedule { + solana_pubkey::declare_id!("5JsG4NWH8Jbrqdd8uL6BNwnyZK3dQSoieRXG5vmofj9y"); +} + pub mod raise_block_limits_to_50m { solana_pubkey::declare_id!("5oMCU3JPaFLr8Zr4ct7yFA7jdk6Mw1RmB8K4u9ZbS42z"); } @@ -1158,6 +1162,7 @@ lazy_static! { (deplete_cu_meter_on_vm_failure::id(), "Deplete compute meter for vm errors SIMD-0182 #3993"), (reserve_minimal_cus_for_builtin_instructions::id(), "Reserve minimal CUs for builtin instructions SIMD-170 #2562"), (raise_block_limits_to_50m::id(), "Raise block limit to 50M SIMD-0207"), + (enable_vote_address_leader_schedule::id(), "Enable vote address leader schedule SIMD-0180 #4573"), (fix_alt_bn128_multiplication_input_length::id(), "fix alt_bn128 multiplication input length SIMD-0222 #3686"), (drop_unchained_merkle_shreds::id(), "drops unchained Merkle shreds #2149"), /*************** ADD NEW FEATURES HERE ***************/