Commit

feat: vote keyed leader schedule

jstarry committed Feb 6, 2025
1 parent 2d4d7c1 commit bbd8566
Showing 6 changed files with 242 additions and 40 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

(Generated file; diff not rendered.)

1 change: 1 addition & 0 deletions ledger/Cargo.toml
@@ -94,6 +94,7 @@ features = ["lz4"]
bs58 = { workspace = true }
criterion = { workspace = true }
solana-account-decoder = { workspace = true }
solana-ledger = { path = ".", features = ["dev-context-only-utils"] }
solana-logger = { workspace = true }
solana-runtime = { workspace = true, features = ["dev-context-only-utils"] }
spl-pod = { workspace = true }
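(The self-referential `solana-ledger` dev-dependency is a common Cargo pattern for letting a crate's own tests enable one of its optional features — here `dev-context-only-utils`, which gates the test-only `Default` impl added to `leader_schedule.rs` below.)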
186 changes: 167 additions & 19 deletions ledger/src/leader_schedule.rs
@@ -4,6 +4,7 @@ use {
rand_chacha::{rand_core::SeedableRng, ChaChaRng},
solana_pubkey::Pubkey,
solana_sdk::clock::Epoch,
solana_vote::vote_account::VoteAccountsHashMap,
std::{collections::HashMap, convert::identity, ops::Index, sync::Arc},
};

@@ -14,14 +15,113 @@ pub struct FixedSchedule {
}

/// Stake-weighted leader schedule for one epoch.
#[derive(Debug, Default, PartialEq, Eq, Clone)]
pub struct LeaderSchedule {
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct LeaderSchedule(LeaderScheduleVariants);

#[cfg(feature = "dev-context-only-utils")]
impl Default for LeaderSchedule {
fn default() -> Self {
Self(
LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule(
ValidatorIdentityKeyedLeaderSchedule {
slot_leaders: vec![],
index: HashMap::new(),
},
),
)
}
}

#[derive(Debug, PartialEq, Eq, Clone)]
struct VoteAccountKeyedLeaderSchedule {
slot_leader_vote_account_addresses: Vec<Pubkey>,
// Cached leader schedule keyed by validator identity, built by mapping
// each vote account address to the validator identity designated at the
// time of leader schedule generation. Caching this avoids having to look
// up the validator identity address for each slot.
validator_identity_keyed_leader_schedule: ValidatorIdentityKeyedLeaderSchedule,
}

#[derive(Debug, PartialEq, Eq, Clone)]
struct ValidatorIdentityKeyedLeaderSchedule {
slot_leaders: Vec<Pubkey>,
// Inverted index from pubkeys to indices where they are the leader.
index: HashMap<Pubkey, Arc<Vec<usize>>>,
}

#[derive(Debug, PartialEq, Eq, Clone)]
enum LeaderScheduleVariants {
// Latest leader schedule algorithm, which assigns a specific vote account
// to each slot so that the runtime can load vote state (e.g. commission
// and fee collector accounts) for a given slot
VoteAccountKeyedLeaderSchedule(VoteAccountKeyedLeaderSchedule),
// Old leader schedule algorithm, which assigns a specific validator
// identity to each slot. Since multiple vote accounts can be associated
// with a single validator identity, this variant cannot be used to load
// vote state for a given slot.
ValidatorIdentityKeyedLeaderSchedule(ValidatorIdentityKeyedLeaderSchedule),
}

impl LeaderSchedule {
// Note: passing in zero vote accounts will cause a panic.
pub fn new_keyed_by_vote_account(
vote_accounts_map: &VoteAccountsHashMap,
epoch: Epoch,
len: u64,
repeat: u64,
) -> Self {
let keyed_stakes: Vec<_> = vote_accounts_map
.iter()
.map(|(vote_pubkey, (stake, _account))| (vote_pubkey, *stake))
.collect();
let slot_leader_vote_account_addresses =
Self::stake_weighted_slot_leaders(keyed_stakes, epoch, len, repeat);

let validator_identity_keyed_leader_schedule = {
struct SlotLeaderInfo<'a> {
vote_account_address: &'a Pubkey,
validator_identity_address: &'a Pubkey,
}

let default_pubkey = Pubkey::default();
let mut current_slot_leader_info = SlotLeaderInfo {
vote_account_address: &default_pubkey,
validator_identity_address: &default_pubkey,
};

let slot_leaders: Vec<Pubkey> = slot_leader_vote_account_addresses
.iter()
.map(|vote_account_address| {
if vote_account_address != current_slot_leader_info.vote_account_address {
let validator_identity_address = vote_accounts_map
.get(vote_account_address)
.unwrap()
.1
.node_pubkey();
current_slot_leader_info = SlotLeaderInfo {
vote_account_address,
validator_identity_address,
};
}
*current_slot_leader_info.validator_identity_address
})
.collect();

let index = Self::index_from_slot_leaders(&slot_leaders);
ValidatorIdentityKeyedLeaderSchedule {
slot_leaders,
index,
}
};

Self(LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(
VoteAccountKeyedLeaderSchedule {
slot_leader_vote_account_addresses,
validator_identity_keyed_leader_schedule,
},
))
}

// Note: passing in zero stakers will cause a panic.
pub fn new_keyed_by_validator_identity(
epoch_staked_nodes: &HashMap<Pubkey, u64>,
@@ -37,6 +137,17 @@ impl LeaderSchedule {
Self::new_from_schedule(slot_leaders)
}

pub fn new_from_schedule(slot_leaders: Vec<Pubkey>) -> Self {
Self(
LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule(
ValidatorIdentityKeyedLeaderSchedule {
index: Self::index_from_slot_leaders(&slot_leaders),
slot_leaders,
},
),
)
}

// Note: passing in zero stakers will cause a panic.
fn stake_weighted_slot_leaders(
mut keyed_stakes: Vec<(&Pubkey, u64)>,
@@ -61,13 +172,6 @@ impl LeaderSchedule {
.collect()
}

pub fn new_from_schedule(slot_leaders: Vec<Pubkey>) -> Self {
Self {
index: Self::index_from_slot_leaders(&slot_leaders),
slot_leaders,
}
}

fn index_from_slot_leaders(slot_leaders: &[Pubkey]) -> HashMap<Pubkey, Arc<Vec<usize>>> {
slot_leaders
.iter()
@@ -79,12 +183,54 @@ impl LeaderSchedule {
.collect()
}

pub fn is_keyed_by_vote_account(&self) -> bool {
matches!(
self.0,
LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(_)
)
}

/// Get the vote account address for the given epoch slot index. This is
/// guaranteed to be Some if the leader schedule is keyed by vote account
/// and the slot index is within the range of the leader schedule.
pub fn get_vote_account_address_for_slot_index(
&self,
epoch_slot_index: usize,
) -> Option<&Pubkey> {
match &self.0 {
LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(schedule) => schedule
.slot_leader_vote_account_addresses
.get(epoch_slot_index),
LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule(_) => None,
}
}

pub fn get_slot_leaders(&self) -> &[Pubkey] {
&self.slot_leaders
match self.0 {
LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(ref schedule) => {
&schedule
.validator_identity_keyed_leader_schedule
.slot_leaders
}
LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule(ref schedule) => {
&schedule.slot_leaders
}
}
}

pub fn num_slots(&self) -> usize {
self.slot_leaders.len()
self.get_slot_leaders().len()
}

fn index(&self) -> &HashMap<Pubkey, Arc<Vec<usize>>> {
match &self.0 {
LeaderScheduleVariants::VoteAccountKeyedLeaderSchedule(schedule) => {
&schedule.validator_identity_keyed_leader_schedule.index
}
LeaderScheduleVariants::ValidatorIdentityKeyedLeaderSchedule(schedule) => {
&schedule.index
}
}
}

/// 'offset' is an index into the leader schedule. The function returns an
@@ -94,8 +240,8 @@ impl LeaderSchedule {
pubkey: &Pubkey,
offset: usize, // Starting index.
) -> impl Iterator<Item = usize> {
let index = self.index.get(pubkey).cloned().unwrap_or_default();
let num_slots = self.slot_leaders.len();
let index = self.index().get(pubkey).cloned().unwrap_or_default();
let num_slots = self.num_slots();
let size = index.len();
#[allow(clippy::reversed_empty_ranges)]
let range = if index.is_empty() {
@@ -119,7 +265,7 @@ impl Index<u64> for LeaderSchedule {
type Output = Pubkey;
fn index(&self, index: u64) -> &Pubkey {
let index = index as usize;
&self.slot_leaders[index % self.slot_leaders.len()]
&self.get_slot_leaders()[index % self.num_slots()]
}
}

@@ -185,7 +331,7 @@ mod tests {
LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, repeat);
assert_eq!(leader_schedule.num_slots() as u64, len);
let mut leader_node = Pubkey::default();
for (i, node) in leader_schedule.slot_leaders.iter().enumerate() {
for (i, node) in leader_schedule.get_slot_leaders().iter().enumerate() {
if i % repeat as usize == 0 {
leader_node = *node;
} else {
@@ -203,12 +349,14 @@ mod tests {
let epoch = 0;
let len = 8;
// What the schedule looks like without any repeats
let leaders1 =
LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, 1).slot_leaders;
let leaders1 = LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, 1)
.get_slot_leaders()
.to_vec();

// What the schedule looks like with repeats
let leaders2 =
LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, 2).slot_leaders;
let leaders2 = LeaderSchedule::new_keyed_by_validator_identity(&stakes, epoch, len, 2)
.get_slot_leaders()
.to_vec();
assert_eq!(leaders1.len(), leaders2.len());

let leaders1_expected = vec![
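The identity-mapping loop in `new_keyed_by_vote_account` above relies on `stake_weighted_slot_leaders` emitting each vote account in runs of `repeat` consecutive slots: by caching the last (vote account, identity) pair, the schedule pays one map lookup per run instead of one per slot. A standalone sketch of that pattern with plain integer keys (illustrative only, not from the commit):

use std::collections::HashMap;

// Map each key to its value in `lookup`, caching the last (key, value) pair
// so that a run of equal keys costs a single map lookup per run rather than
// one lookup per element.
fn map_runs(keys: &[u64], lookup: &HashMap<u64, u64>) -> Vec<u64> {
    let mut cached: Option<(u64, u64)> = None;
    keys.iter()
        .map(|&key| match cached {
            Some((cached_key, value)) if cached_key == key => value,
            _ => {
                let value = lookup[&key];
                cached = Some((key, value));
                value
            }
        })
        .collect()
}

fn main() {
    let lookup = HashMap::from([(1, 10), (2, 20)]);
    // Runs of equal keys mimic NUM_CONSECUTIVE_LEADER_SLOTS consecutive slots.
    let slots = [1, 1, 1, 1, 2, 2, 2, 2];
    assert_eq!(map_runs(&slots, &lookup), vec![10, 10, 10, 10, 20, 20, 20, 20]);
}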
59 changes: 38 additions & 21 deletions ledger/src/leader_schedule_utils.rs
@@ -10,14 +10,26 @@ use {

/// Return the leader schedule for the given epoch.
pub fn leader_schedule(epoch: Epoch, bank: &Bank) -> Option<LeaderSchedule> {
bank.epoch_staked_nodes(epoch).map(|stakes| {
LeaderSchedule::new_keyed_by_validator_identity(
&stakes,
epoch,
bank.get_slots_in_epoch(epoch),
NUM_CONSECUTIVE_LEADER_SLOTS,
)
})
let use_new_leader_schedule = bank.should_use_vote_address_leader_schedule(epoch);
if use_new_leader_schedule {
bank.epoch_vote_accounts(epoch).map(|vote_accounts_map| {
LeaderSchedule::new_keyed_by_vote_account(
vote_accounts_map,
epoch,
bank.get_slots_in_epoch(epoch),
NUM_CONSECUTIVE_LEADER_SLOTS,
)
})
} else {
bank.epoch_staked_nodes(epoch).map(|stakes| {
LeaderSchedule::new_keyed_by_validator_identity(
&stakes,
epoch,
bank.get_slots_in_epoch(epoch),
NUM_CONSECUTIVE_LEADER_SLOTS,
)
})
}
}

/// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
@@ -64,27 +76,32 @@ mod tests {
super::*,
solana_runtime::genesis_utils::{
bootstrap_validator_stake_lamports, create_genesis_config_with_leader,
deactivate_features,
},
test_case::test_case,
};

#[test]
fn test_leader_schedule_via_bank() {
#[test_case(true; "vote keyed leader schedule")]
#[test_case(false; "identity keyed leader schedule")]
fn test_leader_schedule_via_bank(use_vote_keyed_leader_schedule: bool) {
let pubkey = solana_pubkey::new_rand();
let genesis_config =
let mut genesis_config =
create_genesis_config_with_leader(0, &pubkey, bootstrap_validator_stake_lamports())
.genesis_config;

if !use_vote_keyed_leader_schedule {
deactivate_features(
&mut genesis_config,
&vec![solana_feature_set::enable_vote_address_leader_schedule::id()],
);
}

let bank = Bank::new_for_tests(&genesis_config);
let leader_schedule = leader_schedule(0, &bank).unwrap();

let pubkeys_and_stakes: HashMap<_, _> = bank
.current_epoch_staked_nodes()
.iter()
.map(|(pubkey, stake)| (*pubkey, *stake))
.collect();
let leader_schedule = LeaderSchedule::new_keyed_by_validator_identity(
&pubkeys_and_stakes,
0,
genesis_config.epoch_schedule.slots_per_epoch,
NUM_CONSECUTIVE_LEADER_SLOTS,
assert_eq!(
leader_schedule.is_keyed_by_vote_account(),
use_vote_keyed_leader_schedule
);

assert_eq!(leader_schedule[0], pubkey);
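A caller-side sketch of the two variants' observable behavior (illustrative only; assumes `solana-ledger` and `solana-pubkey` as dependencies and the module path shown): an identity-keyed schedule built with `new_from_schedule` — the path taken above when the feature gate is inactive — exposes no vote account addresses, while slot indexing returns identity pubkeys for either variant.

use solana_ledger::leader_schedule::LeaderSchedule;

fn main() {
    // Identity-keyed schedules never expose vote account addresses; the
    // accessor returns None for every slot index.
    let leaders = vec![solana_pubkey::new_rand(), solana_pubkey::new_rand()];
    let schedule = LeaderSchedule::new_from_schedule(leaders.clone());
    assert!(!schedule.is_keyed_by_vote_account());
    assert!(schedule.get_vote_account_address_for_slot_index(0).is_none());

    // Slot indexing is unchanged and wraps modulo the schedule length.
    assert_eq!(schedule[0], leaders[0]);
    assert_eq!(schedule[3], leaders[1]);
}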
30 changes: 30 additions & 0 deletions runtime/src/bank.rs
@@ -6067,6 +6067,36 @@ impl Bank {
self.epoch_schedule().get_leader_schedule_epoch(slot)
}

/// Returns whether the specified epoch should use the new vote account
/// keyed leader schedule
pub fn should_use_vote_address_leader_schedule(&self, epoch: Epoch) -> bool {
const NEW_LEADER_SCHEDULE_EPOCH_DELAY: u64 = 1;

// Cannot determine if an epoch should use the new leader schedule if the
// epoch is too far in the future, because we won't know whether the
// feature will have been activated by then.
assert!(self.epoch.saturating_add(NEW_LEADER_SCHEDULE_EPOCH_DELAY) >= epoch);

self.feature_set
.activated_slot(&solana_feature_set::enable_vote_address_leader_schedule::id())
.map(|activation_slot| {
let activation_epoch = self.epoch_schedule().get_epoch(activation_slot);
let effective_epoch = if activation_epoch == 0 {
// If the feature was activated at genesis, then the new
// leader schedule should be used immediately
activation_epoch
} else {
// Otherwise, the new leader schedule should be used
// starting in the next epoch after activation since the
// leader schedule for the current epoch would have already
// been calculated in the previous epoch
activation_epoch.wrapping_add(NEW_LEADER_SCHEDULE_EPOCH_DELAY)
};
epoch >= effective_epoch
})
.unwrap_or_default()
}

/// a bank-level cache of vote accounts and stake delegation info
fn update_stakes_cache(
&self,
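The activation arithmetic in `should_use_vote_address_leader_schedule` reduces to a small pure function. A sketch with illustrative epoch numbers (not from the commit; plain addition stands in for the commit's `wrapping_add`):

const NEW_LEADER_SCHEDULE_EPOCH_DELAY: u64 = 1;

// First epoch in which the vote-keyed schedule applies, given the epoch in
// which the feature was activated. Genesis activation takes effect
// immediately; otherwise the switch is delayed by one epoch, because the
// activation epoch's leader schedule was already computed during the
// previous epoch.
fn first_effective_epoch(activation_epoch: u64) -> u64 {
    if activation_epoch == 0 {
        0
    } else {
        activation_epoch + NEW_LEADER_SCHEDULE_EPOCH_DELAY
    }
}

fn main() {
    assert_eq!(first_effective_epoch(0), 0); // activated at genesis
    assert_eq!(first_effective_epoch(10), 11); // activated mid-cluster
}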
(Diff for the sixth changed file not loaded.)
