solana_epoch_schedule/lib.rs
#![cfg_attr(feature = "frozen-abi", feature(min_specialization))]
#![no_std]
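//! Configuration for epochs and slots.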
#[cfg(feature = "frozen-abi")]
extern crate std;
#[cfg(feature = "sysvar")]
pub mod sysvar;
#[cfg(feature = "serde")]
use serde_derive::{Deserialize, Serialize};
use solana_sdk_macro::CloneZeroed;
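/// The default number of slots per epoch: 432,000 slots, roughly two days at
/// the network's ~400 ms target slot time. Asserted (in tests, below) to match
/// `solana_clock::DEFAULT_SLOTS_PER_EPOCH`.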
const DEFAULT_SLOTS_PER_EPOCH: u64 = 432_000;
#[cfg(test)]
static_assertions::const_assert_eq!(
DEFAULT_SLOTS_PER_EPOCH,
solana_clock::DEFAULT_SLOTS_PER_EPOCH
);
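/// The number of slots before the start of an epoch at which that epoch's
/// leader schedule is calculated; the default is one full epoch ahead.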
pub const DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET: u64 = DEFAULT_SLOTS_PER_EPOCH;
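/// The maximum number of epochs ahead of the current epoch for which a
/// leader schedule may be calculated.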
pub const MAX_LEADER_SCHEDULE_EPOCH_OFFSET: u64 = 3;
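/// The minimum number of slots in an epoch; epoch 0 has exactly this many
/// slots when warmup is enabled.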
pub const MINIMUM_SLOTS_PER_EPOCH: u64 = 32;
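/// Configuration for epochs and slots: how many slots an epoch contains, when
/// leader schedules are calculated, and how the optional warmup period of
/// short, doubling epochs transitions into normal-length epochs.
///
/// A minimal usage sketch, assuming the crate is importable as
/// `solana_epoch_schedule` and using the default 432,000-slot schedule:
///
/// ```
/// use solana_epoch_schedule::EpochSchedule;
///
/// let schedule = EpochSchedule::default();
/// // With warmup enabled, epoch 0 has the minimum length of 32 slots.
/// assert_eq!(schedule.get_slots_in_epoch(0), 32);
/// // Slot 0 is the first slot (index 0) of epoch 0.
/// assert_eq!(schedule.get_epoch_and_slot_index(0), (0, 0));
/// ```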
#[repr(C)]
#[cfg_attr(feature = "frozen-abi", derive(solana_frozen_abi_macro::AbiExample))]
#[cfg_attr(
feature = "serde",
derive(Deserialize, Serialize),
serde(rename_all = "camelCase")
)]
#[derive(Debug, CloneZeroed, PartialEq, Eq)]
pub struct EpochSchedule {
/// The maximum number of slots in each epoch.
pub slots_per_epoch: u64,
/// A number of slots before the beginning of an epoch to calculate
/// a leader schedule for that epoch.
pub leader_schedule_slot_offset: u64,
/// Whether epochs start short and grow.
pub warmup: bool,
/// The first epoch after the warmup period,
/// basically: `log2(slots_per_epoch) - log2(MINIMUM_SLOTS_PER_EPOCH)`.
pub first_normal_epoch: u64,
/// The first slot after the warmup period,
/// basically: `MINIMUM_SLOTS_PER_EPOCH * (2.pow(first_normal_epoch) - 1)`.
pub first_normal_slot: u64,
}
impl Default for EpochSchedule {
fn default() -> Self {
Self::custom(
DEFAULT_SLOTS_PER_EPOCH,
DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET,
true,
)
}
}
impl EpochSchedule {
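/// Creates a schedule with warmup enabled and the leader-schedule offset set
/// to a full epoch (`slots_per_epoch` slots).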
pub fn new(slots_per_epoch: u64) -> Self {
Self::custom(slots_per_epoch, slots_per_epoch, true)
}
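/// Creates the default schedule with warmup disabled, so every epoch,
/// including epoch 0, is `DEFAULT_SLOTS_PER_EPOCH` slots long.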
pub fn without_warmup() -> Self {
Self::custom(
DEFAULT_SLOTS_PER_EPOCH,
DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET,
false,
)
}
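/// Builds a schedule from explicit parameters.
///
/// Panics if `slots_per_epoch` is less than `MINIMUM_SLOTS_PER_EPOCH`. When
/// `warmup` is true, epoch lengths start at `MINIMUM_SLOTS_PER_EPOCH` and
/// double each epoch until they reach `slots_per_epoch`; `first_normal_epoch`
/// and `first_normal_slot` mark where that growth ends.
///
/// A small sketch of the warmup progression for a hypothetical 256-slot
/// schedule:
///
/// ```
/// use solana_epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH};
///
/// let schedule = EpochSchedule::custom(256, 256, true);
/// // Epoch lengths double from the minimum until they reach 256.
/// assert_eq!(schedule.get_slots_in_epoch(0), MINIMUM_SLOTS_PER_EPOCH); // 32
/// assert_eq!(schedule.get_slots_in_epoch(1), 64);
/// assert_eq!(schedule.get_slots_in_epoch(2), 128);
/// // Normal-length epochs begin at epoch 3, slot 224 (32 + 64 + 128).
/// assert_eq!(schedule.first_normal_epoch, 3);
/// assert_eq!(schedule.first_normal_slot, 224);
/// ```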
pub fn custom(slots_per_epoch: u64, leader_schedule_slot_offset: u64, warmup: bool) -> Self {
assert!(slots_per_epoch >= MINIMUM_SLOTS_PER_EPOCH);
let (first_normal_epoch, first_normal_slot) = if warmup {
let next_power_of_two = slots_per_epoch.next_power_of_two();
let log2_slots_per_epoch = next_power_of_two
.trailing_zeros()
.saturating_sub(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros());
(
u64::from(log2_slots_per_epoch),
next_power_of_two.saturating_sub(MINIMUM_SLOTS_PER_EPOCH),
)
} else {
(0, 0)
};
EpochSchedule {
slots_per_epoch,
leader_schedule_slot_offset,
warmup,
first_normal_epoch,
first_normal_slot,
}
}
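/// Gets the length of the given epoch, in slots.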
pub fn get_slots_in_epoch(&self, epoch: u64) -> u64 {
if epoch < self.first_normal_epoch {
2u64.saturating_pow(
(epoch as u32).saturating_add(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros()),
)
} else {
self.slots_per_epoch
}
}
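/// Gets the "leader schedule epoch" for the given slot: the epoch that will
/// be current `leader_schedule_slot_offset` slots after `slot` (during
/// warmup, simply the epoch after the one containing `slot`).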
pub fn get_leader_schedule_epoch(&self, slot: u64) -> u64 {
if slot < self.first_normal_slot {
self.get_epoch_and_slot_index(slot).0.saturating_add(1)
} else {
let new_slots_since_first_normal_slot = slot.saturating_sub(self.first_normal_slot);
let new_first_normal_leader_schedule_slot =
new_slots_since_first_normal_slot.saturating_add(self.leader_schedule_slot_offset);
let new_epochs_since_first_normal_leader_schedule =
new_first_normal_leader_schedule_slot
.checked_div(self.slots_per_epoch)
.unwrap_or(0);
self.first_normal_epoch
.saturating_add(new_epochs_since_first_normal_leader_schedule)
}
}
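/// Gets the epoch containing the given slot.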
pub fn get_epoch(&self, slot: u64) -> u64 {
self.get_epoch_and_slot_index(slot).0
}
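/// Gets the epoch containing the given slot along with the slot's offset
/// within that epoch.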
pub fn get_epoch_and_slot_index(&self, slot: u64) -> (u64, u64) {
if slot < self.first_normal_slot {
let epoch = slot
.saturating_add(MINIMUM_SLOTS_PER_EPOCH)
.saturating_add(1)
.next_power_of_two()
.trailing_zeros()
.saturating_sub(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros())
.saturating_sub(1);
let epoch_len =
2u64.saturating_pow(epoch.saturating_add(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros()));
(
u64::from(epoch),
slot.saturating_sub(epoch_len.saturating_sub(MINIMUM_SLOTS_PER_EPOCH)),
)
} else {
let normal_slot_index = slot.saturating_sub(self.first_normal_slot);
let normal_epoch_index = normal_slot_index
.checked_div(self.slots_per_epoch)
.unwrap_or(0);
let epoch = self.first_normal_epoch.saturating_add(normal_epoch_index);
let slot_index = normal_slot_index
.checked_rem(self.slots_per_epoch)
.unwrap_or(0);
(epoch, slot_index)
}
}
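/// Gets the first slot of the given epoch.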
pub fn get_first_slot_in_epoch(&self, epoch: u64) -> u64 {
if epoch <= self.first_normal_epoch {
2u64.saturating_pow(epoch as u32)
.saturating_sub(1)
.saturating_mul(MINIMUM_SLOTS_PER_EPOCH)
} else {
epoch
.saturating_sub(self.first_normal_epoch)
.saturating_mul(self.slots_per_epoch)
.saturating_add(self.first_normal_slot)
}
}
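/// Gets the last slot of the given epoch, inclusive.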
pub fn get_last_slot_in_epoch(&self, epoch: u64) -> u64 {
self.get_first_slot_in_epoch(epoch)
.saturating_add(self.get_slots_in_epoch(epoch))
.saturating_sub(1)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_epoch_schedule() {
for slots_per_epoch in MINIMUM_SLOTS_PER_EPOCH..=MINIMUM_SLOTS_PER_EPOCH * 16 {
let epoch_schedule = EpochSchedule::custom(slots_per_epoch, slots_per_epoch / 2, true);
assert_eq!(epoch_schedule.get_first_slot_in_epoch(0), 0);
assert_eq!(
epoch_schedule.get_last_slot_in_epoch(0),
MINIMUM_SLOTS_PER_EPOCH - 1
);
let mut last_leader_schedule = 0;
let mut last_epoch = 0;
let mut last_slots_in_epoch = MINIMUM_SLOTS_PER_EPOCH;
for slot in 0..(2 * slots_per_epoch) {
let leader_schedule = epoch_schedule.get_leader_schedule_epoch(slot);
if leader_schedule != last_leader_schedule {
assert_eq!(leader_schedule, last_leader_schedule + 1);
last_leader_schedule = leader_schedule;
}
let (epoch, offset) = epoch_schedule.get_epoch_and_slot_index(slot);
if epoch != last_epoch {
assert_eq!(epoch, last_epoch + 1);
last_epoch = epoch;
assert_eq!(epoch_schedule.get_first_slot_in_epoch(epoch), slot);
assert_eq!(epoch_schedule.get_last_slot_in_epoch(epoch - 1), slot - 1);
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(epoch);
if slots_in_epoch != last_slots_in_epoch && slots_in_epoch != slots_per_epoch {
assert_eq!(slots_in_epoch, last_slots_in_epoch * 2);
}
last_slots_in_epoch = slots_in_epoch;
}
assert!(offset < last_slots_in_epoch);
}
assert!(last_leader_schedule != 0);
assert!(last_epoch != 0);
assert!(last_slots_in_epoch == slots_per_epoch);
}
}
#[test]
fn test_clone() {
let epoch_schedule = EpochSchedule {
slots_per_epoch: 1,
leader_schedule_slot_offset: 2,
warmup: true,
first_normal_epoch: 4,
first_normal_slot: 5,
};
#[allow(clippy::clone_on_copy)]
let cloned_epoch_schedule = epoch_schedule.clone();
assert_eq!(cloned_epoch_schedule, epoch_schedule);
}
}