Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 38 additions & 0 deletions beacon_node/beacon_chain/src/beacon_chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6913,6 +6913,44 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
);
}

/// Get the earliest epoch in which the node has met its custody requirements.
///
/// A `None` response indicates that we've met our custody requirements all the
/// way back to the column data availability window.
pub fn earliest_custodied_data_column_epoch(&self) -> Option<Epoch> {
    self.store
        .get_data_column_custody_info()
        // A store read error is treated the same as "no custody info recorded";
        // NOTE(review): this silently swallows DB errors — confirm that's intended.
        .unwrap_or(None)
        .and_then(|info| info.earliest_data_column_slot)
        .map(|slot| {
            let mut epoch = slot.epoch(T::EthSpec::slots_per_epoch());
            // If the earliest custodied slot isn't the first slot of its epoch,
            // the node has only fully met its custody requirements from the
            // following epoch onwards, so round up.
            if slot > epoch.start_slot(T::EthSpec::slots_per_epoch()) {
                epoch += 1;
            }
            epoch
        })
}

/// The data availability boundary for custodying columns. This is the regular
/// data availability boundary, clamped to be no earlier than the Fulu fork
/// epoch (columns only exist from Fulu onwards).
///
/// Returns `None` if Deneb is disabled (no DA boundary at all) or if Fulu is
/// disabled (no columns to custody).
pub fn column_data_availability_boundary(&self) -> Option<Epoch> {
    // `None` here means Deneb hasn't been enabled.
    let da_boundary_epoch = self.data_availability_boundary()?;
    // `None` here means Fulu hasn't been enabled.
    let fulu_fork_epoch = self.spec.fulu_fork_epoch?;
    // Equivalent to: if the DA boundary predates Fulu, use the Fulu fork
    // epoch; otherwise use the DA boundary itself.
    Some(da_boundary_epoch.max(fulu_fork_epoch))
}

/// This method serves to get a sense of the current chain health. It is used in block proposal
/// to determine whether we should outsource payload production duties.
///
Expand Down
11 changes: 0 additions & 11 deletions beacon_node/beacon_chain/src/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -388,12 +388,6 @@ where
.init_blob_info(genesis.beacon_block.slot())
.map_err(|e| format!("Failed to initialize genesis blob info: {:?}", e))?,
);
self.pending_io_batch.push(
store
.init_data_column_info(genesis.beacon_block.slot())
.map_err(|e| format!("Failed to initialize genesis data column info: {:?}", e))?,
);

let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, genesis.clone())
.map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?;
let current_slot = None;
Expand Down Expand Up @@ -604,11 +598,6 @@ where
.init_blob_info(weak_subj_block.slot())
.map_err(|e| format!("Failed to initialize blob info: {:?}", e))?,
);
self.pending_io_batch.push(
store
.init_data_column_info(weak_subj_block.slot())
.map_err(|e| format!("Failed to initialize data column info: {:?}", e))?,
);

let snapshot = BeaconSnapshot {
beacon_block_root: weak_subj_block_root,
Expand Down
20 changes: 1 addition & 19 deletions beacon_node/beacon_chain/src/historical_blocks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@ use state_processing::{
use std::borrow::Cow;
use std::iter;
use std::time::Duration;
use store::metadata::DataColumnInfo;
use store::{AnchorInfo, BlobInfo, DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp};
use strum::IntoStaticStr;
use tracing::{debug, instrument};
Expand Down Expand Up @@ -70,7 +69,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
) -> Result<usize, HistoricalBlockError> {
let anchor_info = self.store.get_anchor_info();
let blob_info = self.store.get_blob_info();
let data_column_info = self.store.get_data_column_info();

// Take all blocks with slots less than the oldest block slot.
let num_relevant = blocks.partition_point(|available_block| {
Expand All @@ -97,7 +95,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let mut expected_block_root = anchor_info.oldest_block_parent;
let mut prev_block_slot = anchor_info.oldest_block_slot;
let mut new_oldest_blob_slot = blob_info.oldest_blob_slot;
let mut new_oldest_data_column_slot = data_column_info.oldest_data_column_slot;

let mut blob_batch = Vec::<KeyValueStoreOp>::new();
let mut cold_batch = Vec::with_capacity(blocks_to_import.len());
Expand Down Expand Up @@ -133,9 +130,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
AvailableBlockData::Blobs(..) => {
new_oldest_blob_slot = Some(block.slot());
}
AvailableBlockData::DataColumns(_) => {
new_oldest_data_column_slot = Some(block.slot());
}
AvailableBlockData::DataColumns(_) => {}
}

// Store the blobs or data columns too
Expand Down Expand Up @@ -250,19 +245,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
);
}

// Update the data column info.
if new_oldest_data_column_slot != data_column_info.oldest_data_column_slot
&& let Some(oldest_data_column_slot) = new_oldest_data_column_slot
{
let new_data_column_info = DataColumnInfo {
oldest_data_column_slot: Some(oldest_data_column_slot),
};
anchor_and_blob_batch.push(
self.store
.compare_and_set_data_column_info(data_column_info, new_data_column_info)?,
);
}

// Update the anchor.
let new_anchor = AnchorInfo {
oldest_block_slot: prev_block_slot,
Expand Down
6 changes: 6 additions & 0 deletions beacon_node/beacon_chain/src/schema_change.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ mod migration_schema_v25;
mod migration_schema_v26;
mod migration_schema_v27;
mod migration_schema_v28;
mod migration_schema_v29;

use crate::beacon_chain::BeaconChainTypes;
use std::sync::Arc;
Expand Down Expand Up @@ -88,6 +89,11 @@ pub fn migrate_schema<T: BeaconChainTypes>(
let ops = migration_schema_v28::downgrade_from_v28::<T>(db.clone())?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(28), SchemaVersion(29)) => {
let ops = migration_schema_v29::upgrade_to_v29()?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(29), SchemaVersion(28)) => migration_schema_v29::downgrade_from_v29(),
// Anything else is an error.
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
target_version: to,
Expand Down
15 changes: 15 additions & 0 deletions beacon_node/beacon_chain/src/schema_change/migration_schema_v29.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
use store::{DBColumn, Error, KeyValueStoreOp, metadata::DATA_COLUMN_INFO_KEY};

/// Upgrade to v29: delete the legacy `DataColumnInfo` entry from the
/// `BeaconMeta` column. The earliest custodied data column is now tracked via
/// `DataColumnCustodyInfo` instead, so the old key is no longer read.
pub fn upgrade_to_v29() -> Result<Vec<KeyValueStoreOp>, Error> {
    Ok(vec![KeyValueStoreOp::DeleteKey(
        DBColumn::BeaconMeta,
        DATA_COLUMN_INFO_KEY.as_slice().to_vec(),
    )])
}

pub fn downgrade_from_v29() -> Result<(), Error> {
Err(Error::MigrationError(
"Cannot downgrade from v29".to_string(),
))
}
7 changes: 2 additions & 5 deletions beacon_node/beacon_chain/tests/schema_stability.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,8 @@ use operation_pool::PersistedOperationPool;
use ssz::Encode;
use std::sync::{Arc, LazyLock};
use store::{
DBColumn, HotColdDB, StoreConfig, StoreItem,
database::interface::BeaconNodeBackend,
hot_cold_store::Split,
metadata::{DataColumnCustodyInfo, DataColumnInfo},
DBColumn, HotColdDB, StoreConfig, StoreItem, database::interface::BeaconNodeBackend,
hot_cold_store::Split, metadata::DataColumnCustodyInfo,
};
use strum::IntoEnumIterator;
use tempfile::{TempDir, tempdir};
Expand Down Expand Up @@ -133,7 +131,6 @@ fn check_metadata_sizes(store: &Store<E>) {
6
}
);
assert_eq!(DataColumnInfo::default().ssz_bytes_len(), 5);
assert_eq!(DataColumnCustodyInfo::default().ssz_bytes_len(), 5);
}

Expand Down
45 changes: 27 additions & 18 deletions beacon_node/network/src/network_beacon_processor/rpc_methods.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1154,33 +1154,42 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {

let request_start_slot = Slot::from(req.start_slot);

let data_availability_boundary_slot = match self.chain.data_availability_boundary() {
Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()),
None => {
debug!("Deneb fork is disabled");
return Err((RpcErrorResponse::InvalidRequest, "Deneb fork is disabled"));
}
};
let column_data_availability_boundary_slot =
match self.chain.column_data_availability_boundary() {
Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()),
None => {
debug!("Deneb fork is disabled");
return Err((RpcErrorResponse::InvalidRequest, "Deneb fork is disabled"));
Comment on lines +1161 to +1162
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It could be Fulu or Deneb that is disabled, but I think it's not wrong to say Fulu is disabled (because Deneb being disabled also implies Fulu being disabled).

Suggested change
debug!("Deneb fork is disabled");
return Err((RpcErrorResponse::InvalidRequest, "Deneb fork is disabled"));
debug!("Fulu fork is disabled");
return Err((RpcErrorResponse::InvalidRequest, "Fulu fork is disabled"));

}
};

let oldest_data_column_slot = self
.chain
.store
.get_data_column_info()
.oldest_data_column_slot
.unwrap_or(data_availability_boundary_slot);
let earliest_custodied_data_column_slot =
match self.chain.earliest_custodied_data_column_epoch() {
Some(earliest_custodied_epoch) => {
let earliest_custodied_slot =
earliest_custodied_epoch.start_slot(T::EthSpec::slots_per_epoch());
// Ensure the earliest columns we serve are within the data availability window
if earliest_custodied_slot < column_data_availability_boundary_slot {
column_data_availability_boundary_slot
} else {
earliest_custodied_slot
}
}
None => column_data_availability_boundary_slot,
};

if request_start_slot < oldest_data_column_slot {
if request_start_slot < earliest_custodied_data_column_slot {
debug!(
%request_start_slot,
%oldest_data_column_slot,
%data_availability_boundary_slot,
%earliest_custodied_data_column_slot,
%column_data_availability_boundary_slot,
"Range request start slot is older than data availability boundary."
);

return if data_availability_boundary_slot < oldest_data_column_slot {
return if earliest_custodied_data_column_slot > column_data_availability_boundary_slot {
Err((
RpcErrorResponse::ResourceUnavailable,
"blobs pruned within boundary",
"columns pruned within boundary",
))
} else {
Err((
Expand Down
3 changes: 0 additions & 3 deletions beacon_node/store/src/errors.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,6 @@ pub enum Error {
AnchorInfoConcurrentMutation,
/// The store's `blob_info` was mutated concurrently, the latest modification wasn't applied.
BlobInfoConcurrentMutation,
/// The store's `data_column_info` was mutated concurrently, the latest modification wasn't applied.
DataColumnInfoConcurrentMutation,
/// The block or state is unavailable due to weak subjectivity sync.
HistoryUnavailable,
/// State reconstruction cannot commence because not all historic blocks are known.
Expand Down Expand Up @@ -94,7 +92,6 @@ pub enum Error {
LoadAnchorInfo(Box<Error>),
LoadSplit(Box<Error>),
LoadBlobInfo(Box<Error>),
LoadDataColumnInfo(Box<Error>),
LoadConfig(Box<Error>),
LoadHotStateSummary(Hash256, Box<Error>),
LoadHotStateSummaryForSplit(Box<Error>),
Expand Down
Loading
Loading