Skip to content

Commit 964c667

Browse files
committed
Squashed commit of the following (sigp#8188):
commit aa64356 — Author: Pawan Dhananjay <[email protected]>, Date: Fri Oct 10 15:30:45 2025 -0700 — lint
commit 9fab592 — Author: Pawan Dhananjay <[email protected]>, Date: Fri Oct 10 15:10:05 2025 -0700 — Get claude to write tests
commit bd181a3 — Author: Pawan Dhananjay <[email protected]>, Date: Fri Oct 10 13:34:41 2025 -0700 — Only persist custody columns
1 parent dfe7844 commit 964c667

File tree

3 files changed

+203
-4
lines changed

3 files changed

+203
-4
lines changed

beacon_node/beacon_chain/src/beacon_chain.rs

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3957,7 +3957,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
39573957
// See https://github.com/sigp/lighthouse/issues/2028
39583958
let (_, signed_block, block_data) = signed_block.deconstruct();
39593959

3960-
match self.get_blobs_or_columns_store_op(block_root, block_data) {
3960+
match self.get_blobs_or_columns_store_op(block_root, signed_block.slot(), block_data) {
39613961
Ok(Some(blobs_or_columns_store_op)) => {
39623962
ops.push(blobs_or_columns_store_op);
39633963
}
@@ -7163,6 +7163,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
71637163
pub(crate) fn get_blobs_or_columns_store_op(
71647164
&self,
71657165
block_root: Hash256,
7166+
block_slot: Slot,
71667167
block_data: AvailableBlockData<T::EthSpec>,
71677168
) -> Result<Option<StoreOp<'_, T::EthSpec>>, String> {
71687169
match block_data {
@@ -7175,7 +7176,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
71757176
);
71767177
Ok(Some(StoreOp::PutBlobs(block_root, blobs)))
71777178
}
7178-
AvailableBlockData::DataColumns(data_columns) => {
7179+
AvailableBlockData::DataColumns(mut data_columns) => {
7180+
let columns_to_custody = self.custody_columns_for_epoch(Some(
7181+
block_slot.epoch(T::EthSpec::slots_per_epoch()),
7182+
));
7183+
// Supernodes need to persist all sampled custody columns
7184+
if columns_to_custody.len() != self.spec.number_of_custody_groups as usize {
7185+
data_columns
7186+
.retain(|data_column| columns_to_custody.contains(&data_column.index));
7187+
}
71797188
debug!(
71807189
%block_root,
71817190
count = data_columns.len(),

beacon_node/beacon_chain/src/historical_blocks.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
140140

141141
// Store the blobs or data columns too
142142
if let Some(op) = self
143-
.get_blobs_or_columns_store_op(block_root, block_data)
143+
.get_blobs_or_columns_store_op(block_root, block.slot(), block_data)
144144
.map_err(|e| {
145145
HistoricalBlockError::StoreError(StoreError::DBError {
146146
message: format!("get_blobs_or_columns_store_op error {e:?}"),

beacon_node/beacon_chain/tests/store_tests.rs

Lines changed: 191 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,8 @@ use beacon_chain::test_utils::{
1313
use beacon_chain::{
1414
BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig,
1515
NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped,
16-
data_availability_checker::MaybeAvailableBlock, historical_blocks::HistoricalBlockError,
16+
data_availability_checker::{AvailableBlockData, MaybeAvailableBlock},
17+
historical_blocks::HistoricalBlockError,
1718
migrate::MigratorConfig,
1819
};
1920
use logging::create_test_tracing_subscriber;
@@ -4137,6 +4138,195 @@ async fn replay_from_split_state() {
41374138
assert_eq!(state.slot(), split.slot);
41384139
}
41394140

4141+
/// Test that regular nodes filter and store only custody columns when processing blocks with data columns.
4142+
#[tokio::test]
4143+
async fn test_custody_column_filtering_regular_node() {
4144+
let db_path = tempdir().unwrap();
4145+
let store = get_store(&db_path);
4146+
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
4147+
4148+
// Skip test if PeerDAS is not scheduled
4149+
if !harness.spec.is_peer_das_scheduled() {
4150+
return;
4151+
}
4152+
4153+
// Generate a block with data columns using public test utils
4154+
let fork_name = harness.spec.fork_name_at_epoch(Epoch::new(0));
4155+
let mut rng = rand::rng();
4156+
let (signed_block, all_data_columns) =
4157+
beacon_chain::test_utils::generate_rand_block_and_data_columns::<E>(
4158+
fork_name,
4159+
beacon_chain::test_utils::NumBlobs::Number(3), // Generate 3 blobs to produce data columns
4160+
&mut rng,
4161+
&harness.spec,
4162+
);
4163+
4164+
// Skip test if no data columns are generated
4165+
if all_data_columns.is_empty() {
4166+
return;
4167+
}
4168+
4169+
let block_root = signed_block.canonical_root();
4170+
let slot = signed_block.slot();
4171+
4172+
// Get custody columns for this epoch - regular nodes only store a subset
4173+
let custody_columns = harness
4174+
.chain
4175+
.custody_columns_for_epoch(Some(slot.epoch(E::slots_per_epoch())));
4176+
4177+
// Verify that custody columns is a proper subset (not all columns for regular node)
4178+
assert!(
4179+
custody_columns.len() < harness.spec.number_of_custody_groups as usize,
4180+
"Regular node should not custody all columns"
4181+
);
4182+
4183+
// Create AvailableBlock with all data columns
4184+
let available_block_data = AvailableBlockData::DataColumns(all_data_columns.clone());
4185+
let available_block = AvailableBlock::__new_for_testing(
4186+
block_root,
4187+
Arc::new(signed_block),
4188+
available_block_data,
4189+
harness.spec.clone(),
4190+
);
4191+
4192+
// Import the block through the historical block batch import which calls the custody filtering internally
4193+
harness
4194+
.chain
4195+
.import_historical_block_batch(vec![available_block])
4196+
.expect("should import block with data columns");
4197+
4198+
// Check what actually got stored in the database
4199+
let stored_column_keys = store
4200+
.get_data_column_keys(block_root)
4201+
.expect("should get stored column keys");
4202+
4203+
// Verify only custody columns were stored
4204+
let stored_column_indices: std::collections::HashSet<_> =
4205+
stored_column_keys.into_iter().collect();
4206+
let expected_column_indices: std::collections::HashSet<_> =
4207+
custody_columns.iter().copied().collect();
4208+
4209+
assert_eq!(
4210+
stored_column_indices, expected_column_indices,
4211+
"Regular node should only store custody columns"
4212+
);
4213+
4214+
// Verify no non-custody columns are included
4215+
let all_column_indices: std::collections::HashSet<_> =
4216+
all_data_columns.iter().map(|dc| dc.index).collect();
4217+
let non_custody_columns: std::collections::HashSet<_> = all_column_indices
4218+
.difference(&expected_column_indices)
4219+
.collect();
4220+
4221+
for &non_custody_column in &non_custody_columns {
4222+
assert!(
4223+
!stored_column_indices.contains(non_custody_column),
4224+
"Non-custody column {} should not be stored",
4225+
non_custody_column
4226+
);
4227+
}
4228+
}
4229+
4230+
/// Test that supernodes store all data columns when processing blocks with data columns.
4231+
#[tokio::test]
4232+
async fn test_custody_column_filtering_supernode() {
4233+
let db_path = tempdir().unwrap();
4234+
let store = get_store(&db_path);
4235+
let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT);
4236+
4237+
// Skip test if PeerDAS is not scheduled
4238+
if !harness.spec.is_peer_das_scheduled() {
4239+
return;
4240+
}
4241+
4242+
// Generate a block with data columns using public test utils
4243+
let fork_name = harness.spec.fork_name_at_epoch(Epoch::new(0));
4244+
let mut rng = rand::rng();
4245+
let (signed_block, all_data_columns) =
4246+
beacon_chain::test_utils::generate_rand_block_and_data_columns::<E>(
4247+
fork_name,
4248+
beacon_chain::test_utils::NumBlobs::Number(3), // Generate 3 blobs to produce data columns
4249+
&mut rng,
4250+
&harness.spec,
4251+
);
4252+
4253+
// Skip test if no data columns are generated
4254+
if all_data_columns.is_empty() {
4255+
return;
4256+
}
4257+
4258+
let block_root = signed_block.canonical_root();
4259+
4260+
// Create AvailableBlock with all data columns
4261+
let available_block_data = AvailableBlockData::DataColumns(all_data_columns.clone());
4262+
let available_block = AvailableBlock::__new_for_testing(
4263+
block_root,
4264+
Arc::new(signed_block),
4265+
available_block_data,
4266+
harness.spec.clone(),
4267+
);
4268+
4269+
// Import the block through the historical block batch import
4270+
harness
4271+
.chain
4272+
.import_historical_block_batch(vec![available_block])
4273+
.expect("should import block with data columns");
4274+
4275+
// Check what actually got stored in the database
4276+
let stored_column_keys = store
4277+
.get_data_column_keys(block_root)
4278+
.expect("should get stored column keys");
4279+
4280+
// Verify ALL data columns were stored for supernodes
4281+
let stored_column_indices: std::collections::HashSet<_> =
4282+
stored_column_keys.into_iter().collect();
4283+
let all_column_indices: std::collections::HashSet<_> =
4284+
all_data_columns.iter().map(|dc| dc.index).collect();
4285+
4286+
assert_eq!(
4287+
stored_column_indices, all_column_indices,
4288+
"Supernode should store ALL data columns"
4289+
);
4290+
4291+
// Verify the count matches
4292+
assert_eq!(
4293+
stored_column_indices.len(),
4294+
all_data_columns.len(),
4295+
"Supernode should store the same number of columns as generated"
4296+
);
4297+
4298+
// Verify each stored column can be retrieved and matches original data
4299+
let stored_columns = store
4300+
.get_data_columns(&block_root)
4301+
.expect("should get data columns")
4302+
.expect("data columns should exist");
4303+
4304+
assert_eq!(
4305+
stored_columns.len(),
4306+
all_data_columns.len(),
4307+
"All data columns should be retrievable"
4308+
);
4309+
4310+
for original_column in &all_data_columns {
4311+
let matching_stored = stored_columns
4312+
.iter()
4313+
.find(|stored| stored.index == original_column.index);
4314+
4315+
assert!(
4316+
matching_stored.is_some(),
4317+
"Column {} should be present in stored data",
4318+
original_column.index
4319+
);
4320+
4321+
let stored_column = matching_stored.unwrap();
4322+
assert_eq!(
4323+
stored_column, original_column,
4324+
"Stored column {} should match original",
4325+
original_column.index
4326+
);
4327+
}
4328+
}
4329+
41404330
/// Checks that two chains are the same, for the purpose of these tests.
41414331
///
41424332
/// Several fields that are hard/impossible to check are ignored (e.g., the store).

Comments (0)