Skip to content

Commit 6ec23f2

Browse files
authored
Portal + era1: Remove use of hardcoded mainnet mergeBlockNumber (#3742)
Make the merge block number configurable in the historical_hashes_accumulator and era1 code so that it can be used for different networks (testnets). As a result, we also no longer have to compile the historical_hashes_accumulator tests separately with a different compile-time define. The nimbus_execution_client import for Sepolia was also tested in the process; an issue was found with the era1 files there, which is why an additional check in the era1 open call has been commented out for now. Additional follow-ups that came up: - It is time to bring the two eraDBs (Portal + EL) together. - It is time to bring the Portal and EL ChainConfig together, though the latter could use some cleanup first.
1 parent f5eebe4 commit 6ec23f2

17 files changed

+85
-79
lines changed

Makefile

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -290,17 +290,13 @@ portal-test-reproducibility:
290290
[ "$$MD5SUM1" = "$$MD5SUM2" ] && echo -e "\e[92mSuccess: identical binaries.\e[39m" || \
291291
{ echo -e "\e[91mFailure: the binary changed between builds.\e[39m"; exit 1; }
292292

293-
# Portal tests
294-
all_eth_history_custom_chain_tests: | build deps
295-
echo -e $(BUILD_MSG) "build/$@" && \
296-
$(ENV_SCRIPT) nim c -r $(NIM_PARAMS) -d:chronicles_log_level=ERROR -d:mergeBlockNumber:38130 -o:build/$@ "portal/tests/eth_history_tests/$@.nim"
297-
293+
# builds and runs the Portal test suite
298294
all_portal_tests: | build deps
299295
echo -e $(BUILD_MSG) "build/$@" && \
300296
$(ENV_SCRIPT) nim c -r $(NIM_PARAMS) -d:chronicles_log_level=ERROR -o:build/$@ "portal/tests/$@.nim"
301297

302-
# builds and runs the Portal test suite
303-
portal-test: | all_portal_tests all_eth_history_custom_chain_tests
298+
# alias for all_portal_tests
299+
portal-test: | all_portal_tests
304300

305301
# builds the Portal tools, wherever they are
306302
$(PORTAL_TOOLS): | build deps rocksdb

execution_chain/db/era1_db/db_desc.nim

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ type Era1DbRef* = ref object
3030
network: string
3131
files: seq[Era1File]
3232
filenames: Table[uint64, string]
33+
mergeBlockNumber: uint64
3334

3435
proc getEra1File*(db: Era1DbRef, era: Era1): Result[Era1File, string] =
3536
for f in db.files:
@@ -49,7 +50,7 @@ proc getEra1File*(db: Era1DbRef, era: Era1): Result[Era1File, string] =
4950

5051
# TODO: The open call does not do full verification. It is assumed here that
5152
# trusted files are used. We might want to add a full validation option.
52-
let f = Era1File.open(path).valueOr:
53+
let f = Era1File.open(path, db.mergeBlockNumber).valueOr:
5354
return err(path & ": " & error)
5455

5556
if db.files.len > 16: # TODO LRU
@@ -60,7 +61,7 @@ proc getEra1File*(db: Era1DbRef, era: Era1): Result[Era1File, string] =
6061
ok(f)
6162

6263
proc init*(
63-
T: type Era1DbRef, path: string, network: string
64+
T: type Era1DbRef, path: string, network: string, mergeBlockNumber: uint64
6465
): Result[Era1DbRef, string] =
6566
var filenames: Table[uint64, string]
6667
try:
@@ -78,7 +79,7 @@ proc init*(
7879
if filenames.len == 0:
7980
return err "No era files found in " & path
8081

81-
ok Era1DbRef(path: path, network: network, filenames: filenames)
82+
ok Era1DbRef(path: path, network: network, filenames: filenames, mergeBlockNumber: mergeBlockNumber)
8283

8384
proc getEthBlock*(
8485
db: Era1DbRef, blockNumber: uint64, res: var Block

execution_chain/nimbus_import.nim

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -275,7 +275,7 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
275275
"sepolia"
276276
else:
277277
raiseAssert "Other networks are unsupported or do not have an era1"
278-
db = Era1DbRef.init(conf.era1Dir, era1Name).valueOr:
278+
db = Era1DbRef.init(conf.era1Dir, era1Name, lastEra1Block + 1).valueOr:
279279
fatal "Could not open era1 database",
280280
era1Dir = conf.era1Dir, era1Name = era1Name, error = error
281281
quit(QuitFailure)

portal/bridge/history/portal_history_bridge.nim

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,18 +22,21 @@ import
2222
../../rpc/portal_rpc_client,
2323
../../network/history/[history_content, history_validation],
2424
../../eth_history/block_proofs/historical_hashes_accumulator,
25-
../../network/network_metadata,
2625
../../eth_history/[era1, history_data_ssz_e2s],
2726
../../database/era1_db,
27+
../../../execution_chain/common/[hardforks, chain_config],
2828
../common/rpc_helpers,
2929
../nimbus_portal_bridge_conf
3030

31+
from ../../network/network_metadata import loadAccumulator
32+
3133
const newHeadPollInterval = 6.seconds # Slot with potential block is every 12s
3234

3335
type PortalHistoryBridge = ref object
3436
portalClient: RpcClient
3537
web3Client: RpcClient
3638
gossipQueue: AsyncQueue[(seq[byte], seq[byte])]
39+
cfg*: ChainConfig
3740

3841
proc gossipBlockBody(
3942
bridge: PortalHistoryBridge, blockNumber: uint64, body: BlockBody
@@ -106,7 +109,7 @@ proc runLatestLoop(
106109
proc gossipBlockContent(
107110
bridge: PortalHistoryBridge, era1File: string, verifyEra = false
108111
): Future[Result[void, string]] {.async: (raises: [CancelledError]).} =
109-
let f = ?Era1File.open(era1File)
112+
let f = ?Era1File.open(era1File, bridge.cfg.posBlock.get())
110113

111114
if verifyEra:
112115
let _ = ?f.verify()
@@ -143,7 +146,7 @@ proc runBackfillLoopAuditMode(
143146
) {.async: (raises: [CancelledError]).} =
144147
let
145148
rng = newRng()
146-
db = Era1DB.new(era1Dir, "mainnet", loadAccumulator())
149+
db = Era1DB.new(era1Dir, "mainnet", loadAccumulator(), bridge.cfg.posBlock.get())
147150
blockLowerBound = startEra * EPOCH_SIZE # inclusive
148151
blockUpperBound = ((endEra + 1) * EPOCH_SIZE) - 1 # inclusive
149152
blockRange = blockUpperBound - blockLowerBound
@@ -207,6 +210,7 @@ proc runHistory*(config: PortalBridgeConf) =
207210
portalClient: newRpcClientConnect(config.portalRpcUrl),
208211
web3Client: newRpcClientConnect(config.web3Url),
209212
gossipQueue: newAsyncQueue[(seq[byte], seq[byte])](config.gossipConcurrency),
213+
cfg: chainConfigForNetwork(MainNet),
210214
)
211215

212216
proc gossipWorker(bridge: PortalHistoryBridge) {.async: (raises: []).} =

portal/database/era1_db.nim

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,13 +22,14 @@ type Era1DB* = ref object
2222
network: string
2323
accumulator: FinishedHistoricalHashesAccumulator
2424
files: seq[Era1File]
25+
mergeBlockNumber: uint64
2526

2627
proc getEra1File(db: Era1DB, era: Era1): Result[Era1File, string] =
2728
for f in db.files:
2829
if f.blockIdx.startNumber.era == era:
2930
return ok(f)
3031

31-
if era > mergeBlockNumber.era():
32+
if era > db.mergeBlockNumber.era():
3233
return err("Selected era1 past pre-merge data")
3334

3435
let
@@ -41,7 +42,7 @@ proc getEra1File(db: Era1DB, era: Era1): Result[Era1File, string] =
4142

4243
# TODO: The open call does not do full verification. It is assumed here that
4344
# trusted files are used. We might want to add a full validation option.
44-
let f = Era1File.open(path).valueOr:
45+
let f = Era1File.open(path, db.mergeBlockNumber).valueOr:
4546
return err(error)
4647

4748
if db.files.len > 16: # TODO LRU
@@ -56,8 +57,15 @@ proc new*(
5657
path: string,
5758
network: string,
5859
accumulator: FinishedHistoricalHashesAccumulator,
60+
mergeBlockNumber: uint64,
5961
): Era1DB =
60-
Era1DB(path: path, network: network, accumulator: accumulator)
62+
# TODO: Calculate mergeBlockNumber from accumulator instead.
63+
Era1DB(
64+
path: path,
65+
network: network,
66+
accumulator: accumulator,
67+
mergeBlockNumber: mergeBlockNumber,
68+
)
6169

6270
proc getEthBlock*(
6371
db: Era1DB, blockNumber: uint64, res: var Block

portal/eth_history/block_proofs/block_proof_historical_hashes_accumulator.nim

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -12,10 +12,12 @@ import
1212
ssz_serialization,
1313
ssz_serialization/[proofs, merkleization],
1414
../../common/common_types,
15+
../../network/network_metadata,
1516
./historical_hashes_accumulator
1617

1718
export
18-
ssz_serialization, merkleization, proofs, common_types, historical_hashes_accumulator
19+
ssz_serialization, merkleization, proofs, common_types, historical_hashes_accumulator,
20+
network_metadata
1921

2022
#
2123
# Implementation of pre-merge block proofs by making use of the frozen HistoricalHashesAccumulator
@@ -60,12 +62,6 @@ func getHeaderRecordIndex*(header: Header, epochIndex: uint64): uint64 =
6062
## Get the relative header index for the epoch accumulator
6163
getHeaderRecordIndex(header.number, epochIndex)
6264

63-
func isPreMerge*(blockNumber: uint64): bool =
64-
blockNumber < mergeBlockNumber
65-
66-
func isPreMerge*(header: Header): bool =
67-
isPreMerge(header.number)
68-
6965
func verifyProof*(
7066
a: FinishedHistoricalHashesAccumulator,
7167
header: Header,
@@ -95,9 +91,11 @@ func verifyProof*(
9591
verify_merkle_multiproof(@[leave], proof, @[gIndex], epochRecordHash)
9692

9793
func buildProof*(
98-
header: Header, epochRecord: EpochRecord | EpochRecordCached
94+
chainConfig: ChainConfig,
95+
header: Header,
96+
epochRecord: EpochRecord | EpochRecordCached,
9997
): Result[HistoricalHashesAccumulatorProof, string] =
100-
doAssert(header.isPreMerge(), "Must be pre merge header")
98+
doAssert(not chainConfig.isPoSBlock(header), "Must be pre merge header")
10199

102100
let
103101
epochIndex = getEpochIndex(header)

portal/eth_history/block_proofs/historical_hashes_accumulator.nim

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -27,18 +27,6 @@ const
2727
MAX_HISTORICAL_EPOCHS = 2048'u64 # Should be sufficient for all networks as for
2828
# mainnet this is not even reached: ceil(mergeBlockNumber / EPOCH_SIZE) = 1897
2929

30-
# Allow this to be adjusted at compile time for testing. If more constants
31-
# need to be adjusted we can add some presets file.
32-
mergeBlockNumber* {.intdefine.}: uint64 = 15537394
33-
34-
# Note: This is like a ceil(mergeBlockNumber / EPOCH_SIZE)
35-
# Could use ceilDiv(mergeBlockNumber, EPOCH_SIZE) in future versions
36-
preMergeEpochs* = (mergeBlockNumber + EPOCH_SIZE - 1) div EPOCH_SIZE
37-
38-
# TODO:
39-
# Currently disabled, because issue when testing with other
40-
# `mergeBlockNumber`, but it could be used as value to double check on at
41-
# merge block.
4230
# TODO: Could also be used as value to actual finish the accumulator, instead
4331
# of `mergeBlockNumber`, but:
4432
# - Still need to store the actual `mergeBlockNumber` and run-time somewhere
@@ -84,10 +72,6 @@ func getEpochRecordRoot*(headerRecords: openArray[HeaderRecord]): Digest =
8472
hash_tree_root(epochRecord)
8573

8674
func updateAccumulator*(a: var HistoricalHashesAccumulator, header: Header) =
87-
doAssert(
88-
header.number < mergeBlockNumber, "No post merge blocks for header accumulator"
89-
)
90-
9175
let lastTotalDifficulty =
9276
if a.currentEpoch.len() == 0:
9377
0.stuint(256)

portal/eth_history/era1.nim

Lines changed: 15 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -156,7 +156,7 @@ proc skipRecord*(f: IoHandle): Result[void, string] =
156156
func startNumber*(era: Era1): uint64 =
157157
era * MaxEra1Size
158158

159-
func endNumber*(era: Era1): uint64 =
159+
func endNumber*(era: Era1, mergeBlockNumber: uint64): uint64 =
160160
if (era + 1) * MaxEra1Size - 1'u64 >= mergeBlockNumber:
161161
# The incomplete era just before the merge
162162
mergeBlockNumber - 1'u64
@@ -169,7 +169,7 @@ func endNumber*(blockIdx: BlockIndex): uint64 =
169169
func era*(blockNumber: uint64): Era1 =
170170
Era1(blockNumber div MaxEra1Size)
171171

172-
func offsetsLen(startNumber: uint64): int =
172+
func offsetsLen(startNumber: uint64, mergeBlockNumber: uint64): int =
173173
# For the era where the merge happens the era files only holds the blocks
174174
# until the merge block so the offsets length needs to be adapted too.
175175
if startNumber.era() >= mergeBlockNumber.era():
@@ -187,13 +187,16 @@ proc fromCompressedRlpBytes[T](bytes: openArray[byte], v: var T): Result[void, s
187187
except RlpError as e:
188188
err("Invalid compressed RLP data for " & $T & ": " & e.msg)
189189

190-
proc init*(T: type Era1Group, f: IoHandle, startNumber: uint64): Result[T, string] =
190+
proc init*(
191+
T: type Era1Group, f: IoHandle, startNumber: uint64, mergeBlockNumber: uint64
192+
): Result[T, string] =
191193
discard ?f.appendHeader(E2Version, 0)
192194

193195
ok(
194196
Era1Group(
195197
blockIndex: BlockIndex(
196-
startNumber: startNumber, offsets: newSeq[int64](startNumber.offsetsLen())
198+
startNumber: startNumber,
199+
offsets: newSeq[int64](offsetsLen(startNumber, mergeBlockNumber)),
197200
)
198201
)
199202
)
@@ -268,7 +271,9 @@ type
268271
BlockTuple* =
269272
tuple[header: headers.Header, body: BlockBody, receipts: seq[Receipt], td: UInt256]
270273

271-
proc open*(_: type Era1File, name: string): Result[Era1File, string] =
274+
proc open*(
275+
_: type Era1File, name: string, mergeBlockNumber: uint64
276+
): Result[Era1File, string] =
272277
var f = Opt[IoHandle].ok(?openFile(name, {OpenFlags.Read}).mapErr(ioErrorMsg))
273278

274279
defer:
@@ -284,8 +289,11 @@ proc open*(_: type Era1File, name: string): Result[Era1File, string] =
284289
?f[].setFilePos(blockIdxPos, SeekPosition.SeekCurrent).mapErr(ioErrorMsg)
285290

286291
let blockIdx = ?f[].readBlockIndex()
287-
if blockIdx.offsets.len() != blockIdx.startNumber.offsetsLen():
288-
return err("Block index length invalid")
292+
# TODO: Re-enable this check when Sepolia era files are fixed
293+
# let offsetLen = offsetsLen(blockIdx.startNumber, mergeBlockNumber)
294+
# if blockIdx.offsets.len() != offsetLen:
295+
# return err("Block index length invalid: " & $blockIdx.offsets.len() & " vs expected " &
296+
# $offsetLen)
289297

290298
let res = Era1File(handle: f, blockIdx: blockIdx)
291299
reset(f)

portal/network/network_metadata.nim

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ import
1212
results,
1313
stew/io2,
1414
chronos/timer,
15+
eth/common/headers,
1516
beacon_chain/spec/forks,
1617
../eth_history/block_proofs/historical_hashes_accumulator
1718

@@ -95,6 +96,9 @@ func isTimestampForked(forkTime: Opt[Moment], timestamp: Moment): bool =
9596
func isPoSBlock*(c: ChainConfig, blockNumber: uint64): bool =
9697
c.mergeNetsplitBlock <= blockNumber
9798

99+
func isPoSBlock*(c: ChainConfig, header: Header): bool =
100+
c.mergeNetsplitBlock <= header.number
101+
98102
func isShanghai*(c: ChainConfig, timestamp: Moment): bool =
99103
isTimestampForked(c.shanghaiTime, timestamp)
100104

portal/tests/eth_history_tests/all_eth_history_custom_chain_tests.nim

Lines changed: 0 additions & 10 deletions
This file was deleted.

0 commit comments

Comments (0)