
Commit 1044e6f

apollo_l1_provider: improve add_tx and prevent double scraping
1 parent 41bfe7f commit 1044e6f

3 files changed (+39, -15 lines)


crates/apollo_l1_provider/src/l1_provider.rs

Lines changed: 1 addition & 9 deletions

@@ -122,15 +122,7 @@ impl L1Provider {
                 block_timestamp,
                 scrape_timestamp,
             } => {
-                let tx_hash = l1_handler_tx.tx_hash;
-                let successfully_inserted =
-                    self.tx_manager.add_tx(l1_handler_tx, block_timestamp, scrape_timestamp);
-                if !successfully_inserted {
-                    debug!(
-                        "Unexpected L1 Handler transaction with hash: {tx_hash}, already \
-                         known or committed."
-                    );
-                }
+                self.tx_manager.add_tx(l1_handler_tx, block_timestamp, scrape_timestamp);
             }
             Event::TransactionCancellationStarted {
                 tx_hash,
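A minimal sketch of the call-site shape after this change, assuming a simplified Event enum and a TxManager stub (the names below are illustrative stand-ins, not the crate's actual types): the provider just forwards the scraped transaction and no longer inspects a bool or emits its own duplicate log, since add_tx now handles that internally.

// Illustrative sketch only; Event and TxManager are stand-ins, not the crate's types.
struct TxManager;

impl TxManager {
    // After this commit, add_tx returns () and owns duplicate detection and logging.
    fn add_tx(&mut self, tx_hash: u64, block_timestamp: u64, scrape_timestamp: u64) {
        let _ = (tx_hash, block_timestamp, scrape_timestamp);
    }
}

enum Event {
    Scraped { tx_hash: u64, block_timestamp: u64, scrape_timestamp: u64 },
}

fn process_event(manager: &mut TxManager, event: Event) {
    match event {
        // No bool to inspect and no duplicate log at the call site anymore.
        Event::Scraped { tx_hash, block_timestamp, scrape_timestamp } => {
            manager.add_tx(tx_hash, block_timestamp, scrape_timestamp);
        }
    }
}

fn main() {
    let mut manager = TxManager;
    process_event(
        &mut manager,
        Event::Scraped { tx_hash: 1, block_timestamp: 10, scrape_timestamp: 100 },
    );
}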

crates/apollo_l1_provider/src/l1_provider_tests.rs

Lines changed: 15 additions & 0 deletions
@@ -208,6 +208,21 @@ fn process_events_committed_txs() {
     assert_eq!(l1_provider, expected_l1_provider);
 }
 
+#[test]
+fn add_tx_double_scraped_doesnt_update_scrape_timestamp() {
+    // Setup.
+    let mut l1_provider = L1ProviderContentBuilder::new()
+        .with_timed_txs([(l1_handler(1), 1)])
+        .with_state(ProviderState::Pending)
+        .build_into_l1_provider();
+
+    let expected_l1_provider = l1_provider.clone();
+
+    // Test: double scrape doesn't update the scrape timestamp.
+    l1_provider.add_events(vec![timed_l1_handler_event(tx_hash!(1), 2.into())]).unwrap();
+    assert_eq!(l1_provider, expected_l1_provider);
+}
+
 #[test]
 fn pending_state_returns_error() {
     // Setup.

crates/apollo_l1_provider/src/transaction_manager.rs

Lines changed: 23 additions & 6 deletions
@@ -7,7 +7,7 @@ use apollo_l1_provider_types::{InvalidValidationStatus, ValidationStatus};
 use starknet_api::block::{BlockTimestamp, UnixTimestamp};
 use starknet_api::executable_transaction::L1HandlerTransaction;
 use starknet_api::transaction::TransactionHash;
-use tracing::debug;
+use tracing::{debug, info, warn};
 
 use crate::transaction_record::{
     Records,
@@ -161,14 +161,31 @@ impl TransactionManager {
         tx: L1HandlerTransaction,
         block_timestamp: BlockTimestamp,
         scrape_timestamp: UnixTimestamp,
-    ) -> bool {
+    ) {
         let tx_hash = tx.tx_hash;
+        // If exists, return false and do nothing. If not, create the record as a HashOnly payload.
         let is_new_record = self.create_record_if_not_exist(tx_hash);
-        self.with_record(tx_hash, move |record| {
-            record.tx.set(tx, block_timestamp, scrape_timestamp);
+        // Replace a HashOnly payload with a Full payload. If a full payload already exists, do not
+        // update it, since updating it should not happen (i.e., a double scrape, and may cause the
+        // tx to be re-added to the proposable index).
+        self.with_record(tx_hash, move |record| match &record.tx {
+            TransactionPayload::HashOnly(_) => {
+                if !is_new_record {
+                    info!(
+                        "Transaction {tx_hash} already exists as a HashOnly payload. It was \
+                         probably gotten via sync, and is now updated with a Full payload."
+                    );
+                }
+                record.tx.set(tx, block_timestamp, scrape_timestamp);
+            }
+            TransactionPayload::Full { tx: _, created_at_block_timestamp: _, scrape_timestamp } => {
+                warn!(
+                    "Transaction {tx_hash} already exists as a Full payload, scraped at \
+                     {scrape_timestamp}. This could indicate a double scrape. Ignoring the new \
+                     transaction."
+                );
+            }
         });
-
-        is_new_record
     }
 
     pub fn request_cancellation(
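The core of the change is an upgrade-only rule for a record's payload. The following self-contained sketch (TxPayload, TxManager, and add_tx are simplified stand-ins, not the crate's actual types) shows the pattern: a HashOnly record may be upgraded to a Full payload, but an existing Full payload is never overwritten, so a second scrape of the same transaction cannot reset its scrape timestamp or re-add it to the proposable set.

// Illustrative sketch only; names are stand-ins for the crate's actual types.
use std::collections::HashMap;

type TxHash = u64;
type UnixTimestamp = u64;

#[derive(Debug, PartialEq)]
enum TxPayload {
    HashOnly(TxHash),
    Full { tx_hash: TxHash, scrape_timestamp: UnixTimestamp },
}

#[derive(Default)]
struct TxManager {
    records: HashMap<TxHash, TxPayload>,
}

impl TxManager {
    fn add_tx(&mut self, tx_hash: TxHash, scrape_timestamp: UnixTimestamp) {
        // Create a HashOnly record if the hash has never been seen (e.g. learned via sync).
        let record = self.records.entry(tx_hash).or_insert(TxPayload::HashOnly(tx_hash));

        match record {
            // Upgrade: the hash was known, but no full payload was stored yet.
            TxPayload::HashOnly(_) => {
                *record = TxPayload::Full { tx_hash, scrape_timestamp };
            }
            // Already Full: likely a double scrape; keep the original timestamp.
            TxPayload::Full { scrape_timestamp: existing, .. } => {
                eprintln!("tx {tx_hash} already scraped at {existing}; ignoring new scrape");
            }
        }
    }
}

fn main() {
    let mut manager = TxManager::default();
    manager.add_tx(1, 100);
    // A second scrape of the same tx must not move the stored timestamp.
    manager.add_tx(1, 200);
    assert_eq!(manager.records[&1], TxPayload::Full { tx_hash: 1, scrape_timestamp: 100 });
}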
