Commit 28d3c6b

address review comments from shesek
1 parent cc2aaef commit 28d3c6b

2 files changed: +66 -102 lines changed

src/bin/electrs.rs

Lines changed: 3 additions & 27 deletions
@@ -5,12 +5,9 @@ extern crate log;
 extern crate electrs;
 
 use error_chain::ChainedError;
-use std::collections::HashSet;
 use std::process;
 use std::sync::{Arc, RwLock};
-use std::time::{Duration, Instant};
-use bitcoin::Txid;
-use serde_json::json;
+use std::time::Duration;
 
 use electrs::{
     config::{Config, get_num_threads},
@@ -88,7 +85,7 @@ fn run_server(config: Arc<Config>) -> Result<()> {
         &metrics,
         Arc::clone(&config),
     )));
-    mempool.write().unwrap().update(&daemon)?;
+    Mempool::update(&mempool, &daemon)?;
 
     #[cfg(feature = "liquid")]
     let asset_db = config.asset_db_path.as_ref().map(|db_dir| {
@@ -125,29 +122,8 @@ fn run_server(config: Arc<Config>) -> Result<()> {
             tip = current_tip;
         };
 
-        // FIXME(jamesdorfman): couldn't figure out how to import it from util
-        let log_fn_duration = |fn_name: &str, duration: u128| {
-            let log = json!({
-                "fn_name": fn_name,
-                "duration_micros": duration,
-            });
-            println!("{}", log);
-        };
-
         // Update mempool
-
-        let t = Instant::now();
-
-        let old_txids = mempool.read().unwrap().old_txids();
-        let new_txids = daemon
-            .getmempooltxids()
-            .chain_err(|| "failed to update mempool from daemon")?;
-        let old_mempool_txs: HashSet<&Txid> = old_txids.difference(&new_txids).collect();
-
-        log_fn_duration("mempool::paratial_tx_fetch", t.elapsed().as_micros());
-
-        let new_mempool_txs = Mempool::download_new_mempool_txs(&daemon, &old_txids, &new_txids);
-        mempool.write().unwrap().update_quick( &new_mempool_txs, &old_mempool_txs)?;
+        Mempool::update(&mempool, &daemon)?;
 
         // Update subscribed clients
         electrum_server.notify();
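
The hunk above removes an inline, locally declared duration-logging closure (the FIXME notes it could not easily be imported from util) along with the hand-rolled mempool diffing, replacing both with a single Mempool::update call. For reference, a minimal standalone sketch of such a duration logger, modeled directly on the removed closure body; where the shared helper actually lives in this codebase is not shown in this diff:

use serde_json::json;

// Illustrative sketch based on the removed closure above, not the project's actual util helper.
fn log_fn_duration(fn_name: &str, duration_micros: u128) {
    let log = json!({
        "fn_name": fn_name,
        "duration_micros": duration_micros,
    });
    println!("{}", log);
}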

src/new_index/mempool.rs

Lines changed: 63 additions & 75 deletions
@@ -8,7 +8,7 @@ use elements::{encode::serialize, AssetId};
 
 use std::collections::{BTreeSet, HashMap, HashSet};
 use std::iter::FromIterator;
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
 use std::time::{Duration, Instant};
 
 use crate::chain::{deserialize, Network, OutPoint, Transaction, TxOut, Txid};
@@ -289,80 +289,6 @@ impl Mempool {
         return HashSet::from_iter(self.txstore.keys().cloned());
     }
 
-    pub fn download_new_mempool_txs(daemon: &Daemon, old_txids: &HashSet<Txid>, new_txids: &HashSet<Txid>) -> Vec<Transaction> {
-        let t = Instant::now();
-
-        let txids: Vec<&Txid> = (*new_txids).difference(old_txids).collect();
-        let tranactions = match daemon.gettransactions(&txids) {
-            Ok(txs) => txs,
-            Err(err) => {
-                warn!("failed to get {} transactions: {}", txids.len(), err); // e.g. new block or RBF
-                vec![] // return an empty vector if there's an error
-            }
-        };
-
-        log_fn_duration("mempool::download_new_mempool_txs", t.elapsed().as_micros());
-        return tranactions;
-    }
-
-    pub fn update_quick(&mut self, to_add: &Vec<Transaction>, to_remove: &HashSet<&Txid>) -> Result<()> {
-        let t = Instant::now();
-        let _timer = self.latency.with_label_values(&["update"]).start_timer();
-
-        // Add new transactions
-        self.add(to_add.clone());
-        // Remove missing transactions
-        self.remove(to_remove.clone());
-
-        self.count
-            .with_label_values(&["txs"])
-            .set(self.txstore.len() as f64);
-
-        // Update cached backlog stats (if expired)
-        if self.backlog_stats.1.elapsed() > Duration::from_secs(BACKLOG_STATS_TTL) {
-            self.update_backlog_stats();
-        }
-
-        log_fn_duration("mempool::update_quick", t.elapsed().as_micros());
-        Ok(())
-    }
-
-    pub fn update(&mut self, daemon: &Daemon) -> Result<()> {
-        let t = Instant::now();
-        let _timer = self.latency.with_label_values(&["update"]).start_timer();
-        let new_txids = daemon
-            .getmempooltxids()
-            .chain_err(|| "failed to update mempool from daemon")?;
-        let old_txids = HashSet::from_iter(self.txstore.keys().cloned());
-        let to_remove: HashSet<&Txid> = old_txids.difference(&new_txids).collect();
-
-        // Download and add new transactions from bitcoind's mempool
-        let txids: Vec<&Txid> = new_txids.difference(&old_txids).collect();
-        let to_add = match daemon.gettransactions(&txids) {
-            Ok(txs) => txs,
-            Err(err) => {
-                warn!("failed to get {} transactions: {}", txids.len(), err); // e.g. new block or RBF
-                return Ok(()); // keep the mempool until next update()
-            }
-        };
-        // Add new transactions
-        self.add(to_add);
-        // Remove missing transactions
-        self.remove(to_remove);
-
-        self.count
-            .with_label_values(&["txs"])
-            .set(self.txstore.len() as f64);
-
-        // Update cached backlog stats (if expired)
-        if self.backlog_stats.1.elapsed() > Duration::from_secs(BACKLOG_STATS_TTL) {
-            self.update_backlog_stats();
-        }
-
-        log_fn_duration("mempool::update", t.elapsed().as_micros());
-        Ok(())
-    }
-
     pub fn update_backlog_stats(&mut self) {
         let _timer = self
             .latency
@@ -577,6 +503,68 @@ impl Mempool {
             .get(asset_id)
            .map_or_else(|| vec![], |entries| self._history(entries, limit))
     }
+
+    pub fn download_new_mempool_txs(
+        daemon: &Daemon,
+        old_txids: &HashSet<Txid>,
+        new_txids: &HashSet<Txid>
+    ) -> Result<Vec<Transaction>>{
+        let t = Instant::now();
+
+        let txids: Vec<&Txid> = (*new_txids).difference(old_txids).collect();
+        let transactions = match daemon.gettransactions(&txids) {
+            Ok(txs) => txs,
+            Err(err) => {
+                warn!("failed to get {} transactions: {}", txids.len(), err); // e.g. new block or RBF
+                return Err(err);
+            }
+        };
+
+        log_fn_duration("mempool::download_new_mempool_txs", t.elapsed().as_micros());
+        return Ok(transactions);
+    }
+
+    pub fn update(mempool: &Arc<RwLock<Mempool>>, daemon: &Daemon) -> Result<()> {
+        let t = Instant::now();
+
+        // 1. Determine which transactions are no longer in the daemon's mempool
+        let old_txids = mempool.read().unwrap().old_txids();
+        let new_txids = daemon
+            .getmempooltxids()
+            .chain_err(|| "failed to update mempool from daemon")?;
+        let old_mempool_txs: HashSet<&Txid> = old_txids.difference(&new_txids).collect();
+        log_fn_duration("mempool::paratial_tx_fetch", t.elapsed().as_micros());
+
+        // 2. Download new transactions from the daemon's mempool
+        let new_mempool_txs = match Mempool::download_new_mempool_txs(&daemon, &old_txids, &new_txids) {
+            Ok(txs) => txs,
+            Err(_) => {
+                warn!("Failed to get new mempool txs, skipping mempool update");
+                return Ok(());
+            }
+        };
+
+        // 3. Update local mempool to match daemon's state
+        {
+            let mut mempool_guard = mempool.write().unwrap();
+            // Add new transactions
+            mempool_guard.add(new_mempool_txs.clone());
+            // Remove missing transactions
+            mempool_guard.remove(old_mempool_txs.clone());
+
+            mempool_guard.count
                .with_label_values(&["txs"])
                .set(mempool_guard.txstore.len() as f64);
+
+            // Update cached backlog stats (if expired)
+            if mempool_guard.backlog_stats.1.elapsed() > Duration::from_secs(BACKLOG_STATS_TTL) {
+                mempool_guard.update_backlog_stats();
+            }
+        }
+
+        log_fn_duration("mempool::update", t.elapsed().as_micros());
+        Ok(())
+    }
 }
 
 #[derive(Serialize)]
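
The key signature change in this file is that update is no longer a &mut self method; it now takes &Arc<RwLock<Mempool>> and &Daemon, so the daemon RPCs (getmempooltxids, gettransactions) run while no lock is held, and the write lock is taken only once to apply the add/remove delta. A minimal sketch of that locking pattern with simplified stand-in types (Store, refresh, and the fetch closure are illustrative only, not electrs code):

use std::collections::HashSet;
use std::sync::{Arc, RwLock};

// Simplified stand-in for the mempool store; not the electrs `Mempool` type.
struct Store {
    txids: HashSet<u64>,
}

// `fetch_remote_txids` stands in for the daemon RPC calls.
fn refresh(store: &Arc<RwLock<Store>>, fetch_remote_txids: impl Fn() -> HashSet<u64>) {
    // 1. Snapshot current state under a short-lived read lock.
    let old: HashSet<u64> = store.read().unwrap().txids.clone();

    // 2. Talk to the remote side with no lock held, so concurrent readers stay unblocked.
    let new = fetch_remote_txids();
    let to_remove: Vec<u64> = old.difference(&new).cloned().collect();
    let to_add: Vec<u64> = new.difference(&old).cloned().collect();

    // 3. Apply the delta under a single write lock.
    let mut guard = store.write().unwrap();
    for txid in to_remove {
        guard.txids.remove(&txid);
    }
    for txid in to_add {
        guard.txids.insert(txid);
    }
}

fn main() {
    let store = Arc::new(RwLock::new(Store { txids: HashSet::new() }));
    refresh(&store, || [1u64, 2, 3].into_iter().collect());
    assert_eq!(store.read().unwrap().txids.len(), 3);
}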
