Commit 427e74c

Merge pull request #108 from tnull/2023-05-improve-logging

Improve logging

2 parents d2d75a9 + 52ccd58, commit 427e74c

6 files changed (+122, -51 lines)

bindings/ldk_node.udl (10 additions, 0 deletions)

@@ -6,6 +6,7 @@ dictionary Config {
     Network network;
     NetAddress? listening_address;
     u32 default_cltv_expiry_delta;
+    LogLevel log_level;
 };
 
 interface Builder {
@@ -174,6 +175,15 @@ dictionary PeerDetails {
     boolean is_connected;
 };
 
+enum LogLevel {
+    "Gossip",
+    "Trace",
+    "Debug",
+    "Info",
+    "Warn",
+    "Error",
+};
+
 [Custom]
 typedef string Txid;
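For context, a minimal sketch of what the new dictionary field looks like from the Rust side. The `Config` field names, the `LogLevel` re-export, and the `Default` impl come from the src/lib.rs changes further down; everything else here is illustrative only:

    use ldk_node::{Config, LogLevel};

    fn main() {
        // Illustrative only: override the new `log_level` field and keep the
        // remaining defaults (network, listening address, CLTV expiry delta).
        let config = Config { log_level: LogLevel::Info, ..Default::default() };
        assert_eq!(config.default_cltv_expiry_delta, 144);
    }
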

src/event.rs (2 additions, 1 deletion)

@@ -526,7 +526,8 @@ where
             }
             LdkEvent::SpendableOutputs { outputs } => {
                 // TODO: We should eventually remember the outputs and supply them to the wallet's coin selection, once BDK allows us to do so.
-                let destination_address = self.wallet.get_new_address().unwrap();
+                let destination_address =
+                    self.wallet.get_new_address().expect("Failed to get destination address");
                 let output_descriptors = &outputs.iter().collect::<Vec<_>>();
                 let tx_feerate =
                     self.wallet.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);

src/io/utils.rs (50 additions, 23 deletions)

@@ -92,85 +92,104 @@ where
 }
 
 /// Read a previously persisted [`NetworkGraph`] from the store.
-pub(crate) fn read_network_graph<K: KVStore + Sync + Send, L: Deref>(
+pub(crate) fn read_network_graph<K: KVStore + Sync + Send, L: Deref + Clone>(
     kv_store: Arc<K>, logger: L,
 ) -> Result<NetworkGraph<L>, std::io::Error>
 where
     L::Target: Logger,
 {
     let mut reader =
         kv_store.read(NETWORK_GRAPH_PERSISTENCE_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY)?;
-    NetworkGraph::read(&mut reader, logger).map_err(|_| {
+    NetworkGraph::read(&mut reader, logger.clone()).map_err(|e| {
+        log_error!(logger, "Failed to deserialize NetworkGraph: {}", e);
         std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize NetworkGraph")
     })
 }
 
 /// Read a previously persisted [`Scorer`] from the store.
-pub(crate) fn read_scorer<K: KVStore + Sync + Send, G: Deref<Target = NetworkGraph<L>>, L: Deref>(
+pub(crate) fn read_scorer<
+    K: KVStore + Send + Sync,
+    G: Deref<Target = NetworkGraph<L>>,
+    L: Deref + Clone,
+>(
     kv_store: Arc<K>, network_graph: G, logger: L,
 ) -> Result<ProbabilisticScorer<G, L>, std::io::Error>
 where
     L::Target: Logger,
 {
     let params = ProbabilisticScoringParameters::default();
     let mut reader = kv_store.read(SCORER_PERSISTENCE_NAMESPACE, SCORER_PERSISTENCE_KEY)?;
-    let args = (params, network_graph, logger);
-    ProbabilisticScorer::read(&mut reader, args).map_err(|_| {
+    let args = (params, network_graph, logger.clone());
+    ProbabilisticScorer::read(&mut reader, args).map_err(|e| {
+        log_error!(logger, "Failed to deserialize scorer: {}", e);
         std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize Scorer")
     })
 }
 
 /// Read previously persisted events from the store.
-pub(crate) fn read_event_queue<K: KVStore + Sync + Send, L: Deref>(
+pub(crate) fn read_event_queue<K: KVStore + Sync + Send, L: Deref + Clone>(
     kv_store: Arc<K>, logger: L,
 ) -> Result<EventQueue<K, L>, std::io::Error>
 where
     L::Target: Logger,
 {
     let mut reader =
         kv_store.read(EVENT_QUEUE_PERSISTENCE_NAMESPACE, EVENT_QUEUE_PERSISTENCE_KEY)?;
-    EventQueue::read(&mut reader, (kv_store, logger)).map_err(|_| {
+    EventQueue::read(&mut reader, (kv_store, logger.clone())).map_err(|e| {
+        log_error!(logger, "Failed to deserialize event queue: {}", e);
         std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize EventQueue")
     })
 }
 
 /// Read previously persisted peer info from the store.
-pub(crate) fn read_peer_info<K: KVStore + Sync + Send, L: Deref>(
+pub(crate) fn read_peer_info<K: KVStore + Sync + Send, L: Deref + Clone>(
     kv_store: Arc<K>, logger: L,
 ) -> Result<PeerStore<K, L>, std::io::Error>
 where
     L::Target: Logger,
 {
     let mut reader = kv_store.read(PEER_INFO_PERSISTENCE_NAMESPACE, PEER_INFO_PERSISTENCE_KEY)?;
-    PeerStore::read(&mut reader, (kv_store, logger)).map_err(|_| {
+    PeerStore::read(&mut reader, (kv_store, logger.clone())).map_err(|e| {
+        log_error!(logger, "Failed to deserialize peer store: {}", e);
         std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize PeerStore")
     })
 }
 
 /// Read previously persisted payments information from the store.
-pub(crate) fn read_payments<K: KVStore + Sync + Send>(
-    kv_store: Arc<K>,
-) -> Result<Vec<PaymentDetails>, std::io::Error> {
+pub(crate) fn read_payments<K: KVStore + Sync + Send, L: Deref>(
+    kv_store: Arc<K>, logger: L,
+) -> Result<Vec<PaymentDetails>, std::io::Error>
+where
+    L::Target: Logger,
+{
     let mut res = Vec::new();
 
     for stored_key in kv_store.list(PAYMENT_INFO_PERSISTENCE_NAMESPACE)? {
         let payment = PaymentDetails::read(
             &mut kv_store.read(PAYMENT_INFO_PERSISTENCE_NAMESPACE, &stored_key)?,
         )
-        .map_err(|_| {
-            std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize Payment")
+        .map_err(|e| {
+            log_error!(logger, "Failed to deserialize PaymentDetails: {}", e);
+            std::io::Error::new(
+                std::io::ErrorKind::InvalidData,
+                "Failed to deserialize PaymentDetails",
+            )
         })?;
         res.push(payment);
     }
     Ok(res)
 }
 
-pub(crate) fn read_latest_rgs_sync_timestamp<K: KVStore + Sync + Send>(
-    kv_store: Arc<K>,
-) -> Result<u32, std::io::Error> {
+pub(crate) fn read_latest_rgs_sync_timestamp<K: KVStore + Sync + Send, L: Deref>(
+    kv_store: Arc<K>, logger: L,
+) -> Result<u32, std::io::Error>
+where
+    L::Target: Logger,
+{
     let mut reader =
         kv_store.read(LATEST_RGS_SYNC_TIMESTAMP_NAMESPACE, LATEST_RGS_SYNC_TIMESTAMP_KEY)?;
-    u32::read(&mut reader).map_err(|_| {
+    u32::read(&mut reader).map_err(|e| {
+        log_error!(logger, "Failed to deserialize latest RGS sync timestamp: {}", e);
         std::io::Error::new(
             std::io::ErrorKind::InvalidData,
             "Failed to deserialize latest RGS sync timestamp",
@@ -199,15 +218,23 @@ where
     })
 }
 
-pub(crate) fn read_latest_node_ann_bcast_timestamp<K: KVStore + Sync + Send>(
-    kv_store: Arc<K>,
-) -> Result<u64, std::io::Error> {
+pub(crate) fn read_latest_node_ann_bcast_timestamp<K: KVStore + Sync + Send, L: Deref>(
+    kv_store: Arc<K>, logger: L,
+) -> Result<u64, std::io::Error>
+where
+    L::Target: Logger,
+{
     let mut reader = kv_store
         .read(LATEST_NODE_ANN_BCAST_TIMESTAMP_NAMESPACE, LATEST_NODE_ANN_BCAST_TIMESTAMP_KEY)?;
-    u64::read(&mut reader).map_err(|_| {
+    u64::read(&mut reader).map_err(|e| {
+        log_error!(
+            logger,
+            "Failed to deserialize latest node announcement broadcast timestamp: {}",
+            e
+        );
         std::io::Error::new(
             std::io::ErrorKind::InvalidData,
-            "Failed to deserialize latest node announcment broadcast timestamp",
+            "Failed to deserialize latest node announcement broadcast timestamp",
         )
     })
 }
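
The recurring change in this file is that each read helper now takes a logger and reports the concrete deserialization error via `log_error!` before collapsing it into an opaque `std::io::Error`. A standalone sketch of that pattern, with `eprintln!` standing in for LDK's `log_error!` macro and a hypothetical `map_deser_err` helper that is not part of the crate:

    use std::io;

    // Hypothetical helper mirroring the pattern above: surface the underlying
    // error where it can be logged, then hand the caller a generic InvalidData error.
    fn map_deser_err<E: std::fmt::Display>(e: E, what: &str) -> io::Error {
        eprintln!("Failed to deserialize {}: {}", what, e); // stand-in for log_error!(logger, ...)
        io::Error::new(io::ErrorKind::InvalidData, format!("Failed to deserialize {}", what))
    }

    fn main() {
        // The caller still only sees InvalidData; the root cause ends up in the logs.
        let res: Result<u32, io::Error> = "not-a-number"
            .parse::<u32>()
            .map_err(|e| map_deser_err(e, "latest RGS sync timestamp"));
        assert!(res.is_err());
    }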

src/lib.rs (46 additions, 20 deletions)

@@ -93,7 +93,6 @@ mod wallet;
 pub use bip39;
 pub use bitcoin;
 pub use lightning;
-use lightning::ln::msgs::RoutingMessageHandler;
 pub use lightning_invoice;
 
 pub use error::Error as NodeError;
@@ -126,11 +125,13 @@ use lightning::chain::{chainmonitor, BestBlock, Confirm, Watch};
 use lightning::ln::channelmanager::{
     self, ChainParameters, ChannelManagerReadArgs, PaymentId, RecipientOnionFields, Retry,
 };
+use lightning::ln::msgs::RoutingMessageHandler;
 use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler};
 use lightning::ln::{PaymentHash, PaymentPreimage};
 use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters};
 
 use lightning::util::config::{ChannelHandshakeConfig, ChannelHandshakeLimits, UserConfig};
+pub use lightning::util::logger::Level as LogLevel;
 use lightning::util::ser::ReadableArgs;
 
 use lightning_background_processor::process_events_async;
@@ -173,6 +174,7 @@ const DEFAULT_NETWORK: Network = Network::Bitcoin;
 const DEFAULT_LISTENING_ADDR: &str = "0.0.0.0:9735";
 const DEFAULT_CLTV_EXPIRY_DELTA: u32 = 144;
 const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api";
+const DEFAULT_LOG_LEVEL: LogLevel = LogLevel::Debug;
 
 // The 'stop gap' parameter used by BDK's wallet sync. This seems to configure the threshold
 // number of blocks after which BDK stops looking for scripts belonging to the wallet.
@@ -204,9 +206,10 @@ const WALLET_KEYS_SEED_LEN: usize = 64;
 /// | Parameter                   | Value            |
 /// |-----------------------------|------------------|
 /// | `storage_dir_path`          | /tmp/ldk_node/   |
-/// | `network`                   | Network::Bitcoin |
+/// | `network`                   | `Bitcoin`        |
 /// | `listening_address`         | 0.0.0.0:9735     |
 /// | `default_cltv_expiry_delta` | 144              |
+/// | `log_level`                 | `Debug`          |
 ///
 pub struct Config {
     /// The path where the underlying LDK and BDK persist their data.
@@ -217,6 +220,10 @@ pub struct Config {
     pub listening_address: Option<NetAddress>,
     /// The default CLTV expiry delta to be used for payments.
     pub default_cltv_expiry_delta: u32,
+    /// The level at which we log messages.
+    ///
+    /// Any messages below this level will be excluded from the logs.
+    pub log_level: LogLevel,
 }
 
 impl Default for Config {
@@ -226,6 +233,7 @@ impl Default for Config {
             network: DEFAULT_NETWORK,
             listening_address: Some(DEFAULT_LISTENING_ADDR.parse().unwrap()),
             default_cltv_expiry_delta: DEFAULT_CLTV_EXPIRY_DELTA,
+            log_level: DEFAULT_LOG_LEVEL,
         }
     }
 }
@@ -348,6 +356,12 @@ impl Builder {
         config.listening_address = Some(listening_address);
     }
 
+    /// Sets the level at which [`Node`] will log messages.
+    pub fn set_log_level(&self, level: LogLevel) {
+        let mut config = self.config.write().unwrap();
+        config.log_level = level;
+    }
+
    /// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options
    /// previously configured.
    pub fn build(&self) -> Arc<Node<FilesystemStore>> {
@@ -371,7 +385,7 @@ impl Builder {
 
         // Initialize the Logger
         let log_file_path = format!("{}/ldk_node.log", config.storage_dir_path);
-        let logger = Arc::new(FilesystemLogger::new(log_file_path));
+        let logger = Arc::new(FilesystemLogger::new(log_file_path, config.log_level));
 
         // Initialize the on-chain wallet and chain access
         let seed_bytes = match &*self.entropy_source_config.read().unwrap() {
@@ -469,7 +483,6 @@ impl Builder {
                 if e.kind() == std::io::ErrorKind::NotFound {
                     Arc::new(NetworkGraph::new(config.network, Arc::clone(&logger)))
                 } else {
-                    log_error!(logger, "Failed to read network graph: {}", e.to_string());
                     panic!("Failed to read network graph: {}", e.to_string());
                 }
             }
@@ -490,7 +503,6 @@ impl Builder {
                     Arc::clone(&logger),
                 )))
                 } else {
-                    log_error!(logger, "Failed to read scorer: {}", e.to_string());
                     panic!("Failed to read scorer: {}", e.to_string());
                 }
             }
@@ -609,8 +621,11 @@ impl Builder {
                 p2p_source
             }
             GossipSourceConfig::RapidGossipSync(rgs_server) => {
-                let latest_sync_timestamp =
-                    io::utils::read_latest_rgs_sync_timestamp(Arc::clone(&kv_store)).unwrap_or(0);
+                let latest_sync_timestamp = io::utils::read_latest_rgs_sync_timestamp(
+                    Arc::clone(&kv_store),
+                    Arc::clone(&logger),
+                )
+                .unwrap_or(0);
                 Arc::new(GossipSource::new_rgs(
                     rgs_server.clone(),
                     latest_sync_timestamp,
@@ -648,15 +663,17 @@ impl Builder {
         ));
 
         // Init payment info storage
-        let payment_store = match io::utils::read_payments(Arc::clone(&kv_store)) {
-            Ok(payments) => {
-                Arc::new(PaymentStore::new(payments, Arc::clone(&kv_store), Arc::clone(&logger)))
-            }
-            Err(e) => {
-                log_error!(logger, "Failed to read payment information: {}", e.to_string());
-                panic!("Failed to read payment information: {}", e.to_string());
-            }
-        };
+        let payment_store =
+            match io::utils::read_payments(Arc::clone(&kv_store), Arc::clone(&logger)) {
+                Ok(payments) => Arc::new(PaymentStore::new(
+                    payments,
+                    Arc::clone(&kv_store),
+                    Arc::clone(&logger),
+                )),
+                Err(e) => {
+                    panic!("Failed to read payment information: {}", e.to_string());
+                }
+            };
 
         let event_queue =
             match io::utils::read_event_queue(Arc::clone(&kv_store), Arc::clone(&logger)) {
@@ -665,7 +682,6 @@ impl Builder {
                 if e.kind() == std::io::ErrorKind::NotFound {
                     Arc::new(EventQueue::new(Arc::clone(&kv_store), Arc::clone(&logger)))
                 } else {
-                    log_error!(logger, "Failed to read event queue: {}", e.to_string());
                     panic!("Failed to read event queue: {}", e.to_string());
                 }
             }
@@ -678,7 +694,6 @@ impl Builder {
                 if e.kind() == std::io::ErrorKind::NotFound {
                     Arc::new(PeerStore::new(Arc::clone(&kv_store), Arc::clone(&logger)))
                 } else {
-                    log_error!(logger, "Failed to read peer store: {}", e.to_string());
                     panic!("Failed to read peer store: {}", e.to_string());
                 }
             }
@@ -747,6 +762,8 @@ impl<K: KVStore + Sync + Send + 'static> Node<K> {
             return Err(Error::AlreadyRunning);
         }
 
+        log_info!(self.logger, "Starting up LDK Node on network: {}", self.config.network);
+
         let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap();
 
         let event_handler = Arc::new(EventHandler::new(
@@ -969,7 +986,7 @@ impl<K: KVStore + Sync + Send + 'static> Node<K> {
                         return;
                     }
                     _ = interval.tick() => {
-                        let skip_broadcast = match io::utils::read_latest_node_ann_bcast_timestamp(Arc::clone(&bcast_store)) {
+                        let skip_broadcast = match io::utils::read_latest_node_ann_bcast_timestamp(Arc::clone(&bcast_store), Arc::clone(&bcast_logger)) {
                            Ok(latest_bcast_time_secs) => {
                                // Skip if the time hasn't elapsed yet.
                                let next_bcast_unix_time = SystemTime::UNIX_EPOCH + Duration::from_secs(latest_bcast_time_secs) + NODE_ANN_BCAST_INTERVAL;
@@ -1049,6 +1066,8 @@ impl<K: KVStore + Sync + Send + 'static> Node<K> {
         });
 
         *runtime_lock = Some(runtime);
+
+        log_info!(self.logger, "Startup complete.");
         Ok(())
     }
 
@@ -1057,6 +1076,9 @@ impl<K: KVStore + Sync + Send + 'static> Node<K> {
    /// After this returns most API methods will return [`Error::NotRunning`].
    pub fn stop(&self) -> Result<(), Error> {
        let runtime = self.runtime.write().unwrap().take().ok_or(Error::NotRunning)?;
+
+       log_info!(self.logger, "Shutting down LDK Node...");
+
        // Stop the runtime.
        match self.stop_sender.send(()) {
            Ok(_) => (),
@@ -1074,6 +1096,8 @@ impl<K: KVStore + Sync + Send + 'static> Node<K> {
        self.peer_manager.disconnect_all_peers();
 
        runtime.shutdown_timeout(Duration::from_secs(10));
+
+       log_info!(self.logger, "Shutdown complete.");
        Ok(())
    }
 
@@ -1099,7 +1123,9 @@ impl<K: KVStore + Sync + Send + 'static> Node<K> {
    ///
    /// **Note:** This **MUST** be called after each event has been handled.
    pub fn event_handled(&self) {
-       self.event_queue.event_handled().unwrap();
+       self.event_queue
+           .event_handled()
+           .expect("Couldn't mark event handled due to persistence failure");
    }
 
    /// Returns our own node id
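
Putting the src/lib.rs pieces together, the new level is configured on the `Builder` before `build()`, which then hands it to the `FilesystemLogger`. A minimal usage sketch; `Builder::new()`, `Node::start()`, and `Node::stop()` are assumed to exist as in the rest of the crate's API, and only `set_log_level` is introduced by this commit:

    use ldk_node::{Builder, LogLevel};

    fn main() {
        let builder = Builder::new();
        // New in this commit: lower the threshold from the Debug default to Trace;
        // per the Config docs above, messages below this level are excluded.
        builder.set_log_level(LogLevel::Trace);

        // build() creates the FilesystemLogger with config.log_level.
        let node = builder.build();
        node.start().expect("failed to start node");
        // "Starting up LDK Node on network: ..." and "Startup complete." are now
        // logged during start(); "Shutting down LDK Node..." during stop().
        node.stop().expect("failed to stop node");
    }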
