@@ -93,7 +93,6 @@ mod wallet;
pub use bip39;
pub use bitcoin;
pub use lightning;
-use lightning::ln::msgs::RoutingMessageHandler;
pub use lightning_invoice;

pub use error::Error as NodeError;
@@ -126,11 +125,13 @@ use lightning::chain::{chainmonitor, BestBlock, Confirm, Watch};
use lightning::ln::channelmanager::{
	self, ChainParameters, ChannelManagerReadArgs, PaymentId, RecipientOnionFields, Retry,
};
+use lightning::ln::msgs::RoutingMessageHandler;
use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler};
use lightning::ln::{PaymentHash, PaymentPreimage};
use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters};

use lightning::util::config::{ChannelHandshakeConfig, ChannelHandshakeLimits, UserConfig};
+pub use lightning::util::logger::Level as LogLevel;
use lightning::util::ser::ReadableArgs;

use lightning_background_processor::process_events_async;
@@ -173,6 +174,7 @@ const DEFAULT_NETWORK: Network = Network::Bitcoin;
const DEFAULT_LISTENING_ADDR: &str = "0.0.0.0:9735";
const DEFAULT_CLTV_EXPIRY_DELTA: u32 = 144;
const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api";
+const DEFAULT_LOG_LEVEL: LogLevel = LogLevel::Debug;

// The 'stop gap' parameter used by BDK's wallet sync. This seems to configure the threshold
// number of blocks after which BDK stops looking for scripts belonging to the wallet.
@@ -204,9 +206,10 @@ const WALLET_KEYS_SEED_LEN: usize = 64;
/// | Parameter                   | Value            |
/// |-----------------------------|------------------|
/// | `storage_dir_path`          | /tmp/ldk_node/   |
-/// | `network`                   | Network::Bitcoin |
+/// | `network`                   | `Bitcoin`        |
/// | `listening_address`         | 0.0.0.0:9735     |
/// | `default_cltv_expiry_delta` | 144              |
+/// | `log_level`                 | `Debug`          |
///
pub struct Config {
	/// The path where the underlying LDK and BDK persist their data.
@@ -217,6 +220,10 @@ pub struct Config {
	pub listening_address: Option<NetAddress>,
	/// The default CLTV expiry delta to be used for payments.
	pub default_cltv_expiry_delta: u32,
+	/// The level at which we log messages.
+	///
+	/// Any messages below this level will be excluded from the logs.
+	pub log_level: LogLevel,
}

impl Default for Config {
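The doc comment above pins down the filtering rule: any record below the configured `log_level` is dropped. As a rough illustration only (not the crate's logger code), and assuming the re-exported `LogLevel` (`lightning::util::logger::Level`) orders its variants from least to most severe, the rule reduces to a comparison:

```rust
use ldk_node::LogLevel;

// Hypothetical helper, not part of this change: keep a record only if it is at
// least as severe as the configured level.
fn should_emit(configured: LogLevel, record: LogLevel) -> bool {
	record >= configured
}

fn main() {
	// With the default of Debug, Error records pass and Trace records are dropped.
	assert!(should_emit(LogLevel::Debug, LogLevel::Error));
	assert!(!should_emit(LogLevel::Debug, LogLevel::Trace));
}
```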
@@ -226,6 +233,7 @@ impl Default for Config {
			network: DEFAULT_NETWORK,
			listening_address: Some(DEFAULT_LISTENING_ADDR.parse().unwrap()),
			default_cltv_expiry_delta: DEFAULT_CLTV_EXPIRY_DELTA,
+			log_level: DEFAULT_LOG_LEVEL,
		}
	}
}
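For reference, overriding the new default from a consumer could look like the sketch below; only the public `log_level` field and the `Default` impl come from this diff, and the `ldk_node` crate path is assumed.

```rust
use ldk_node::{Config, LogLevel};

fn main() {
	// Start from the documented defaults and raise verbosity from Debug to Trace.
	let mut config = Config::default();
	config.log_level = LogLevel::Trace;
}
```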
@@ -348,6 +356,12 @@ impl Builder {
		config.listening_address = Some(listening_address);
	}

+	/// Sets the level at which [`Node`] will log messages.
+	pub fn set_log_level(&self, level: LogLevel) {
+		let mut config = self.config.write().unwrap();
+		config.log_level = level;
+	}
+
	/// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options
	/// previously configured.
	pub fn build(&self) -> Arc<Node<FilesystemStore>> {
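To see the new setter in context, here is a minimal usage sketch. It assumes the existing `Builder::new()`, `build()`, and `Node::start()` entry points; only `set_log_level` is new in this change. Note that the setter takes `&self`, mutating the config through its internal lock.

```rust
use ldk_node::{Builder, LogLevel};

fn main() {
	let builder = Builder::new(); // assumed existing constructor
	builder.set_log_level(LogLevel::Info); // new in this change: drop Debug/Trace records
	let node = builder.build();
	node.start().unwrap();
}
```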
@@ -371,7 +385,7 @@ impl Builder {

		// Initialize the Logger
		let log_file_path = format!("{}/ldk_node.log", config.storage_dir_path);
-		let logger = Arc::new(FilesystemLogger::new(log_file_path));
+		let logger = Arc::new(FilesystemLogger::new(log_file_path, config.log_level));

		// Initialize the on-chain wallet and chain access
		let seed_bytes = match &*self.entropy_source_config.read().unwrap() {
@@ -469,7 +483,6 @@ impl Builder {
				if e.kind() == std::io::ErrorKind::NotFound {
					Arc::new(NetworkGraph::new(config.network, Arc::clone(&logger)))
				} else {
-					log_error!(logger, "Failed to read network graph: {}", e.to_string());
					panic!("Failed to read network graph: {}", e.to_string());
				}
			}
@@ -490,7 +503,6 @@ impl Builder {
						Arc::clone(&logger),
					)))
				} else {
-					log_error!(logger, "Failed to read scorer: {}", e.to_string());
					panic!("Failed to read scorer: {}", e.to_string());
				}
			}
@@ -609,8 +621,11 @@ impl Builder {
				p2p_source
			}
			GossipSourceConfig::RapidGossipSync(rgs_server) => {
-				let latest_sync_timestamp =
-					io::utils::read_latest_rgs_sync_timestamp(Arc::clone(&kv_store)).unwrap_or(0);
+				let latest_sync_timestamp = io::utils::read_latest_rgs_sync_timestamp(
+					Arc::clone(&kv_store),
+					Arc::clone(&logger),
+				)
+				.unwrap_or(0);
				Arc::new(GossipSource::new_rgs(
					rgs_server.clone(),
					latest_sync_timestamp,
@@ -648,15 +663,17 @@ impl Builder {
		));

		// Init payment info storage
-		let payment_store = match io::utils::read_payments(Arc::clone(&kv_store)) {
-			Ok(payments) => {
-				Arc::new(PaymentStore::new(payments, Arc::clone(&kv_store), Arc::clone(&logger)))
-			}
-			Err(e) => {
-				log_error!(logger, "Failed to read payment information: {}", e.to_string());
-				panic!("Failed to read payment information: {}", e.to_string());
-			}
-		};
+		let payment_store =
+			match io::utils::read_payments(Arc::clone(&kv_store), Arc::clone(&logger)) {
+				Ok(payments) => Arc::new(PaymentStore::new(
+					payments,
+					Arc::clone(&kv_store),
+					Arc::clone(&logger),
+				)),
+				Err(e) => {
+					panic!("Failed to read payment information: {}", e.to_string());
+				}
+			};

		let event_queue =
			match io::utils::read_event_queue(Arc::clone(&kv_store), Arc::clone(&logger)) {
@@ -665,7 +682,6 @@ impl Builder {
				if e.kind() == std::io::ErrorKind::NotFound {
					Arc::new(EventQueue::new(Arc::clone(&kv_store), Arc::clone(&logger)))
				} else {
-					log_error!(logger, "Failed to read event queue: {}", e.to_string());
					panic!("Failed to read event queue: {}", e.to_string());
				}
			}
@@ -678,7 +694,6 @@ impl Builder {
				if e.kind() == std::io::ErrorKind::NotFound {
					Arc::new(PeerStore::new(Arc::clone(&kv_store), Arc::clone(&logger)))
				} else {
-					log_error!(logger, "Failed to read peer store: {}", e.to_string());
					panic!("Failed to read peer store: {}", e.to_string());
				}
			}
@@ -747,6 +762,8 @@ impl<K: KVStore + Sync + Send + 'static> Node<K> {
			return Err(Error::AlreadyRunning);
		}

+		log_info!(self.logger, "Starting up LDK Node on network: {}", self.config.network);
+
		let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap();

		let event_handler = Arc::new(EventHandler::new(
@@ -969,7 +986,7 @@ impl<K: KVStore + Sync + Send + 'static> Node<K> {
							return;
						}
						_ = interval.tick() => {
-							let skip_broadcast = match io::utils::read_latest_node_ann_bcast_timestamp(Arc::clone(&bcast_store)) {
+							let skip_broadcast = match io::utils::read_latest_node_ann_bcast_timestamp(Arc::clone(&bcast_store), Arc::clone(&bcast_logger)) {
								Ok(latest_bcast_time_secs) => {
									// Skip if the time hasn't elapsed yet.
									let next_bcast_unix_time = SystemTime::UNIX_EPOCH + Duration::from_secs(latest_bcast_time_secs) + NODE_ANN_BCAST_INTERVAL;
@@ -1049,6 +1066,8 @@ impl<K: KVStore + Sync + Send + 'static> Node<K> {
		});

		*runtime_lock = Some(runtime);
+
+		log_info!(self.logger, "Startup complete.");
		Ok(())
	}

@@ -1057,6 +1076,9 @@ impl<K: KVStore + Sync + Send + 'static> Node<K> {
	/// After this returns most API methods will return [`Error::NotRunning`].
	pub fn stop(&self) -> Result<(), Error> {
		let runtime = self.runtime.write().unwrap().take().ok_or(Error::NotRunning)?;
+
+		log_info!(self.logger, "Shutting down LDK Node...");
+
		// Stop the runtime.
		match self.stop_sender.send(()) {
			Ok(_) => (),
@@ -1074,6 +1096,8 @@ impl<K: KVStore + Sync + Send + 'static> Node<K> {
		self.peer_manager.disconnect_all_peers();

		runtime.shutdown_timeout(Duration::from_secs(10));
+
+		log_info!(self.logger, "Shutdown complete.");
		Ok(())
	}

@@ -1099,7 +1123,9 @@ impl<K: KVStore + Sync + Send + 'static> Node<K> {
	///
	/// **Note:** This **MUST** be called after each event has been handled.
	pub fn event_handled(&self) {
-		self.event_queue.event_handled().unwrap();
+		self.event_queue
+			.event_handled()
+			.expect("Couldn't mark event handled due to persistence failure");
	}

	/// Returns our own node id
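Since `event_handled` now fails loudly via `expect`, the calling pattern the doc comment requires looks roughly like the sketch below. `wait_next_event()` is assumed from the existing event API and is not touched by this diff; node setup is abbreviated (see the builder sketch above).

```rust
use ldk_node::Builder;

fn main() {
	// Hypothetical consumer loop illustrating the "MUST acknowledge every event" contract.
	let node = Builder::new().build(); // assumed existing constructor
	node.start().unwrap();

	loop {
		let event = node.wait_next_event(); // assumed existing accessor
		// ... handle `event` (channel ready, payment received, etc.) ...
		let _ = event;
		node.event_handled(); // must follow each handled event; panics with a clear message if persistence fails
	}
}
```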