Skip to content

Commit d5e20fb

Browse files
committed
Restore debug assertion removed due to lockorder restrictions
In 4582b20 we removed a debug assertion which tested that channels seeing HTLC claim replays on startup would be properly unblocked by later monitor update completion in the downstream channel. We did so because we couldn't pass our lockorder tests taking a `per_peer_state` read lock to check if a channel was closed while a `per_peer_state` read lock was already held. However, there's no actual reason for that; it's just an arbitrary restriction of our lockorder tests. Here we restore the removed debug assertion by simply enabling the skipping of lockorder checking in `#[cfg(test)]` code - we don't care if there's a theoretical deadlock in test-only code as long as none of our tests actually hit it. Note that we also take this opportunity to enable deadlock detection in `no-std` tests as even `#[cfg(not(feature = "std"))]` test builds allow access to `std`.
1 parent 2f9898c commit d5e20fb

File tree

4 files changed

+73
-6
lines changed

4 files changed

+73
-6
lines changed

lightning-liquidity/src/lib.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@
4646
#![allow(ellipsis_inclusive_range_patterns)]
4747
#![allow(clippy::drop_non_drop)]
4848
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
49-
#![cfg_attr(not(feature = "std"), no_std)]
49+
#![cfg_attr(not(any(test, feature = "std")), no_std)]
5050

5151
#[macro_use]
5252
extern crate alloc;

lightning/src/ln/channelmanager.rs

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8311,6 +8311,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
83118311
hold_time,
83128312
);
83138313

8314+
#[cfg(test)]
8315+
let claiming_chan_funding_outpoint = hop_data.outpoint;
83148316
self.claim_funds_from_hop(
83158317
hop_data,
83168318
payment_preimage,
@@ -8329,6 +8331,50 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
83298331
// monitor updates still in flight. In that case, we shouldn't
83308332
// immediately free, but instead let that monitor update complete
83318333
// in the background.
8334+
#[cfg(test)]
8335+
{
8336+
let per_peer_state = self.per_peer_state.deadlocking_read();
8337+
// The channel we'd unblock should already be closed, or...
8338+
let channel_closed = per_peer_state
8339+
.get(&next_channel_counterparty_node_id)
8340+
.map(|lck| lck.deadlocking_lock())
8341+
.map(|peer| !peer.channel_by_id.contains_key(&next_channel_id))
8342+
.unwrap_or(true);
8343+
let background_events =
8344+
self.pending_background_events.lock().unwrap();
8345+
// there should be a `BackgroundEvent` pending...
8346+
let matching_bg_event =
8347+
background_events.iter().any(|ev| {
8348+
match ev {
8349+
// to apply a monitor update that blocked the claiming channel,
8350+
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
8351+
funding_txo, update, ..
8352+
} => {
8353+
if *funding_txo == claiming_chan_funding_outpoint {
8354+
assert!(update.updates.iter().any(|upd|
8355+
if let ChannelMonitorUpdateStep::PaymentPreimage {
8356+
payment_preimage: update_preimage, ..
8357+
} = upd {
8358+
payment_preimage == *update_preimage
8359+
} else { false }
8360+
), "{:?}", update);
8361+
true
8362+
} else { false }
8363+
},
8364+
// or the monitor update has completed and will unblock
8365+
// immediately once we get going.
8366+
BackgroundEvent::MonitorUpdatesComplete {
8367+
channel_id, ..
8368+
} =>
8369+
*channel_id == prev_channel_id,
8370+
}
8371+
});
8372+
assert!(
8373+
channel_closed || matching_bg_event,
8374+
"{:?}",
8375+
*background_events
8376+
);
8377+
}
83328378
(None, None)
83338379
} else if definitely_duplicate {
83348380
if let Some(other_chan) = chan_to_release {

lightning/src/sync/debug_sync.rs

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -332,6 +332,19 @@ impl<T> Mutex<T> {
332332
}
333333
}
334334

335+
#[cfg(test)]
336+
/// Takes a lock without any deadlock detection logic. This should never exist in production
337+
/// code (the deadlock detection logic is important!) but can be used in tests where we're
338+
/// willing to risk deadlocks (accepting that simply no existing tests hit them).
339+
pub fn deadlocking_lock<'a>(&'a self) -> MutexGuard<'a, T> {
340+
let lock = self.inner.lock();
341+
if self.poisoned.load(Ordering::Acquire) {
342+
panic!();
343+
} else {
344+
MutexGuard { mutex: self, lock: Some(lock) }
345+
}
346+
}
347+
335348
pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
336349
LockMetadata::pre_lock(&self.deps, false);
337350
let lock = self.inner.lock();
@@ -443,6 +456,14 @@ impl<T> RwLock<T> {
443456
self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard }).map_err(|_| ())
444457
}
445458

459+
#[cfg(test)]
460+
/// Takes a read lock without any deadlock detection logic. This should never exist in
461+
/// production code (the deadlock detection logic is important!) but can be used in tests where
462+
/// we're willing to risk deadlocks (accepting that simply no existing tests hit them).
463+
pub fn deadlocking_read<'a>(&'a self) -> RwLockReadGuard<'a, T> {
464+
self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard }).unwrap()
465+
}
466+
446467
pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
447468
LockMetadata::pre_lock(&self.deps, false);
448469
self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())

lightning/src/sync/mod.rs

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,11 +20,11 @@ pub(crate) trait LockTestExt<'a> {
2020
fn unsafe_well_ordered_double_lock_self(&'a self) -> Self::ExclLock;
2121
}
2222

23-
#[cfg(all(feature = "std", not(ldk_bench), test))]
23+
#[cfg(all(not(ldk_bench), test))]
2424
mod debug_sync;
25-
#[cfg(all(feature = "std", not(ldk_bench), test))]
25+
#[cfg(all(not(ldk_bench), test))]
2626
pub use debug_sync::*;
27-
#[cfg(all(feature = "std", not(ldk_bench), test))]
27+
#[cfg(all(not(ldk_bench), test))]
2828
// Note that to make debug_sync's regex work this must not contain `debug_string` in the module name
2929
mod test_lockorder_checks;
3030

@@ -63,7 +63,7 @@ mod ext_impl {
6363
}
6464
}
6565

66-
#[cfg(not(feature = "std"))]
66+
#[cfg(all(not(feature = "std"), any(ldk_bench, not(test))))]
6767
mod nostd_sync;
68-
#[cfg(not(feature = "std"))]
68+
#[cfg(all(not(feature = "std"), any(ldk_bench, not(test))))]
6969
pub use nostd_sync::*;

0 commit comments

Comments
 (0)