Commit 5fc3e01
Include release_held_htlc blinded paths in RAA
As part of supporting sending payments as an often-offline sender, the sender needs to send held_htlc_available onion messages whose reply paths terminate at their always-online channel counterparty that is holding the HTLC. That way, when the recipient responds with release_held_htlc, the sender's counterparty will receive that message.

Here the counterparty starts including those reply paths in the revoke_and_ack message destined for the sender, so the sender can use them in subsequent held_htlc_available messages. We put the paths in the RAA to ensure the sender actually receives them: if the paths are never delivered, the HTLC will time out and fail.
1 parent 1d3248f commit 5fc3e01
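
To make the counterparty-side mechanics concrete, below is a minimal standalone Rust sketch. All types are simplified stand-ins rather than LDK's real structs, and build_raa is a hypothetical helper that mirrors the logic this commit adds to get_last_revoke_and_ack: collect one blinded path per held inbound HTLC and ship the (htlc_id, path) pairs in the RAA.

// Minimal standalone sketch of the counterparty-side flow. All types here are
// simplified stand-ins, not LDK's real structs.

/// Stand-in for LDK's `BlindedMessagePath` (an opaque onion-message route).
#[derive(Clone, Debug)]
struct BlindedMessagePath(Vec<[u8; 33]>);

/// A pending inbound HTLC as seen by the always-online counterparty.
struct InboundHtlc {
    htlc_id: u64,
    /// Set when the sender asked us to hold this HTLC until release_held_htlc.
    hold_htlc: bool,
}

/// Stand-in for the `revoke_and_ack` message extended by this commit.
struct RevokeAndAck {
    /// One reply path per held HTLC, keyed by HTLC id. The often-offline
    /// sender embeds these as reply paths in its held_htlc_available onion
    /// messages, so the recipient's release_held_htlc reply routes back to us.
    release_htlc_message_paths: Vec<(u64, BlindedMessagePath)>,
}

/// Hypothetical helper mirroring the new logic in `get_last_revoke_and_ack`:
/// build a blinded path for every inbound HTLC we are holding.
fn build_raa<F: Fn(u64) -> BlindedMessagePath>(
    pending_inbound_htlcs: &[InboundHtlc], path_for_release_htlc: F,
) -> RevokeAndAck {
    let release_htlc_message_paths = pending_inbound_htlcs
        .iter()
        .filter(|htlc| htlc.hold_htlc)
        .map(|htlc| (htlc.htlc_id, path_for_release_htlc(htlc.htlc_id)))
        .collect();
    RevokeAndAck { release_htlc_message_paths }
}

fn main() {
    let htlcs = vec![
        InboundHtlc { htlc_id: 0, hold_htlc: false },
        InboundHtlc { htlc_id: 1, hold_htlc: true },
    ];
    // In LDK the closure is supplied by the ChannelManager; here it just
    // fabricates an empty path.
    let raa = build_raa(&htlcs, |_id| BlindedMessagePath(Vec::new()));
    assert_eq!(raa.release_htlc_message_paths.len(), 1);
    assert_eq!(raa.release_htlc_message_paths[0].0, 1);
}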

File tree

2 files changed: +72 -24 lines changed

lightning/src/ln/channel.rs

Lines changed: 58 additions & 16 deletions
@@ -28,6 +28,7 @@ use bitcoin::{secp256k1, sighash, TxIn};
 #[cfg(splicing)]
 use bitcoin::{FeeRate, Sequence};
 
+use crate::blinded_path::message::BlindedMessagePath;
 use crate::chain::chaininterface::{
     fee_for_weight, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator,
 };
@@ -282,6 +283,29 @@ impl InboundHTLCState {
             _ => None,
         }
     }
+
+    /// Whether we need to hold onto this HTLC until receipt of a corresponding [`ReleaseHeldHtlc`]
+    /// onion message.
+    ///
+    /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
+    fn should_hold_htlc(&self) -> bool {
+        match self {
+            InboundHTLCState::RemoteAnnounced(res)
+            | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(res)
+            | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(res) => match res {
+                InboundHTLCResolution::Resolved { pending_htlc_status } => {
+                    match pending_htlc_status {
+                        PendingHTLCStatus::Forward(info) => info.routing.should_hold_htlc(),
+                        _ => false,
+                    }
+                },
+                InboundHTLCResolution::Pending { update_add_htlc } => {
+                    update_add_htlc.hold_htlc.is_some()
+                },
+            },
+            InboundHTLCState::Committed | InboundHTLCState::LocalRemoved(_) => false,
+        }
+    }
 }
 
 struct InboundHTLCOutput {
@@ -1602,12 +1626,12 @@ where
     }
 
     #[rustfmt::skip]
-    pub fn signer_maybe_unblocked<L: Deref>(
-        &mut self, chain_hash: ChainHash, logger: &L,
-    ) -> Option<SignerResumeUpdates> where L::Target: Logger {
+    pub fn signer_maybe_unblocked<L: Deref, CBP>(
+        &mut self, chain_hash: ChainHash, logger: &L, path_for_release_htlc: CBP
+    ) -> Option<SignerResumeUpdates> where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath {
         match &mut self.phase {
             ChannelPhase::Undefined => unreachable!(),
-            ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger)),
+            ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger, path_for_release_htlc)),
             ChannelPhase::UnfundedOutboundV1(chan) => {
                 let (open_channel, funding_created) = chan.signer_maybe_unblocked(chain_hash, logger);
                 Some(SignerResumeUpdates {
@@ -8707,13 +8731,14 @@ where
     /// successfully and we should restore normal operation. Returns messages which should be sent
     /// to the remote side.
     #[rustfmt::skip]
-    pub fn monitor_updating_restored<L: Deref, NS: Deref>(
+    pub fn monitor_updating_restored<L: Deref, NS: Deref, CBP>(
         &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
-        user_config: &UserConfig, best_block_height: u32
+        user_config: &UserConfig, best_block_height: u32, path_for_release_htlc: CBP
     ) -> MonitorRestoreUpdates
     where
         L::Target: Logger,
-        NS::Target: NodeSigner
+        NS::Target: NodeSigner,
+        CBP: Fn(u64) -> BlindedMessagePath
     {
         assert!(self.context.channel_state.is_monitor_update_in_progress());
         self.context.channel_state.clear_monitor_update_in_progress();
@@ -8770,7 +8795,7 @@ where
         }
 
         let mut raa = if self.context.monitor_pending_revoke_and_ack {
-            self.get_last_revoke_and_ack(logger)
+            self.get_last_revoke_and_ack(path_for_release_htlc, logger)
         } else { None };
         let mut commitment_update = if self.context.monitor_pending_commitment_signed {
             self.get_last_commitment_update_for_send(logger).ok()
@@ -8859,7 +8884,9 @@ where
     /// Indicates that the signer may have some signatures for us, so we should retry if we're
     /// blocked.
     #[rustfmt::skip]
-    pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
+    pub fn signer_maybe_unblocked<L: Deref, CBP>(
+        &mut self, logger: &L, path_for_release_htlc: CBP
+    ) -> SignerResumeUpdates where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath {
         if !self.holder_commitment_point.can_advance() {
             log_trace!(logger, "Attempting to update holder per-commitment point...");
             self.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger);
@@ -8887,7 +8914,7 @@ where
         } else { None };
         let mut revoke_and_ack = if self.context.signer_pending_revoke_and_ack {
             log_trace!(logger, "Attempting to generate pending revoke and ack...");
-            self.get_last_revoke_and_ack(logger)
+            self.get_last_revoke_and_ack(path_for_release_htlc, logger)
         } else { None };
 
         if self.context.resend_order == RAACommitmentOrder::CommitmentFirst
@@ -8958,9 +8985,12 @@ where
         }
     }
 
-    fn get_last_revoke_and_ack<L: Deref>(&mut self, logger: &L) -> Option<msgs::RevokeAndACK>
+    fn get_last_revoke_and_ack<CBP, L: Deref>(
+        &mut self, path_for_release_htlc: CBP, logger: &L,
+    ) -> Option<msgs::RevokeAndACK>
     where
         L::Target: Logger,
+        CBP: Fn(u64) -> BlindedMessagePath,
     {
         debug_assert!(
             self.holder_commitment_point.next_transaction_number() <= INITIAL_COMMITMENT_NUMBER - 2
@@ -8973,14 +9003,22 @@ where
             .ok();
         if let Some(per_commitment_secret) = per_commitment_secret {
             if self.holder_commitment_point.can_advance() {
+                let mut release_htlc_message_paths = Vec::new();
+                for htlc in &self.context.pending_inbound_htlcs {
+                    if htlc.state.should_hold_htlc() {
+                        let path = path_for_release_htlc(htlc.htlc_id);
+                        release_htlc_message_paths.push((htlc.htlc_id, path));
+                    }
+                }
+
                 self.context.signer_pending_revoke_and_ack = false;
                 return Some(msgs::RevokeAndACK {
                     channel_id: self.context.channel_id,
                     per_commitment_secret,
                     next_per_commitment_point: self.holder_commitment_point.next_point(),
                     #[cfg(taproot)]
                     next_local_nonce: None,
-                    release_htlc_message_paths: Vec::new(),
+                    release_htlc_message_paths,
                 });
             }
         }
@@ -9128,13 +9166,15 @@ where
     /// May panic if some calls other than message-handling calls (which will all Err immediately)
     /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
    #[rustfmt::skip]
-    pub fn channel_reestablish<L: Deref, NS: Deref>(
+    pub fn channel_reestablish<L: Deref, NS: Deref, CBP>(
         &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
-        chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
+        chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock,
+        path_for_release_htlc: CBP,
     ) -> Result<ReestablishResponses, ChannelError>
     where
         L::Target: Logger,
-        NS::Target: NodeSigner
+        NS::Target: NodeSigner,
+        CBP: Fn(u64) -> BlindedMessagePath
     {
         if !self.context.channel_state.is_peer_disconnected() {
             // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
@@ -9235,7 +9275,7 @@ where
                 self.context.monitor_pending_revoke_and_ack = true;
                 None
             } else {
-                self.get_last_revoke_and_ack(logger)
+                self.get_last_revoke_and_ack(path_for_release_htlc, logger)
             }
         } else {
             debug_assert!(false, "All values should have been handled in the four cases above");
@@ -16490,6 +16530,7 @@ mod tests {
             chain_hash,
             &config,
             0,
+            |_| unreachable!()
         );
 
         // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
@@ -16504,6 +16545,7 @@ mod tests {
             chain_hash,
             &config,
             0,
+            |_| unreachable!()
         );
         // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
         // as the funding transaction depends on all channels in the batch becoming ready.

lightning/src/ln/channelmanager.rs

Lines changed: 14 additions & 8 deletions
@@ -377,7 +377,7 @@ impl PendingHTLCRouting {
 
     /// Whether this HTLC should be held by our node until we receive a corresponding
     /// [`ReleaseHeldHtlc`] onion message.
-    fn should_hold_htlc(&self) -> bool {
+    pub(super) fn should_hold_htlc(&self) -> bool {
         match self {
             Self::Forward { hold_htlc: Some(()), .. } => true,
             _ => false,
@@ -3430,18 +3430,20 @@ macro_rules! emit_initial_channel_ready_event {
 /// set for this channel is empty!
 macro_rules! handle_monitor_update_completion {
     ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+        let channel_id = $chan.context.channel_id();
+        let counterparty_node_id = $chan.context.get_counterparty_node_id();
         #[cfg(debug_assertions)]
         {
             let in_flight_updates =
-                $peer_state.in_flight_monitor_updates.get(&$chan.context.channel_id());
+                $peer_state.in_flight_monitor_updates.get(&channel_id);
             assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
             assert_eq!($chan.blocked_monitor_updates_pending(), 0);
         }
         let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
         let mut updates = $chan.monitor_updating_restored(&&logger,
             &$self.node_signer, $self.chain_hash, &*$self.config.read().unwrap(),
-            $self.best_block.read().unwrap().height);
-        let counterparty_node_id = $chan.context.get_counterparty_node_id();
+            $self.best_block.read().unwrap().height,
+            |htlc_id| $self.path_for_release_held_htlc(htlc_id, &channel_id, &counterparty_node_id));
         let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
             // We only send a channel_update in the case where we are just now sending a
             // channel_ready and the channel is in a usable state. We may re-send a
@@ -3457,7 +3459,7 @@ macro_rules! handle_monitor_update_completion {
         } else { None };
 
         let update_actions = $peer_state.monitor_update_blocked_actions
-            .remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
+            .remove(&channel_id).unwrap_or(Vec::new());
 
         let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
             &mut $peer_state.pending_msg_events, $chan, updates.raa,
@@ -3468,7 +3470,6 @@ macro_rules! handle_monitor_update_completion {
             $peer_state.pending_msg_events.push(upd);
         }
 
-        let channel_id = $chan.context.channel_id();
         let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid(&$chan.funding);
         core::mem::drop($peer_state_lock);
         core::mem::drop($per_peer_state_lock);
@@ -11138,6 +11139,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
                 self.chain_hash,
                 &self.config.read().unwrap(),
                 &*self.best_block.read().unwrap(),
+                |htlc_id| self.path_for_release_held_htlc(htlc_id, &msg.channel_id, counterparty_node_id)
             );
             let responses = try_channel_entry!(self, peer_state, res, chan_entry);
             let mut channel_update = None;
@@ -11601,9 +11603,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 
         // Returns whether we should remove this channel as it's just been closed.
         let unblock_chan = |chan: &mut Channel<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| -> Option<ShutdownResult> {
+            let channel_id = chan.context().channel_id();
             let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
             let node_id = chan.context().get_counterparty_node_id();
-            if let Some(msgs) = chan.signer_maybe_unblocked(self.chain_hash, &&logger) {
+            if let Some(msgs) = chan.signer_maybe_unblocked(
+                self.chain_hash, &&logger,
+                |htlc_id| self.path_for_release_held_htlc(htlc_id, &channel_id, &node_id)
+            ) {
                 if let Some(msg) = msgs.open_channel {
                     pending_msg_events.push(MessageSendEvent::SendOpenChannel {
                         node_id,
@@ -11624,7 +11630,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
                 }
                 let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs {
                     node_id,
-                    channel_id: chan.context().channel_id(),
+                    channel_id,
                     updates,
                 });
                 let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK {
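
For context on how these paths get used, here is a hedged sender-side sketch. This side is not part of this commit, and every type here is a stand-in: the often-offline sender remembers the freshest path per HTLC from each incoming RAA and attaches it as the reply path of its held_htlc_available onion messages.

use std::collections::HashMap;

// Stand-ins for LDK types; none of this is LDK's real API.
#[derive(Clone, Debug)]
struct BlindedMessagePath;

struct RevokeAndAck {
    release_htlc_message_paths: Vec<(u64, BlindedMessagePath)>,
}

/// Hypothetical held_htlc_available onion message carrying a reply path.
struct HeldHtlcAvailable {
    reply_path: BlindedMessagePath,
}

struct Sender {
    // Paths keyed by HTLC id, remembered from the latest RAA.
    release_paths: HashMap<u64, BlindedMessagePath>,
}

impl Sender {
    /// On each revoke_and_ack, remember the freshest path for every held HTLC.
    fn handle_raa(&mut self, raa: RevokeAndAck) {
        for (htlc_id, path) in raa.release_htlc_message_paths {
            self.release_paths.insert(htlc_id, path);
        }
    }

    /// Build a held_htlc_available message whose reply path terminates at the
    /// always-online counterparty, so release_held_htlc is delivered there.
    fn held_htlc_available(&self, htlc_id: u64) -> Option<HeldHtlcAvailable> {
        self.release_paths
            .get(&htlc_id)
            .map(|path| HeldHtlcAvailable { reply_path: path.clone() })
    }
}

fn main() {
    let mut sender = Sender { release_paths: HashMap::new() };
    sender.handle_raa(RevokeAndAck {
        release_htlc_message_paths: vec![(1, BlindedMessagePath)],
    });
    assert!(sender.held_htlc_available(1).is_some());
    assert!(sender.held_htlc_available(2).is_none());
}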
