Merged
29 changes: 22 additions & 7 deletions lightning/src/chain/channelmonitor.rs
@@ -45,7 +45,7 @@ use crate::chain::transaction::{OutPoint, TransactionData};
use crate::chain::Filter;
use crate::chain::{BestBlock, WatchedOutput};
use crate::events::bump_transaction::{AnchorDescriptor, BumpTransactionEvent};
-use crate::events::{ClosureReason, Event, EventHandler, ReplayEvent};
+use crate::events::{ClosureReason, Event, EventHandler, PaidBolt12Invoice, ReplayEvent};
use crate::ln::chan_utils::{
self, ChannelTransactionParameters, CommitmentTransaction, CounterpartyCommitmentSecrets,
HTLCClaim, HTLCOutputInCommitment, HolderCommitmentTransaction,
@@ -5964,9 +5964,12 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
// updates that peer sends us are update_fails, failing the channel if not. It's probably
// easier to just fail the channel as this case should be rare enough anyway.
let height = self.best_block.height;
+// Grace period in number of blocks we allow for an async payment to resolve before we
+// force-close. 4032 blocks are roughly four weeks.
+const ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS: u32 = 4032;
**Contributor:** Add a comment noting that this is 28 days? Elsewhere in the codebase we have a month as 30 days, though that's a super nit.
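For reference, the figure checks out at Bitcoin's ~10-minute block target; a quick sketch of the arithmetic (the `BLOCKS_PER_DAY` helper here is purely illustrative):

```rust
// At the ~10-minute block target: 6 blocks/hour * 24 hours = 144 blocks/day.
const BLOCKS_PER_DAY: u32 = 144;

// 28 days * 144 blocks/day = 4032 blocks, i.e. the grace period is ~four weeks.
const ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS: u32 = 28 * BLOCKS_PER_DAY;
```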

macro_rules! scan_commitment {
($htlcs: expr, $holder_tx: expr) => {
-for ref htlc in $htlcs {
+for (ref htlc, ref source) in $htlcs {
// For inbound HTLCs which we know the preimage for, we have to ensure we hit the
// chain with enough room to claim the HTLC without our counterparty being able to
// time out the HTLC first.
@@ -5977,9 +5980,21 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
// can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
// chain when our counterparty is waiting for expiration to off-chain fail an HTLC
// we give ourselves a few blocks of headroom after expiration before going
-// on-chain for an expired HTLC.
+// on-chain for an expired HTLC. In the case of an outbound HTLC for
+// an async payment, we allow `ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS` before
+// we force-close the channel so that if we've been offline for a
+// while we give a chance for the HTLC to be failed on reconnect
+// instead of closing the channel.
let htlc_outbound = $holder_tx == htlc.offered;
-if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) ||
+let async_payment = htlc_outbound && matches!(
+source.as_deref().expect("Every outbound HTLC should have a corresponding source"),
+HTLCSource::OutboundRoute {
+bolt12_invoice: Some(PaidBolt12Invoice::StaticInvoice(_)),
**Contributor:** We may not want this behavior for async payments sent by always-online nodes, meaning we would want to check the `hold_htlc` field here. Unfortunately we don't have access to that field here; it would have to be added to the `HTLCSource`. I don't think this matters enough to bother changing it for this PR, just wanted to note it.

+..
+}
+);
+if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height && !async_payment) ||
+( htlc_outbound && htlc.cltv_expiry + ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS <= height && async_payment) ||
(!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
log_info!(logger, "Force-closing channel due to {} HTLC timeout - HTLC with payment hash {} expires at {}", if htlc_outbound { "outbound" } else { "inbound"}, htlc.payment_hash, htlc.cltv_expiry);
return Some(htlc.payment_hash);
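Pulled out of the macro, the per-HTLC force-close decision after this change looks roughly like the following sketch (constant values are illustrative, not the real definitions; in the actual code `async_payment` can only be true for outbound HTLCs, as it is derived from `htlc_outbound` above):

```rust
/// Illustrative distillation of the force-close check; not the actual LDK code.
fn should_force_close(
	htlc_outbound: bool, async_payment: bool, cltv_expiry: u32, height: u32,
	have_preimage: bool,
) -> bool {
	// Illustrative values; the real constants live elsewhere in the codebase.
	const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
	const ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS: u32 = 4032;
	const CLTV_CLAIM_BUFFER: u32 = 6;

	// Outbound, non-async: a few blocks of headroom after expiry, then go on-chain.
	(htlc_outbound && !async_payment
		&& cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height)
		// Outbound async payment: wait ~4 weeks so the HTLC can be failed on reconnect.
		|| (htlc_outbound && async_payment
			&& cltv_expiry + ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS <= height)
		// Inbound with a known preimage: hit the chain early enough to claim it.
		|| (!htlc_outbound && have_preimage
			&& cltv_expiry <= height + CLTV_CLAIM_BUFFER)
}
```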
@@ -5988,16 +6003,16 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
}
}

-scan_commitment!(holder_commitment_htlcs!(self, CURRENT), true);
+scan_commitment!(holder_commitment_htlcs!(self, CURRENT_WITH_SOURCES), true);

if let Some(ref txid) = self.funding.current_counterparty_commitment_txid {
if let Some(ref htlc_outputs) = self.funding.counterparty_claimable_outpoints.get(txid) {
-scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
+scan_commitment!(htlc_outputs.iter(), false);
}
}
if let Some(ref txid) = self.funding.prev_counterparty_commitment_txid {
if let Some(ref htlc_outputs) = self.funding.counterparty_claimable_outpoints.get(txid) {
-scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
+scan_commitment!(htlc_outputs.iter(), false);
}
}
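For context on the call-site change: counterparty commitments store each HTLC alongside its (optional) source, so the `.map(|&(ref a, _)| a)` that previously discarded the source is simply dropped. A rough sketch of the shape the macro now iterates (types approximated for illustration, not the real definitions):

```rust
// Approximations only; the real types are `HTLCOutputInCommitment`
// (chan_utils) and `HTLCSource` (channelmanager).
struct Htlc { offered: bool, cltv_expiry: u32 /* , ... */ }
enum Source { OutboundRoute /* { bolt12_invoice, ... } */, PreviousHop }

// Each counterparty-commitment entry now reaches scan_commitment! as a pair;
// outbound HTLCs are expected to always carry `Some(source)`.
type ClaimableOutpoints = Vec<(Htlc, Option<Box<Source>>)>;
```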

86 changes: 86 additions & 0 deletions lightning/src/ln/async_payments_tests.rs
@@ -3031,6 +3031,92 @@ fn held_htlc_timeout() {
);
}

#[test]
fn fail_held_htlc_on_reconnect() {
// Test that if an HTLC held by the sender's LSP fails while the async sender is offline, the
// HTLC is failed on reconnect instead of force-closing the channel.
let chanmon_cfgs = create_chanmon_cfgs(4);
let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);

let (sender_cfg, recipient_cfg) = (often_offline_node_cfg(), often_offline_node_cfg());
let mut sender_lsp_cfg = test_default_channel_config();
sender_lsp_cfg.enable_htlc_hold = true;
let mut invoice_server_cfg = test_default_channel_config();
invoice_server_cfg.accept_forwards_to_priv_channels = true;

let node_chanmgrs = create_node_chanmgrs(
4,
&node_cfgs,
&[Some(sender_cfg), Some(sender_lsp_cfg), Some(invoice_server_cfg), Some(recipient_cfg)],
);
let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
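// Topology: sender (0) -- sender LSP (1) -- invoice server (2) -- recipient (3).
// The channels to the sender and the recipient are unannounced, as both are
// configured as often-offline nodes.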
let chan = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
unify_blockheight_across_nodes(&nodes);
let sender = &nodes[0];
let sender_lsp = &nodes[1];
let invoice_server = &nodes[2];
let recipient = &nodes[3];

let amt_msat = 5000;
let (_, peer_node_id, static_invoice_om) = build_async_offer_and_init_payment(amt_msat, &nodes);
let payment_hash =
lock_in_htlc_for_static_invoice(&static_invoice_om, peer_node_id, sender, sender_lsp);

sender_lsp.node.process_pending_htlc_forwards();
let (peer_id, held_htlc_om) =
extract_held_htlc_available_oms(sender, &[sender_lsp, invoice_server, recipient])
.pop()
.unwrap();
recipient.onion_messenger.handle_onion_message(peer_id, &held_htlc_om);

let _ = extract_release_htlc_oms(recipient, &[sender, sender_lsp, invoice_server]);

// Disconnect async sender <-> sender LSP
sender.node.peer_disconnected(sender_lsp.node.get_our_node_id());
sender_lsp.node.peer_disconnected(sender.node.get_our_node_id());

// Connect enough blocks to cause the HTLC to time out
let chan_id = chan.0.channel_id;
let channel =
sender.node.list_channels().iter().find(|c| c.channel_id == chan_id).unwrap().clone();
let htlc_expiry = channel
.pending_outbound_htlcs
.iter()
.find(|htlc| htlc.payment_hash == payment_hash)
.unwrap()
.cltv_expiry;
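// 100 blocks past expiry comfortably exceeds the usual LATENCY_GRACE_PERIOD_BLOCKS
// headroom but stays far below ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS (4032), so the
// sender must not force-close the channel here.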
let blocks_to_connect = htlc_expiry - sender.best_block_info().1 + 100;
connect_blocks(sender, blocks_to_connect);
connect_blocks(sender_lsp, blocks_to_connect);

sender_lsp.node.process_pending_htlc_forwards();
let mut evs = sender_lsp.node.get_and_clear_pending_events();
assert_eq!(evs.len(), 1);
match evs.pop().unwrap() {
Event::HTLCHandlingFailed { failure_type, failure_reason, .. } => {
assert!(matches!(failure_type, HTLCHandlingFailureType::InvalidForward { .. }));
assert!(matches!(
failure_reason,
Some(HTLCHandlingFailureReason::Local {
reason: LocalHTLCFailureReason::ForwardExpiryBuffer
})
));
},
_ => panic!(),
}

// After reconnecting, check that HTLC was failed and channel is open.
let mut reconnect_args = ReconnectArgs::new(&sender, &sender_lsp);
reconnect_args.pending_cell_htlc_fails.0 = 1;
reconnect_nodes(reconnect_args);

expect_payment_failed!(sender, payment_hash, false);
assert_eq!(sender.node.list_channels().len(), 1);
assert_eq!(sender_lsp.node.list_channels().len(), 2);
}

#[test]
fn intercepted_hold_htlc() {
// Test a payment `sender --> LSP --> recipient` such that the HTLC is both a hold htlc and an
9 changes: 9 additions & 0 deletions lightning/src/ln/channelmanager.rs
@@ -12790,6 +12790,15 @@ where
/// For payer privacy, uses a derived payer id and uses [`MessageRouter::create_blinded_paths`]
/// to construct a [`BlindedMessagePath`] for the reply path.
///
+///
+/// # Note
+///
+/// If the offer resolves to an async payment, and the HTLC is neither claimed nor failed by
+/// our next-hop peer, we will not force-close the channel to resolve the payment for 4
+/// weeks. This avoids an issue for often-offline nodes where channels are force-closed on
+/// startup during chain sync prior to connecting to peers. If you want to resolve such a
+/// timed-out payment more urgently, you can manually force-close the channel, which will,
+/// after some transaction confirmation(s), result in an [`Event::PaymentFailed`].
+///
/// # Errors
///
/// Errors if:
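A sketch of the manual escape hatch described in the note above, assuming recent LDK naming: `force_close_broadcasting_latest_txn` is the relevant `ChannelManager` method (its signature and the `ChannelId` import path have shifted across releases), and the payment then resolves via `Event::PaymentFailed`:

```rust
use bitcoin::secp256k1::PublicKey;
use lightning::events::Event;
use lightning::ln::channelmanager::AChannelManager;
use lightning::ln::types::ChannelId; // import path varies slightly by LDK version

// Force-close the channel holding the stuck async-payment HTLC. Illustrative
// only: the error-message argument exists in newer LDK releases.
fn resolve_stuck_async_payment<CM: AChannelManager>(
	cm: &CM, chan_id: &ChannelId, counterparty: &PublicKey,
) {
	let _ = cm.get_cm().force_close_broadcasting_latest_txn(
		chan_id,
		counterparty,
		"manually resolving timed-out async payment".to_string(),
	);
}

// Once the commitment transaction confirms, the pending payment resolves and
// the event handler observes the failure.
fn handle_event(event: Event) {
	if let Event::PaymentFailed { payment_id, .. } = event {
		// The timed-out async payment is now safely failed; clean up state here.
		let _ = payment_id;
	}
}
```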