Misc Followups #3942

Merged: 6 commits, Jul 23, 2025
6 changes: 3 additions & 3 deletions fuzz/src/chanmon_consistency.rs
@@ -1138,7 +1138,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 dest.handle_update_add_htlc(nodes[$node].get_our_node_id(), &new_msg);
 }
 }
-for update_fulfill in update_fulfill_htlcs.iter() {
+let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() ||
+	!update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty();
+for update_fulfill in update_fulfill_htlcs {
 out.locked_write(format!("Delivering update_fulfill_htlc from node {} to node {}.\n", $node, idx).as_bytes());
 dest.handle_update_fulfill_htlc(nodes[$node].get_our_node_id(), update_fulfill);
 }
@@ -1154,8 +1156,6 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 out.locked_write(format!("Delivering update_fee from node {} to node {}.\n", $node, idx).as_bytes());
 dest.handle_update_fee(nodes[$node].get_our_node_id(), &msg);
 }
-let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() ||
-	!update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty();
 if $limit_events != ProcessMessages::AllMessages && processed_change {
 // If we only want to process some messages, don't deliver the CS until later.
 extra_ev = Some(MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates: CommitmentUpdate {
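Note on the hunk above: the `processed_change` computation has to move ahead of the delivery loops because the rewritten `for` loop now consumes `update_fulfill_htlcs` by value, so the `Vec` can no longer be inspected afterwards. A minimal sketch of the ordering constraint, with illustrative names rather than the fuzzer's real ones:

```rust
// Minimal sketch: a flag derived from a Vec must be computed before the Vec
// is moved. Names and types here are illustrative only.
fn deliver_updates(update_fulfill_htlcs: Vec<u64>) {
    // Compute the flag while the Vec is still owned here...
    let processed_change = !update_fulfill_htlcs.is_empty();

    // ...because this loop takes `update_fulfill_htlcs` by value and drops it.
    for _update_fulfill in update_fulfill_htlcs {
        // deliver each message by value (no clone needed)
    }

    // Calling `update_fulfill_htlcs.is_empty()` here would not compile:
    // the Vec was moved into the loop above.
    if processed_change {
        // hold back the commitment_signed until the rest is processed
    }
}

fn main() {
    deliver_updates(vec![1, 2, 3]);
}
```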
4 changes: 2 additions & 2 deletions lightning-dns-resolver/src/lib.rs
@@ -458,8 +458,8 @@ mod test {
 }

 check_added_monitors(&nodes[1], 1);
-let updates = get_htlc_update_msgs!(nodes[1], payer_id);
-nodes[0].node.handle_update_fulfill_htlc(payee_id, &updates.update_fulfill_htlcs[0]);
+let mut updates = get_htlc_update_msgs!(nodes[1], payer_id);
+nodes[0].node.handle_update_fulfill_htlc(payee_id, updates.update_fulfill_htlcs.remove(0));
 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);

 expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true);
2 changes: 1 addition & 1 deletion lightning-net-tokio/src/lib.rs
@@ -721,7 +721,7 @@ mod tests {
 #[cfg(simple_close)]
 fn handle_closing_sig(&self, _their_node_id: PublicKey, _msg: ClosingSig) {}
 fn handle_update_add_htlc(&self, _their_node_id: PublicKey, _msg: &UpdateAddHTLC) {}
-fn handle_update_fulfill_htlc(&self, _their_node_id: PublicKey, _msg: &UpdateFulfillHTLC) {}
+fn handle_update_fulfill_htlc(&self, _their_node_id: PublicKey, _msg: UpdateFulfillHTLC) {}
 fn handle_update_fail_htlc(&self, _their_node_id: PublicKey, _msg: &UpdateFailHTLC) {}
 fn handle_update_fail_malformed_htlc(
 &self, _their_node_id: PublicKey, _msg: &UpdateFailMalformedHTLC,
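The test changes in the previous two files follow from this signature change: `handle_update_fulfill_htlc` now takes `UpdateFulfillHTLC` by value rather than by reference, so call sites move the message out of the pending list with `remove(0)` instead of borrowing element `[0]`. A rough caller-side sketch, using stand-in types rather than LDK's real definitions:

```rust
// Stand-in message type; the real msgs::UpdateFulfillHTLC carries more fields.
struct UpdateFulfillHTLC {
    htlc_id: u64,
}

// Stand-in handler: taking the message by value lets the callee move fields
// (e.g. the preimage) out without cloning.
fn handle_update_fulfill_htlc(msg: UpdateFulfillHTLC) {
    let _owned = msg.htlc_id;
}

fn main() {
    let mut update_fulfill_htlcs = vec![UpdateFulfillHTLC { htlc_id: 0 }];
    // remove(0) moves the first message out of the Vec, mirroring the
    // `updates.update_fulfill_htlcs.remove(0)` call sites in the tests above.
    let msg = update_fulfill_htlcs.remove(0);
    handle_update_fulfill_htlc(msg);
}
```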
10 changes: 5 additions & 5 deletions lightning/src/chain/chainmonitor.rs
@@ -1394,27 +1394,27 @@ mod tests {
 // Now manually walk the commitment signed dance - because we claimed two payments
 // back-to-back it doesn't fit into the neat walk commitment_signed_dance does.

-let updates = get_htlc_update_msgs!(nodes[1], node_a_id);
-nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]);
+let mut updates = get_htlc_update_msgs!(nodes[1], node_a_id);
+nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0));
 expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
 nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed);
 check_added_monitors!(nodes[0], 1);
 let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], node_b_id);

 nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa);
 check_added_monitors!(nodes[1], 1);
-let bs_second_updates = get_htlc_update_msgs!(nodes[1], node_a_id);
+let mut bs_2nd_updates = get_htlc_update_msgs!(nodes[1], node_a_id);
 nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_first_update);
 check_added_monitors!(nodes[1], 1);
 let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);

 nodes[0]
 .node
-.handle_update_fulfill_htlc(node_b_id, &bs_second_updates.update_fulfill_htlcs[0]);
+.handle_update_fulfill_htlc(node_b_id, bs_2nd_updates.update_fulfill_htlcs.remove(0));
 expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
 nodes[0]
 .node
-.handle_commitment_signed_batch_test(node_b_id, &bs_second_updates.commitment_signed);
+.handle_commitment_signed_batch_test(node_b_id, &bs_2nd_updates.commitment_signed);
 check_added_monitors!(nodes[0], 1);
 nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa);
 expect_payment_path_successful!(nodes[0]);
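For readers unfamiliar with the test above: with two preimages claimed back-to-back, node B's second `update_fulfill_htlc` only becomes available after node A revokes its prior commitment, so the usual `commitment_signed_dance!` helper doesn't fit and the test interleaves the two exchanges by hand. A rough outline of the delivery order the test drives (simplified; real messages carry commitment transactions and signatures):

```rust
// Rough outline of the deliveries performed by the test above, in order.
fn main() {
    let deliveries = [
        "B -> A: update_fulfill_htlc (first preimage)",
        "B -> A: commitment_signed",
        "A -> B: revoke_and_ack (as_first_raa)",
        "A -> B: commitment_signed (as_first_update)",
        "B -> A: update_fulfill_htlc (second preimage, from bs_2nd_updates)",
        "B -> A: commitment_signed (from bs_2nd_updates)",
        "B -> A: revoke_and_ack (bs_first_raa)",
        // ...followed by the closing revoke_and_ack/commitment_signed pair.
    ];
    for d in &deliveries {
        println!("{d}");
    }
}
```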
42 changes: 32 additions & 10 deletions lightning/src/events/mod.rs
@@ -1102,11 +1102,22 @@ pub enum Event {
 ///
 /// May contain a closed channel if the HTLC sent along the path was fulfilled on chain.
 path: Path,
-/// The hold times as reported by each hop. The unit in which the hold times are expressed are 100's of
-/// milliseconds. So a hop reporting 2 is a hold time that corresponds to roughly 200 milliseconds. As earlier
-/// hops hold on to an HTLC for longer, the hold times in the list are expected to decrease. When our peer
-/// didn't provide attribution data, the list is empty. The same applies to HTLCs that were resolved onchain.
-/// Because of unavailability of hold times, the list may be shorter than the number of hops in the path.
+/// The time that each hop indicated it held the HTLC.
+///
+/// The unit in which the hold times are expressed is 100s of milliseconds. So a hop
+/// reporting 2 is a hold time that corresponds to between 200 and 299 milliseconds.
+///
+/// We expect that at each hop the actual hold time will be strictly greater than the hold
+/// time of the following hops, as a node along the path shouldn't have completed the HTLC
+/// until the next node has completed it. Note that because hold times are in 100s of ms,
+/// reported hold times will often be equal across hops.
+///
+/// If our peer didn't provide attribution data or the HTLC resolved on chain, the list
+/// will be empty.
+///
+/// Each entry will correspond with one entry in [`Path::hops`], or, thereafter, the
+/// [`BlindedTail::trampoline_hops`] in [`Path::blinded_tail`]. Because not all nodes
+/// support hold times, the list may be shorter than the number of hops in the path.
 hold_times: Vec<u32>,
 },
 /// Indicates an outbound HTLC we sent failed, likely due to an intermediary node being unable to
@@ -1159,11 +1170,22 @@ pub enum Event {
 error_code: Option<u16>,
 #[cfg(any(test, feature = "_test_utils"))]
 error_data: Option<Vec<u8>>,
-/// The hold times as reported by each hop. The unit in which the hold times are expressed are 100's of
-/// milliseconds. So a hop reporting 2 is a hold time that corresponds to roughly 200 milliseconds. As earlier
-/// hops hold on to an HTLC for longer, the hold times in the list are expected to decrease. When our peer
-/// didn't provide attribution data, the list is empty. The same applies to HTLCs that were resolved onchain.
-/// Because of unavailability of hold times, the list may be shorter than the number of hops in the path.
+/// The time that each hop indicated it held the HTLC.
+///
+/// The unit in which the hold times are expressed is 100s of milliseconds. So a hop
+/// reporting 2 is a hold time that corresponds to between 200 and 299 milliseconds.
+///
+/// We expect that at each hop the actual hold time will be strictly greater than the hold
+/// time of the following hops, as a node along the path shouldn't have completed the HTLC
+/// until the next node has completed it. Note that because hold times are in 100s of ms,
+/// reported hold times will often be equal across hops.
+///
+/// If our peer didn't provide attribution data or the HTLC resolved on chain, the list
+/// will be empty.
+///
+/// Each entry will correspond with one entry in [`Path::hops`], or, thereafter, the
+/// [`BlindedTail::trampoline_hops`] in [`Path::blinded_tail`]. Because not all nodes
+/// support hold times, the list may be shorter than the number of hops in the path.
 hold_times: Vec<u32>,
 },
 /// Indicates that a probe payment we sent returned successful, i.e., only failed at the destination.
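As a concrete reading of the units described in the new docs (the values below are made up): `hold_times = vec![4, 2, 2]` on a three-hop path means the first hop held the HTLC for 400 to 499 ms and the later two hops for 200 to 299 ms each. A tiny sketch:

```rust
// Sketch: decoding `hold_times` per the doc comment above. Each entry is in
// units of 100ms, so a reported value of 2 means 200-299 milliseconds.
fn main() {
    let hold_times: Vec<u32> = vec![4, 2, 2]; // hypothetical three-hop report
    for (hop, ht) in hold_times.iter().enumerate() {
        // The bucket [ht * 100, ht * 100 + 99] brackets the actual hold time.
        println!("hop {}: held {}..={} ms", hop, ht * 100, ht * 100 + 99);
    }
    // Earlier hops should report values >= later hops, though the 100ms
    // bucketing often makes adjacent entries equal.
}
```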
14 changes: 7 additions & 7 deletions lightning/src/ln/async_signer_tests.rs
@@ -825,20 +825,20 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) {

 // Handle the update_fulfill_htlc, but fail to persist the monitor update when handling the
 // commitment_signed.
-let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 assert_eq!(events_2.len(), 1);
-match events_2[0] {
+match events_2.remove(0) {
 MessageSendEvent::UpdateHTLCs {
 node_id: _,
 channel_id: _,
-updates: msgs::CommitmentUpdate { ref update_fulfill_htlcs, ref commitment_signed, .. },
+updates: msgs::CommitmentUpdate { mut update_fulfill_htlcs, commitment_signed, .. },
 } => {
-nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_htlcs[0]);
+nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_htlcs.remove(0));
 expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
 if monitor_update_failure {
 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 }
-nodes[0].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed);
+nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed);
 if monitor_update_failure {
 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 } else {
@@ -1401,8 +1401,8 @@ fn test_no_disconnect_while_async_commitment_signed_expecting_remote_revoke_and_

 // After processing the `update_fulfill`, they'll only be able to send `revoke_and_ack` until
 // the `commitment_signed` is no longer pending.
-let update = get_htlc_update_msgs!(&nodes[1], node_a_id);
-nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update.update_fulfill_htlcs[0]);
+let mut update = get_htlc_update_msgs!(&nodes[1], node_a_id);
+nodes[0].node.handle_update_fulfill_htlc(node_b_id, update.update_fulfill_htlcs.remove(0));
 nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &update.commitment_signed);
 check_added_monitors(&nodes[0], 1);

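The `events_2.remove(0)` plus non-`ref` bindings pattern above is what lets the match arm take ownership: matching on an owned `MessageSendEvent` means `update_fulfill_htlcs` is bound by value and can itself be drained with `remove(0)`. A self-contained sketch with stand-in types (not LDK's real definitions):

```rust
// Stand-in types; LDK's MessageSendEvent/CommitmentUpdate are richer.
struct CommitmentUpdate {
    update_fulfill_htlcs: Vec<u32>,
    commitment_signed: Vec<u32>,
}

enum MessageSendEvent {
    UpdateHTLCs { updates: CommitmentUpdate },
}

fn main() {
    let mut events = vec![MessageSendEvent::UpdateHTLCs {
        updates: CommitmentUpdate { update_fulfill_htlcs: vec![10], commitment_signed: vec![20] },
    }];

    // remove(0) yields the event by value, so the match arm owns `updates`
    // and can bind its fields without `ref`.
    match events.remove(0) {
        MessageSendEvent::UpdateHTLCs {
            updates: CommitmentUpdate { mut update_fulfill_htlcs, commitment_signed },
        } => {
            // Moved out by value, mirroring `update_fulfill_htlcs.remove(0)` above.
            let first = update_fulfill_htlcs.remove(0);
            assert_eq!(first, 10);
            // Owned, so pass by reference where a handler wants a borrow.
            assert_eq!(&commitment_signed[..], &[20]);
        },
    }
}
```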