Commit 1bb9e64

Merge pull request #977 from TheBlueMatt/2021-06-fix-double-claim-close
Handle double-HTLC-claims without failing the backwards channel
2 parents: 0b4079d + f06f9d1

7 files changed: +327 -127 lines

lightning/src/chain/channelmonitor.rs (+1 -1)
@@ -114,7 +114,7 @@ impl Readable for ChannelMonitorUpdate {
 	}
 
 /// An error enum representing a failure to persist a channel monitor update.
-#[derive(Clone, Debug)]
+#[derive(Clone, Copy, Debug, PartialEq)]
 pub enum ChannelMonitorUpdateErr {
 	/// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
 	/// our state failed, but is expected to succeed at some point in the future).
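
A note on this one-line change: deriving Copy and PartialEq on ChannelMonitorUpdateErr lets callers (tests in particular) compare a persist result directly against an expected variant instead of matching on it. A minimal standalone sketch of that usage follows; the variant names mirror the upstream enum, but the snippet itself is illustrative and is not part of this commit.

// Sketch: a stand-in for the enum touched above, with the new derives.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ChannelMonitorUpdateErr {
	// A temporary failure; the monitor update is expected to succeed on retry.
	TemporaryFailure,
	// A permanent failure; the channel can no longer be updated.
	PermanentFailure,
}

fn main() {
	// With PartialEq, an update error can be asserted against directly,
	// and Copy lets the value be reused after the comparison.
	let res: Result<(), ChannelMonitorUpdateErr> = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	assert_eq!(res, Err(ChannelMonitorUpdateErr::TemporaryFailure));
	assert_ne!(res, Err(ChannelMonitorUpdateErr::PermanentFailure));
}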

lightning/src/ln/chanmon_update_fail_tests.rs (+119 -3)
@@ -197,7 +197,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
 	if disconnect {
 		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	}
 
 	match persister_fail {
@@ -256,7 +256,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
 	if disconnect {
 		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	}
 
 	// ...and make sure we can force-close a frozen channel
@@ -1947,7 +1947,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 	// Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -2250,3 +2250,119 @@ fn channel_holding_cell_serialize() {
 	do_channel_holding_cell_serialize(true, false);
 	do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
 }
+
+#[derive(PartialEq)]
+enum HTLCStatusAtDupClaim {
+	Received,
+	HoldingCell,
+	Cleared,
+}
+fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_fails: bool) {
+	// When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
+	// along the payment path before waiting for a full commitment_signed dance. This is great, but
+	// can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
+	// reconnects, and then has to re-send its update_fulfill_htlc message again.
+	// In previous code, we didn't handle the double-claim correctly, spuriously closing the
+	// channel on which the inbound HTLC was received.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2;
+
+	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+
+	let mut as_raa = None;
+	if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
+		// In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
+		// awaiting a remote revoke_and_ack from nodes[0].
+		let (_, second_payment_hash, second_payment_secret) = get_payment_preimage_hash!(nodes[1]);
+		let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(),
+			&nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap();
+		nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
+		check_added_monitors!(nodes[1], 1);
+
+		let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+		check_added_monitors!(nodes[0], 1);
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
+		check_added_monitors!(nodes[0], 1);
+
+		as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+	}
+
+	let fulfill_msg = msgs::UpdateFulfillHTLC {
+		channel_id: chan_2,
+		htlc_id: 0,
+		payment_preimage,
+	};
+	if second_fails {
+		assert!(nodes[2].node.fail_htlc_backwards(&payment_hash));
+		expect_pending_htlcs_forwardable!(nodes[2]);
+		check_added_monitors!(nodes[2], 1);
+		get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	} else {
+		assert!(nodes[2].node.claim_funds(payment_preimage));
+		check_added_monitors!(nodes[2], 1);
+		let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+		assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
+		// Check that the message we're about to deliver matches the one generated:
+		assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
+	}
+	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
+	check_added_monitors!(nodes[1], 1);
+
+	let mut bs_updates = None;
+	if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
+		bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+		assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+		expect_payment_sent!(nodes[0], payment_preimage);
+		if htlc_status == HTLCStatusAtDupClaim::Cleared {
+			commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+		}
+	} else {
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	}
+
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+	if second_fails {
+		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+		expect_pending_htlcs_forwardable!(nodes[1]);
+	} else {
+		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	}
+
+	if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
+		check_added_monitors!(nodes[1], 1);
+		expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
+
+		bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+		assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+		expect_payment_sent!(nodes[0], payment_preimage);
+	}
+	if htlc_status != HTLCStatusAtDupClaim::Cleared {
+		commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+	}
+}
+
+#[test]
+fn test_reconnect_dup_htlc_claims() {
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, false);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, false);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, false);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, true);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
+	do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
+}
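
The channel-level handling itself lives in files not shown in this excerpt (the commit touches 7 files in total). The behaviour the test above pins down is that a re-delivered update_fulfill_htlc after a reconnect must resolve to a duplicate-claim no-op rather than an error that force-closes the backwards channel. A rough, hypothetical sketch of that idea follows; the names (ClaimOutcome, ClaimedHtlcs, claim_inbound_htlc) are invented for illustration and do not appear in the commit.

use std::collections::HashSet;

// Hypothetical outcome type: a repeated claim is reported as a duplicate
// instead of being treated as a channel-fatal error.
#[derive(Debug, PartialEq)]
enum ClaimOutcome {
	// First time this HTLC is claimed: generate an update_fulfill_htlc backwards.
	NewClaim,
	// Already claimed (e.g. the peer re-sent update_fulfill_htlc after a
	// reconnect): nothing to do, and crucially no channel closure.
	DuplicateClaim,
}

// Invented helper tracking which HTLC ids have already been claimed.
struct ClaimedHtlcs {
	claimed: HashSet<u64>,
}

impl ClaimedHtlcs {
	fn claim_inbound_htlc(&mut self, htlc_id: u64) -> ClaimOutcome {
		// HashSet::insert returns false if the id was already present.
		if self.claimed.insert(htlc_id) {
			ClaimOutcome::NewClaim
		} else {
			ClaimOutcome::DuplicateClaim
		}
	}
}

fn main() {
	let mut claims = ClaimedHtlcs { claimed: HashSet::new() };
	assert_eq!(claims.claim_inbound_htlc(0), ClaimOutcome::NewClaim);
	// A second delivery of the same fulfill maps to a duplicate, not an error.
	assert_eq!(claims.claim_inbound_htlc(0), ClaimOutcome::DuplicateClaim);
}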
