From f3fbd8cf6f3fa6b271153c23b1a1283b273473bf Mon Sep 17 00:00:00 2001
From: okekefrancis112
Date: Sun, 8 Feb 2026 11:26:19 +0100
Subject: [PATCH 1/3] feat: Upgrade test for MonitorUpdatingPersister

---
 lightning/src/util/persist.rs | 125 ++++++++++++++++++++++++++++++++++
 1 file changed, 125 insertions(+)

diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index 1b750c63cd8..510d4151735 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -1532,6 +1532,7 @@ impl From<u64> for UpdateName {
 #[cfg(test)]
 mod tests {
 	use super::*;
+	use crate::chain::channelmonitor::ChannelMonitorUpdateStep;
 	use crate::chain::ChannelMonitorUpdateStatus;
 	use crate::check_closed_broadcast;
 	use crate::events::ClosureReason;
@@ -1937,6 +1938,130 @@ mod tests {
 			.is_err());
 	}
 
+	// Test that a monitor with a legacy u64::MAX update_id (from pre-0.1 LDK) can still be read
+	// and applied correctly on startup. LDK prior to 0.1 used u64::MAX as a sentinel update_id
+	// for all ChannelMonitorUpdates generated after a channel was closed. We no longer generate
+	// these, but must still handle them for nodes upgrading from older versions.
+	#[test]
+	fn legacy_closed_channel_update_id_upgrade() {
+		let test_max_pending_updates = 7;
+		let chanmon_cfgs = create_chanmon_cfgs(3);
+		let kv_store_0 = TestStore::new(false);
+		let persister_0 = MonitorUpdatingPersister::new(
+			&kv_store_0,
+			&chanmon_cfgs[0].logger,
+			test_max_pending_updates,
+			&chanmon_cfgs[0].keys_manager,
+			&chanmon_cfgs[0].keys_manager,
+			&chanmon_cfgs[0].tx_broadcaster,
+			&chanmon_cfgs[0].fee_estimator,
+		);
+		let kv_store_1 = TestStore::new(false);
+		let persister_1 = MonitorUpdatingPersister::new(
+			&kv_store_1,
+			&chanmon_cfgs[1].logger,
+			test_max_pending_updates,
+			&chanmon_cfgs[1].keys_manager,
+			&chanmon_cfgs[1].keys_manager,
+			&chanmon_cfgs[1].tx_broadcaster,
+			&chanmon_cfgs[1].fee_estimator,
+		);
+		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let chain_mon_0 = test_utils::TestChainMonitor::new(
+			Some(&chanmon_cfgs[0].chain_source),
+			&chanmon_cfgs[0].tx_broadcaster,
+			&chanmon_cfgs[0].logger,
+			&chanmon_cfgs[0].fee_estimator,
+			&persister_0,
+			&chanmon_cfgs[0].keys_manager,
+		);
+		let chain_mon_1 = test_utils::TestChainMonitor::new(
+			Some(&chanmon_cfgs[1].chain_source),
+			&chanmon_cfgs[1].tx_broadcaster,
+			&chanmon_cfgs[1].logger,
+			&chanmon_cfgs[1].fee_estimator,
+			&persister_1,
+			&chanmon_cfgs[1].keys_manager,
+		);
+		node_cfgs[0].chain_monitor = chain_mon_0;
+		node_cfgs[1].chain_monitor = chain_mon_1;
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+		// Create a channel and send a payment to build up real monitor data.
+		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
+		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
+
+		// Read the current monitor state before we inject the legacy update.
+		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates().unwrap();
+		assert_eq!(persisted_chan_data.len(), 1);
+		let (_, monitor) = &persisted_chan_data[0];
+		let monitor_name = monitor.persistence_key();
+		let pre_legacy_update_id = monitor.get_latest_update_id();
+		assert!(pre_legacy_update_id < u64::MAX);
+
+		// Construct a legacy ChannelForceClosed update with update_id = u64::MAX, simulating
+		// what a pre-0.1 LDK node would have written to the store after force-closing a channel.
+		let legacy_update = ChannelMonitorUpdate {
+			update_id: u64::MAX,
+			updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+			channel_id: None, // Pre-0.0.121 updates had no channel_id
+		};
+		KVStoreSync::write(
+			&kv_store_0,
+			CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+			&monitor_name.to_string(),
+			UpdateName::from(u64::MAX).as_str(),
+			legacy_update.encode(),
+		)
+		.unwrap();
+
+		// Verify the legacy update file is present in the store.
+		let updates_list = KVStoreSync::list(
+			&kv_store_0,
+			CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+			&monitor_name.to_string(),
+		)
+		.unwrap();
+		assert!(updates_list.iter().any(|name| name == UpdateName::from(u64::MAX).as_str()));
+
+		// Now read the monitors with updates. The legacy update should be found and applied.
+		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates().unwrap();
+		assert_eq!(persisted_chan_data.len(), 1);
+		let (_, upgraded_monitor) = &persisted_chan_data[0];
+
+		// After applying the legacy force-close update, the monitor's latest_update_id should
+		// be u64::MAX.
+		assert_eq!(upgraded_monitor.get_latest_update_id(), u64::MAX);
+
+		// Now simulate a full monitor re-persist (as would happen during normal operation after
+		// startup, e.g., when a new update triggers a full monitor write). Write the upgraded
+		// monitor back to the store so the on-disk state reflects the applied legacy update.
+		KVStoreSync::write(
+			&kv_store_0,
+			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+			&monitor_name.to_string(),
+			upgraded_monitor.encode(),
+		)
+		.unwrap();
+
+		// Now cleanup_stale_updates should remove the legacy update, since the persisted
+		// monitor's latest_update_id (u64::MAX) >= the update's id (u64::MAX).
+		persister_0.cleanup_stale_updates(false).unwrap();
+
+		let updates_list = KVStoreSync::list(
+			&kv_store_0,
+			CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+			&monitor_name.to_string(),
+		)
+		.unwrap();
+		assert!(
+			updates_list.is_empty(),
+			"All updates including the legacy u64::MAX update should have been cleaned up"
+		);
+	}
+
 	fn persist_fn<P: Deref, ChannelSigner: EcdsaChannelSigner>(_persist: P) -> bool
 	where
 		P::Target: Persist<ChannelSigner>,

From 2a07f69f43cec52d81780c00f21148896b16b726 Mon Sep 17 00:00:00 2001
From: okekefrancis112
Date: Thu, 12 Feb 2026 13:14:48 +0100
Subject: [PATCH 2/3] refactor: move u64::MAX update test to lightning-tests with proper upgrade pattern

---
 .../src/upgrade_downgrade_tests.rs |  49 +++++++
 lightning/src/util/persist.rs      | 125 ------------------
 2 files changed, 49 insertions(+), 125 deletions(-)

diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs
index 14b0a5c5822..9e6d638acba 100644
--- a/lightning-tests/src/upgrade_downgrade_tests.rs
+++ b/lightning-tests/src/upgrade_downgrade_tests.rs
@@ -699,3 +699,52 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) {
 	expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000);
 	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage);
 }
+
+#[test]
+fn test_0_0_125_max_update_id_upgrade() {
+	let (node_a_ser, node_b_ser, mon_a_ser, mon_b_ser);
+	{
+		let chanmon_cfgs = lightning_0_0_125_utils::create_chanmon_cfgs(2);
+		let node_cfgs = lightning_0_0_125_utils::create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs =
+			lightning_0_0_125_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = lightning_0_0_125_utils::create_network(2, &node_cfgs, &node_chanmgrs);
+
+		let node_a_id = nodes[0].node.get_our_node_id();
+		let chan_id = lightning_0_0_125_utils::create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+		lightning_0_0_125_utils::route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+		let err = "".to_owned();
+		nodes[1]
+			.node
+			.force_close_broadcasting_latest_txn(&chan_id, &node_a_id, err)
+			.unwrap();
+
+		lightning_0_0_125_utils::check_added_monitors(&nodes[1], 1);
+		let reason = ClosureReason_0_0_125::HolderForceClosed { broadcasted_latest_txn: Some(true) };
+		lightning_0_0_125_utils::check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000);
+		lightning_0_0_125_utils::check_closed_broadcast(&nodes[1], 1, true);
+
+		node_a_ser = nodes[0].node.encode();
+		node_b_ser = nodes[1].node.encode();
+		mon_a_ser = get_monitor_0_0_125!(nodes[0], chan_id).encode();
+		mon_b_ser = get_monitor_0_0_125!(nodes[1], chan_id).encode();
+	}
+
+	let mut chanmon_cfgs = create_chanmon_cfgs(2);
+
+	chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true;
+	chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true;
+
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let (persister_a, persister_b, chain_mon_a, chain_mon_b);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let (node_a, node_b);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let config = test_default_channel_config();
+	let a_mons = &[&mon_a_ser[..]];
+	reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a);
+	reload_node!(nodes[1], config, &node_b_ser, &[&mon_b_ser], persister_b, chain_mon_b, node_b);
+}
diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index 510d4151735..1b750c63cd8 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -1532,7 +1532,6 @@ impl From<u64> for UpdateName {
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::chain::channelmonitor::ChannelMonitorUpdateStep;
 	use crate::chain::ChannelMonitorUpdateStatus;
 	use crate::check_closed_broadcast;
 	use crate::events::ClosureReason;
@@ -1938,130 +1937,6 @@ mod tests {
 			.is_err());
 	}
 
-	// Test that a monitor with a legacy u64::MAX update_id (from pre-0.1 LDK) can still be read
-	// and applied correctly on startup. LDK prior to 0.1 used u64::MAX as a sentinel update_id
-	// for all ChannelMonitorUpdates generated after a channel was closed. We no longer generate
-	// these, but must still handle them for nodes upgrading from older versions.
-	#[test]
-	fn legacy_closed_channel_update_id_upgrade() {
-		let test_max_pending_updates = 7;
-		let chanmon_cfgs = create_chanmon_cfgs(3);
-		let kv_store_0 = TestStore::new(false);
-		let persister_0 = MonitorUpdatingPersister::new(
-			&kv_store_0,
-			&chanmon_cfgs[0].logger,
-			test_max_pending_updates,
-			&chanmon_cfgs[0].keys_manager,
-			&chanmon_cfgs[0].keys_manager,
-			&chanmon_cfgs[0].tx_broadcaster,
-			&chanmon_cfgs[0].fee_estimator,
-		);
-		let kv_store_1 = TestStore::new(false);
-		let persister_1 = MonitorUpdatingPersister::new(
-			&kv_store_1,
-			&chanmon_cfgs[1].logger,
-			test_max_pending_updates,
-			&chanmon_cfgs[1].keys_manager,
-			&chanmon_cfgs[1].keys_manager,
-			&chanmon_cfgs[1].tx_broadcaster,
-			&chanmon_cfgs[1].fee_estimator,
-		);
-		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-		let chain_mon_0 = test_utils::TestChainMonitor::new(
-			Some(&chanmon_cfgs[0].chain_source),
-			&chanmon_cfgs[0].tx_broadcaster,
-			&chanmon_cfgs[0].logger,
-			&chanmon_cfgs[0].fee_estimator,
-			&persister_0,
-			&chanmon_cfgs[0].keys_manager,
-		);
-		let chain_mon_1 = test_utils::TestChainMonitor::new(
-			Some(&chanmon_cfgs[1].chain_source),
-			&chanmon_cfgs[1].tx_broadcaster,
-			&chanmon_cfgs[1].logger,
-			&chanmon_cfgs[1].fee_estimator,
-			&persister_1,
-			&chanmon_cfgs[1].keys_manager,
-		);
-		node_cfgs[0].chain_monitor = chain_mon_0;
-		node_cfgs[1].chain_monitor = chain_mon_1;
-		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-		// Create a channel and send a payment to build up real monitor data.
-		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
-		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
-
-		// Read the current monitor state before we inject the legacy update.
-		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates().unwrap();
-		assert_eq!(persisted_chan_data.len(), 1);
-		let (_, monitor) = &persisted_chan_data[0];
-		let monitor_name = monitor.persistence_key();
-		let pre_legacy_update_id = monitor.get_latest_update_id();
-		assert!(pre_legacy_update_id < u64::MAX);
-
-		// Construct a legacy ChannelForceClosed update with update_id = u64::MAX, simulating
-		// what a pre-0.1 LDK node would have written to the store after force-closing a channel.
-		let legacy_update = ChannelMonitorUpdate {
-			update_id: u64::MAX,
-			updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
-			channel_id: None, // Pre-0.0.121 updates had no channel_id
-		};
-		KVStoreSync::write(
-			&kv_store_0,
-			CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
-			&monitor_name.to_string(),
-			UpdateName::from(u64::MAX).as_str(),
-			legacy_update.encode(),
-		)
-		.unwrap();
-
-		// Verify the legacy update file is present in the store.
-		let updates_list = KVStoreSync::list(
-			&kv_store_0,
-			CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
-			&monitor_name.to_string(),
-		)
-		.unwrap();
-		assert!(updates_list.iter().any(|name| name == UpdateName::from(u64::MAX).as_str()));
-
-		// Now read the monitors with updates. The legacy update should be found and applied.
-		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates().unwrap();
-		assert_eq!(persisted_chan_data.len(), 1);
-		let (_, upgraded_monitor) = &persisted_chan_data[0];
-
-		// After applying the legacy force-close update, the monitor's latest_update_id should
-		// be u64::MAX.
-		assert_eq!(upgraded_monitor.get_latest_update_id(), u64::MAX);
-
-		// Now simulate a full monitor re-persist (as would happen during normal operation after
-		// startup, e.g., when a new update triggers a full monitor write). Write the upgraded
-		// monitor back to the store so the on-disk state reflects the applied legacy update.
-		KVStoreSync::write(
-			&kv_store_0,
-			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
-			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
-			&monitor_name.to_string(),
-			upgraded_monitor.encode(),
-		)
-		.unwrap();
-
-		// Now cleanup_stale_updates should remove the legacy update, since the persisted
-		// monitor's latest_update_id (u64::MAX) >= the update's id (u64::MAX).
-		persister_0.cleanup_stale_updates(false).unwrap();
-
-		let updates_list = KVStoreSync::list(
-			&kv_store_0,
-			CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
-			&monitor_name.to_string(),
-		)
-		.unwrap();
-		assert!(
-			updates_list.is_empty(),
-			"All updates including the legacy u64::MAX update should have been cleaned up"
-		);
-	}
-
 	fn persist_fn<P: Deref, ChannelSigner: EcdsaChannelSigner>(_persist: P) -> bool
 	where
 		P::Target: Persist<ChannelSigner>,

From 79e3ee09e634516349e76e2484bebf0e72e1340d Mon Sep 17 00:00:00 2001
From: okekefrancis112
Date: Thu, 12 Feb 2026 13:37:02 +0100
Subject: [PATCH 3/3] fix: ci issues

---
 lightning-tests/src/upgrade_downgrade_tests.rs | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs
index 9e6d638acba..f655b73ad64 100644
--- a/lightning-tests/src/upgrade_downgrade_tests.rs
+++ b/lightning-tests/src/upgrade_downgrade_tests.rs
@@ -716,14 +716,19 @@ fn test_0_0_125_max_update_id_upgrade() {
 		lightning_0_0_125_utils::route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
 		let err = "".to_owned();
-		nodes[1]
-			.node
-			.force_close_broadcasting_latest_txn(&chan_id, &node_a_id, err)
-			.unwrap();
+		nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &node_a_id, err).unwrap();
 
 		lightning_0_0_125_utils::check_added_monitors(&nodes[1], 1);
-		let reason = ClosureReason_0_0_125::HolderForceClosed { broadcasted_latest_txn: Some(true) };
-		lightning_0_0_125_utils::check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000);
+		let reason =
+			ClosureReason_0_0_125::HolderForceClosed { broadcasted_latest_txn: Some(true) };
+		lightning_0_0_125_utils::check_closed_event(
+			&nodes[1],
+			1,
+			reason,
+			false,
+			&[node_a_id],
+			100000,
+		);
 		lightning_0_0_125_utils::check_closed_broadcast(&nodes[1], 1, true);
 
 		node_a_ser = nodes[0].node.encode();
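
Reviewer note: the staleness rule the original in-crate test relied on can be summarized with a minimal, self-contained sketch. `is_stale_update` below is a hypothetical helper used only for illustration, not an LDK API; the actual behavior is `MonitorUpdatingPersister::cleanup_stale_updates` comparing the persisted monitor's `get_latest_update_id()` against each stored update's id, which also covers the legacy u64::MAX sentinel once the monitor itself has been re-persisted at u64::MAX.

// Illustrative sketch only (hypothetical helper, not LDK's implementation):
// an update on disk is stale once the persisted monitor has caught up to it.
fn is_stale_update(monitor_latest_update_id: u64, update_id: u64) -> bool {
    monitor_latest_update_id >= update_id
}

fn main() {
    // Normal case: the monitor has applied updates through id 7, so update 5 is stale.
    assert!(is_stale_update(7, 5));
    // Pending case: update 9 is newer than the monitor, so it must be kept.
    assert!(!is_stale_update(7, 9));
    // Legacy case: after the u64::MAX force-close update is applied and the
    // monitor re-persisted, the sentinel update itself becomes stale.
    assert!(is_stale_update(u64::MAX, u64::MAX));
}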