diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 22006897a0f..892d489fa44 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -282,6 +282,7 @@ impl TestChainMonitor { Arc::clone(&persister), Arc::clone(&keys), keys.get_peer_storage_key(), + false, )), logger, keys, diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index 5dfa51079d8..47aebf41ac9 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -603,6 +603,7 @@ pub fn do_test(mut data: &[u8], logger: &Arc Arc::new(TestPersister { update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed) }), Arc::clone(&keys_manager), keys_manager.get_peer_storage_key(), + false, )); let network = Network::Bitcoin; diff --git a/fuzz/src/lsps_message.rs b/fuzz/src/lsps_message.rs index 8371d1c5fc7..a4c4108a6cc 100644 --- a/fuzz/src/lsps_message.rs +++ b/fuzz/src/lsps_message.rs @@ -59,6 +59,7 @@ pub fn do_test(data: &[u8]) { Arc::clone(&kv_store), Arc::clone(&keys_manager), keys_manager.get_peer_storage_key(), + false, )); let best_block = BestBlock::from_network(network); let params = ChainParameters { network, best_block }; diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index da415c70a32..16ced03b7fc 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -2444,6 +2444,7 @@ mod tests { Arc::clone(&kv_store), Arc::clone(&keys_manager), keys_manager.get_peer_storage_key(), + false, )); let best_block = BestBlock::from_network(network); let params = ChainParameters { network, best_block }; diff --git a/lightning-persister/src/fs_store/v1.rs b/lightning-persister/src/fs_store/v1.rs index 776aba630c4..9ba5ef97199 100644 --- a/lightning-persister/src/fs_store/v1.rs +++ b/lightning-persister/src/fs_store/v1.rs @@ -217,6 +217,7 @@ mod tests { &chanmon_cfgs[0].fee_estimator, &store, node_cfgs[0].keys_manager, + false, ); 
node_cfgs[0].chain_monitor = chain_mon_0; let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]); diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs index b8f3eb0bd99..0c0683f963f 100644 --- a/lightning-persister/src/test_utils.rs +++ b/lightning-persister/src/test_utils.rs @@ -120,6 +120,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { &chanmon_cfgs[0].fee_estimator, store_0, node_cfgs[0].keys_manager, + false, ); let chain_mon_1 = test_utils::TestChainMonitor::new( Some(&chanmon_cfgs[1].chain_source), @@ -128,6 +129,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { &chanmon_cfgs[1].fee_estimator, store_1, node_cfgs[1].keys_manager, + false, ); node_cfgs[0].chain_monitor = chain_mon_0; node_cfgs[1].chain_monitor = chain_mon_1; diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 7db1b697c2b..a3e9eed6075 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -29,12 +29,12 @@ use bitcoin::hash_types::{BlockHash, Txid}; use bitcoin::secp256k1::PublicKey; use crate::chain; -use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; +use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator}; #[cfg(peer_storage)] use crate::chain::channelmonitor::write_chanmon_internal; use crate::chain::channelmonitor::{ - Balance, ChannelMonitor, ChannelMonitorUpdate, MonitorEvent, TransactionOutputs, - WithChannelMonitor, + Balance, ChannelMonitor, ChannelMonitorUpdate, ClaimInfo, ClaimKey, ClaimMetadata, + MonitorEvent, TransactionOutputs, WithChannelMonitor, }; use crate::chain::transaction::{OutPoint, TransactionData}; use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, WatchedOutput}; @@ -371,6 +371,9 @@ pub struct ChainMonitor< #[cfg(peer_storage)] our_peerstorage_encryption_key: PeerStorageKey, + + /// If false, claim info persistence events are swallowed. 
+ offload_claim_info: bool, } impl< @@ -397,7 +400,7 @@ where pub fn new_async_beta( chain_source: Option, broadcaster: T, logger: L, feeest: F, persister: MonitorUpdatingPersisterAsync, _entropy_source: ES, - _our_peerstorage_encryption_key: PeerStorageKey, + _our_peerstorage_encryption_key: PeerStorageKey, offload_claim_info: bool, ) -> Self { let event_notifier = Arc::new(Notifier::new()); Self { @@ -414,6 +417,7 @@ where pending_send_only_events: Mutex::new(Vec::new()), #[cfg(peer_storage)] our_peerstorage_encryption_key: _our_peerstorage_encryption_key, + offload_claim_info, } } } @@ -590,6 +594,15 @@ where /// always need to fetch full blocks absent another means for determining which blocks contain /// transactions relevant to the watched channels. /// + /// If `offload_claim_info` is set to `true`, [`Event::PersistClaimInfo`] events will be + /// surfaced, allowing callers to offload claim information from [`ChannelMonitor`]s to reduce + /// their size. If set to `false`, these events will be silently ignored and the claim + /// information will remain in-memory and in each [`ChannelMonitor`] on disk. + /// + /// Note that no matter the value of `offload_claim_info`, [`Event::ClaimInfoRequest`]s will be + /// surfaced if needed. If [`Event::PersistClaimInfo`]s have never been surfaced/handled for a + /// node, no [`Event::ClaimInfoRequest`] will be generated. + /// /// # Note /// `our_peerstorage_encryption_key` must be obtained from [`NodeSigner::get_peer_storage_key`]. /// This key is used to encrypt peer storage backups. 
@@ -601,9 +614,12 @@ where /// [`NodeSigner`]: crate::sign::NodeSigner /// [`NodeSigner::get_peer_storage_key`]: crate::sign::NodeSigner::get_peer_storage_key /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager + /// [`Event::PersistClaimInfo`]: crate::events::Event::PersistClaimInfo + /// [`Event::ClaimInfoRequest`]: crate::events::Event::ClaimInfoRequest pub fn new( chain_source: Option, broadcaster: T, logger: L, feeest: F, persister: P, _entropy_source: ES, _our_peerstorage_encryption_key: PeerStorageKey, + offload_claim_info: bool, ) -> Self { Self { monitors: RwLock::new(new_hash_map()), @@ -619,6 +635,7 @@ where pending_send_only_events: Mutex::new(Vec::new()), #[cfg(peer_storage)] our_peerstorage_encryption_key: _our_peerstorage_encryption_key, + offload_claim_info, } } @@ -770,6 +787,59 @@ where Ok(()) } + /// Provides the stored [`ClaimInfo`] and associated [`ClaimMetadata`] for a specified transaction. + /// + /// This function is called in response to an [`Event::ClaimInfoRequest`] to provide the + /// necessary claim data that was previously persisted using the [`Event::PersistClaimInfo`] + /// event. + /// + /// If no matching [`ChannelMonitor`] is found for the provided `channel_id`, it simply returns + /// an `Err(())`, which should never happen if [`Event::ClaimInfoRequest`] was generated. It + /// should be handled by the caller as a failure/panic, most probably a wrong `channel_id` was + /// provided. 
+ /// + /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor + pub fn provide_claim_info( + &self, channel_id: ChannelId, claim_key: ClaimKey, claim_metadata: ClaimMetadata, + claim_info: ClaimInfo, + ) -> Result<(), ()> { + let monitors = self.monitors.read().unwrap(); + let monitor_data = monitors.get(&channel_id).ok_or(())?; + let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&self.fee_estimator); + monitor_data.monitor.provide_claim_info( + claim_key, + claim_metadata, + claim_info, + &self.broadcaster, + &bounded_fee_estimator, + &self.logger, + ); + + Ok(()) + } + + /// Notifies the system that [`ClaimInfo`] associated with a given `claim_key` has been durably + /// persisted. + /// + /// This method should be called after a [`ClaimInfo`] is persisted in response to an + /// [`Event::PersistClaimInfo`]. The [`ClaimInfo`] will thus be removed from both in-memory and + /// on-disk storage within the [`ChannelMonitor`]. + /// + /// If no matching [`ChannelMonitor`] is found for the provided `channel_id`, it simply returns + /// an `Err(())`, which should never happen if [`Event::PersistClaimInfo`] was generated. It + /// should be handled by the caller as a failure/panic, most probably a wrong `channel_id` was + /// provided. + /// + /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor + pub fn claim_info_persisted( + &self, channel_id: ChannelId, claim_key: ClaimKey, + ) -> Result<(), ()> { + let monitors = self.monitors.read().unwrap(); + let monitor_data = monitors.get(&channel_id).ok_or(())?; + monitor_data.monitor.claim_info_persisted(&claim_key); + Ok(()) + } + /// This wrapper avoids having to update some of our tests for now as they assume the direct /// chain::Watch API wherein we mark a monitor fully-updated by just calling /// channel_monitor_updated once with the highest ID. 
@@ -788,6 +858,15 @@ where self.event_notifier.notify(); } + #[cfg(any(test, feature = "_test_utils"))] + pub fn get_and_clear_claim_info_events(&self) -> Vec { + let mut res = Vec::new(); + for (_, monitor) in self.monitors.read().unwrap().iter() { + res.append(&mut monitor.monitor.get_and_clear_claim_info_events()); + } + res + } + #[cfg(any(test, feature = "_test_utils"))] pub fn get_and_clear_pending_events(&self) -> Vec { use crate::events::EventsProvider; @@ -818,7 +897,17 @@ where self.monitors.read().unwrap().get(&channel_id).map(|m| &m.monitor), self.logger, ev, - handler(ev).await + { + if !self.offload_claim_info { + if let Event::PersistClaimInfo { .. } = &ev { + Ok(()) + } else { + handler(ev).await + } + } else { + handler(ev).await + } + } ) { Ok(()) => {}, Err(ReplayEvent()) => { @@ -1477,8 +1566,16 @@ where where H::Target: EventHandler, { + let filtering_handler = |event: events::Event| { + if !self.offload_claim_info { + if let events::Event::PersistClaimInfo { .. } = &event { + return Ok(()); + } + } + handler.handle_event(event) + }; for monitor_state in self.monitors.read().unwrap().values() { - match monitor_state.monitor.process_pending_events(&handler, &self.logger) { + match monitor_state.monitor.process_pending_events(&&filtering_handler, &self.logger) { Ok(()) => {}, Err(ReplayEvent()) => { self.event_notifier.notify(); diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index a8d055a9c5b..ac15ed9c5cd 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -328,6 +328,62 @@ pub const ARCHIVAL_DELAY_BLOCKS: u32 = 4032; /// providing us the preimage (which would claim it). pub const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS; +/// An opaque key used with the [`ChannelId`] to identify a [`ClaimMetadata`]. 
+/// +/// This type is used in [`Event::PersistClaimInfo`] and [`Event::ClaimInfoRequest`] to +/// associate persisted [`ClaimInfo`] with the correct commitment transaction. +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] +#[cfg_attr(test, derive(PartialOrd, Ord))] +pub struct ClaimKey(pub(crate) Txid); + +impl Writeable for ClaimKey { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + 0u8.write(writer)?; + self.0.write(writer) + } +} + +impl Readable for ClaimKey { + fn read(reader: &mut R) -> Result { + let version: u8 = Readable::read(reader)?; + if version != 0 { + return Err(DecodeError::UnknownRequiredFeature); + } + Ok(Self(Readable::read(reader)?)) + } +} + +/// Metadata associated with an [`Event::ClaimInfoRequest`] which allows the operation to continue +/// after calling [`ChainMonitor::provide_claim_info`]. +/// +/// [`ChainMonitor::provide_claim_info`]: super::chainmonitor::ChainMonitor::provide_claim_info +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ClaimMetadata { + /// The [`BlockHash`] of the block containing [`Self::tx`]. + pub(crate) block_hash: BlockHash, + /// The counterparty commitment transaction for which a claim might be necessary. + pub(crate) tx: Transaction, + /// The height of the block in which the commitment transaction is included. + pub(crate) height: u32, +} + +impl_writeable_tlv_based!(ClaimMetadata, { + (1, block_hash, required), + (3, tx, required), + (5, height, required), +}); + +/// Represents detailed information about HTLCs in a commitment transaction that can be claimed. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ClaimInfo { + /// A list of HTLC outputs for the commitment transaction that are eligible for claiming. + pub(crate) htlcs: Vec, +} + +impl_writeable_tlv_based!(ClaimInfo, { + (1, htlcs, required_vec), +}); + // Deprecated, use [`HolderCommitment`] or [`HolderCommitmentTransaction`]. 
#[derive(Clone, PartialEq, Eq)] struct HolderSignedTx { @@ -955,6 +1011,26 @@ pub enum Balance { /// were already spent. amount_satoshis: u64, }, + /// The channel has been closed, and our counterparty broadcasted a revoked commitment + /// transaction. However, the claim information for this commitment transaction has been + /// offloaded via [`Event::PersistClaimInfo`] and has not yet been re-provided in response to + /// an [`Event::ClaimInfoRequest`]. + /// + /// Once the claim information is provided via [`ChainMonitor::provide_claim_info`], this + /// balance will be replaced by one or more [`Balance::CounterpartyRevokedOutputClaimable`] + /// entries representing the specific outputs we can claim. + /// + /// The non-HTLC balance for the revoked commitment (i.e., the counterparty's to_self output) + /// may still appear as a separate [`Balance::CounterpartyRevokedOutputClaimable`] or + /// [`Balance::ClaimableAwaitingConfirmations`] alongside this balance. + /// + /// [`Event::PersistClaimInfo`]: crate::events::Event::PersistClaimInfo + /// [`Event::ClaimInfoRequest`]: crate::events::Event::ClaimInfoRequest + /// [`ChainMonitor::provide_claim_info`]: crate::chain::chainmonitor::ChainMonitor::provide_claim_info + CounterpartyRevokedOutputClaimableAwaitingClaimInfo { + /// The identifier for which claim information is being requested. + claim_key: ClaimKey, + }, } impl Balance { @@ -995,7 +1071,8 @@ impl Balance { => *amount_satoshis, Balance::MaybeTimeoutClaimableHTLC { amount_satoshis, outbound_payment, .. } => if *outbound_payment { 0 } else { *amount_satoshis }, - Balance::MaybePreimageClaimableHTLC { .. } => 0, + Balance::MaybePreimageClaimableHTLC { .. } + | Balance::CounterpartyRevokedOutputClaimableAwaitingClaimInfo { .. } => 0, } } } @@ -1151,6 +1228,14 @@ struct FundingScope { /// The set of outpoints in each counterparty commitment transaction. 
We always need at least /// the payment hash from `HTLCOutputInCommitment` to claim even a revoked commitment /// transaction broadcast as we need to be able to construct the witness script in all cases. + /// + /// Note that for revoked commitment transactions, the `per_commitment_data` (the `Option` in + /// the value tuple) may be `None` if the claim info was offloaded via + /// [`Event::PersistClaimInfo`] and removed after [`ChainMonitor::claim_info_persisted`] was + /// called. In that case, an [`Event::ClaimInfoRequest`] will be emitted when the data is + /// needed. + /// + /// [`ChainMonitor::claim_info_persisted`]: crate::chain::chainmonitor::ChainMonitor::claim_info_persisted // // TODO(splicing): We shouldn't have to track these duplicatively per `FundingScope`. Ideally, // we have a global map to track the HTLCs, along with their source, as they should be @@ -1349,6 +1434,11 @@ pub(crate) struct ChannelMonitorImpl { /// [`ANTI_REORG_DELAY`], so we have to track them here. spendable_txids_confirmed: Vec, + /// Transactions confirmed spending a counterparty commitment while awaiting a call to + /// [`Self::provide_claim_info`]. + /// Replayed through [`Self::transactions_confirmed`] once claim info is provided. + pending_claim_info_txn: Vec<(Transaction, u32, Header)>, + // We simply modify best_block in Channel's block_connected so that serialization is // consistent but hopefully the users' copy handles block_connected in a consistent way. 
// (we do *not*, however, update them in update_monitor to ensure any local user copies keep @@ -1755,6 +1845,7 @@ pub(crate) fn write_chanmon_internal( (34, channel_monitor.alternative_funding_confirmed, option), (35, channel_monitor.is_manual_broadcast, required), (37, channel_monitor.funding_seen_onchain, required), + (38, channel_monitor.pending_claim_info_txn, required), }); Ok(()) @@ -1955,6 +2046,7 @@ impl ChannelMonitor { htlcs_resolved_on_chain: Vec::new(), htlcs_resolved_to_user: new_hash_set(), spendable_txids_confirmed: Vec::new(), + pending_claim_info_txn: Vec::new(), best_block, counterparty_node_id: counterparty_node_id, @@ -2155,6 +2247,32 @@ impl ChannelMonitor { self.inner.lock().unwrap().get_and_clear_pending_monitor_events() } + /// Notifies the system that [`ClaimInfo`] associated with a given `claim_key` has been durably + /// persisted. + /// + /// This method should be called after a [`ClaimInfo`] is persisted in response to an + /// [`Event::PersistClaimInfo`]. The [`ClaimInfo`] will thus be removed from both in-memory and + /// on-disk storage within the [`ChannelMonitor`]. + pub fn claim_info_persisted(&self, claim_key: &ClaimKey) { + self.inner.lock().unwrap().claim_info_persisted(&claim_key.0); + } + + pub(crate) fn provide_claim_info( + &self, claim_key: ClaimKey, claim_metadata: ClaimMetadata, claim_info: ClaimInfo, + broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + ) { + let mut inner = self.inner.lock().unwrap(); + let logger = WithChannelMonitor::from_impl(logger, &*inner, None); + inner.provide_claim_info( + claim_key, + claim_metadata, + claim_info, + broadcaster, + fee_estimator, + &logger, + ); + } + /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity. 
/// /// For channels featuring anchor outputs, this method will also process [`BumpTransaction`] @@ -2203,6 +2321,21 @@ impl ChannelMonitor { ret } + #[cfg(any(test, feature = "_test_utils"))] + pub fn get_and_clear_claim_info_events(&self) -> Vec { + let mut res = Vec::new(); + let mut inner = self.inner.lock().unwrap(); + inner.pending_events.retain(|ev| { + if let Event::PersistClaimInfo { .. } = ev { + res.push(ev.clone()); + false + } else { + true + } + }); + res + } + /// Gets the counterparty's initial commitment transaction. The returned commitment /// transaction is unsigned. This is intended to be called during the initial persistence of /// the monitor (inside an implementation of [`Persist::persist_new_channel`]), to allow for @@ -2348,6 +2481,17 @@ impl ChannelMonitor { inner.unsafe_get_latest_holder_commitment_txn(&logger) } + /// Like [`Self::unsafe_get_latest_holder_commitment_txn`] but returns the commitment + /// transaction for the confirmed funding scope (which may be a splice/alternative funding). 
+ #[cfg(any(test, feature = "_test_utils"))] + pub fn unsafe_get_latest_holder_commitment_txn_for_confirmed_scope( + &self, logger: &L, + ) -> Vec { + let mut inner = self.inner.lock().unwrap(); + let logger = WithChannelMonitor::from_impl(logger, &*inner, None); + inner.unsafe_get_latest_holder_commitment_txn_for_confirmed_scope(&logger) + } + /// Processes transactions in a newly connected block, which may result in any of the following: /// - update the monitor's state against resolved HTLCs /// - punish the counterparty in the case of seeing a revoked commitment transaction @@ -2892,7 +3036,7 @@ impl ChannelMonitor { if let Some(txid) = confirmed_txid { let funding_spent = get_confirmed_funding_scope!(us); let mut found_commitment_tx = false; - if let Some(counterparty_tx_htlcs) = funding_spent.counterparty_claimable_outpoints.get(&txid) { + if us.counterparty_commitment_txn_on_chain.contains_key(&txid) { // First look for the to_remote output back to us. if let Some(conf_thresh) = pending_commitment_tx_conf_thresh { if let Some(value) = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| { @@ -2913,10 +3057,20 @@ impl ChannelMonitor { // confirmation with the same height or have never met our dust amount. 
} } + let counterparty_tx_htlcs_opt = + funding_spent.counterparty_claimable_outpoints.get(&txid); if Some(txid) == funding_spent.current_counterparty_commitment_txid || Some(txid) == funding_spent.prev_counterparty_commitment_txid { + let counterparty_tx_htlcs = counterparty_tx_htlcs_opt + .expect("We must always have state for our counterparty's latest and previous commitment tx, especially since it's on chain"); walk_htlcs!(false, false, counterparty_tx_htlcs.iter().map(|(a, b)| (a, b.as_ref().map(|b| &**b)))); } else { - walk_htlcs!(false, true, counterparty_tx_htlcs.iter().map(|(a, b)| (a, b.as_ref().map(|b| &**b)))); + if let Some(counterparty_tx_htlcs) = counterparty_tx_htlcs_opt { + walk_htlcs!(false, true, counterparty_tx_htlcs.iter().map(|(a, b)| (a, b.as_ref().map(|b| &**b)))); + } else { + res.push(Balance::CounterpartyRevokedOutputClaimableAwaitingClaimInfo { + claim_key: ClaimKey(txid), + }); + } // The counterparty broadcasted a revoked state! // Look for any StaticOutputs first, generating claimable balances for those. 
// If any match the confirmed counterparty revoked to_self output, skip @@ -3430,6 +3584,13 @@ impl ChannelMonitorImpl { } else { assert!(cfg!(fuzzing), "Commitment txids are unique outside of fuzzing, where hashes can collide"); } + let htlcs = funding.counterparty_claimable_outpoints.get(&txid).unwrap().iter().map(|(htlc, _)| htlc.clone()).collect(); + self.pending_events.push(Event::PersistClaimInfo { + funding_txo: funding.funding_outpoint().into_bitcoin_outpoint(), + channel_id: self.channel_id, + claim_key: ClaimKey(txid), + claim_info: ClaimInfo { htlcs }, + }) } }; core::iter::once(&mut self.funding).chain(&mut self.pending_funding).for_each(prune_htlc_sources); @@ -4374,6 +4535,85 @@ impl ChannelMonitorImpl { ret } + fn claim_info_persisted(&mut self, txid: &Txid) { + core::iter::once(&mut self.funding).chain(self.pending_funding.iter_mut()).for_each( + |funding| { + funding.counterparty_claimable_outpoints.remove(txid); + }, + ); + } + + fn provide_claim_info( + &mut self, claim_key: ClaimKey, claim_metadata: ClaimMetadata, claim_info: ClaimInfo, + broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, + ) { + let txid = claim_key.0; + let claimable_outpoints: Vec<_> = + claim_info.htlcs.into_iter().zip(core::iter::repeat(None)).collect(); + let funding = self + .alternative_funding_confirmed + .and_then(|(alt_txid, _)| { + self.pending_funding.iter_mut().find(|f| f.funding_txid() == alt_txid) + }) + .unwrap_or(&mut self.funding); + funding.counterparty_claimable_outpoints.insert(txid, claimable_outpoints); + + // First replay the commitment transaction itself, generating the claimable packages for it. 
+ let (claimable_outpoints, _) = self.check_spend_counterparty_transaction( + txid, + &claim_metadata.tx, + claim_metadata.height, + &claim_metadata.block_hash, + logger, + ); + + // Then filter the new claimable packages by transactions that have already been spent + // on-chain (which we'll replay in a moment) and give them to the onchain_tx_handler. + let buffered_txn = core::mem::take(&mut self.pending_claim_info_txn); + let already_spent: HashSet<_> = buffered_txn + .iter() + .flat_map(|(tx, _, _)| { + tx.input + .iter() + .filter(|inp| inp.previous_output.txid == txid) + .map(|inp| inp.previous_output) + }) + .collect(); + + let claimable_outpoints = claimable_outpoints + .into_iter() + .filter(|package| { + debug_assert_eq!(package.outpoints().len(), 1); + !package.outpoints().iter().any(|outp| already_spent.contains(*outp)) + }) + .collect(); + + let conf_target = self.closure_conf_target(); + self.onchain_tx_handler.update_claims_view_from_requests( + claimable_outpoints, + claim_metadata.height, + self.best_block.height, + broadcaster, + conf_target, + &self.destination_script, + fee_estimator, + logger, + ); + + // Finally, replay all the transactions which spent the commitment transaction on-chain. + for (tx, buf_height, buf_header) in buffered_txn { + let txdata = [(0usize, &tx)]; + self.transactions_confirmed( + &buf_header, + &txdata, + buf_height, + broadcaster, + fee_estimator, + logger, + ); + } + } + /// Gets the set of events that are repeated regularly (e.g. those which RBF bump /// transactions). We're okay if we lose these on restart as they'll be regenerated for us at /// some regular interval via [`ChannelMonitor::rebroadcast_pending_claims`]. @@ -4585,8 +4825,8 @@ impl ChannelMonitorImpl { /// height > height + CLTV_SHARED_CLAIM_BUFFER. In any case, will install monitoring for /// HTLC-Success/HTLC-Timeout transactions. 
/// - /// Returns packages to claim the revoked output(s) and general information about the output that - /// is to the counterparty in the commitment transaction. + /// Returns unmerged (i.e. single-claim) packages to claim the revoked output(s) and general + /// information about the output that is to the counterparty in the commitment transaction. #[rustfmt::skip] fn check_spend_counterparty_transaction(&mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) -> (Vec, CommitmentTxCounterpartyOutputInfo) @@ -4667,6 +4907,17 @@ impl ChannelMonitorImpl { claimable_outpoints.push(justice_package); } } + } else { + self.pending_events.push(Event::ClaimInfoRequest { + funding_txo: funding_spent.funding_outpoint().into_bitcoin_outpoint(), + channel_id: self.channel_id, + claim_key: ClaimKey(commitment_txid), + claim_metadata: ClaimMetadata { + block_hash: block_hash.clone(), + tx: commitment_tx.clone(), + height, + }, + }); } // Last, track onchain revoked commitment transaction and fail backward outgoing HTLCs as payment path is broken @@ -4680,14 +4931,6 @@ impl ChannelMonitorImpl { block_hash, per_commitment_claimable_data.iter().map(|(htlc, htlc_source)| (htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref())) ), logger); - } else { - // Our fuzzers aren't constrained by pesky things like valid signatures, so can - // spend our funding output with a transaction which doesn't match our past - // commitment transactions. Thus, we can only debug-assert here when not - // fuzzing. 
- debug_assert!(cfg!(fuzzing), "We should have per-commitment option for any recognized old commitment txn"); - fail_unbroadcast_htlcs!(self, "revoked counterparty", commitment_txid, commitment_tx, height, - block_hash, [].iter().map(|reference| *reference), logger); } } } else if let Some(per_commitment_claimable_data) = per_commitment_option { @@ -4715,6 +4958,11 @@ impl ChannelMonitorImpl { } } + // provide_claim_info relies on this: + for claimable in claimable_outpoints.iter() { + debug_assert_eq!(claimable.outpoints().len(), 1); + } + (claimable_outpoints, to_counterparty_output_info) } @@ -4889,7 +5137,13 @@ impl ChannelMonitorImpl { let per_commitment_point = PublicKey::from_secret_key(&self.onchain_tx_handler.secp_ctx, &per_commitment_key); let funding_spent = get_confirmed_funding_scope!(self); - debug_assert!(funding_spent.counterparty_claimable_outpoints.contains_key(commitment_txid)); + // The counterparty_claimable_outpoints entry may not exist if the claim info was + // offloaded via `claim_info_persisted` and not yet re-provided. The HTLC justice + // claim only requires the per-commitment key, not the full claim info. + debug_assert!( + funding_spent.counterparty_claimable_outpoints.contains_key(commitment_txid) + || self.counterparty_commitment_txn_on_chain.contains_key(commitment_txid) + ); let htlc_txid = tx.compute_txid(); let mut claimable_outpoints = vec![]; @@ -5212,6 +5466,32 @@ impl ChannelMonitorImpl { holder_transactions } + /// Like [`Self::unsafe_get_latest_holder_commitment_txn`] but returns the commitment + /// transaction for the confirmed funding scope (which may be a splice/alternative funding). + #[cfg(any(test, feature = "_test_utils"))] + fn unsafe_get_latest_holder_commitment_txn_for_confirmed_scope( + &mut self, logger: &WithContext, + ) -> Vec { + log_debug!( + logger, + "Getting signed copy of latest holder commitment transaction for confirmed scope!" 
+ ); + let funding = get_confirmed_funding_scope!(self); + let sig = self + .onchain_tx_handler + .signer + .unsafe_sign_holder_commitment( + &funding.channel_parameters, + &funding.current_holder_commitment_tx, + &self.onchain_tx_handler.secp_ctx, + ) + .expect("sign holder commitment"); + let redeem_script = funding.channel_parameters.make_funding_redeemscript(); + let commitment_tx = + funding.current_holder_commitment_tx.add_holder_sig(&redeem_script, sig); + vec![commitment_tx] + } + #[rustfmt::skip] fn block_connected( &mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, @@ -5243,6 +5523,7 @@ impl ChannelMonitorImpl { self.best_block = BestBlock::new(block_hash, height); log_trace!(logger, "Best block re-orged, replaced with new block {} at height {}", block_hash, height); self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height); + self.pending_claim_info_txn.retain(|(_, h, _)| *h <= height); let conf_target = self.closure_conf_target(); self.onchain_tx_handler.blocks_disconnected( height, &broadcaster, conf_target, &self.destination_script, fee_estimator, logger, @@ -5505,6 +5786,22 @@ impl ChannelMonitorImpl { } self.is_resolving_htlc_output(&tx, height, &block_hash, logger); + // If any input of this transaction spends a revoked counterparty + // commitment output whose claim info hasn't been provided yet, buffer + // the transaction so we can replay it through + // `is_resolving_htlc_output` once claim info arrives. 
+ for tx_input in &tx.input { + let commitment_txid = tx_input.previous_output.txid; + if self.counterparty_commitment_txn_on_chain.contains_key(&commitment_txid) { + let funding = get_confirmed_funding_scope!(self); + if !funding.counterparty_claimable_outpoints.contains_key(&commitment_txid) { + self.pending_claim_info_txn + .push(((*tx).clone(), height, *header)); + break; + } + } + } + self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash, logger); } } @@ -5768,6 +6065,7 @@ impl ChannelMonitorImpl { //- htlc update there as failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected //- maturing spendable output has transaction paying us has been disconnected self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= new_height); + self.pending_claim_info_txn.retain(|(_, h, _)| *h <= new_height); // TODO: Replace with `take_if` once our MSRV is >= 1.80. let mut should_broadcast_commitment = false; @@ -5823,6 +6121,7 @@ impl ChannelMonitorImpl { log_info!(logger, "Transaction {} reorg'd out", entry.txid); false } else { true }); + self.pending_claim_info_txn.retain(|(_, h, _)| *h < removed_height); } debug_assert!(!self.onchain_events_awaiting_threshold_conf.iter().any(|ref entry| entry.txid == *txid)); @@ -6521,6 +6820,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let mut alternative_funding_confirmed = None; let mut is_manual_broadcast = RequiredWrapper(None); let mut funding_seen_onchain = RequiredWrapper(None); + let mut pending_claim_info_txn: Option> = None; read_tlv_fields!(reader, { (1, funding_spend_confirmed, option), (3, htlcs_resolved_on_chain, optional_vec), @@ -6543,6 +6843,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP (34, alternative_funding_confirmed, option), (35, is_manual_broadcast, (default_value, false)), (37, funding_seen_onchain, (default_value, true)), + (38, 
pending_claim_info_txn, option), }); // Note that `payment_preimages_with_info` was added (and is always written) in LDK 0.1, so // we can use it to determine if this monitor was last written by LDK 0.1 or later. @@ -6709,6 +7010,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(), htlcs_resolved_to_user: htlcs_resolved_to_user.unwrap(), spendable_txids_confirmed: spendable_txids_confirmed.unwrap(), + pending_claim_info_txn: pending_claim_info_txn.unwrap_or_default(), best_block, counterparty_node_id: counterparty_node_id.unwrap_or(dummy_node_id), diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 3f6bb0efb01..8299b8608d8 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -45,6 +45,7 @@ use crate::util::ser::{ UpgradableRequired, WithoutLength, Writeable, Writer, }; +use crate::chain::channelmonitor::{ClaimInfo, ClaimKey, ClaimMetadata}; use crate::io; use crate::sync::Arc; use bitcoin::hashes::sha256::Hash as Sha256; @@ -1464,6 +1465,10 @@ pub enum Event { funding_txo: Option, /// The features that this channel will operate with. channel_type: ChannelTypeFeatures, + /// The outpoint of the channel's previous funding transaction that was spent by a splice. + /// + /// Will be `None` for initial channel funding (non-splice) events. + spent_funding_txo: Option, }, /// Used to indicate that a channel that got past the initial handshake with the given `channel_id` is in the /// process of closure. This includes previously opened channels, and channels that time out from not being funded. @@ -1669,6 +1674,94 @@ pub enum Event { /// This field will be `None` only for objects serialized prior to LDK 0.2.0. failure_reason: Option, }, + /// Indicates that a [`ClaimInfo`] for a specific counterparty commitment transaction must be + /// supplied to LDK if available. 
+ /// + /// This event is generated when there is a need for [`ClaimInfo`] that was previously stored + /// using [`Event::PersistClaimInfo`] for the specified counterparty commitment transaction. + /// This event will not be generated if [`Event::PersistClaimInfo`] event is being ignored. + /// + /// The response to this event should be handled by calling + /// [`ChainMonitor::provide_claim_info`] with the [`ClaimInfo`] that was previously stored and + /// [`ClaimMetadata`] from this event. + /// + /// # Failure Behavior and Persistence + /// This event will eventually be replayed after failures-to-handle (i.e., the event handler + /// returning `Err(ReplayEvent ())`) and will be persisted across restarts. + /// + /// [`ChainMonitor::provide_claim_info`]: crate::chain::chainmonitor::ChainMonitor::provide_claim_info + ClaimInfoRequest { + /// The channel's funding outpoint to which this claim pertains. This will match the + /// [`Event::PersistClaimInfo::funding_txo`] value for the same `claim_key`. + funding_txo: OutPoint, + /// The ID of the channel to which this claim pertains. This will match the + /// [`Event::PersistClaimInfo::channel_id`] value for the same `claim_key`. + channel_id: ChannelId, + /// The identifier for which [`ClaimInfo`] is requested. + claim_key: ClaimKey, + /// Additional metadata that must be supplied in the call to [`ChainMonitor::provide_claim_info`]. + /// + /// [`ChainMonitor::provide_claim_info`]: crate::chain::chainmonitor::ChainMonitor::provide_claim_info + claim_metadata: ClaimMetadata, + }, + /// Provides a [`ClaimInfo`] that may be persisted separately to reduce [`ChannelMonitor`] size. + /// + /// This event is used to persist information regarding a previous state which may be necessary + /// to claim funds if our counterparty broadcasts a stale state. The persisted [`ClaimInfo`] + /// may later be requested by LDK with an [`Event::ClaimInfoRequest`]. 
+ /// + /// [`ClaimInfo`]s are unique for a given [`claim_key`], however, you should consider storing + /// them with a combined key with the [`funding_txo`] and/or [`channel_id`] as well. After a + /// splice is irrevocably confirmed on chain, [`ClaimInfo`]s with a [`funding_txo`] + /// matching the prior state can be pruned. Similarly, after a channel has been closed and + /// resolution irrevocably confirmed on chain, any [`ClaimInfo`]s with a matching + /// [`channel_id`] can be pruned. + /// + /// After successfully persisting the claim information, [`ChainMonitor::claim_info_persisted`] + /// must be called to notify the system that the data has been durably stored and can be + /// removed from the [`ChannelMonitor`]. + /// + /// This event can be safely ignored if [`ChannelMonitor`] in-memory and on-disk size is not a + /// limitation, such as in a low-traffic node. + /// + /// # Failure Behavior and Persistence + /// This event will eventually be replayed after failures-to-handle (i.e., the event handler + /// returning `Err(ReplayEvent ())`), but won't be persisted across restarts. + /// + /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor + /// [`claim_key`]: Event::PersistClaimInfo::claim_key + /// [`funding_txo`]: Event::PersistClaimInfo::funding_txo + /// [`channel_id`]: Event::PersistClaimInfo::channel_id + /// [`ChainMonitor::claim_info_persisted`]: crate::chain::chainmonitor::ChainMonitor::claim_info_persisted + /// [`ClaimInfoRequest`]: Event::ClaimInfoRequest + PersistClaimInfo { + /// The channel's funding outpoint to which this claim pertains. 
+ /// + /// Because a splice confirming invalidates all previous channel states on the prior + /// funding outpoint (as those states would double-spend the channel's original funding + /// output, now spent by the splice transaction), an [`Event::ChannelReady`] also implies + /// any stored [`ClaimInfo`]s with a `funding_txo` matching + /// [`Event::ChannelReady::spent_funding_txo`] can be pruned. Further, an + /// [`Event::SpliceFailed`] with a matching [`Event::SpliceFailed::abandoned_funding_txo`] + /// can be pruned as they represent states on a splicing RBF attempt which has now been + /// cancelled. + funding_txo: OutPoint, + /// The ID of the channel to which this claim pertains. After the channel is closed and + /// fully resolved onchain (i.e. [`ChannelMonitor::get_claimable_balances`] for the + /// corresponding [`ChannelMonitor`] returns an empty set), any stored [`ClaimInfo`]s with + /// a matching `channel_id` can be pruned. + /// + /// [`ChannelMonitor::get_claimable_balances`]: crate::chain::channelmonitor::ChannelMonitor::get_claimable_balances + /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor + channel_id: ChannelId, + /// The identifier against which [`ClaimInfo`] is to be persisted. + claim_key: ClaimKey, + /// Claim related information necessary to generate revocation transactions, that must be durably + /// persisted before calling [`ChainMonitor::claim_info_persisted`]. + /// + /// [`ChainMonitor::claim_info_persisted`]: crate::chain::chainmonitor::ChainMonitor::claim_info_persisted + claim_info: ClaimInfo, + }, /// Indicates that a transaction originating from LDK needs to have its fee bumped. This event /// requires confirmed external funds to be readily available to spend. 
/// @@ -2223,12 +2316,14 @@ impl Writeable for Event { ref counterparty_node_id, ref funding_txo, ref channel_type, + ref spent_funding_txo, } => { 29u8.write(writer)?; write_tlv_fields!(writer, { (0, channel_id, required), (1, funding_txo, option), (2, user_channel_id, required), + (3, spent_funding_txo, option), (4, counterparty_node_id, required), (6, channel_type, required), }); @@ -2343,6 +2438,24 @@ impl Writeable for Event { (9, abandoned_funding_txo, option), }); }, + &Event::ClaimInfoRequest { + ref funding_txo, + ref channel_id, + ref claim_key, + ref claim_metadata, + } => { + 54u8.write(writer)?; + write_tlv_fields!(writer, { + (1, funding_txo, required), + (3, channel_id, required), + (5, claim_key, required), + (7, claim_metadata, required), + }); + }, + &Event::PersistClaimInfo { .. } => { + 55u8.write(writer)?; + // We don't write `PersistClaimInfo` because it is ok if we lost it, and it may be replayed. + }, // Note that, going forward, all new events must only write data inside of // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write // data via `write_tlv_fields`. 
@@ -2805,10 +2918,12 @@ impl MaybeReadable for Event { let mut counterparty_node_id = RequiredWrapper(None); let mut funding_txo = None; let mut channel_type = RequiredWrapper(None); + let mut spent_funding_txo = None; read_tlv_fields!(reader, { (0, channel_id, required), (1, funding_txo, option), (2, user_channel_id, required), + (3, spent_funding_txo, option), (4, counterparty_node_id, required), (6, channel_type, required), }); @@ -2819,6 +2934,7 @@ impl MaybeReadable for Event { counterparty_node_id: counterparty_node_id.0.unwrap(), funding_txo, channel_type: channel_type.0.unwrap(), + spent_funding_txo, })) }; f() @@ -2980,6 +3096,26 @@ impl MaybeReadable for Event { }; f() }, + 54u8 => { + let mut f = || { + _init_and_read_len_prefixed_tlv_fields!(reader, { + (1, funding_txo, required), + (3, channel_id, required), + (5, claim_key, required), + (7, claim_metadata, required), + }); + + Ok(Some(Event::ClaimInfoRequest { + funding_txo: funding_txo.0.unwrap(), + channel_id: channel_id.0.unwrap(), + claim_key: claim_key.0.unwrap(), + claim_metadata: claim_metadata.0.unwrap(), + })) + }; + f() + }, + // Note that we do not write a length-prefixed TLV for PersistClaimInfo events. + 55u8 => Ok(None), // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue. // Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt // reads. diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index cd32d219b93..eb25baabb9c 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -94,6 +94,10 @@ fn test_monitor_and_persister_update_fail() { (nodes[0].keys_manager, nodes[0].keys_manager), ) .unwrap(); + // Compare events separately since we don't ever persist [`Event::PersistClaimInfo`] event. 
+ let events = monitor.get_and_clear_pending_events(); + let new_events = new_monitor.get_and_clear_pending_events(); + assert_eq!(new_events, events); assert!(new_monitor == *monitor); new_monitor }; @@ -104,6 +108,7 @@ fn test_monitor_and_persister_update_fail() { &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager, + false, ); assert_eq!( chain_mon.watch_channel(chan.2, new_monitor), @@ -3546,7 +3551,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode // The event processing should release the last RAA update. // It should also generate the next update for nodes[2]. - check_added_monitors(&nodes[1], 2); + check_added_monitors_with_claim_info_events(&nodes[1], 2, 1); let mut bs_htlc_fulfill = get_htlc_update_msgs(&nodes[1], &node_c_id); check_added_monitors(&nodes[1], 0); @@ -4927,6 +4932,7 @@ fn native_async_persist() { native_async_persister, Arc::clone(&keys_manager), keys_manager.get_peer_storage_key(), + false, ); // Write the initial ChannelMonitor async, testing primarily that the `MonitorEvent::Completed` diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 9361cd3c749..bca96c5d011 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -6610,6 +6610,7 @@ pub struct SpliceFundingPromotion { pub monitor_update: Option, pub announcement_sigs: Option, pub discarded_funding: Vec, + pub spent_funding_txo: Option, } impl FundedChannel @@ -11002,7 +11003,7 @@ where log_info!(logger, "Promoting splice funding txid {}", splice_txid); - let discarded_funding = { + let (spent_funding_txo, discarded_funding) = { // Scope `funding` to avoid unintentionally using it later since it is swapped below. 
let funding = pending_splice .negotiated_candidates @@ -11010,6 +11011,8 @@ where .find(|funding| funding.get_funding_txid() == Some(splice_txid)) .unwrap(); let prev_funding_txid = self.funding.get_funding_txid(); + let spent_funding_txo = + self.funding.get_funding_txo().map(|o| o.into_bitcoin_outpoint()); if let Some(scid) = self.funding.short_channel_id { self.context.historical_scids.push(scid); @@ -11018,7 +11021,7 @@ where core::mem::swap(&mut self.funding, funding); // The swap above places the previous `FundingScope` into `pending_funding`. - pending_splice + let discarded_funding = pending_splice .negotiated_candidates .drain(..) .filter(|funding| funding.get_funding_txid() != prev_funding_txid) @@ -11033,7 +11036,8 @@ where .expect("Negotiated splices must have a known funding outpoint"), }) }) - .collect::>() + .collect::>(); + (spent_funding_txo, discarded_funding) }; self.context.interactive_tx_signing_session = None; @@ -11073,6 +11077,7 @@ where monitor_update, announcement_sigs, discarded_funding, + spent_funding_txo, }) } @@ -11143,7 +11148,7 @@ where &self.context.channel_id, ); - let (funding_txo, monitor_update, announcement_sigs, discarded_funding) = + let (funding_txo, monitor_update, announcement_sigs, discarded_funding, spent_funding_txo) = self.maybe_promote_splice_funding( node_signer, chain_hash, user_config, height, logger, ).map(|splice_promotion| ( @@ -11151,9 +11156,10 @@ where splice_promotion.monitor_update, splice_promotion.announcement_sigs, splice_promotion.discarded_funding, - )).unwrap_or((None, None, None, Vec::new())); + splice_promotion.spent_funding_txo, + )).unwrap_or((None, None, None, Vec::new(), None)); - return Ok((Some(FundingConfirmedMessage::Splice(splice_locked, funding_txo, monitor_update, discarded_funding)), announcement_sigs)); + return Ok((Some(FundingConfirmedMessage::Splice(splice_locked, funding_txo, monitor_update, discarded_funding, spent_funding_txo)), announcement_sigs)); } } } @@ -11315,7 +11321,7 @@ 
where ); - let (funding_txo, monitor_update, announcement_sigs, discarded_funding) = chain_node_signer + let (funding_txo, monitor_update, announcement_sigs, discarded_funding, spent_funding_txo) = chain_node_signer .and_then(|(chain_hash, node_signer, user_config)| { // We can only promote on blocks connected, which is when we expect // `chain_node_signer` to be `Some`. @@ -11326,10 +11332,11 @@ where splice_promotion.monitor_update, splice_promotion.announcement_sigs, splice_promotion.discarded_funding, + splice_promotion.spent_funding_txo, )) - .unwrap_or((None, None, None, Vec::new())); + .unwrap_or((None, None, None, Vec::new(), None)); - return Ok((Some(FundingConfirmedMessage::Splice(splice_locked, funding_txo, monitor_update, discarded_funding)), timed_out_htlcs, announcement_sigs)); + return Ok((Some(FundingConfirmedMessage::Splice(splice_locked, funding_txo, monitor_update, discarded_funding, spent_funding_txo)), timed_out_htlcs, announcement_sigs)); } } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ada27af749f..3d996186d63 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3272,6 +3272,7 @@ macro_rules! emit_initial_channel_ready_event { .get_funding_txo() .map(|outpoint| outpoint.into_bitcoin_outpoint()), channel_type: $channel.funding.get_channel_type().clone(), + spent_funding_txo: None, }, None, )); @@ -12969,6 +12970,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ splice_promotion.funding_txo.into_bitcoin_outpoint(), ), channel_type: chan.funding.get_channel_type().clone(), + spent_funding_txo: splice_promotion.spent_funding_txo, }, None, )); @@ -15476,7 +15478,13 @@ impl< pub(super) enum FundingConfirmedMessage { Establishment(msgs::ChannelReady), - Splice(msgs::SpliceLocked, Option, Option, Vec), + Splice( + msgs::SpliceLocked, + Option, + Option, + Vec, + Option, + ), } impl< @@ -15551,7 +15559,7 @@ impl< log_trace!(logger, "Sending channel_ready WITHOUT channel_update"); } }, - Some(FundingConfirmedMessage::Splice(splice_locked, funding_txo, monitor_update_opt, discarded_funding)) => { + Some(FundingConfirmedMessage::Splice(splice_locked, funding_txo, monitor_update_opt, discarded_funding, spent_funding_txo)) => { let counterparty_node_id = funded_channel.context.get_counterparty_node_id(); let channel_id = funded_channel.context.channel_id(); @@ -15579,6 +15587,7 @@ impl< counterparty_node_id, funding_txo: Some(funding_txo.into_bitcoin_outpoint()), channel_type: funded_channel.funding.get_channel_type().clone(), + spent_funding_txo, }, None)); discarded_funding.into_iter().for_each(|funding_info| { let event = Event::DiscardFunding { @@ -21250,7 +21259,7 @@ pub mod bench { let seed_a = [1u8; 32]; let keys_manager_a = KeysManager::new(&seed_a, 42, 42, true); - let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a, &keys_manager_a, keys_manager_a.get_peer_storage_key()); + let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a, &keys_manager_a, keys_manager_a.get_peer_storage_key(), false); let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &message_router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters { network, best_block: BestBlock::from_network(network), @@ -21260,7 +21269,7 @@ pub mod 
bench { let logger_b = test_utils::TestLogger::with_id("node a".to_owned()); let seed_b = [2u8; 32]; let keys_manager_b = KeysManager::new(&seed_b, 42, 42, true); - let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_b, &keys_manager_b, keys_manager_b.get_peer_storage_key()); + let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_b, &keys_manager_b, keys_manager_b.get_peer_storage_key(), false); let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &message_router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters { network, best_block: BestBlock::from_network(network), diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 2d971c3a100..b8f61e93d91 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -11,7 +11,7 @@ //! nodes for functional tests. use crate::blinded_path::payment::DummyTlvs; -use crate::chain::channelmonitor::{ChannelMonitor, HTLC_FAIL_BACK_BUFFER}; +use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER}; use crate::chain::transaction::OutPoint; use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; use crate::events::bump_transaction::sync::BumpTransactionEventHandlerSync; @@ -929,6 +929,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { &feeest, &persister, &self.keys_manager, + false, ); for deserialized_monitor in deserialized_monitors.drain(..) { let channel_id = deserialized_monitor.channel_id(); @@ -1267,11 +1268,72 @@ pub fn commit_tx_fee_msat( /// Check whether N channel monitor(s) have been added. 
pub fn check_added_monitors>(node: &H, count: usize) { + do_check_added_monitors(node, count, None); +} + +pub fn check_added_monitors_with_claim_info_events>( + node: &H, count: usize, expected_claim_info_events: usize, +) { + do_check_added_monitors(node, count, Some(expected_claim_info_events)); +} + +pub fn do_check_added_monitors>( + node: &H, count: usize, expected_claim_info_events: Option, +) { if let Some(chain_monitor) = node.chain_monitor() { - let mut added_monitors = chain_monitor.added_monitors.lock().unwrap(); + let added_monitors = chain_monitor.added_monitors.lock().unwrap().split_off(0); let n = added_monitors.len(); + let mut channels_with_commitment_secrets = new_hash_set(); + let commitment_secret_updates = added_monitors + .iter() + .map(|(channel_id, _, updates_opt)| { + if let Some(updates) = updates_opt { + let is_commitment_secret = |update: &&ChannelMonitorUpdateStep| { + matches!(update, ChannelMonitorUpdateStep::CommitmentSecret { .. }) + }; + let count = updates.updates.iter().filter(is_commitment_secret).count(); + if count > 0 { + channels_with_commitment_secrets.insert(*channel_id); + } + count + } else { + 0 + } + }) + .sum(); + let mut added_claim_info_events: usize = 0; + if commitment_secret_updates > 0 { + let persist_claim_info_events = + chain_monitor.chain_monitor.get_and_clear_claim_info_events(); + added_claim_info_events += persist_claim_info_events.len(); + + let mut seen_channel_fundings = new_hash_set(); + let mut claim_info_channel_ids = new_hash_set(); + for event in persist_claim_info_events { + match event { + Event::PersistClaimInfo { channel_id, funding_txo, claim_key, claim_info } => { + assert!(seen_channel_fundings.insert(funding_txo)); + claim_info_channel_ids.insert(channel_id); + let mut persisted_claim_infos = + chain_monitor.persisted_claim_infos.lock().unwrap(); + let persist_key = (channel_id, claim_key); + persisted_claim_infos.insert(persist_key, claim_info); + }, + _ => panic!(), + } + } + 
assert_eq!(claim_info_channel_ids, channels_with_commitment_secrets); + } + + if let Some(expected_claim_info_events) = expected_claim_info_events { + assert_eq!(added_claim_info_events, expected_claim_info_events); + } else { + // Each CommitmentSecret update produces one PersistClaimInfo event per + // active funding. For spliced channels with a pending funding, this means + // more claim info events than commitment secret updates. + assert!(added_claim_info_events >= commitment_secret_updates); + } assert_eq!(n, count, "expected {} monitors to be added, not {}", count, n); - added_monitors.clear(); } } @@ -1383,6 +1445,7 @@ macro_rules! _reload_node_inner { $node.fee_estimator, &$persister, &$node.keys_manager, + false, ); $node.chain_monitor = &$new_chain_monitor; @@ -4583,6 +4646,7 @@ where &cfg.fee_estimator, persisters[i], &cfg.keys_manager, + false, ); let seed = [i as u8; 32]; diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 09a87d93156..16f653062af 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -4860,6 +4860,7 @@ pub fn test_key_derivation_params() { &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager, + false, ); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); @@ -7385,6 +7386,10 @@ pub fn test_update_err_monitor_lockdown() { ) .unwrap() .1; + // Compare events separately since we don't ever persist [`Event::PersistClaimInfo`] event. 
+ let events = monitor.get_and_clear_pending_events(); + let new_events = new_monitor.get_and_clear_pending_events(); + assert_eq!(new_events, events); assert!(new_monitor == *monitor); new_monitor }; @@ -7395,6 +7400,7 @@ pub fn test_update_err_monitor_lockdown() { &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager, + false, ); assert_eq!( watchtower.watch_channel(chan_1.2, new_monitor), @@ -7493,6 +7499,10 @@ pub fn test_concurrent_monitor_claim() { ) .unwrap() .1; + // Compare events separately since we don't ever persist [`Event::PersistClaimInfo`] event. + let events = monitor.get_and_clear_pending_events(); + let new_events = new_monitor.get_and_clear_pending_events(); + assert_eq!(new_events, events); assert!(new_monitor == *monitor); new_monitor }; @@ -7503,6 +7513,7 @@ pub fn test_concurrent_monitor_claim() { &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager, + false, ); assert_eq!( watchtower.watch_channel(chan_1.2, new_monitor), @@ -7543,6 +7554,10 @@ pub fn test_concurrent_monitor_claim() { ) .unwrap() .1; + // Compare events separately since we don't ever persist [`Event::PersistClaimInfo`] event. + let events = monitor.get_and_clear_pending_events(); + let new_events = new_monitor.get_and_clear_pending_events(); + assert_eq!(new_events, events); assert!(new_monitor == *monitor); new_monitor }; @@ -7553,6 +7568,7 @@ pub fn test_concurrent_monitor_claim() { &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager, + false, ); assert_eq!( watchtower.watch_channel(chan_1.2, new_monitor), @@ -10010,7 +10026,7 @@ fn do_test_multi_post_event_actions(do_reload: bool) { // After the events are processed, the ChannelMonitorUpdates will be released and, upon their // completion, we'll respond to nodes[1] with an RAA + CS. 
get_revoke_commit_msgs(&nodes[0], &node_b_id); - check_added_monitors(&nodes[0], 3); + check_added_monitors_with_claim_info_events(&nodes[0], 3, 2); } #[xtest(feature = "_externalize_tests")] diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 18a976871a6..540821216b0 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -24,6 +24,7 @@ use crate::ln::chan_utils; use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId}; use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; +use crate::ln::splicing_tests::{initiate_splice_in, splice_channel}; use crate::crypto::utils::sign; use crate::util::ser::Writeable; use crate::util::scid_utils::block_from_scid; @@ -40,6 +41,7 @@ use bitcoin::transaction::Version; use crate::prelude::*; use crate::ln::functional_test_utils::*; +use crate::util::test_utils::TestChainMonitor; #[test] fn chanmon_fail_from_stale_commitment() { @@ -106,13 +108,17 @@ fn test_spendable_output<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, spendable_t if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { .. }) = spendable.pop().unwrap() {} else { panic!(); } } - if let Event::SpendableOutputs { outputs, .. } = spendable.pop().unwrap() { + test_spendable_output_event(node, spendable_tx, spendable.pop().unwrap()) +} + +fn test_spendable_output_event<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, spendable_tx: &Transaction, event: Event) -> Vec { + if let Event::SpendableOutputs { outputs, .. 
} = event { assert_eq!(outputs.len(), 1); let spend_tx = node.keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &Secp256k1::new()).unwrap(); check_spends!(spend_tx, spendable_tx); outputs - } else { panic!(); } + } else { panic!("{event:?}"); } } #[test] @@ -2853,7 +2859,7 @@ fn do_test_anchors_aggregated_revoked_htlc_tx(p2a_anchor: bool) { // Since Bob was able to confirm his revoked commitment, he'll now try to claim the HTLCs // through the success path. - assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().iter().filter(|e| !matches!(e, Event::ClaimInfoRequest {..})).collect::>().is_empty()); let mut events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); // Certain block `ConnectStyle`s cause an extra `ChannelClose` event to be emitted since the // best block is updated before the confirmed transactions are notified. 
@@ -3906,3 +3912,568 @@ fn test_ladder_preimage_htlc_claims() { expect_payment_sent(&nodes[0], payment_preimage2, None, true, false); check_added_monitors(&nodes[0], 1); } + +#[test] +fn test_revoked_claim_with_offloaded_claim_info() { + for do_splice in [true, false] { + for confirm_splice in [true, false] { + if !do_splice && confirm_splice { + continue; + } + for confirm_htlcs_before_claim_info in [true, false] { + for claim_htlcs_on_commitment in [true, false] { + if confirm_htlcs_before_claim_info && claim_htlcs_on_commitment { + continue; + } + for claim_to_self_before_claim_info in [true, false] { + do_test_revoked_claim_with_offloaded_claim_info( + do_splice, + confirm_splice, + confirm_htlcs_before_claim_info, + claim_htlcs_on_commitment, + claim_to_self_before_claim_info, + ); + } + } + } + } + } +} + +fn do_test_revoked_claim_with_offloaded_claim_info( + do_splice: bool, confirm_splice: bool, confirm_htlc_before_claim_info: bool, + claim_htlcs_on_commitment: bool, claim_to_self_before_claim_info: bool, +) { + // Tests claim info with a simple test of node A broadcasting a revoked commitment transaction + // with two HTLCs (one inbound, one outbound) and validating that claimable balances and + // eventual claims work correctly at each step of resolution. + // + // `do_splice` results in testing with a pending splice in-flight, `confirm_splice` the same + // but broadcasts a commitment transaction based on that splice rather than cancelling that + // splice implicitly.. + // + // `confirm_htlc_before_claim_info` selects whether we mine A's revoked HTLC claim transactions + // (prior to B receiving the claim info). + // + // `claim_htlcs_on_commitment` tests B claiming HTLC outputs directly on the commitment + // transaction, rather than HTLC transactions (implies + // `!confirm_htlc_before_claim_info` as we can't generate HTLC claims on the commitment until + // we've gotten the claim info). 
+ // + // `claim_to_self_before_claim_info` tests B claiming A's revoked to_self output prior to the + // claim info being provided. This should be somewhat unrealistic as claim info should be + // provided faster than a block comes in, but we do it for completeness. + assert!(!confirm_splice || do_splice, "Can't confirm a splice that didn't happen"); + assert!(!claim_htlcs_on_commitment || !confirm_htlc_before_claim_info); + + let mut chanmon_cfgs = create_chanmon_cfgs(2); + // We broadcast a revoked commitment transaction and then expect the monitor to spend from it, + // so have to disable the test validation that checks that we don't do that. + chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true; + let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + // Enable claim info offloading on node B so that PersistClaimInfo and ClaimInfoRequest + // events are surfaced. + node_cfgs[1].chain_monitor = TestChainMonitor::new( + Some(&chanmon_cfgs[1].chain_source), + &chanmon_cfgs[1].tx_broadcaster, + &chanmon_cfgs[1].logger, + &chanmon_cfgs[1].fee_estimator, + &chanmon_cfgs[1].persister, + &chanmon_cfgs[1].keys_manager, + true, + ); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let coinbase_tx = provide_anchor_reserves(&nodes); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan_value = 1_000_000; + let (_, _, chan_id, ..) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, chan_value, 100_000_000); + + let splice_tx = if do_splice { + let amt = Amount::from_sat(200_000); + let funding_contribution = initiate_splice_in(&nodes[0], &nodes[1], chan_id, amt); + let (tx, ..) = splice_channel(&nodes[0], &nodes[1], chan_id, funding_contribution); + Some(tx) + } else { + None + }; + + route_payment(&nodes[0], &[&nodes[1]], 3_000_000); + let (payment_preimage_ba, payment_hash_ba, ..) 
= + route_payment(&nodes[1], &[&nodes[0]], 4_000_000); + + // Give A's monitor the preimage for the B->A HTLC so that A can later broadcast an + // HTLC-Success transaction for it from the revoked commitment. + get_monitor!(nodes[0], chan_id).provide_payment_preimage_unsafe_legacy( + &payment_hash_ba, + &payment_preimage_ba, + &node_cfgs[0].tx_broadcaster, + &LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), + &nodes[0].logger, + ); + + // Get A's commitment transaction before it's revoked. + let (as_revoked_tx, splice_value) = if confirm_splice { + let stx = splice_tx.as_ref().unwrap(); + mine_transaction(&nodes[0], stx); + mine_transaction(&nodes[1], stx); + + ( + get_monitor!(nodes[0], chan_id) + .unsafe_get_latest_holder_commitment_txn_for_confirmed_scope(&nodes[0].logger), + 200_000, + ) + } else { + (get_local_commitment_txn!(nodes[0], chan_id), 0) + }; + assert_eq!(as_revoked_tx.len(), 1); + assert_eq!(as_revoked_tx[0].output.len(), 6); // Two HTLCs, two anchors, two balances + let revoked_txid = as_revoked_tx[0].compute_txid(); + let spends_revoked = |inp: &&TxIn| inp.previous_output.txid == revoked_txid; + + // Revoke A's old commitment by routing another payment. + let (_, payment_hash_revoke, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + + // Get A's revoked HTLC transactions by mining the revoked commitment on A. + mine_transaction(&nodes[0], &as_revoked_tx[0]); + check_closed_broadcast(&nodes[0], 1, true); + check_added_monitors(&nodes[0], 1); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, &[node_b_id], chan_value); + + // A should immediately get a BumpTransaction event for the inbound HTLC A has the preimage + // for. 
+ let events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let Event::BumpTransaction(bump) = &events[0] { + nodes[0].bump_tx_handler.handle_event(bump); + } else { + panic!("Wrong event {events:?}"); + } + + // After CLTV timeout, A should resolve the outbound HTLC. + connect_blocks(&nodes[0], TEST_FINAL_CLTV); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 3, "{:?}", events); + assert!(events.iter().any(|ev| matches!( + ev, + Event::HTLCHandlingFailed { + failure_type: HTLCHandlingFailureType::Receive { payment_hash }, + .. + } if *payment_hash == payment_hash_ba + ))); + assert!(events.iter().any(|ev| matches!( + ev, + Event::PaymentPathFailed { payment_hash, .. } if *payment_hash == payment_hash_revoke + ))); + assert!(events.iter().any(|ev| matches!( + ev, + Event::PaymentFailed { payment_hash: Some(h), .. } if *h == payment_hash_revoke + ))); + check_added_monitors(&nodes[0], 1); + let events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(events.len(), 2, "Expected two events, one RBF bump one timeout claim"); + for event in events { + if let Event::BumpTransaction(bump) = event { + nodes[0].bump_tx_handler.handle_event(&bump); + } else { + panic!("Wrong event {event:?}"); + } + } + + // Finally, get the actual HTLC transactions + let a_htlc_txn = { + let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + // Deduplicate by the commitment output being spent (there may be RBF replacements + // when the HTLC-Success is re-broadcast after the HTLC-Timeout becomes available). 
+ let mut seen_vouts = new_hash_set(); + txn.into_iter() + .filter(|tx| { + let vout = tx.input.iter().find(spends_revoked).unwrap(); + seen_vouts.insert(vout.previous_output.vout) + }) + .collect::>() + }; + assert_eq!(a_htlc_txn.len(), 2); + for tx in a_htlc_txn.iter() { + if confirm_splice { + check_spends!(tx, as_revoked_tx[0], splice_tx.as_ref().unwrap()); + } else { + check_spends!(tx, as_revoked_tx[0], coinbase_tx); + } + } + + // Call `claim_info_persisted` for each stored claim info to remove it from B's monitor. + { + let persisted = nodes[1].chain_monitor.persisted_claim_infos.lock().unwrap(); + for &(channel_id, ref claim_key) in persisted.keys() { + let chain_monitor = &nodes[1].chain_monitor.chain_monitor; + chain_monitor.claim_info_persisted(channel_id, claim_key.clone()).unwrap(); + } + } + + // Now that we're done with setup, actually broadcast the revoked commitment transaction on + // node B. + mine_transaction(&nodes[1], &as_revoked_tx[0]); + check_closed_broadcast(&nodes[1], 1, true); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, &[node_a_id], chan_value); + + // B immediately claims the to_self (revokeable) output without needing the offloaded + // claim info. + let bs_to_self_claim_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(bs_to_self_claim_tx.len(), 1); + assert_eq!(bs_to_self_claim_tx[0].input.len(), 1); + check_spends!(bs_to_self_claim_tx[0], as_revoked_tx[0]); + + let mut events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + let (claim_key, claim_meta) = if let Event::ClaimInfoRequest { + channel_id, + claim_key, + claim_metadata, + .. 
+ } = events.pop().unwrap() { + assert_eq!(channel_id, chan_id); + (claim_key, claim_metadata) + } else { + panic!("Wrong event {events:?}"); + }; + + // Note that `Event::ChannelClosed`s will miss the additional splice value that was never fully + // locked and thus the splice_value is only relevant when looking at claimable balances. + let chan_value = chan_value + splice_value; + let commit_tx_fee = Amount::from_sat(chan_value) + - as_revoked_tx[0].output.iter().map(|outp| outp.value).sum(); + let anchor_outputs_value = 2 * channel::ANCHOR_OUTPUT_VALUE_SATOSHI; + let bs_to_remote = 96_000; + let as_to_local = + chan_value - bs_to_remote - 3_000 - 4_000 - commit_tx_fee.to_sat() - anchor_outputs_value; + let to_remote_spendable_height = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1; + + let bs_output_bal = Balance::ClaimableAwaitingConfirmations { + amount_satoshis: bs_to_remote, + confirmation_height: to_remote_spendable_height, + source: BalanceSource::CounterpartyForceClosed, + }; + + let mut as_revoked_confirmation_height = nodes[1].best_block_info().1 + ANTI_REORG_DELAY; + let mut as_revoked_bal = if claim_to_self_before_claim_info { + mine_transaction(&nodes[1], &bs_to_self_claim_tx[0]); + Balance::ClaimableAwaitingConfirmations { + amount_satoshis: bs_to_self_claim_tx[0].output[0].value.to_sat(), + confirmation_height: nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1, + source: BalanceSource::CounterpartyForceClosed, + } + } else { + Balance::CounterpartyRevokedOutputClaimable { + amount_satoshis: as_to_local, + } + }; + + assert_eq!( + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[])), + sorted_vec(vec![ + bs_output_bal.clone(), + as_revoked_bal.clone(), + Balance::CounterpartyRevokedOutputClaimableAwaitingClaimInfo { + claim_key: claim_key.clone(), + }, + ]) + ); + + let claim_info = { + let mut infos = nodes[1].chain_monitor.persisted_claim_infos.lock().unwrap(); + infos.remove(&(chan_id, claim_key.clone())).unwrap() + 
}; + + let (first_htlc_claim_bal, first_htlc_claim, second_htlc_claim) = + if confirm_htlc_before_claim_info { + // Mine one of A's HTLC transactions on B before providing claim info. Even without the + // claim info, B can claim the revoked HTLC outputs + mine_transaction(&nodes[1], &a_htlc_txn[0]); + + let mut first_claim = + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(first_claim.len(), 1); + check_spends!(first_claim[0], a_htlc_txn[0], coinbase_tx); + + mine_transaction(&nodes[1], &a_htlc_txn[1]); + let mut second_claim = + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(second_claim.len(), 1); + check_spends!(second_claim[0], a_htlc_txn[1], coinbase_tx); + + assert_eq!( + sorted_vec(vec![ + bs_output_bal.clone(), + as_revoked_bal.clone(), + Balance::CounterpartyRevokedOutputClaimableAwaitingClaimInfo { + claim_key: claim_key.clone(), + }, + ]), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[])) + ); + + // Mine one of the two claims before we even provide the claim info + mine_transaction(&nodes[1], &first_claim[0]); + let first_htlc_claim_bal = Balance::ClaimableAwaitingConfirmations { + amount_satoshis: first_claim[0].output[0].value.to_sat(), + confirmation_height: nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1, + source: BalanceSource::CounterpartyForceClosed, + }; + assert_eq!( + sorted_vec(vec![ + bs_output_bal.clone(), + as_revoked_bal.clone(), + first_htlc_claim_bal.clone(), + Balance::CounterpartyRevokedOutputClaimableAwaitingClaimInfo { + claim_key: claim_key.clone(), + }, + ]), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[])) + ); + + // Now provide the claim info, which shouldn't broadcast any new transactions but will + // result in replaying the HTLC transactions, allowing B to discover an HTLC preimage. 
+ let chain_monitor = &nodes[1].chain_monitor.chain_monitor; + chain_monitor.provide_claim_info(chan_id, claim_key, claim_meta, claim_info).unwrap(); + expect_payment_sent!(&nodes[1], payment_preimage_ba); + assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty()); + + // With the claim info in place, B can now generate full claimable balances. + assert_eq!( + sorted_vec(vec![ + bs_output_bal.clone(), + as_revoked_bal.clone(), + first_htlc_claim_bal.clone(), + Balance::CounterpartyRevokedOutputClaimable { + amount_satoshis: 3_000, + }, + ]), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[])) + ); + + (first_htlc_claim_bal, first_claim.pop().unwrap(), second_claim.pop().unwrap()) + } else { + // Provide claim info immediately and then test HTLC claiming. + let chain_monitor = &nodes[1].chain_monitor.chain_monitor; + chain_monitor.provide_claim_info(chan_id, claim_key, claim_meta, claim_info).unwrap(); + + let bs_balances_before_htlcs = sorted_vec(vec![ + bs_output_bal.clone(), + as_revoked_bal.clone(), + Balance::CounterpartyRevokedOutputClaimable { + amount_satoshis: 3_000, + }, + Balance::CounterpartyRevokedOutputClaimable { + amount_satoshis: 4_000, + }, + ]); + + assert_eq!( + bs_balances_before_htlcs, + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[])) + ); + + let mut claim_txn = + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(claim_txn.len(), 2); + assert_eq!(claim_txn[0].input.len(), 1); + assert_eq!(claim_txn[1].input.len(), 1); + assert_ne!(claim_txn[0].input[0].previous_output, claim_txn[1].input[0].previous_output); + assert_ne!(claim_txn[0].input[0].previous_output, bs_to_self_claim_tx[0].input[0].previous_output); + assert_ne!(claim_txn[1].input[0].previous_output, bs_to_self_claim_tx[0].input[0].previous_output); + for tx in &claim_txn { + check_spends!(tx, as_revoked_tx[0]); + } + + let (first_htlc_claim, second_htlc_claim) = if 
claim_htlcs_on_commitment { + (claim_txn.pop().unwrap(), claim_txn.pop().unwrap()) + } else { + mine_transaction(&nodes[1], &a_htlc_txn[0]); + let mut htlc_claim_txn = + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(htlc_claim_txn.len(), 1); + check_spends!(htlc_claim_txn[0], a_htlc_txn[0], coinbase_tx); + + assert_eq!( + bs_balances_before_htlcs, + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[])) + ); + + mine_transaction(&nodes[1], &a_htlc_txn[1]); + let mut second_htlc_claim_txn = + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(second_htlc_claim_txn.len(), 1); + check_spends!(second_htlc_claim_txn[0], a_htlc_txn[1], coinbase_tx); + assert_eq!( + bs_balances_before_htlcs, + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[])) + ); + + // B generates a PaymentSent for the HTLC after learning the preimage. + expect_payment_sent!(&nodes[1], payment_preimage_ba); + + (htlc_claim_txn.pop().unwrap(), second_htlc_claim_txn.pop().unwrap()) + }; + + mine_transaction(&nodes[1], &first_htlc_claim); + let first_htlc_claim_bal = Balance::ClaimableAwaitingConfirmations { + amount_satoshis: first_htlc_claim.output[0].value.to_sat(), + confirmation_height: nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1, + source: BalanceSource::CounterpartyForceClosed, + }; + assert_eq!( + sorted_vec(vec![ + bs_output_bal.clone(), + as_revoked_bal.clone(), + first_htlc_claim_bal.clone(), + Balance::CounterpartyRevokedOutputClaimable { + amount_satoshis: 3_000, + }, + ]), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[])) + ); + + (first_htlc_claim_bal, first_htlc_claim, second_htlc_claim) + }; + + let first_htlc_claim_conf_height = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1; + + let mut bs_output_bal_opt = Some(bs_output_bal); + macro_rules! 
check_bs_output_spendable { + () => { + if bs_output_bal_opt.is_some() + && nodes[1].best_block_info().1 >= to_remote_spendable_height + { + let mut events = + nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); + if do_splice && !confirm_splice { + assert_eq!(events.len(), 2); + if let Event::DiscardFunding { .. } = &events[0] { + // Remove the watched splice tx which would otherwise cause an assertion + // failure after test completion due to mismatched Filter loads on reload. + let stx = splice_tx.as_ref().unwrap(); + nodes[1].chain_source.remove_watched_txn_and_outputs( + OutPoint { txid: stx.compute_txid(), index: 0 }, + stx.output[0].script_pubkey.clone(), + ); + } else { + panic!("Wrong event {events:?}"); + } + events.remove(0); + } + assert_eq!(events.len(), 1); + test_spendable_output_event(&nodes[1], &as_revoked_tx[0], events.pop().unwrap()); + bs_output_bal_opt = None; + } + } + } + + mine_transaction(&nodes[1], &second_htlc_claim); + let second_htlc_claim_bal = Balance::ClaimableAwaitingConfirmations { + amount_satoshis: second_htlc_claim.output[0].value.to_sat(), + confirmation_height: nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1, + source: BalanceSource::CounterpartyForceClosed, + }; + check_bs_output_spendable!(); + assert_eq!( + sorted_vec([ + as_revoked_bal.clone(), + first_htlc_claim_bal.clone(), + second_htlc_claim_bal.clone(), + ].into_iter().chain(bs_output_bal_opt.clone()).collect()), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[])) + ); + + if !claim_to_self_before_claim_info { + mine_transaction(&nodes[1], &bs_to_self_claim_tx[0]); + as_revoked_confirmation_height = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1; + as_revoked_bal = Balance::ClaimableAwaitingConfirmations { + amount_satoshis: bs_to_self_claim_tx[0].output[0].value.to_sat(), + confirmation_height: nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1, + source: BalanceSource::CounterpartyForceClosed, + }; + 
check_bs_output_spendable!(); + assert_eq!( + sorted_vec([ + as_revoked_bal.clone(), + first_htlc_claim_bal.clone(), + second_htlc_claim_bal.clone(), + ].into_iter().chain(bs_output_bal_opt.clone()).collect()), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[])) + ); + } + + while bs_output_bal_opt.is_some() { + connect_blocks(&nodes[1], 1); + check_bs_output_spendable!(); + } + assert_eq!( + sorted_vec(vec![ + as_revoked_bal.clone(), + first_htlc_claim_bal.clone(), + second_htlc_claim_bal.clone(), + ]), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[])) + ); + + if first_htlc_claim_conf_height < as_revoked_confirmation_height { + connect_blocks(&nodes[1], first_htlc_claim_conf_height - nodes[1].best_block_info().1); + test_spendable_output(&nodes[1], &first_htlc_claim, false); + assert_eq!( + sorted_vec(vec![ + as_revoked_bal.clone(), + second_htlc_claim_bal, + ]), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[])) + ); + if claim_htlcs_on_commitment { + expect_payment_failed!(nodes[1], payment_hash_ba, false); + check_added_monitors(&nodes[1], 1); + } + + connect_blocks(&nodes[1], 1); + test_spendable_output(&nodes[1], &second_htlc_claim, false); + assert_eq!( + vec![as_revoked_bal], + nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[]) + ); + + connect_blocks(&nodes[1], as_revoked_confirmation_height - nodes[1].best_block_info().1); + test_spendable_output(&nodes[1], &bs_to_self_claim_tx[0], false); + assert!(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[]).is_empty()); + } else { + connect_blocks(&nodes[1], as_revoked_confirmation_height - nodes[1].best_block_info().1); + test_spendable_output(&nodes[1], &bs_to_self_claim_tx[0], false); + assert_eq!( + sorted_vec(vec![ + first_htlc_claim_bal, + second_htlc_claim_bal.clone(), + ]), + sorted_vec(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[])) + ); + + connect_blocks(&nodes[1], 
first_htlc_claim_conf_height - nodes[1].best_block_info().1); + test_spendable_output(&nodes[1], &first_htlc_claim, false); + assert_eq!( + vec![second_htlc_claim_bal], + nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[]) + ); + if claim_htlcs_on_commitment { + expect_payment_failed!(nodes[1], payment_hash_ba, false); + check_added_monitors(&nodes[1], 1); + } + + connect_blocks(&nodes[1], 1); + test_spendable_output(&nodes[1], &second_htlc_claim, false); + assert!(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[]).is_empty()); + } +} diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index bb730f8fba8..bd127b31adc 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -405,7 +405,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { fee_estimator = test_utils::TestFeeEstimator::new(253); persister = test_utils::TestPersister::new(); let keys_manager = &chanmon_cfgs[0].keys_manager; - new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager); + new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager, false); nodes[0].chain_monitor = &new_chain_monitor; diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index f27ccc1cbac..a4dca95291e 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -1831,6 +1831,7 @@ mod tests { &chanmon_cfgs[0].fee_estimator, &persister_0, &chanmon_cfgs[0].keys_manager, + false, ); let chain_mon_1 = test_utils::TestChainMonitor::new( Some(&chanmon_cfgs[1].chain_source), @@ -1839,6 +1840,7 @@ mod tests { &chanmon_cfgs[1].fee_estimator, &persister_1, &chanmon_cfgs[1].keys_manager, + false, ); node_cfgs[0].chain_monitor = chain_mon_0; node_cfgs[1].chain_monitor = chain_mon_1; @@ -2072,6 +2074,7 
@@ mod tests { &chanmon_cfgs[0].fee_estimator, &persister_0, &chanmon_cfgs[0].keys_manager, + false, ); let chain_mon_1 = test_utils::TestChainMonitor::new( Some(&chanmon_cfgs[1].chain_source), @@ -2080,6 +2083,7 @@ mod tests { &chanmon_cfgs[1].fee_estimator, &persister_1, &chanmon_cfgs[1].keys_manager, + false, ); node_cfgs[0].chain_monitor = chain_mon_0; node_cfgs[1].chain_monitor = chain_mon_1; diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index 2eace55a4bf..f4753b75e8b 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -27,6 +27,7 @@ use alloc::collections::BTreeMap; use bitcoin::absolute::LockTime as AbsoluteLockTime; use bitcoin::amount::{Amount, SignedAmount}; +use bitcoin::block::Header; use bitcoin::consensus::Encodable; use bitcoin::constants::ChainHash; use bitcoin::hash_types::{BlockHash, Txid}; @@ -1098,6 +1099,7 @@ impl_for_vec!(crate::ln::channelmanager::MonitorUpdateCompletionAction); impl_for_vec!(crate::ln::channelmanager::PaymentClaimDetails); impl_for_vec!(crate::ln::msgs::SocketAddress); impl_for_vec!((A, B), A, B); +impl_for_vec!((A, B, C), A, B, C); impl_for_vec!(SerialId); impl_for_vec!(TxInMetadata); impl_for_vec!(TxOutMetadata); @@ -1532,6 +1534,7 @@ impl_consensus_ser!(TxIn); impl_consensus_ser!(TxOut); impl_consensus_ser!(Witness); impl_consensus_ser!(Sequence); +impl_consensus_ser!(Header); impl Readable for Mutex { fn read(r: &mut R) -> Result { diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 22be4367c7a..88df94bf975 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -17,7 +17,8 @@ use crate::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW; use crate::chain::chaininterface::{ConfirmationTarget, TransactionType}; use crate::chain::chainmonitor::{ChainMonitor, Persist}; use crate::chain::channelmonitor::{ - ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, MonitorEvent, + ChannelMonitor, 
ChannelMonitorUpdate, ChannelMonitorUpdateStep, ClaimInfo, ClaimKey, + MonitorEvent, }; use crate::chain::transaction::OutPoint; use crate::chain::WatchedOutput; @@ -495,7 +496,8 @@ impl SyncBroadcaster for T {} impl> SyncPersist for T {} pub struct TestChainMonitor<'a> { - pub added_monitors: Mutex)>>, + pub added_monitors: + Mutex, Option)>>, pub monitor_updates: Mutex>>, pub latest_monitor_update_id: Mutex>, pub chain_monitor: ChainMonitor< @@ -507,6 +509,7 @@ pub struct TestChainMonitor<'a> { &'a dyn SyncPersist, &'a TestKeysInterface, >, + pub persisted_claim_infos: Mutex>, pub keys_manager: &'a TestKeysInterface, /// If this is set to Some(), the next update_channel call (not watch_channel) must be a /// ChannelForceClosed event for the given channel_id with should_broadcast set to the given @@ -523,6 +526,7 @@ impl<'a> TestChainMonitor<'a> { chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn SyncBroadcaster, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn SyncPersist, keys_manager: &'a TestKeysInterface, + offload_claim_info: bool, ) -> Self { Self { added_monitors: Mutex::new(Vec::new()), @@ -536,7 +540,9 @@ impl<'a> TestChainMonitor<'a> { persister, keys_manager, keys_manager.get_peer_storage_key(), + offload_claim_info, ), + persisted_claim_infos: Mutex::new(new_hash_map()), keys_manager, expect_channel_force_closed: Mutex::new(None), expect_monitor_round_trip_fail: Mutex::new(None), @@ -577,7 +583,7 @@ impl<'a> TestChainMonitor<'a> { .lock() .unwrap() .insert(channel_id, (monitor.get_latest_update_id(), monitor.get_latest_update_id())); - self.added_monitors.lock().unwrap().push((channel_id, monitor)); + self.added_monitors.lock().unwrap().push((channel_id, monitor, None)); self.chain_monitor.load_existing_monitor(channel_id, new_monitor) } @@ -611,7 +617,7 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { .lock() .unwrap() .insert(channel_id, (monitor.get_latest_update_id(), 
monitor.get_latest_update_id())); - self.added_monitors.lock().unwrap().push((channel_id, monitor)); + self.added_monitors.lock().unwrap().push((channel_id, monitor, None)); self.chain_monitor.watch_channel(channel_id, new_monitor) } @@ -666,9 +672,12 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { assert_eq!(chan_id, channel_id); assert!(new_monitor != *monitor); } else { - assert!(new_monitor == *monitor); + let expected_monitor = monitor.clone(); + // Compare without [`Event::PersistClaimInfo`] events since we don't persist them. + expected_monitor.get_and_clear_claim_info_events(); + assert!(new_monitor == expected_monitor); } - self.added_monitors.lock().unwrap().push((channel_id, new_monitor)); + self.added_monitors.lock().unwrap().push((channel_id, new_monitor, Some(update.clone()))); update_res }