lightning_signer/chain/
tracker.rs

1use alloc::collections::VecDeque;
2use alloc::sync::Arc;
3use bitcoin::absolute::LockTime;
4use core::cell::RefCell;
5use core::mem;
6#[cfg(feature = "timeless_workaround")]
7use core::time::Duration;
8
9use bitcoin::blockdata::block::Header as BlockHeader;
10use bitcoin::blockdata::constants::{genesis_block, DIFFCHANGE_INTERVAL};
11use bitcoin::consensus::{Decodable, Encodable};
12use bitcoin::hash_types::FilterHeader;
13use bitcoin::hashes::Hash;
14use bitcoin::secp256k1::PublicKey;
15use bitcoin::{BlockHash, Network, OutPoint, Target, Transaction, TxIn, TxOut, Txid};
16use vls_common::HexEncode;
17
18use crate::policy::validator::ValidatorFactory;
19#[allow(unused_imports)]
20use log::{debug, error, info, warn};
21use push_decoder::{BlockDecoder, Listener as PushListener};
22use serde_derive::{Deserialize, Serialize};
23use serde_with::{serde_as, IfIsHumanReadable};
24use txoo::get_latest_checkpoint;
25use txoo::proof::{ProofType, TxoProof};
26
27use crate::prelude::*;
28use crate::short_function;
29use crate::util::ser_util::{OutPointReversedDef, TxIdReversedDef};
30
/// Errors returned by [`ChainTracker`] operations
#[derive(Debug, PartialEq)]
pub enum Error {
    /// Chain progression is invalid (e.g. invalid difficulty change)
    InvalidChain,
    /// Previous blockhash of new block header doesn't match the current tip.
    /// The payload is a human-readable description of the mismatch.
    OrphanBlock(String),
    /// Block is invalid (e.g. block hash not under target)
    InvalidBlock,
    /// Block cannot be parsed
    BlockDecodeError,
    /// Reorg size greater than [`ChainTracker::MAX_REORG_SIZE`]
    ReorgTooDeep,
    /// The TXOO proof was incorrect
    InvalidProof,
}
47
// Log an InvalidChain error with a formatted message and evaluate to the error value.
macro_rules! error_invalid_chain {
    ($($arg:tt)*) => {{
        error!("InvalidChain: {}", format!($($arg)*));
        Error::InvalidChain
    }};
}

// Log an OrphanBlock warning and evaluate to the error value, which carries the
// formatted message as its payload.
macro_rules! error_orphan_block {
    ($($arg:tt)*) => {{
        let message = format!($($arg)*);
        warn!("OrphanBlock: {}", message);
        Error::OrphanBlock(message)
    }};
}

// Log an InvalidBlock error with a formatted message and evaluate to the error value.
macro_rules! error_invalid_block {
    ($($arg:tt)*) => {{
        error!("InvalidBlock: {}", format!($($arg)*));
        Error::InvalidBlock
    }};
}

// Log an InvalidProof error with a formatted message and evaluate to the error value.
macro_rules! error_invalid_proof {
    ($($arg:tt)*) => {{
        error!("InvalidProof: {}", format!($($arg)*));
        Error::InvalidProof
    }};
}
76
/// A listener entry
///
/// Per-listener watch state: the txids and outpoints the listener wants to
/// observe, plus the outpoints already observed as spent (kept so the
/// forward changes can be reverted on reorg).
#[serde_as]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ListenSlot {
    /// watched transactions to be confirmed
    // human-readable serialization goes through the reversed-hex defs from ser_util
    #[serde_as(as = "IfIsHumanReadable<OrderedSet<TxIdReversedDef>>")]
    pub txid_watches: OrderedSet<Txid>,
    /// watched outpoints to be spent
    #[serde_as(as = "IfIsHumanReadable<OrderedSet<OutPointReversedDef>>")]
    pub watches: OrderedSet<OutPoint>,
    /// outpoints we have already seen
    #[serde_as(as = "IfIsHumanReadable<OrderedSet<OutPointReversedDef>>")]
    pub seen: OrderedSet<OutPoint>,
}
91
/// Block headers, including the usual bitcoin block header
/// and the filter header
///
/// Field 0 is the bitcoin block header, field 1 the corresponding filter header.
#[derive(Clone)]
pub struct Headers(pub BlockHeader, pub FilterHeader);
96
97impl Encodable for Headers {
98    fn consensus_encode<S: crate::io::Write + ?Sized>(
99        &self,
100        s: &mut S,
101    ) -> Result<usize, crate::io::Error> {
102        let mut len = 0;
103        len += self.0.consensus_encode(s)?;
104        len += self.1.consensus_encode(s)?;
105        Ok(len)
106    }
107}
108
109impl Decodable for Headers {
110    fn consensus_decode<D: crate::io::Read + ?Sized>(
111        d: &mut D,
112    ) -> Result<Self, bitcoin::consensus::encode::Error> {
113        let header = BlockHeader::consensus_decode(d)?;
114        let filter_header = FilterHeader::consensus_decode(d)?;
115        Ok(Headers(header, filter_header))
116    }
117}
118
// the decode state while we are receiving a block
struct BlockDecodeState {
    // incremental push decoder fed chunk-by-chunk in `block_chunk`
    decoder: BlockDecoder,
    // byte offset of the next expected chunk; chunks must arrive contiguously
    offset: u32,
    // hash of the block being streamed; all chunks must carry this hash
    block_hash: BlockHash,
}

impl BlockDecodeState {
    // Begin decoding a new block identified by `block_hash`
    fn new(block_hash: BlockHash) -> Self {
        BlockDecodeState { decoder: BlockDecoder::new(), offset: 0, block_hash }
    }
}
131
/// Track chain, with basic validation
pub struct ChainTracker<L: ChainListener> {
    /// headers past the tip
    /// (ancestors of the tip, most recent first — `headers[0]` is the tip's parent)
    pub headers: VecDeque<Headers>,
    /// tip header
    pub tip: Headers,
    /// height
    pub height: u32,
    /// The network
    pub network: Network,
    /// listeners
    pub listeners: OrderedMap<L::Key, (L, ListenSlot)>,
    // our node's id, passed to the validator factory
    node_id: PublicKey,
    // supplies the validator used to check TXOO proofs in `validate_block`
    validator_factory: Arc<dyn ValidatorFactory>,
    // Block decoder, only while streaming a block is in progress
    decode_state: Option<RefCell<BlockDecodeState>>,
    /// public keys of trusted TXO oracle
    pub trusted_oracle_pubkeys: Vec<PublicKey>,
    // if true, reorgs deeper than our retained headers are tolerated
    // (intended for test networks — see `remove_block`)
    allow_deep_reorgs: bool,
}
152
153impl<L: ChainListener> ChainTracker<L> {
    // # issue #187
    #[cfg(feature = "tracker_size_workaround")]
    /// Maximum reorg size that we will accept
    // the tracker retains MAX_REORG_SIZE - 1 ancestor headers (see `add_block`)
    pub const MAX_REORG_SIZE: usize = 16;
    #[cfg(not(feature = "tracker_size_workaround"))]
    /// Maximum reorg size that we will accept
    pub const MAX_REORG_SIZE: usize = 100;
161
162    /// Create a new tracker
163    pub fn new(
164        network: Network,
165        height: u32,
166        tip: Headers,
167        node_id: PublicKey,
168        validator_factory: Arc<dyn ValidatorFactory>,
169        trusted_oracle_pubkeys: Vec<PublicKey>,
170    ) -> Result<Self, Error> {
171        let header = tip.0;
172        header
173            .validate_pow(header.target())
174            .map_err(|e| error_invalid_block!("validate pow {}: {}", header.target(), e))?;
175        let headers = VecDeque::new();
176        let listeners = OrderedMap::new();
177        Ok(ChainTracker {
178            headers,
179            tip,
180            height,
181            network,
182            listeners,
183            node_id,
184            validator_factory,
185            decode_state: None,
186            trusted_oracle_pubkeys,
187            allow_deep_reorgs: false,
188        })
189    }
190
    /// Set whether deep reorgs are allowed
    ///
    /// When enabled, `remove_block` proceeds even when the reorg is deeper
    /// than our retained headers (see [`ChainTracker::MAX_REORG_SIZE`]).
    pub fn set_allow_deep_reorgs(&mut self, allow: bool) {
        self.allow_deep_reorgs = allow;
    }
195
    /// Restore a tracker
    ///
    /// Reconstructs the tracker from persisted state.  Unlike `new`, no
    /// validation is performed here; any in-progress block streaming state is
    /// discarded (`decode_state` starts as `None`) and deep reorgs are
    /// disallowed until `set_allow_deep_reorgs` is called.
    pub fn restore(
        headers: VecDeque<Headers>,
        tip: Headers,
        height: u32,
        network: Network,
        listeners: OrderedMap<L::Key, (L, ListenSlot)>,
        node_id: PublicKey,
        validator_factory: Arc<dyn ValidatorFactory>,
        trusted_oracle_pubkeys: Vec<PublicKey>,
    ) -> Self {
        ChainTracker {
            headers,
            tip,
            height,
            network,
            listeners,
            node_id,
            validator_factory,
            decode_state: None,
            trusted_oracle_pubkeys,
            allow_deep_reorgs: false,
        }
    }
220
221    /// Create a tracker at genesis
222    pub fn from_genesis(
223        network: Network,
224        node_id: PublicKey,
225        validator_factory: Arc<dyn ValidatorFactory>,
226        trusted_oracle_pubkeys: Vec<PublicKey>,
227    ) -> Self {
228        let height = 0;
229        let genesis = genesis_block(network);
230        let filter_header = FilterHeader::all_zeros();
231        Self::from_checkpoint(
232            network,
233            node_id,
234            validator_factory,
235            &genesis.header,
236            &filter_header,
237            height,
238            trusted_oracle_pubkeys,
239        )
240    }
241
242    fn from_checkpoint(
243        network: Network,
244        node_id: PublicKey,
245        validator_factory: Arc<dyn ValidatorFactory>,
246        header: &BlockHeader,
247        filter_header: &FilterHeader,
248        height: u32,
249        trusted_oracle_pubkeys: Vec<PublicKey>,
250    ) -> Self {
251        let tip = Headers(*header, *filter_header);
252
253        Self::new(network, height, tip, node_id, validator_factory, trusted_oracle_pubkeys)
254            .expect("genesis block / checkpoint is expected to be valid")
255    }
256
257    /// Create a tracker for a network, from a checkpoint if exists or from genesis otherwise
258    pub fn for_network(
259        network: Network,
260        node_id: PublicKey,
261        validator_factory: Arc<dyn ValidatorFactory>,
262        trusted_oracle_pubkeys: Vec<PublicKey>,
263    ) -> Self {
264        if let Some((height, _hash, filter_header, header)) = get_latest_checkpoint(network) {
265            Self::from_checkpoint(
266                network,
267                node_id,
268                validator_factory,
269                &header,
270                &filter_header,
271                height,
272                trusted_oracle_pubkeys,
273            )
274        } else {
275            Self::from_genesis(network, node_id, validator_factory, trusted_oracle_pubkeys)
276        }
277    }
278
    /// Current chain tip header
    pub fn tip(&self) -> &Headers {
        &self.tip
    }

    /// Headers past the tip
    /// (ancestors of the tip, most recent first — `headers[0]` is the tip's parent)
    pub fn headers(&self) -> &VecDeque<Headers> {
        &self.headers
    }

    /// Height of current chain tip
    pub fn height(&self) -> u32 {
        self.height
    }
293
294    #[cfg(feature = "timeless_workaround")]
295    // WORKAROUND for #206, #339, #235 - If our implementation has no clock use the
296    // latest BlockHeader timestamp.
297    /// Header timestamp of current chain tip
298    pub fn tip_time(&self) -> Duration {
299        Duration::from_secs(if self.headers.is_empty() { 0 } else { self.headers[0].0.time as u64 })
300    }
301
302    /// Remove block at tip due to reorg
303    ///
304    /// The previous block and filter headers are provided in case the reorg
305    /// is too deep for our local memory of headers.  However, this should
306    /// only be used on testnet, since a deep reorg may be incompatible
307    /// with the Lightning security model.
308    pub fn remove_block(
309        &mut self,
310        proof: TxoProof,
311        supplied_prev_headers: Headers,
312    ) -> Result<BlockHeader, Error> {
313        // there are four block hashes in play here:
314        // - the block hash in the BlockChunk messages
315        // - our idea of the tip's block hash (`tip_block_hash`)
316        // - the hash of the block that was actually streamed (`external_block_hash`)
317        // - the hash in the proof
318        //
319        // we need to make sure they are all the same:
320        // - `maybe_finish_decoding_block` here checks 1 vs 2
321        // - `ChainTrackerPushListener` checks 1 vs 3
322        // - `validate_block` checks 2 vs 4
323
324        if self.headers.is_empty() {
325            if self.allow_deep_reorgs {
326                warn!("reorg too deep, but allowed by flag");
327            } else {
328                return Err(Error::ReorgTooDeep);
329            }
330        }
331
332        // If we have headers (i.e. not a deep reorg), check the prev header
333        // matches what we were given as an argument and pop it off.  Otherwise,
334        // we assume the prev header is correct and use it.
335        if !self.headers.is_empty() {
336            if supplied_prev_headers.0 != self.headers[0].0 {
337                return Err(error_invalid_chain!(
338                    "supplied prev block header {:?} != self.headers {:?}",
339                    supplied_prev_headers.0,
340                    self.headers[0].0
341                ));
342            }
343            if supplied_prev_headers.1 != self.headers[0].1 {
344                return Err(error_invalid_chain!(
345                    "supplied prev filter header {} != self.headers[0].1 {} for prev block hash {}",
346                    supplied_prev_headers.1.to_string(),
347                    self.headers[0].1.to_string(),
348                    supplied_prev_headers.0.block_hash().to_string()
349                ));
350            }
351            self.headers.pop_front();
352        };
353
354        let mut prev_headers = supplied_prev_headers;
355
356        let tip_block_hash = prev_headers.0.block_hash();
357        self.maybe_finish_decoding_block(&proof, &tip_block_hash)?;
358
359        // we assume here that the external block hash and the tip block hash are the same
360        // this is actually validated below in notify_listeners_remove
361        let expected_external_block_hash =
362            if proof.proof.is_external() { Some(&tip_block_hash) } else { None };
363        self.validate_block(
364            self.height - 1,
365            expected_external_block_hash,
366            &prev_headers,
367            &self.tip,
368            &proof,
369            true,
370        )?;
371        match proof.proof {
372            ProofType::Filter(_, spv_proof) =>
373                self.notify_listeners_remove(Some(spv_proof.txs.as_slice()), tip_block_hash),
374            ProofType::Block(b) =>
375                return Err(error_invalid_proof!(
376                    "non-streamed block not supported {}",
377                    b.block_hash()
378                )),
379            ProofType::ExternalBlock() => self.notify_listeners_remove(None, tip_block_hash),
380        };
381
382        info!("removed block {}: {}", self.height, &self.tip.0.block_hash());
383        mem::swap(&mut self.tip, &mut prev_headers);
384        self.height -= 1;
385        Ok(prev_headers.0)
386    }
387
    /// Restore a listener
    ///
    /// Re-inserts a listener with its persisted watch state; replaces any
    /// existing entry for `outpoint`.
    pub fn restore_listener(&mut self, outpoint: L::Key, listener: L, slot: ListenSlot) {
        self.listeners.insert(outpoint, (listener, slot));
    }
392
    // Notify listeners of a block remove.
    // If txs is None, this is a streamed block, and the transactions were already
    // provided as push events.
    //
    // Reverts the watch-set changes that `notify_listeners_add` made for this
    // block: the listener reports the same (adds, removes) pair and we undo it.
    fn notify_listeners_remove(&mut self, txs: Option<&[Transaction]>, block_hash: BlockHash) {
        for (listener, slot) in self.listeners.values_mut() {
            let (adds, removes) = if let Some(txs) = txs {
                listener.on_remove_block(txs, &block_hash)
            } else {
                listener.on_remove_streamed_block_end(&block_hash)
            };

            debug!("{}: REVERT adding {:?}, removing {:?}", short_function!(), adds, removes);

            // these are going to be re-added to the watches,
            // so we need to remove them from the seen set
            for outpoint in removes.iter() {
                slot.seen.remove(outpoint);
            }

            // revert what we did to the watches in the forward direction
            slot.watches.extend(removes);
            // remove after adding, in case there were intra-block spends
            for outpoint in adds.iter() {
                slot.watches.remove(outpoint);
            }
        }
    }
420
    /// Handle a streamed block
    ///
    /// Chunks must be delivered in order, starting at `offset` 0, and every
    /// chunk of a block must carry the same block `hash`.  Panics if this
    /// sequencing contract is violated or the chunk fails to decode.
    pub fn block_chunk(&mut self, hash: BlockHash, offset: u32, chunk: &[u8]) -> Result<(), Error> {
        // offset 0 starts a fresh decode; any previous decode must have completed
        if offset == 0 {
            assert!(self.decode_state.is_none(), "already decoding, and got chunk at offset 0");
            self.decode_state = Some(RefCell::new(BlockDecodeState::new(hash)));
        }

        // we jump through some hoops here to prevent the borrow checker from complaining
        if let Some(decode_state_cell) = self.decode_state.as_ref() {
            let mut decode_state = decode_state_cell.borrow_mut();
            // every chunk must reference the block we started decoding ...
            assert_eq!(
                decode_state.block_hash, hash,
                "got chunk for wrong block {} != {}",
                hash, decode_state.block_hash
            );
            // ... and must arrive contiguously
            assert_eq!(
                decode_state.offset, offset,
                "got chunk for wrong offset {} != {}",
                offset, decode_state.offset
            );
            let decoder = &mut decode_state.decoder;
            // fan decoded block elements out to all listeners as push events
            let mut listener = ChainTrackerPushListener(self, hash);
            decoder.decode_next(chunk, &mut listener).expect("decode failure");
            decode_state.offset += chunk.len() as u32;
        } else {
            panic!("got chunk at offset {} without decoder", offset);
        }
        Ok(())
    }
450
    /// Add a block, which becomes the new tip
    ///
    /// Validates chaining, proof-of-work and the TXOO proof against the
    /// current watch set, notifies listeners, then advances the tip.
    pub fn add_block(&mut self, header: BlockHeader, proof: TxoProof) -> Result<(), Error> {
        // there are four block hashes in play here:
        // - the block hash in the BlockChunk messages
        // - the block hash of the AddBlock message's header (`message_block_hash`)
        // - the hash of the block that was actually streamed (`external_block_hash`)
        // - the hash in the proof
        //
        // we need to make sure they are all the same:
        // - `maybe_finish_decoding_block` here checks 1 vs 2
        // - `ChainTrackerPushListener` checks 1 vs 3
        // - `validate_block` checks 2 vs 4

        let message_block_hash = header.block_hash();
        self.maybe_finish_decoding_block(&proof, &message_block_hash)?;

        let filter_header = proof.filter_header();
        let headers = Headers(header, filter_header);

        // we assume here that the external block hash and the message block hash are the same
        // this is actually validated below in notify_listeners_remove
        let expected_external_block_hash =
            if proof.proof.is_external() { Some(&message_block_hash) } else { None };
        self.validate_block(
            self.height,
            expected_external_block_hash,
            &self.tip,
            &headers,
            &proof,
            false,
        )?;
        match proof.proof {
            ProofType::Filter(_, spv_proof) =>
                self.notify_listeners_add(Some(spv_proof.txs.as_slice()), message_block_hash),
            ProofType::Block(b) =>
                return Err(error_invalid_proof!(
                    "non-streamed block not supported {}",
                    b.block_hash()
                )),
            ProofType::ExternalBlock() => self.notify_listeners_add(None, message_block_hash),
        };

        // retain at most MAX_REORG_SIZE - 1 ancestors after pushing the old tip
        self.headers.truncate(Self::MAX_REORG_SIZE - 1);
        self.headers.push_front(self.tip.clone());
        self.tip = Headers(header, filter_header);
        self.height += 1;
        info!("added block {}: {}", self.height, &self.tip.0.block_hash());
        Ok(())
    }
500
    // if we're decoding a block, tell the decoder we are done.
    // will panic if the proof is external and we are not decoding or vice versa.
    //
    // Consumes the decode state, so a subsequent streamed block can start at
    // offset 0.  Fails with BlockDecodeError if the decoder did not consume a
    // complete block or the streamed block's hash differs from
    // `expected_block_hash`.
    fn maybe_finish_decoding_block(
        &mut self,
        proof: &TxoProof,
        expected_block_hash: &BlockHash,
    ) -> Result<(), Error> {
        // an external proof implies the block was streamed, and vice versa
        assert_eq!(
            proof.proof.is_external(),
            self.decode_state.is_some(),
            "is_external != decode_state"
        );
        if let Some(decode_state_cell) = self.decode_state.take() {
            let decode_state = decode_state_cell.into_inner();
            decode_state.decoder.finish().map_err(|e| {
                error!("block decode error: {:?}", e);
                Error::BlockDecodeError
            })?;
            if decode_state.block_hash != *expected_block_hash {
                error!(
                    "Block hash mismatch: expected {}, decoded {}",
                    expected_block_hash, decode_state.block_hash
                );
                return Err(Error::BlockDecodeError);
            }
        }
        Ok(())
    }
529
    // Notify listeners of a block add.
    // If txs is None, this is a streamed block, and the transactions were already
    // provided as push events.
    //
    // Applies each listener's reported (adds, removes) to its watch slot;
    // removed outpoints are remembered in `seen` so reorgs can be reverted.
    fn notify_listeners_add(&mut self, txs: Option<&[Transaction]>, block_hash: BlockHash) {
        for (listener, slot) in self.listeners.values_mut() {
            let (adds, removes) = if let Some(txs) = txs {
                listener.on_add_block(txs, &block_hash)
            } else {
                listener.on_add_streamed_block_end(&block_hash)
            };
            debug!("{}: adding {:?}, removing {:?}", short_function!(), adds, removes);

            slot.watches.extend(adds);
            // remove after adding, in case there were intra-block spends
            for outpoint in removes.iter() {
                slot.watches.remove(outpoint);
            }

            // keep track of what we removed, so we can watch reorgs for it
            slot.seen.extend(removes);
        }
    }
552
553    /// Add a listener and initialize the watched outpoint set
554    pub fn add_listener(&mut self, listener: L, initial_txid_watches: OrderedSet<Txid>) {
555        let slot = ListenSlot {
556            txid_watches: initial_txid_watches,
557            watches: OrderedSet::new(),
558            seen: OrderedSet::new(),
559        };
560        debug!("{}: adding listener with txid watches {:?}", short_function!(), slot.txid_watches);
561        self.listeners.insert(listener.key().clone(), (listener, slot));
562    }
563
564    /// Remove a listener
565    pub fn remove_listener(&mut self, key: &L::Key) {
566        debug!("{}: removing listener", short_function!());
567        self.listeners.remove(&key);
568    }
569
570    /// Add more watches to a listener
571    pub fn add_listener_watches(&mut self, key: &L::Key, watches: OrderedSet<OutPoint>) {
572        let (_, slot) =
573            self.listeners.get_mut(key).expect("trying to add watches to non-existent listener");
574        debug!("{}: adding watches {:?}", short_function!(), watches);
575        slot.watches.extend(watches);
576    }
577
    /// Return all Txid and OutPoint watches for future blocks.
    pub fn get_all_forward_watches(&self) -> (Vec<Txid>, Vec<OutPoint>) {
        self.get_all_watches(false)
    }

    /// Return all Txid and OutPoint watches for removing blocks.
    /// This is a superset of the forward watches, and also includes
    /// watches for outpoints which were seen as spent in previous blocks.
    pub fn get_all_reverse_watches(&self) -> (Vec<Txid>, Vec<OutPoint>) {
        self.get_all_watches(true)
    }
589
590    fn get_all_watches(&self, include_reverse: bool) -> (Vec<Txid>, Vec<OutPoint>) {
591        let mut txid_watches = OrderedSet::new();
592        let mut outpoint_watches = OrderedSet::new();
593        for (_, slot) in self.listeners.values() {
594            txid_watches.extend(&slot.txid_watches);
595            outpoint_watches.extend(&slot.watches);
596            if include_reverse {
597                outpoint_watches.extend(&slot.seen);
598            }
599        }
600        (txid_watches.into_iter().collect(), outpoint_watches.into_iter().collect())
601    }
602
    // Validate the block in `headers` against its parent `prev_headers`.
    //
    // `height` is the height of `prev_headers`; the block being validated is
    // at `height + 1`.  Checks chaining, proof of work, difficulty rules, and
    // finally the TXOO proof against the current watch set.
    fn validate_block(
        &self,
        height: u32,
        external_block_hash: Option<&BlockHash>,
        prev_headers: &Headers,
        headers: &Headers,
        proof: &TxoProof,
        is_remove: bool,
    ) -> Result<(), Error> {
        let header = &headers.0;
        let prev_header = &prev_headers.0;
        // Check hash is correctly chained
        if header.prev_blockhash != prev_header.block_hash() {
            return Err(error_orphan_block!(
                "header.prev_blockhash {} != self.tip.block_hash {}",
                header.prev_blockhash.to_hex(),
                prev_header.block_hash().to_hex()
            ));
        }
        // Ensure correctly mined (hash is under target)
        header.validate_pow(header.target()).map_err(|_| Error::InvalidBlock)?;
        if self.network == Network::Testnet
            && header.target() == max_target(self.network)
            && header.time > prev_header.time + 60 * 20
        {
            // special case for Testnet - 20 minute rule: a min-difficulty block
            // is allowed more than 20 minutes after the previous block, so the
            // difficulty checks below are skipped
        } else if (height + 1) % DIFFCHANGE_INTERVAL == 0 {
            // the new block sits on a retarget boundary; sanity-check the
            // difficulty transition
            let prev_target = prev_header.target();
            let target = header.target();
            let network = self.network;
            validate_retarget(prev_target, target, network)?;
        } else {
            // within a retarget period the difficulty must not change
            // (except on testnet, where the 20 minute rule perturbs it)
            if header.bits != prev_header.bits && self.network != Network::Testnet {
                return Err(error_invalid_chain!(
                    "header.bits {} != self.tip.bits {}",
                    header.bits.to_consensus(),
                    prev_header.bits.to_consensus()
                ));
            }
        }

        // reverse (reorg) validation also watches outpoints already seen as
        // spent, since un-spending re-adds them to the watch set
        let (_, outpoint_watches) =
            if is_remove { self.get_all_reverse_watches() } else { self.get_all_forward_watches() };

        let validator = self.validator_factory.make_validator(self.network, self.node_id, None);
        let prev_filter_header = &prev_headers.1;

        if prev_filter_header.to_byte_array().iter().all(|x| *x == 0) {
            // This allows us to upgrade old signers that didn't have filter headers.
            // It is safe, because it's vanishingly unlikely that the filter header is
            // all zeros, so the only way this can be triggered is if the filter header
            // was missing on restore.
            log::warn!("bypassing filter validation because prev_filter_header is all zeroes");
        } else {
            validator
                .validate_block(
                    proof,
                    height + 1,
                    header,
                    external_block_hash,
                    prev_filter_header,
                    &outpoint_watches,
                    &self.trusted_oracle_pubkeys,
                )
                .map_err(|e| error_invalid_proof!("{:?}", e))?;
        }
        Ok(())
    }
671}
672
// Sanity-check a difficulty retarget: the new target must lie within the
// 4x adjustment bounds of the previous target and under the network maximum.
// Note that a *smaller* target means *higher* difficulty.
fn validate_retarget(prev_target: Target, target: Target, network: Network) -> Result<(), Error> {
    // TODO(511) do actual retargeting with timestamps, requires remembering start timestamp

    // Round trip the target bounds, to simulate the way bitcoind checks them
    fn round_trip_target(target: Target) -> Target {
        Target::from_compact(target.to_compact_lossy())
    }

    let min = round_trip_target(prev_target.min_difficulty_transition_threshold());
    let max = round_trip_target(prev_target.max_difficulty_transition_threshold());
    let chain_max = max_target(network);

    // easier than the chain allows at all
    if target.gt(&chain_max) {
        return Err(error_invalid_block!("target {:x} > chain_max {:x}", target, chain_max));
    }
    // difficulty increased by more than the allowed factor
    if target.lt(&min) {
        return Err(error_invalid_chain!(
            "bad transition {:x} -> target {:x} < min {:x}",
            prev_target,
            target,
            min
        ));
    }
    // difficulty decreased by more than the allowed factor
    if target.gt(&max) {
        return Err(error_invalid_chain!(
            "bad transition {:x} -> target {:x} > max {:x}",
            prev_target,
            target,
            max
        ));
    }
    Ok(())
}
706
// work around unnecessary mutable borrow tripping us up
//
// Fans streamed-block push events out to every registered listener.  Field 1
// is the block hash from the BlockChunk messages, which `on_block_start`
// checks against the streamed header.
struct ChainTrackerPushListener<'a, L: ChainListener>(&'a ChainTracker<L>, BlockHash);

impl<'a, L: ChainListener> ChainTrackerPushListener<'a, L> {
    // broadcast push events to all listeners
    fn do_push<F: FnMut(&mut dyn PushListener)>(&mut self, mut f: F) {
        for (listener, _) in self.0.listeners.values() {
            listener.on_push(&mut f)
        }
    }
}
718
impl<'a, L: ChainListener> PushListener for ChainTrackerPushListener<'a, L> {
    fn on_block_start(&mut self, header: &BlockHeader) {
        // ensure that the block hash in the BlockChunk message
        // matches the streamed block header
        assert_eq!(
            header.block_hash(),
            self.1,
            "streamed block hash does not match header {} != {}",
            header.block_hash(),
            self.1
        );
        self.do_push(|pl| pl.on_block_start(header));
    }

    // the remaining events carry no block hash and are forwarded verbatim

    fn on_block_end(&mut self) {
        self.do_push(|pl| pl.on_block_end());
    }

    fn on_transaction_start(&mut self, version: i32) {
        self.do_push(|pl| pl.on_transaction_start(version));
    }

    fn on_transaction_end(&mut self, locktime: LockTime, txid: Txid) {
        self.do_push(|pl| pl.on_transaction_end(locktime, txid));
    }

    fn on_transaction_input(&mut self, txin: &TxIn) {
        self.do_push(|pl| pl.on_transaction_input(txin));
    }

    fn on_transaction_output(&mut self, txout: &TxOut) {
        self.do_push(|pl| pl.on_transaction_output(txout));
    }
}
753
/// Listen to chain events
pub trait ChainListener: SendSync {
    /// The key type
    type Key: Ord + Clone;

    /// The key
    fn key(&self) -> &Self::Key;

    /// A block was added via a compact proof.
    /// The listener returns outpoints to watch in the future, and outpoints to stop watching.
    fn on_add_block(
        &self,
        txs: &[Transaction],
        block_hash: &BlockHash,
    ) -> (Vec<OutPoint>, Vec<OutPoint>);

    /// A block was added via streaming (see `on_push`); the transactions were
    /// already delivered as push events.
    /// The listener returns outpoints to watch in the future, and outpoints to stop watching.
    fn on_add_streamed_block_end(&self, block_hash: &BlockHash) -> (Vec<OutPoint>, Vec<OutPoint>);

    /// A block was deleted via a compact proof.
    /// The listener returns the same thing as on_add_block, so that the changes can be reverted.
    fn on_remove_block(
        &self,
        txs: &[Transaction],
        block_hash: &BlockHash,
    ) -> (Vec<OutPoint>, Vec<OutPoint>);

    /// A block was deleted via streaming (see `on_push`); the transactions were
    /// already delivered as push events.
    /// The listener returns the same thing as on_add_block, so that the changes can be reverted.
    fn on_remove_streamed_block_end(
        &self,
        block_hash: &BlockHash,
    ) -> (Vec<OutPoint>, Vec<OutPoint>);

    /// Invoke `f` with this listener's block push decoder listener
    fn on_push<F>(&self, f: F)
    where
        F: FnOnce(&mut dyn PushListener);
}
796
/// Convert the Network to a max target value for each network.
pub fn max_target(network: Network) -> Target {
    // upper 128 bits of the 256-bit target; the low 128 bits are zero
    let upper = match network {
        Network::Regtest => 0x7fffffu128 << (128 - 24),
        Network::Testnet => 0xffffu128 << (128 - 48),
        Network::Bitcoin => 0xffffu128 << (128 - 48),
        // NOTE(review): other networks (e.g. Signet) panic here — confirm
        // callers never pass them
        _ => unreachable!(),
    };
    let mut bytes = [0u8; 32];
    bytes[0..16].copy_from_slice(&upper.to_be_bytes());
    Target::from_be_bytes(bytes)
}
809
810#[cfg(test)]
811mod tests {
812    use crate::util::test_utils::*;
813    use bitcoin::blockdata::constants::genesis_block;
814    use bitcoin::consensus::serialize;
815    use bitcoin::hash_types::TxMerkleNode;
816    use bitcoin::hashes::Hash;
817    use bitcoin::key::Secp256k1;
818    use bitcoin::network::constants::Network;
819    use bitcoin::secp256k1::SecretKey;
820    use bitcoin::string::FromHexStr;
821    use bitcoin::{merkle_tree, Block, CompactTarget, TxIn};
822    use bitcoin::{Sequence, Witness};
823    use bitcoind_client::dummy::DummyTxooSource;
824    use core::iter::FromIterator;
825    use test_log::test;
826
827    use super::*;
828
829    use crate::util::mocks::MockValidatorFactory;
830
831    use txoo::source::Source;
832
833    #[tokio::test]
834    async fn test_add_valid_proof() -> Result<(), Error> {
835        let source = make_source().await;
836        let (mut tracker, _) = make_tracker()?;
837        assert_eq!(tracker.height(), 0);
838
839        let public_key = source.oracle_setup().await.public_key;
840        tracker.trusted_oracle_pubkeys = vec![public_key];
841
842        add_block(&mut tracker, &source, &[]).await?;
843        add_block(&mut tracker, &source, &[]).await?;
844        assert_eq!(tracker.height(), 2);
845        Ok(())
846    }
847
848    #[tokio::test]
849    async fn test_add_invalid_proof() -> Result<(), Error> {
850        let source = make_source().await;
851        let (mut tracker, _) = make_tracker()?;
852        assert_eq!(tracker.height(), 0);
853
854        let random_secret = [0x11; 32];
855        let public_key = get_txoo_public_key(&random_secret);
856        tracker.trusted_oracle_pubkeys = vec![public_key];
857
858        add_block(&mut tracker, &source, &[]).await?;
859        let result = add_block(&mut tracker, &source, &[]).await;
860        assert!(result.is_err());
861
862        Ok(())
863    }
864
865    #[tokio::test]
866    async fn test_add_remove() -> Result<(), Error> {
867        let genesis = genesis_block(Network::Regtest);
868        let source = make_source().await;
869        let (mut tracker, _) = make_tracker()?;
870        let header0 = tracker.tip().0.clone();
871        assert_eq!(tracker.height(), 0);
872        let header1 = add_block(&mut tracker, &source, &[]).await?;
873        assert_eq!(tracker.height(), 1);
874
875        // difficulty can't change within the retarget period
876        let bad_bits = header1.bits.to_consensus() - 1;
877        let header_bad_bits = mine_header_with_bits(
878            tracker.tip.0.block_hash(),
879            TxMerkleNode::all_zeros(),
880            CompactTarget::from_consensus(bad_bits),
881        );
882        let dummy_proof =
883            TxoProof::prove_unchecked(&genesis, &FilterHeader::all_zeros(), tracker.height() + 1);
884        assert_eq!(
885            tracker.add_block(header_bad_bits, dummy_proof).err(),
886            Some(Error::InvalidChain)
887        );
888
889        let header_removed = remove_block(&mut tracker, &source, &[], &header0).await?;
890        assert_eq!(header1, header_removed);
891
892        // can't go back before the first block that the tracker saw
893        let (_, filter_header) = source.get(0, &genesis).await.unwrap();
894        let proof = TxoProof::prove_unchecked(&genesis, &filter_header, 0);
895
896        let prev_headers = Headers(header0, FilterHeader::all_zeros());
897        assert_eq!(tracker.remove_block(proof, prev_headers).err(), Some(Error::ReorgTooDeep));
898        Ok(())
899    }
900
    #[tokio::test]
    async fn test_listeners() -> Result<(), Error> {
        // Watch tracking across add/remove: a listener's watched outpoints
        // follow the chain of spends forward on add_block and are restored
        // on remove_block.
        let source = make_source().await;
        let (mut tracker, validator_factory) = make_tracker()?;

        let header1 = add_block(&mut tracker, &source, &[]).await?;

        // `tx` spends the initially-watched outpoint; its first output
        // becomes the next watch.
        let tx = make_tx(vec![make_txin(1)]);
        let initial_watch = make_outpoint(1);
        let second_watch = OutPoint::new(tx.txid(), 0);
        let listener = MockListener::new(initial_watch);

        tracker.add_listener(listener.clone(), OrderedSet::new());

        tracker.add_listener_watches(&initial_watch, OrderedSet::from_iter(vec![initial_watch]));

        assert_eq!(tracker.listeners.len(), 1);
        assert_eq!(
            tracker.listeners.get(listener.key()).unwrap().1.watches,
            OrderedSet::from_iter(vec![initial_watch])
        );

        // Mining `tx` moves the watch from initial_watch to second_watch.
        let header2 = add_block(&mut tracker, &source, &[tx.clone()]).await?;

        assert_eq!(
            tracker.listeners.get(listener.key()).unwrap().1.watches,
            OrderedSet::from_iter(vec![second_watch])
        );

        // `tx2` spends second_watch; afterwards nothing is watched.
        let tx2 = make_tx(vec![TxIn {
            previous_output: second_watch,
            script_sig: Default::default(),
            sequence: Sequence::ZERO,
            witness: Witness::default(),
        }]);

        let _header3 = add_block(&mut tracker, &source, &[tx2.clone()]).await?;

        assert_eq!(tracker.listeners.get(listener.key()).unwrap().1.watches, OrderedSet::new());

        // validation included forward watches
        assert_eq!(
            *validator_factory.validator().last_validated_watches.lock().unwrap(),
            vec![second_watch]
        );

        // Undo block 3: second_watch becomes watched again.
        remove_block(&mut tracker, &source, &[tx2], &header2).await?;

        assert_eq!(
            tracker.listeners.get(listener.key()).unwrap().1.watches,
            OrderedSet::from_iter(vec![second_watch])
        );

        // validation should have included reverse watches
        assert_eq!(
            *validator_factory.validator().last_validated_watches.lock().unwrap(),
            vec![initial_watch, second_watch]
        );

        // Undo block 2: back to the initial watch.
        remove_block(&mut tracker, &source, &[tx], &header1).await?;

        assert_eq!(
            tracker.listeners.get(listener.key()).unwrap().1.watches,
            OrderedSet::from_iter(vec![initial_watch])
        );

        // validation should still include reverse watches, because those are
        // currently not pruned
        assert_eq!(
            *validator_factory.validator().last_validated_watches.lock().unwrap(),
            vec![initial_watch, second_watch]
        );

        Ok(())
    }
976
977    #[tokio::test]
978    async fn test_streamed() -> Result<(), Error> {
979        let source = make_source().await;
980        let (mut tracker, _validator_factory) = make_tracker()?;
981
982        let _header1 = add_block(&mut tracker, &source, &[]).await?;
983
984        let tx = make_tx(vec![make_txin(1)]);
985        let initial_watch = make_outpoint(1);
986        let second_watch = OutPoint::new(tx.txid(), 0);
987        let listener = MockListener::new(initial_watch);
988
989        tracker.add_listener(listener.clone(), OrderedSet::new());
990
991        tracker.add_listener_watches(&initial_watch, OrderedSet::from_iter(vec![initial_watch]));
992
993        assert_eq!(tracker.listeners.len(), 1);
994        assert_eq!(
995            tracker.listeners.get(listener.key()).unwrap().1.watches,
996            OrderedSet::from_iter(vec![initial_watch])
997        );
998
999        let _header2 = add_streamed_block(&mut tracker, &source, &[tx.clone()]).await?;
1000
1001        assert_eq!(
1002            tracker.listeners.get(listener.key()).unwrap().1.watches,
1003            OrderedSet::from_iter(vec![second_watch])
1004        );
1005
1006        Ok(())
1007    }
1008
1009    #[tokio::test]
1010    async fn test_retarget() -> Result<(), Error> {
1011        let source = make_source().await;
1012        let (mut tracker, _) = make_tracker()?;
1013        for _ in 1..DIFFCHANGE_INTERVAL {
1014            add_block(&mut tracker, &source, &[]).await?;
1015        }
1016        assert_eq!(tracker.height, DIFFCHANGE_INTERVAL - 1);
1017
1018        let target = tracker.tip().0.target();
1019
1020        // Decrease difficulty by 2 fails because of chain max
1021        let new_bits = shift_target(target, 1).to_compact_lossy();
1022
1023        assert_eq!(
1024            add_block_with_bits(&mut tracker, &source, new_bits, false).await.err(),
1025            Some(Error::InvalidBlock)
1026        );
1027
1028        // Increase difficulty by 8 fails because of max retarget
1029        let new_bits = shift_target(target, -3).to_compact_lossy();
1030
1031        assert_eq!(
1032            add_block_with_bits(&mut tracker, &source, new_bits, false).await.err(),
1033            Some(Error::InvalidChain)
1034        );
1035
1036        // Increase difficulty by 2
1037        let new_bits = shift_target(target, -1).to_compact_lossy();
1038        add_block_with_bits(&mut tracker, &source, new_bits, true).await?;
1039        Ok(())
1040    }
1041
1042    fn shift_target(target: Target, shift: i8) -> Target {
1043        let mut target_bytes = target.to_be_bytes();
1044        let mut upper = u128::from_be_bytes(target_bytes[0..16].try_into().unwrap());
1045        if shift > 0 {
1046            upper <<= shift as u128;
1047        } else {
1048            upper >>= -shift as u128;
1049        }
1050        target_bytes[0..16].copy_from_slice(&upper.to_be_bytes());
1051        Target::from_be_bytes(target_bytes)
1052    }
1053
1054    #[test]
1055    fn test_retarget_rounding() -> Result<(), Error> {
1056        validate_retarget(
1057            Target::from_compact(CompactTarget::from_hex_str("0x1c063051").unwrap()),
1058            Target::from_compact(CompactTarget::from_hex_str("0x1c018c14").unwrap()),
1059            Network::Testnet,
1060        )?;
1061        Ok(())
1062    }
1063
1064    fn make_tracker() -> Result<(ChainTracker<MockListener>, Arc<MockValidatorFactory>), Error> {
1065        let genesis = genesis_block(Network::Regtest);
1066        let validator_factory = Arc::new(MockValidatorFactory::new());
1067        let (node_id, _, _) = make_node();
1068        let tip = Headers(genesis.header, FilterHeader::all_zeros());
1069        let tracker = ChainTracker::new(
1070            Network::Regtest,
1071            0,
1072            tip,
1073            node_id,
1074            validator_factory.clone(),
1075            vec![],
1076        )?;
1077        Ok((tracker, validator_factory))
1078    }
1079
1080    async fn make_source() -> DummyTxooSource {
1081        let source = DummyTxooSource::new();
1082        source.on_new_block(0, &genesis_block(Network::Regtest)).await;
1083        source
1084    }
1085
1086    // returns the new block's header
1087    async fn add_block(
1088        tracker: &mut ChainTracker<MockListener>,
1089        source: &DummyTxooSource,
1090        txs: &[Transaction],
1091    ) -> Result<BlockHeader, Error> {
1092        let txs = txs_with_coinbase(txs);
1093
1094        let block = make_block(tracker.tip().0, txs);
1095        let height = tracker.height() + 1;
1096        source.on_new_block(height, &block).await;
1097        let (attestation, filter_header) = source.get(height, &block).await.unwrap();
1098        let pubkey = source.oracle_setup().await.public_key;
1099        let txid_watches: Vec<_> = block.txdata.iter().map(|tx| tx.txid()).collect();
1100        let proof = TxoProof::prove(
1101            vec![(pubkey, attestation)],
1102            &filter_header,
1103            &block,
1104            height,
1105            &[],
1106            &txid_watches,
1107        );
1108
1109        tracker.add_block(block.header.clone(), proof)?;
1110        Ok(block.header)
1111    }
1112
1113    // returns the new block's header
1114    async fn add_streamed_block(
1115        tracker: &mut ChainTracker<MockListener>,
1116        source: &DummyTxooSource,
1117        txs: &[Transaction],
1118    ) -> Result<BlockHeader, Error> {
1119        let txs = txs_with_coinbase(txs);
1120
1121        let block = make_block(tracker.tip().0, txs);
1122        let height = tracker.height() + 1;
1123        source.on_new_block(height, &block).await;
1124        let (attestation, filter_header) = source.get(height, &block).await.unwrap();
1125        let pubkey = source.oracle_setup().await.public_key;
1126        let txid_watches: Vec<_> = block.txdata.iter().map(|tx| tx.txid()).collect();
1127        let proof = TxoProof::prove(
1128            vec![(pubkey, attestation)],
1129            &filter_header,
1130            &block,
1131            height,
1132            &[],
1133            &txid_watches,
1134        );
1135
1136        let proof =
1137            TxoProof { attestations: proof.attestations, proof: ProofType::ExternalBlock() };
1138
1139        let bytes = serialize(&block);
1140        tracker.block_chunk(block.block_hash(), 0, &bytes)?;
1141        tracker.add_block(block.header.clone(), proof)?;
1142        Ok(block.header)
1143    }
1144
1145    // returns the new block's header
1146    async fn add_block_with_bits(
1147        tracker: &mut ChainTracker<MockListener>,
1148        source: &DummyTxooSource,
1149        bits: CompactTarget,
1150        do_add: bool,
1151    ) -> Result<BlockHeader, Error> {
1152        let txs = txs_with_coinbase(&[]);
1153        let txids: Vec<Txid> = txs.iter().map(|tx| tx.txid()).collect();
1154
1155        let merkle_root = merkle_tree::calculate_root(txids.into_iter()).unwrap();
1156        let merkle_root_node = TxMerkleNode::from_raw_hash(merkle_root.into());
1157        let header = mine_header_with_bits(tracker.tip().0.block_hash(), merkle_root_node, bits);
1158
1159        let txids: Vec<Txid> = txs.iter().map(|tx| tx.txid()).collect();
1160        let block = Block { header, txdata: txs };
1161        let height = tracker.height() + 1;
1162
1163        let proof: TxoProof;
1164        if do_add {
1165            source.on_new_block(height, &block).await;
1166            let public_key = source.oracle_setup().await.public_key;
1167            let (attestation, filter_header) = source.get(height, &block).await.unwrap();
1168            proof = TxoProof::prove(
1169                vec![(public_key, attestation)],
1170                &filter_header,
1171                &block,
1172                height,
1173                &vec![],
1174                &txids,
1175            );
1176        } else {
1177            let filter_header = FilterHeader::all_zeros();
1178            proof = TxoProof::prove_unchecked(&block, &filter_header, height);
1179        }
1180
1181        tracker.add_block(block.header.clone(), proof)?;
1182        Ok(block.header)
1183    }
1184
1185    // returns the removed block's header
1186    async fn remove_block(
1187        tracker: &mut ChainTracker<MockListener>,
1188        source: &DummyTxooSource,
1189        txs: &[Transaction],
1190        prev_header: &BlockHeader,
1191    ) -> Result<BlockHeader, Error> {
1192        let txs = txs_with_coinbase(txs);
1193        let block = make_block(*prev_header, txs);
1194        let height = tracker.height();
1195        let (attestation, filter_header) = source.get(height, &block).await.unwrap();
1196        let pubkey = source.oracle_setup().await.public_key;
1197        let txid_watches: Vec<_> = block.txdata.iter().map(|tx| tx.txid()).collect();
1198        let proof = TxoProof::prove(
1199            vec![(pubkey, attestation)],
1200            &filter_header,
1201            &block,
1202            height,
1203            &[],
1204            &txid_watches,
1205        );
1206
1207        let prev_filter_header = tracker.headers[0].1;
1208        let prev_headers = Headers(*prev_header, prev_filter_header);
1209        let removed_header = tracker.remove_block(proof, prev_headers)?;
1210        Ok(removed_header)
1211    }
1212
1213    fn txs_with_coinbase(txs: &[Transaction]) -> Vec<Transaction> {
1214        let mut txs = txs.to_vec();
1215        txs.insert(
1216            0,
1217            Transaction {
1218                version: 0,
1219                lock_time: LockTime::ZERO,
1220                input: vec![],
1221                output: vec![Default::default()],
1222            },
1223        );
1224        txs
1225    }
1226
1227    fn get_txoo_public_key(secret_key: &[u8]) -> PublicKey {
1228        let secp = Secp256k1::new();
1229        let secret_key = SecretKey::from_slice(&secret_key).expect("32 bytes, within curve order");
1230        PublicKey::from_secret_key(&secp, &secret_key)
1231    }
1232}