crate::ix!();

impl PeerManager {

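    /// Handle an incoming `getblocktxn`
    /// request: reply with a `blocktxn`
    /// message when the requested block
    /// is recent enough, otherwise queue
    /// a full-block `getdata` response.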
    pub fn process_getblockstxn_message(
        self:               Arc<Self>, 
        peer:               &Option<Peer>,
        pfrom:              &mut AmoWriteGuard<Box<dyn NodeInterface>>,
        msg_type:           &str,
        recv:               &mut DataStream,
        time_received:      &OffsetDateTime /* micros */,
        interrupt_msg_proc: &AtomicBool)  {

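        // Deserialize the peer's
        // BlockTransactionsRequest from
        // the received stream.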
        let mut req = BlockTransactionsRequest::default();

        recv.stream_into(&mut req);

        let mut recent_block: Amo<Block> = Amo::<Block>::none();

        // Check whether the request is
        // for the most recently announced
        // block, which is cached under its
        // own lock.
        {
            let _guard = CS_MOST_RECENT_BLOCK.get();

            if *MOST_RECENT_BLOCK_HASH.get() == req.blockhash {
                recent_block = MOST_RECENT_BLOCK.clone();
            }

            // Unlock cs_most_recent_block to
            // avoid CS_MAIN lock inversion
        }

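        // If we have the block cached as
        // the most recent block, answer
        // from the cache without taking
        // cs_main.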
        if recent_block.is_some() {

            self.send_block_transactions(
                pfrom, 
                &recent_block.get(), 
                &req
            );

            return;
        }

        // Otherwise look the block up in
        // the block index under cs_main.
        {
            let _guard = CS_MAIN.lock();

            let pindex: Option<Arc<BlockIndex>> = self.chainman.get()
                .inner
                .blockman
                .lookup_block_index(&req.blockhash);

            if pindex.is_none() 
            || (pindex.as_ref().unwrap().n_status & BlockStatus::BLOCK_HAVE_DATA.bits()) == 0 {

                log_print!(
                    LogFlags::NET, 
                    "Peer %d sent us a getblocktxn for a block we don't have\n", 
                    pfrom.get_id()
                );

                return;
            }

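            // Only answer with a blocktxn
            // response for blocks within
            // MAX_BLOCKTXN_DEPTH of the tip.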
            let active_chain_height = self.chainman.get().active_chain().height().unwrap();

            let target_height = active_chain_height - MAX_BLOCKTXN_DEPTH;

            if pindex.as_ref().unwrap().n_height >= target_height.try_into().unwrap() {

                let mut block = Block::default();

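                // Read the full block from
                // disk; the read is expected
                // to succeed because
                // BLOCK_HAVE_DATA was checked
                // above.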
                let ret: bool = read_block_from_disk_with_blockindex(
                    &mut block,
                    pindex.unwrap(),
                    &self.chainparams.get_consensus()
                );

                assert!(ret);

                self.send_block_transactions(pfrom, &block, &req);

                return;
            }
        }

        // If an older block is requested
        // (should never happen in practice,
        // but can happen in tests) send
        // a block response instead of
        // a blocktxn response. Sending a full
        // block response instead of a small
        // blocktxn response is preferable in
        // the case where a peer might
        // maliciously send lots of
        // getblocktxn requests to trigger
        // expensive disk reads, because it
        // will require the peer to actually
        // receive all the data read from disk
        // over the network.
        log_print!(
            LogFlags::NET,
            "Peer %d sent us a getblocktxn for a block > %i deep\n",
            pfrom.get_id(),
            MAX_BLOCKTXN_DEPTH
        );

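        // Announce the full block instead:
        // build an inv whose type depends
        // on whether the peer wants witness
        // data, then queue it as a getdata
        // request below.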
        let mut inv = Inv::default();

        {
            let _guard = CS_MAIN.lock();

            let created_state = create_state(pfrom.get_id());

            let state = created_state.get();

            inv.ty = match state.wants_cmpct_witness.load(atomic::Ordering::Relaxed) {
                true   => GetDataMsg::MSG_WITNESS_BLOCK.bits(),
                false  => GetDataMsg::MSG_BLOCK.bits()
            };
        }

        inv.hash = req.blockhash;

        {
            let mut guard = peer.as_ref().unwrap().getdata_requests.lock();

            guard.push_back(inv);
        }

        // The message processing loop will go
        // around again (without pausing) and
        // we'll respond then
    }
}