crate::ix!();

impl PeerManager {

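    /// Handle an incoming `tx` message from `pfrom`: deserialize the
    /// transaction, attempt mempool acceptance, relay it to peers on
    /// success, and otherwise either track it as an orphan or record
    /// it in the recent-rejects filter.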
    pub fn process_tx_message(
        self:               Arc<Self>, 
        peer:               Amo<Peer>,
        pfrom:              &mut AmoWriteGuard<Box<dyn NodeInterface>>,
        msg_type:           &str,
        recv:               &mut DataStream,
        time_received:      &OffsetDateTime /* micros */,
        interrupt_msg_proc: &AtomicBool)  {

        //  Stop processing the transaction early
        //  if
        //
        //  1) We are in blocks only mode and peer
        //  has no relay permission
        //
        //  2) This peer is a block-relay-only
        //  peer
        if (self.ignore_incoming_txs && !pfrom.has_permission(NetPermissionFlags::Relay)) 
        || !pfrom.has_tx_relay() {

            log_print!(
                LogFlags::NET, 
                "transaction sent in violation of protocol peer=%d\n", 
                pfrom.get_id()
            );

            pfrom.mark_for_disconnect();

            return;
        }

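        // Deserialize the transaction from
        // the message payload.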
        let mut ptx = TransactionRef::none();

        recv.stream_into(&mut ptx);

        let tx = ptx.get();

        let txid:  &u256 = tx.get_hash();
        let wtxid: &u256 = tx.get_witness_hash();

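        // Take cs_main and the orphan-set
        // lock. Both guards are held for the
        // remainder of transaction
        // processing.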
        let mut guard_main    = CS_MAIN.lock();
        let mut guard_orphans = G_CS_ORPHANS.lock();

        let nodestate: Amo<NodeState> = create_state(pfrom.get_id());

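        // wtxid-relay peers (BIP 339)
        // announce transactions by wtxid;
        // legacy peers use txid.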
        let hash: &u256 = match nodestate.get().wtxid_relay.load(atomic::Ordering::Relaxed) {
            true   => wtxid,
            false  => txid
        };

        pfrom.add_known_tx(hash);

        if nodestate.get().wtxid_relay.load(atomic::Ordering::Relaxed) 
        && txid != wtxid {

            // Insert txid into
            // filterInventoryKnown, even for
            // wtxidrelay peers. This prevents
            // re-adding of unconfirmed parents to
            // the recently_announced filter, when
            // a child tx is requested. See
            // ProcessGetData().
            pfrom.add_known_tx(txid);
        }

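        // Tell the request tracker that this
        // peer's announcement has been
        // answered, whether it arrived by
        // txid or by wtxid.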
        self.inner.lock().txrequest.lock().received_response(pfrom.get_id(), txid);

        if tx.has_witness() {
            self.inner.lock().txrequest.lock().received_response(pfrom.get_id(), wtxid);
        }

        // We do the AlreadyHaveTx() check using
        // wtxid, rather than txid - in the
        // absence of witness malleation, this is
        // strictly better, because the recent
        // rejects filter may contain the wtxid
        // but rarely contains the txid of
        // a segwit transaction that has been
        // rejected. In the presence of witness
        // malleation, it's possible that by only
        // doing the check with wtxid, we could
        // overlook a transaction which was
        // confirmed with a different witness, or
        // exists in our mempool with a different
        // witness, but this has limited downside:
        // mempool validation does its own lookup
        // of whether we have the txid already;
        // and an adversary can already relay us
        // old transactions (older than our
        // recency filter) if trying to DoS us,
        // without any need for witness
        // malleation.
        if self.clone().already_have_tx(&GenTxId::wtxid(wtxid)) {

            if pfrom.has_permission(NetPermissionFlags::ForceRelay) {

                // Always relay transactions
                // received from peers with
                // forcerelay permission, even
                // if they were already in the
                // mempool, allowing the node
                // to function as a gateway
                // for nodes hidden behind it.
                if !self.mempool.get().exists(&GenTxId::txid(tx.get_hash())) {

                    log_printf!(
                        "Not relaying non-mempool transaction %s from forcerelay peer=%d\n", 
                        tx.get_hash().to_string(), 
                        pfrom.get_id()
                    );

                } else {

                    log_printf!(
                        "Force relaying tx %s from peer=%d\n", 
                        tx.get_hash().to_string(), 
                        pfrom.get_id()
                    );

                    self.clone().relay_transaction(tx.get_hash(), tx.get_witness_hash());
                }
            }

            return;
        }

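        // Submit the transaction to mempool
        // validation (consensus and policy
        // checks).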
        let result: MempoolAcceptResult = accept_to_memory_pool(
            self.chainman.get().active_chainstate(),
            self.mempool.clone(),
            ptx.clone(),
            /* bypass_limits */ false,
            None
        );

        let state: &TxValidationState = &result.state;

        if result.result_type == MempoolAcceptResultType::VALID {

            let chainman = self.chainman.get();

            let active_chainstate = chainman.active_chainstate();

            let mempool = self.mempool.get();

            let chain_height = active_chainstate.height().unwrap();

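            // Sanity-check mempool
            // consistency against the current
            // UTXO set.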
            mempool.check(
                active_chainstate.coins_tip(), 
                (chain_height + 1).try_into().unwrap()
            );

            // As this version of the
            // transaction was acceptable, we
            // can forget about any requests
            // for it.
            {
                let mut inner = self.inner.lock();

                inner.txrequest.lock().forget_tx_hash(tx.get_hash());
                inner.txrequest.lock().forget_tx_hash(tx.get_witness_hash());
            }

            self.clone().relay_transaction(
                tx.get_hash(), 
                tx.get_witness_hash()
            );

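            // Wake up any orphans that listed
            // this transaction as a missing
            // parent.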
            self.orphanage.clone().add_children_to_work_set(
                &tx, 
                &mut peer.get_mut().orphan_work_set
            );

            pfrom.set_n_last_tx_time(Some(get_datetime()));

            log_print!(
                LogFlags::MEMPOOL, 
                "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n", 
                pfrom.get_id(), 
                tx.get_hash().to_string(), 
                self.mempool.len(), 
                self.mempool.dynamic_memory_usage() / 1000
            );

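            // Keep any transactions this
            // acceptance replaced (e.g. via
            // RBF) available for compact
            // block reconstruction.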
            for removed_tx in result.replaced_transactions.as_ref().unwrap().iter() {
                self.clone().add_to_compact_extra_transactions(removed_tx);
            }

            // Recursively process any orphan
            // transactions that depended on
            // this one
            self.clone()
                .process_orphan_tx(&mut peer.get_mut().orphan_work_set);

        } else {

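            // The transaction was rejected;
            // classify the failure.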
            if state.get_result() == TxValidationResult::TX_MISSING_INPUTS {

                // It may be the case that the
                // orphan's parents have all
                // been rejected.
                let mut rejected_parents: bool = false;

                // Deduplicate parent txids,
                // so that we don't have to
                // loop over the same parent
                // txid more than once down
                // below.
                let mut unique_parents: Vec<u256> = vec![];

                unique_parents.reserve(tx.vin.len());

                for txin in tx.vin.iter() {
                    // We start with all
                    // parents, and then
                    // remove duplicates
                    // below.
                    unique_parents.push(txin.prevout.hash.clone());
                }

                unique_parents.sort();

                unique_parents.dedup();

                for parent_txid in unique_parents.iter() {
                    if self.inner.lock().recent_rejects.contains_key(parent_txid.as_slice()) {
                        rejected_parents = true;
                        break;
                    }
                }

                if !rejected_parents {

                    let current_time = get_datetime();

                    for parent_txid in unique_parents.iter() {

                        // Here, we only have
                        // the txid (and not
                        // wtxid) of the
                        // inputs, so we only
                        // request in txid
                        // mode, even for
                        // wtxidrelay peers.
                        //
                        // Eventually we
                        // should replace this
                        // with an improved
                        // protocol for
                        // getting all
                        // unconfirmed
                        // parents.
                        let gtxid = GenTxId::txid(parent_txid);

                        pfrom.add_known_tx(parent_txid);

                        if !self.clone().already_have_tx(&gtxid) {

                            self.clone()
                                .add_tx_announcement(
                                    pfrom, 
                                    &gtxid, 
                                    current_time
                                );
                        }
                    }

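                    // Add the orphan to the
                    // orphanage; a newly added
                    // orphan is also kept as
                    // a compact-block extra.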
                    if self.orphanage.clone().add_tx(&ptx, pfrom.get_id()) {
                        self.clone().add_to_compact_extra_transactions(&ptx);
                    }

                    let mut inner = self.inner.lock();

                    // Once added to the
                    // orphan pool, a tx is
                    // considered AlreadyHave,
                    // and we shouldn't
                    // request it anymore.
                    inner.txrequest.lock().forget_tx_hash(tx.get_hash());
                    inner.txrequest.lock().forget_tx_hash(tx.get_witness_hash());

                    // DoS prevention: do not
                    // allow m_orphanage to
                    // grow unbounded (see
                    // CVE-2012-3789)
                    let n_max_orphan_tx: u32 = max(
                        0i64,
                        G_ARGS.lock().get_int_arg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS.into())
                    ) as u32;

                    let n_evicted: u32 = self.orphanage.clone().limit_orphans(n_max_orphan_tx);

                    if n_evicted > 0 {

                        log_print!(
                            LogFlags::MEMPOOL, 
                            "orphanage overflow, removed %u tx\n", 
                            n_evicted
                        );
                    }

                } else {

                    log_print!(
                        LogFlags::MEMPOOL, 
                        "not keeping orphan with rejected parents %s\n", 
                        tx.get_hash().to_string()
                    );

                    let mut inner = self.inner.lock();

                    // We will continue to
                    // reject this tx since it
                    // has rejected parents so
                    // avoid re-requesting it
                    // from other peers.
                    //
                    // Here we add both the
                    // txid and the wtxid, as
                    // we know that regardless
                    // of what witness is
                    // provided, we will not
                    // accept this, so we
                    // don't need to allow for
                    // redownload of this txid
                    // from any of our
                    // non-wtxidrelay peers.
                    inner.recent_rejects.insert_key(tx.get_hash().as_slice());
                    inner.recent_rejects.insert_key(tx.get_witness_hash().as_slice());

                    inner.txrequest.lock().forget_tx_hash(tx.get_hash());
                    inner.txrequest.lock().forget_tx_hash(tx.get_witness_hash());
                }

            } else {

                if state.get_result() != TxValidationResult::TX_WITNESS_STRIPPED {

                    // We can add the wtxid of
                    // this transaction to our
                    // reject filter.
                    //
                    // Do not add txids of
                    // witness transactions or
                    // witness-stripped
                    // transactions to the
                    // filter, as they can
                    // have been malleated;
                    // adding such txids to
                    // the reject filter would
                    // potentially interfere
                    // with relay of valid
                    // transactions from peers
                    // that do not support
                    // wtxid-based relay. See
                    // https://github.com/bitcoin/bitcoin/issues/8279
                    // for details.
                    //
                    // We can remove this
                    // restriction (and always
                    // add wtxids to the
                    // filter even for witness
                    // stripped transactions)
                    // once wtxid-based relay
                    // is broadly deployed.
                    //
                    // See also comments in
                    // https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
                    // for concerns around
                    // weakening security of
                    // unupgraded nodes if we
                    // start doing this too
                    // early.
                    self.inner.lock().recent_rejects.insert_key(tx.get_witness_hash().as_slice());

                    self.inner.lock().txrequest.lock().forget_tx_hash(tx.get_witness_hash());

                    // If the transaction
                    // failed for
                    // TX_INPUTS_NOT_STANDARD,
                    // then we know that the
                    // witness was irrelevant
                    // to the policy failure,
                    // since this check
                    // depends only on the
                    // txid (the scriptPubKey
                    // being spent is covered
                    // by the txid).
                    //
                    // Add the txid to the
                    // reject filter to
                    // prevent repeated
                    // processing of this
                    // transaction in the
                    // event that child
                    // transactions are later
                    // received (resulting in
                    // parent-fetching by txid
                    // via the orphan-handling
                    // logic).
                    if state.get_result() == TxValidationResult::TX_INPUTS_NOT_STANDARD 
                    && tx.get_witness_hash() != tx.get_hash() {

                        self.inner.lock().recent_rejects.insert_key(tx.get_hash().as_slice());

                        self.inner.lock().txrequest.lock().forget_tx_hash(tx.get_hash());
                    }

                    if recursive_dynamic_usage(&ptx) < 100000 {
                        self.clone().add_to_compact_extra_transactions(&ptx);
                    }
                }
            }
        }

        // If a tx has been detected by
        // m_recent_rejects, we will have reached
        // this point and the tx will have been
        // ignored. Because we haven't run the tx
        // through AcceptToMemoryPool, we won't
        // have computed a DoS score for it or
        // determined exactly why we consider it
        // invalid.
        //
        // This means we won't penalize any peer
        // subsequently relaying a DoSy tx (even
        // if we penalized the first peer who gave
        // it to us) because we have to account
        // for m_recent_rejects showing false
        // positives. In other words, we shouldn't
        // penalize a peer if we aren't *sure*
        // they submitted a DoSy tx.
        //
        // Note that m_recent_rejects doesn't just
        // record DoSy or invalid transactions,
        // but any tx not accepted by the mempool,
        // which may be due to node policy
        // (vs. consensus). So we can't blanket
        // penalize a peer simply for relaying
        // a tx that our m_recent_rejects has
        // caught, regardless of false positives.
        if state.is_invalid() {

            log_print!(
                LogFlags::MEMPOOLREJ, 
                "%s from peer=%d was not accepted: %s\n", 
                tx.get_hash().to_string(), 
                pfrom.get_id(), 
                state.to_string()
            );

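        // Depending on the failure reason,
        // the peer may be discouraged or
        // disconnected.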
            self.clone().maybe_punish_node_for_tx(pfrom.get_id(), state, None);
        }
    }
}