1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
//! # Executive Module
//!
//! The executive is the main orchestrator for the entire runtime.
//! It has functions that implement the Core, BlockBuilder, and TxPool runtime APIs.
//!
//! It does all the reusable verification of UTXO transactions.
use crate::pallas_applying::{
babbage::{
check_ins_not_empty,
// check_all_ins_in_utxos,
check_preservation_of_value,
check_tx_validity_interval,
check_witness_set,
},
utils::BabbageError::*,
UTxOs,
};
use crate::pallas_codec::utils::CborWrap;
use crate::pallas_primitives::{
babbage::{
MintedDatumOption, MintedScriptRef, MintedTransactionBody, MintedTx,
Tx as PallasTransaction, Value as PallasValue,
},
conway::{MintedTx as ConwayMintedTx, TransactionOutput},
};
use crate::uplc::tx::{eval_phase_two, ResolvedInput, SlotConfig};
use crate::{
checks_interface::{
babbage_minted_tx_from_cbor, babbage_tx_to_cbor, check_min_coin,
conway_minted_tx_from_cbor, mk_utxo_for_babbage_tx,
},
ensure,
types::{Block, BlockNumber, DispatchResult, Header, Input, Output, Transaction, UTxOError},
utxo_set::TransparentUtxoSet,
EXTRINSIC_KEY, HEADER_KEY, HEIGHT_KEY, LOG_TARGET,
};
use crate::{MILLI_SECS_PER_SLOT, ZERO_SLOT, ZERO_TIME};
use alloc::{collections::btree_set::BTreeSet, string::String, vec::Vec};
use log::debug;
use parity_scale_codec::{Decode, Encode};
use sp_runtime::{
traits::{BlakeTwo256, Block as BlockT, Extrinsic, Hash as HashT, Header as HeaderT},
transaction_validity::{
TransactionLongevity, TransactionSource, TransactionValidity, TransactionValidityError,
ValidTransaction,
},
ApplyExtrinsicResult, ExtrinsicInclusionMode, StateVersion,
};
/// Per-output info needed to build the local UTxO set handed to the
/// Babbage phase-1 checks (see `mk_utxo_for_babbage_tx`).
/// Datum and script-ref are optional because they are irrelevant for
/// phase-1 validation here (inline datums are assumed).
type OutputInfoList<'a> = Vec<(
    String, // address in string format
    PallasValue,
    Option<MintedDatumOption<'a>>,
    Option<CborWrap<MintedScriptRef<'a>>>,
)>;
/// The executive is in charge of validating transactions for admittance in the
/// pool and in blocks. It is in charge of *executing* transactions, i.e.,
/// applying them to the ledger.
///
/// It is a stateless unit struct: all chain state is read and written
/// through `sp_io::storage` in the associated functions.
pub struct Executive;
impl Executive
where
Block: BlockT,
Transaction: Extrinsic,
{
/// Checks performed to enter the transaction pool. The response of the node
/// is essentially determined by the outcome of this function.
fn pool_checks(mtx: &MintedTx, _utxos: &UTxOs) -> DispatchResult {
check_ins_not_empty(&mtx.transaction_body.clone())?;
Ok(())
}
/// Checks performed to a transaction with all its requirements satisfied
/// to be included in a block.
fn ledger_checks(mtx: &MintedTx, utxos: &UTxOs) -> DispatchResult {
let tx_body: &MintedTransactionBody = &mtx.transaction_body.clone();
// Next unneeded since already checked at `apply_griffin_transaction`
// check_all_ins_in_utxos(tx_body, utxos)?;
let current_slot = Self::zero_slot() + (Self::block_height() as u64);
check_tx_validity_interval(tx_body, ¤t_slot)?;
check_preservation_of_value(tx_body, utxos)?;
check_witness_set(mtx, utxos)?;
check_min_coin(tx_body)?;
Ok(())
}
fn phase_two_checks(tx_cbor_bytes: &Vec<u8>, input_utxos: Vec<Output>) -> DispatchResult {
let conway_mtx: ConwayMintedTx = conway_minted_tx_from_cbor(&tx_cbor_bytes);
let pallas_input_utxos = input_utxos
.iter()
.map(|ri| TransactionOutput::from(ri.clone()))
.collect::<Vec<_>>();
let pallas_resolved_inputs: Vec<ResolvedInput> = conway_mtx
.transaction_body
.inputs
.iter()
.zip(pallas_input_utxos.iter())
.map(|(input, output)| ResolvedInput {
input: input.clone(),
output: output.clone(),
})
.collect();
let slot_config = SlotConfig {
zero_time: Self::zero_time(),
zero_slot: Self::zero_slot(),
slot_length: MILLI_SECS_PER_SLOT,
};
let phase_two_result = eval_phase_two(
&conway_mtx,
&pallas_resolved_inputs,
None,
None,
&slot_config,
false,
|_| (),
);
ensure!(
phase_two_result.is_ok(),
UTxOError::PhaseTwo(phase_two_result.unwrap_err())
);
Ok(())
}
/// Does pool-style validation of a griffin transaction.
/// Does not commit anything to storage.
/// This returns Ok even if some inputs are still missing because the tagged transaction pool can handle that.
/// We later check that there are no missing inputs in `apply_griffin_transaction`.
///
/// The output includes the list of relevant UTxOs to be used for other
/// checks (in order to avoid a further db search).
fn validate_griffin_transaction(
transaction: &Transaction,
) -> Result<ValidTransaction, UTxOError> {
debug!(
target: LOG_TARGET,
"validating griffin transaction",
);
// Make sure there are no duplicate inputs
{
let input_set: BTreeSet<_> = transaction
.transaction_body
.inputs
.iter()
.map(|o| o.encode())
.collect();
ensure!(
input_set.len() == transaction.transaction_body.inputs.len(),
UTxOError::Babbage(DuplicateInput)
);
}
let mut tx_outs_info: OutputInfoList = Vec::new();
let mut input_utxos: Vec<Output> = Vec::new();
// Add present inputs to a list to be used to produce the local UTxO set.
// Keep track of any missing inputs for use in the tagged transaction pool
let mut missing_inputs = Vec::new();
for input in transaction.transaction_body.inputs.iter() {
if let Some(u) = TransparentUtxoSet::peek_utxo(&input) {
tx_outs_info.push((
hex::encode(u.address.0.as_slice()),
PallasValue::from(u.clone().value),
None, // irrelevant for phase 1 checks (always inline datum)
None,
));
// Repeated info in tx_outs_info, but we need this type for phase 2 checks
input_utxos.push(u);
} else {
missing_inputs.push(input.clone().encode());
}
}
// Make sure no outputs already exist in storage
let tx_hash = BlakeTwo256::hash_of(&transaction.encode());
for index in 0..transaction.transaction_body.outputs.len() {
let input = Input {
tx_hash,
index: index as u32,
};
debug!(
target: LOG_TARGET,
"Checking for pre-existing output {:?}", input
);
ensure!(
TransparentUtxoSet::peek_utxo(&input).is_none(),
UTxOError::Babbage(OutputAlreadyInUTxO)
);
}
// Griffin Tx -> Pallas Tx -> CBOR -> Minted Pallas Tx
// This last one is used to produce the local UTxO set.
let pallas_tx: PallasTransaction = <_>::from(transaction.clone());
let cbor_bytes: Vec<u8> = babbage_tx_to_cbor(&pallas_tx);
let mtx: MintedTx = babbage_minted_tx_from_cbor(&cbor_bytes);
let tx_body: &MintedTransactionBody = &mtx.transaction_body.clone();
let outs_info_clone = tx_outs_info.clone();
let utxos: UTxOs = mk_utxo_for_babbage_tx(tx_body, outs_info_clone.as_slice());
Self::pool_checks(&mtx, &utxos)?;
// Calculate the tx-pool tags provided by this transaction, which
// are just the encoded Inputs
let provides = (0..transaction.transaction_body.outputs.len())
.map(|i| {
let input = Input {
tx_hash,
index: i as u32,
};
input.encode()
})
.collect::<Vec<_>>();
// If any of the inputs are missing, we cannot make any more progress
if !missing_inputs.is_empty() {
debug!(
target: LOG_TARGET,
"Transaction is valid but still has missing inputs. Returning early.",
);
return Ok(ValidTransaction {
requires: missing_inputs,
provides,
priority: 0,
longevity: TransactionLongevity::MAX,
propagate: true,
});
}
// These checks were done in `apply_griffin_transaction`, but we do them here for simplicity.
// This might limit the ledger's ability to accept transactions that would be valid
// in a block, as in chaining.
Self::ledger_checks(&mtx, &utxos)?;
Self::phase_two_checks(&cbor_bytes, input_utxos)?;
// Return the valid transaction
Ok(ValidTransaction {
requires: Vec::new(),
provides,
priority: 0,
longevity: TransactionLongevity::MAX,
propagate: true,
})
}
/// Does full verification and application of griffin transactions.
/// Most of the validation happens in the call to `validate_griffin_transaction`.
/// Once those checks are done we make sure there are no missing inputs and then update storage.
fn apply_griffin_transaction(transaction: &Transaction) -> DispatchResult {
debug!(
target: LOG_TARGET,
"applying griffin transaction {:?}", transaction
);
// Re-do the pre-checks. These should have been done in the pool, but we can't
// guarantee that foreign nodes do these checks faithfully, so we need to check on-chain.
let valid_transaction = Self::validate_griffin_transaction(transaction)?;
// If there are still missing inputs, we cannot execute this,
// although it would be valid in the pool
ensure!(
valid_transaction.requires.is_empty(),
UTxOError::Babbage(InputNotInUTxO)
);
// At this point, all validation is complete, so we can commit the storage changes.
Self::update_storage(transaction);
Ok(())
}
/// Helper function to update the utxo set according to the given transaction.
/// This function does absolutely no validation. It assumes that the transaction
/// has already passed validation. Changes proposed by the transaction are written
/// blindly to storage.
fn update_storage(transaction: &Transaction) {
// Remove verified UTXOs
for input in &transaction.transaction_body.inputs {
TransparentUtxoSet::consume_utxo(input);
}
debug!(
target: LOG_TARGET,
"Transaction before updating storage {:?}", transaction
);
// Write the newly created utxos
for (index, output) in transaction.transaction_body.outputs.iter().enumerate() {
let input = Input {
tx_hash: BlakeTwo256::hash_of(&transaction.encode()),
index: index as u32,
};
TransparentUtxoSet::store_utxo(input, output);
}
}
/// A helper function that allows griffin runtimes to read the current block height
pub fn block_height() -> BlockNumber {
sp_io::storage::get(HEIGHT_KEY)
.and_then(|d| BlockNumber::decode(&mut &*d).ok())
.expect("A height is stored at the beginning of block one and never cleared.")
}
/// A helper function that allows griffin runtimes to read the start posix time of the first
/// block, in milliseconds
pub fn zero_time() -> u64 {
sp_io::storage::get(ZERO_TIME)
.and_then(|d| u64::decode(&mut &*d).ok())
.expect("Failed to read ZERO_TIME from storage.")
}
/// A helper function that allows griffin runtimes to read the slot number of the first block
pub fn zero_slot() -> u64 {
sp_io::storage::get(ZERO_SLOT)
.and_then(|d| u64::decode(&mut &*d).ok())
.expect("Failed to read ZERO_SLOT from storage.")
}
// These next three methods are for the block authoring workflow.
// Open the block, apply zero or more extrinsics, close the block
pub fn open_block(header: &Header) -> ExtrinsicInclusionMode {
debug!(
target: LOG_TARGET,
"Entering initialize_block. header: {:?}", header
);
// Store the transient partial header for updating at the end of the block.
// This will be removed from storage before the end of the block.
sp_io::storage::set(HEADER_KEY, &header.encode());
// Also store the height persistently so it is available when
// performing pool validations and other off-chain runtime calls.
sp_io::storage::set(HEIGHT_KEY, &header.number().encode());
// griffin blocks always allow user transactions.
ExtrinsicInclusionMode::AllExtrinsics
}
pub fn apply_extrinsic(extrinsic: Transaction) -> ApplyExtrinsicResult {
debug!(
target: LOG_TARGET,
"Entering apply_extrinsic: {:?}", extrinsic
);
// Append the current extrinsic to the transient list of extrinsics.
// This will be used when we calculate the extrinsics root at the end of the block.
let mut extrinsics = sp_io::storage::get(EXTRINSIC_KEY)
.and_then(|d| <Vec<Vec<u8>>>::decode(&mut &*d).ok())
.unwrap_or_default();
extrinsics.push(extrinsic.encode());
sp_io::storage::set(EXTRINSIC_KEY, &extrinsics.encode());
// Now actually apply the extrinsic
Self::apply_griffin_transaction(&extrinsic).map_err(|e| {
log::warn!(
target: LOG_TARGET,
"⛔ Griffin Transaction did not validate to be applied due to: {:?}",
e,
);
TransactionValidityError::Invalid(e.into())
})?;
Ok(Ok(()))
}
pub fn close_block() -> Header {
let mut header = sp_io::storage::get(HEADER_KEY)
.and_then(|d| Header::decode(&mut &*d).ok())
.expect("We initialized with header, it never got mutated, qed");
// the header itself contains the state root, so it cannot be inside the state (circular
// dependency..). Make sure in execute block path we have the same rule.
sp_io::storage::clear(HEADER_KEY);
let extrinsics = sp_io::storage::get(EXTRINSIC_KEY)
.and_then(|d| <Vec<Vec<u8>>>::decode(&mut &*d).ok())
.unwrap_or_default();
let extrinsics_root =
<Header as HeaderT>::Hashing::ordered_trie_root(extrinsics, StateVersion::V0);
sp_io::storage::clear(EXTRINSIC_KEY);
header.set_extrinsics_root(extrinsics_root);
let raw_state_root = &sp_io::storage::root(StateVersion::V1)[..];
let state_root = <Header as HeaderT>::Hash::decode(&mut &raw_state_root[..]).unwrap();
header.set_state_root(state_root);
debug!(target: LOG_TARGET, "finalizing block {:?}", header);
header
}
// This one is for the Core api. It is used to import blocks authored by foreign nodes.
pub fn execute_block(block: Block) {
debug!(
target: LOG_TARGET,
"Entering execute_block. block: {:?}", block
);
// Store the header. Although we don't need to mutate it, we do need to make
// info, such as the block height, available to individual pieces. This will
// be cleared before the end of the block
sp_io::storage::set(HEADER_KEY, &block.header().encode());
// Also store the height persistently so it is available when
// performing pool validations and other off-chain runtime calls.
sp_io::storage::set(HEIGHT_KEY, &block.header().number().encode());
// Apply each extrinsic
for extrinsic in block.extrinsics() {
match Self::apply_griffin_transaction(&extrinsic) {
Ok(()) => debug!(
target: LOG_TARGET,
"Successfully executed extrinsic: {:?}", extrinsic
),
Err(e) => panic!("{:?}", e),
}
}
// Clear the transient header out of storage
sp_io::storage::clear(HEADER_KEY);
// Check state root
let raw_state_root = &sp_io::storage::root(StateVersion::V1)[..];
let state_root = <Header as HeaderT>::Hash::decode(&mut &raw_state_root[..]).unwrap();
assert_eq!(
*block.header().state_root(),
state_root,
"state root mismatch"
);
// Check extrinsics root.
let extrinsics = block
.extrinsics()
.iter()
.map(|x| x.encode())
.collect::<Vec<_>>();
let extrinsics_root =
<Header as HeaderT>::Hashing::ordered_trie_root(extrinsics, StateVersion::V0);
assert_eq!(
*block.header().extrinsics_root(),
extrinsics_root,
"extrinsics root mismatch"
);
}
// This one is the pool api. It is used to make preliminary checks in the transaction pool
pub fn validate_transaction(
source: TransactionSource,
tx: Transaction,
block_hash: <Block as BlockT>::Hash,
) -> TransactionValidity {
debug!(
target: LOG_TARGET,
"Entering validate_transaction. source: {:?}, tx: {:?}, block hash: {:?}",
source,
tx,
block_hash
);
let r = Self::validate_griffin_transaction(&tx).map_err(|e| {
log::warn!(
target: LOG_TARGET,
"⛔ Griffin Transaction did not validate (in the pool): {:?}",
e,
);
TransactionValidityError::Invalid(e.into())
});
debug!(target: LOG_TARGET, "Validation result: {:?}", r);
r
}
}