subxt_metadata/utils/
validation.rs

1// Copyright 2019-2025 Parity Technologies (UK) Ltd.
2// This file is dual-licensed as Apache-2.0 or GPL-3.0.
3// see LICENSE for license details.
4
5//! Utility functions for metadata validation.
6
7use crate::{
8    CustomMetadata, CustomValueMetadata, ExtrinsicMetadata, Metadata, PalletMetadata,
9    RuntimeApiMetadata, RuntimeApiMethodMetadata, StorageEntryMetadata, StorageEntryType,
10    ViewFunctionMetadata,
11};
12use alloc::vec::Vec;
13use hashbrown::HashMap;
14use scale_info::{Field, PortableRegistry, TypeDef, TypeDefVariant, Variant, form::PortableForm};
15
// The number of bytes our `hash` function produces.
pub(crate) const HASH_LEN: usize = 32;
/// The fixed-size hash produced by the hashing functions in this module.
pub type Hash = [u8; HASH_LEN];
19
/// Internal byte representation for various metadata types utilized for
/// generating deterministic hashes between different rust versions.
///
/// NOTE(review): the implicit `u8` discriminants (0, 1, 2, ...) are mixed
/// directly into the hashes below, so reordering or inserting variants here
/// would change every produced hash; only append new variants at the end.
#[repr(u8)]
enum TypeBeingHashed {
    Composite,
    Variant,
    Sequence,
    Array,
    Tuple,
    Primitive,
    Compact,
    BitSequence,
}
33
34/// Hashing function utilized internally.
35fn hash(data: &[u8]) -> Hash {
36    sp_crypto_hashing::twox_256(data)
37}
38
39/// XOR two hashes together. Only use this when you don't care about the order
40/// of the things you're hashing together.
41fn xor(a: Hash, b: Hash) -> Hash {
42    let mut out = [0u8; HASH_LEN];
43    for (idx, (a, b)) in a.into_iter().zip(b).enumerate() {
44        out[idx] = a ^ b;
45    }
46    out
47}
48
// Combine some number of HASH_LEN byte hashes and output a single HASH_LEN
// byte hash to uniquely represent the inputs.
//
// `count_idents!` expands to the number of identifiers passed to it; it's used
// below to size the concatenation buffer at compile time.
macro_rules! count_idents {
    () => { 0 };
    ($n:ident $($rest:ident)*) => { 1 + count_idents!($($rest)*) }
}
// Generates a function (e.g. `concat_and_hash3(a b c)` -> `fn concat_and_hash3(a, b, c)`)
// that copies its N hash arguments back-to-back into one buffer and hashes that,
// making the output depend on both the values and the order of the inputs.
macro_rules! concat_and_hash_n {
    ($name:ident($($arg:ident)+)) => {
        fn $name($($arg: &Hash),+) -> Hash {
            // Buffer sized to hold every input hash, back to back.
            let mut out = [0u8; HASH_LEN * count_idents!($($arg)+)];
            let mut start = 0;
            $(
                out[start..start+HASH_LEN].copy_from_slice(&$arg[..]);
                // The increment after the final argument is never read;
                // silence the lint it would otherwise trigger.
                #[allow(unused_assignments)]
                { start += HASH_LEN; }
            )+
            hash(&out)
        }
    }
}
concat_and_hash_n!(concat_and_hash2(a b));
concat_and_hash_n!(concat_and_hash3(a b c));
concat_and_hash_n!(concat_and_hash4(a b c d));
concat_and_hash_n!(concat_and_hash5(a b c d e));
concat_and_hash_n!(concat_and_hash6(a b c d e f));
74
75/// Obtain the hash representation of a `scale_info::Field`.
76fn get_field_hash(
77    registry: &PortableRegistry,
78    field: &Field<PortableForm>,
79    cache: &mut HashMap<u32, CachedHash>,
80) -> Hash {
81    let field_name_bytes = match &field.name {
82        Some(name) => hash(name.as_bytes()),
83        None => [0u8; HASH_LEN],
84    };
85
86    concat_and_hash2(
87        &field_name_bytes,
88        &get_type_hash_recurse(registry, field.ty.id, cache),
89    )
90}
91
92/// Obtain the hash representation of a `scale_info::Variant`.
93fn get_variant_hash(
94    registry: &PortableRegistry,
95    var: &Variant<PortableForm>,
96    cache: &mut HashMap<u32, CachedHash>,
97) -> Hash {
98    let variant_name_bytes = hash(var.name.as_bytes());
99    let variant_field_bytes = var.fields.iter().fold([0u8; HASH_LEN], |bytes, field| {
100        // EncodeAsType and DecodeAsType don't care about variant field ordering,
101        // so XOR the fields to ensure that it doesn't matter.
102        xor(bytes, get_field_hash(registry, field, cache))
103    });
104
105    concat_and_hash2(&variant_name_bytes, &variant_field_bytes)
106}
107
108fn get_type_def_variant_hash(
109    registry: &PortableRegistry,
110    variant: &TypeDefVariant<PortableForm>,
111    only_these_variants: Option<&[&str]>,
112    cache: &mut HashMap<u32, CachedHash>,
113) -> Hash {
114    let variant_id_bytes = [TypeBeingHashed::Variant as u8; HASH_LEN];
115    let variant_field_bytes = variant.variants.iter().fold([0u8; HASH_LEN], |bytes, var| {
116        // With EncodeAsType and DecodeAsType we no longer care which order the variants are in,
117        // as long as all of the names+types are there. XOR to not care about ordering.
118        let should_hash = only_these_variants
119            .as_ref()
120            .map(|only_these_variants| only_these_variants.contains(&var.name.as_str()))
121            .unwrap_or(true);
122        if should_hash {
123            xor(bytes, get_variant_hash(registry, var, cache))
124        } else {
125            bytes
126        }
127    });
128    concat_and_hash2(&variant_id_bytes, &variant_field_bytes)
129}
130
/// Obtain the hash representation of a `scale_info::TypeDef`.
///
/// Each arm mixes in the corresponding `TypeBeingHashed` discriminant so that
/// different kinds of type definition can never collide with one another.
fn get_type_def_hash(
    registry: &PortableRegistry,
    ty_def: &TypeDef<PortableForm>,
    cache: &mut HashMap<u32, CachedHash>,
) -> Hash {
    match ty_def {
        TypeDef::Composite(composite) => {
            let composite_id_bytes = [TypeBeingHashed::Composite as u8; HASH_LEN];
            let composite_field_bytes =
                composite
                    .fields
                    .iter()
                    .fold([0u8; HASH_LEN], |bytes, field| {
                        // With EncodeAsType and DecodeAsType we no longer care which order the fields are in,
                        // as long as all of the names+types are there. XOR to not care about ordering.
                        xor(bytes, get_field_hash(registry, field, cache))
                    });
            concat_and_hash2(&composite_id_bytes, &composite_field_bytes)
        }
        TypeDef::Variant(variant) => get_type_def_variant_hash(registry, variant, None, cache),
        TypeDef::Sequence(sequence) => concat_and_hash2(
            &[TypeBeingHashed::Sequence as u8; HASH_LEN],
            &get_type_hash_recurse(registry, sequence.type_param.id, cache),
        ),
        TypeDef::Array(array) => {
            // Take length into account too; different length must lead to different hash.
            let array_id_bytes = {
                let mut a = [0u8; HASH_LEN];
                a[0] = TypeBeingHashed::Array as u8;
                a[1..5].copy_from_slice(&array.len.to_be_bytes());
                a
            };
            concat_and_hash2(
                &array_id_bytes,
                &get_type_hash_recurse(registry, array.type_param.id, cache),
            )
        }
        TypeDef::Tuple(tuple) => {
            // Tuples are order-sensitive, so chain each field hash into the running hash.
            let mut bytes = hash(&[TypeBeingHashed::Tuple as u8]);
            for field in &tuple.fields {
                bytes = concat_and_hash2(&bytes, &get_type_hash_recurse(registry, field.id, cache));
            }
            bytes
        }
        TypeDef::Primitive(primitive) => {
            // Cloning the 'primitive' type should essentially be a copy.
            hash(&[TypeBeingHashed::Primitive as u8, primitive.clone() as u8])
        }
        TypeDef::Compact(compact) => concat_and_hash2(
            &[TypeBeingHashed::Compact as u8; HASH_LEN],
            &get_type_hash_recurse(registry, compact.type_param.id, cache),
        ),
        // Bit sequences hash both the bit-order and bit-store types.
        TypeDef::BitSequence(bitseq) => concat_and_hash3(
            &[TypeBeingHashed::BitSequence as u8; HASH_LEN],
            &get_type_hash_recurse(registry, bitseq.bit_order_type.id, cache),
            &get_type_hash_recurse(registry, bitseq.bit_store_type.id, cache),
        ),
    }
}
191
/// Indicates whether a hash has been fully computed for a type or not.
/// Used to guard against infinite recursion when hashing recursive types.
#[derive(Clone, Debug)]
pub enum CachedHash {
    /// hash not known yet, but computation has already started
    Recursive,
    /// hash of the type, computation was finished
    Hash(Hash),
}
200
201impl CachedHash {
202    fn hash(&self) -> Hash {
203        match &self {
204            CachedHash::Hash(hash) => *hash,
205            CachedHash::Recursive => [123; HASH_LEN], // some magical value
206        }
207    }
208}
209
/// Obtain the hash representation of a `scale_info::Type` identified by id.
///
/// A fresh cache is created for each top-level call; recursive types are
/// handled internally by the cache logic in [`get_type_hash_recurse`].
///
/// NOTE(review): an earlier version of this doc described an `outer_enum_hashes`
/// argument that no longer exists; this function takes no such parameter.
pub fn get_type_hash(registry: &PortableRegistry, id: u32) -> Hash {
    get_type_hash_recurse(registry, id, &mut HashMap::new())
}
221
222/// Obtain the hash representation of a `scale_info::Type` identified by id.
223fn get_type_hash_recurse(
224    registry: &PortableRegistry,
225    id: u32,
226    cache: &mut HashMap<u32, CachedHash>,
227) -> Hash {
228    // Guard against recursive types, with a 2 step caching approach:
229    //    if the cache has an entry for the id, just return a hash derived from it.
230    //    if the type has not been seen yet, mark it with `CachedHash::Recursive` in the cache and proceed to `get_type_def_hash()`.
231    //        -> During the execution of get_type_def_hash() we might get into get_type_hash(id) again for the original id
232    //            -> in this case the `CachedHash::Recursive` provokes an early return.
233    //        -> Once we return from `get_type_def_hash()` we need to update the cache entry:
234    //            -> We set the cache value to `CachedHash::Hash(type_hash)`, where `type_hash` was returned from `get_type_def_hash()`
235    //            -> It makes sure, that different types end up with different cache values.
236    //
237    // Values in the cache can be thought of as a mapping like this:
238    // type_id ->  not contained           = We haven't seen the type yet.
239    //         -> `CachedHash::Recursive`  = We have seen the type but hash calculation for it hasn't finished yet.
240    //         -> `CachedHash::Hash(hash)` = Hash calculation for the type was completed.
241    if let Some(cached_hash) = cache.get(&id) {
242        return cached_hash.hash();
243    }
244    cache.insert(id, CachedHash::Recursive);
245    let ty = registry
246        .resolve(id)
247        .expect("Type ID provided by the metadata is registered; qed");
248    let type_hash = get_type_def_hash(registry, &ty.type_def, cache);
249    cache.insert(id, CachedHash::Hash(type_hash));
250    type_hash
251}
252
253/// Obtain the hash representation of a `frame_metadata::v15::ExtrinsicMetadata`.
254fn get_extrinsic_hash(registry: &PortableRegistry, extrinsic: &ExtrinsicMetadata) -> Hash {
255    // Get the hashes of the extrinsic type.
256    let address_hash = get_type_hash(registry, extrinsic.address_ty);
257    // The `RuntimeCall` type is intentionally omitted and hashed by the outer enums instead.
258    let signature_hash = get_type_hash(registry, extrinsic.signature_ty);
259
260    // Supported versions are just u8s and we will likely never have more than 32 of these, so put them into
261    // an array of u8s and panic if more than 32.
262    if extrinsic.supported_versions.len() > 32 {
263        panic!("The metadata validation logic does not support more than 32 extrinsic versions.");
264    }
265    let supported_extrinsic_versions = {
266        let mut a = [0u8; 32];
267        a[0..extrinsic.supported_versions.len()].copy_from_slice(&extrinsic.supported_versions);
268        a
269    };
270
271    let mut bytes = concat_and_hash3(
272        &address_hash,
273        &signature_hash,
274        &supported_extrinsic_versions,
275    );
276
277    for signed_extension in extrinsic.transaction_extensions.iter() {
278        bytes = concat_and_hash4(
279            &bytes,
280            &hash(signed_extension.identifier.as_bytes()),
281            &get_type_hash(registry, signed_extension.extra_ty),
282            &get_type_hash(registry, signed_extension.additional_ty),
283        )
284    }
285
286    bytes
287}
288
289/// Get the hash corresponding to a single storage entry.
290fn get_storage_entry_hash(registry: &PortableRegistry, entry: &StorageEntryMetadata) -> Hash {
291    let mut bytes = concat_and_hash3(
292        &hash(entry.name.as_bytes()),
293        // Cloning 'entry.modifier' should essentially be a copy.
294        &[entry.modifier as u8; HASH_LEN],
295        &hash(&entry.default),
296    );
297
298    match &entry.entry_type {
299        StorageEntryType::Plain(ty) => concat_and_hash2(&bytes, &get_type_hash(registry, *ty)),
300        StorageEntryType::Map {
301            hashers,
302            key_ty,
303            value_ty,
304        } => {
305            for hasher in hashers {
306                // Cloning the hasher should essentially be a copy.
307                bytes = concat_and_hash2(&bytes, &[*hasher as u8; HASH_LEN]);
308            }
309            concat_and_hash3(
310                &bytes,
311                &get_type_hash(registry, *key_ty),
312                &get_type_hash(registry, *value_ty),
313            )
314        }
315    }
316}
317
318fn get_custom_metadata_hash(custom_metadata: &CustomMetadata) -> Hash {
319    custom_metadata
320        .iter()
321        .fold([0u8; HASH_LEN], |bytes, custom_value| {
322            xor(bytes, get_custom_value_hash(&custom_value))
323        })
324}
325
326/// Obtain the hash of some custom value in the metadata including it's name/key.
327///
328/// If the `custom_value` has a type id that is not present in the metadata,
329/// only the name and bytes are used for hashing.
330pub fn get_custom_value_hash(custom_value: &CustomValueMetadata) -> Hash {
331    let name_hash = hash(custom_value.name.as_bytes());
332    if custom_value.types.resolve(custom_value.type_id()).is_none() {
333        hash(&name_hash)
334    } else {
335        concat_and_hash2(
336            &name_hash,
337            &get_type_hash(custom_value.types, custom_value.type_id()),
338        )
339    }
340}
341
342/// Obtain the hash for a specific storage item, or an error if it's not found.
343pub fn get_storage_hash(pallet: &PalletMetadata, entry_name: &str) -> Option<Hash> {
344    let storage = pallet.storage()?;
345    let entry = storage.entry_by_name(entry_name)?;
346    let hash = get_storage_entry_hash(pallet.types, entry);
347    Some(hash)
348}
349
350/// Obtain the hash for a specific constant, or an error if it's not found.
351pub fn get_constant_hash(pallet: &PalletMetadata, constant_name: &str) -> Option<Hash> {
352    let constant = pallet.constant_by_name(constant_name)?;
353
354    // We only need to check that the type of the constant asked for matches.
355    let bytes = get_type_hash(pallet.types, constant.ty);
356    Some(bytes)
357}
358
359/// Obtain the hash for a specific call, or an error if it's not found.
360pub fn get_call_hash(pallet: &PalletMetadata, call_name: &str) -> Option<Hash> {
361    let call_variant = pallet.call_variant_by_name(call_name)?;
362
363    // hash the specific variant representing the call we are interested in.
364    let hash = get_variant_hash(pallet.types, call_variant, &mut HashMap::new());
365    Some(hash)
366}
367
368/// Obtain the hash of a specific runtime API method, or an error if it's not found.
369pub fn get_runtime_api_hash(runtime_api: &RuntimeApiMethodMetadata) -> Hash {
370    let registry = runtime_api.types;
371
372    // The trait name is part of the runtime API call that is being
373    // generated for this method. Therefore the trait name is strongly
374    // connected to the method in the same way as a parameter is
375    // to the method.
376    let mut bytes = concat_and_hash2(
377        &hash(runtime_api.trait_name.as_bytes()),
378        &hash(runtime_api.name().as_bytes()),
379    );
380
381    for input in runtime_api.inputs() {
382        bytes = concat_and_hash3(
383            &bytes,
384            &hash(input.name.as_bytes()),
385            &get_type_hash(registry, input.ty),
386        );
387    }
388
389    bytes = concat_and_hash2(&bytes, &get_type_hash(registry, runtime_api.output_ty()));
390
391    bytes
392}
393
394/// Obtain the hash of all of a runtime API trait, including all of its methods.
395pub fn get_runtime_apis_hash(trait_metadata: RuntimeApiMetadata) -> Hash {
396    // Each API is already hashed considering the trait name, so we don't need
397    // to consider thr trait name again here.
398    trait_metadata
399        .methods()
400        .fold([0u8; HASH_LEN], |bytes, method_metadata| {
401            // We don't care what order the trait methods exist in, and want the hash to
402            // be identical regardless. For this, we can just XOR the hashes for each method
403            // together; we'll get the same output whichever order they are XOR'd together in,
404            // so long as each individual method is the same.
405            xor(bytes, get_runtime_api_hash(&method_metadata))
406        })
407}
408
409/// Obtain the hash of a specific view function, or an error if it's not found.
410pub fn get_view_function_hash(view_function: &ViewFunctionMetadata) -> Hash {
411    let registry = view_function.types;
412
413    // The Query ID is `twox_128(pallet_name) ++ twox_128("fn_name(fnarg_types) -> return_ty")`.
414    let mut bytes = *view_function.query_id();
415
416    // This only takes type _names_ into account, so we beef this up by combining with actual
417    // type hashes, in a similar approach to runtime APIs..
418    for input in view_function.inputs() {
419        bytes = concat_and_hash3(
420            &bytes,
421            &hash(input.name.as_bytes()),
422            &get_type_hash(registry, input.ty),
423        );
424    }
425
426    bytes = concat_and_hash2(&bytes, &get_type_hash(registry, view_function.output_ty()));
427
428    bytes
429}
430
431/// Obtain the hash of all of the view functions in a pallet, including all of its methods.
432fn get_pallet_view_functions_hash(pallet_metadata: &PalletMetadata) -> Hash {
433    // Each API is already hashed considering the trait name, so we don't need
434    // to consider thr trait name again here.
435    pallet_metadata
436        .view_functions()
437        .fold([0u8; HASH_LEN], |bytes, method_metadata| {
438            // We don't care what order the view functions are declared in, and want the hash to
439            // be identical regardless. For this, we can just XOR the hashes for each method
440            // together; we'll get the same output whichever order they are XOR'd together in,
441            // so long as each individual method is the same.
442            xor(bytes, get_view_function_hash(&method_metadata))
443        })
444}
445
446/// Obtain the hash representation of a `frame_metadata::v15::PalletMetadata`.
447pub fn get_pallet_hash(pallet: PalletMetadata) -> Hash {
448    let registry = pallet.types;
449
450    let call_bytes = match pallet.call_ty_id() {
451        Some(calls) => get_type_hash(registry, calls),
452        None => [0u8; HASH_LEN],
453    };
454    let event_bytes = match pallet.event_ty_id() {
455        Some(event) => get_type_hash(registry, event),
456        None => [0u8; HASH_LEN],
457    };
458    let error_bytes = match pallet.error_ty_id() {
459        Some(error) => get_type_hash(registry, error),
460        None => [0u8; HASH_LEN],
461    };
462    let constant_bytes = pallet.constants().fold([0u8; HASH_LEN], |bytes, constant| {
463        // We don't care what order the constants occur in, so XOR together the combinations
464        // of (constantName, constantType) to make the order we see them irrelevant.
465        let constant_hash = concat_and_hash2(
466            &hash(constant.name.as_bytes()),
467            &get_type_hash(registry, constant.ty()),
468        );
469        xor(bytes, constant_hash)
470    });
471    let storage_bytes = match pallet.storage() {
472        Some(storage) => {
473            let prefix_hash = hash(storage.prefix().as_bytes());
474            let entries_hash = storage
475                .entries()
476                .iter()
477                .fold([0u8; HASH_LEN], |bytes, entry| {
478                    // We don't care what order the storage entries occur in, so XOR them together
479                    // to make the order irrelevant.
480                    xor(bytes, get_storage_entry_hash(registry, entry))
481                });
482            concat_and_hash2(&prefix_hash, &entries_hash)
483        }
484        None => [0u8; HASH_LEN],
485    };
486    let view_functions_bytes = get_pallet_view_functions_hash(&pallet);
487
488    // Hash all of the above together:
489    concat_and_hash6(
490        &call_bytes,
491        &event_bytes,
492        &error_bytes,
493        &constant_bytes,
494        &storage_bytes,
495        &view_functions_bytes,
496    )
497}
498
/// Obtain a hash representation of our metadata or some part of it.
/// This is obtained by calling [`crate::Metadata::hasher()`].
pub struct MetadataHasher<'a> {
    /// The metadata being hashed.
    metadata: &'a Metadata,
    /// If set, only pallets with these names contribute to the hash.
    specific_pallets: Option<Vec<&'a str>>,
    /// If set, only runtime API traits with these names contribute to the hash.
    specific_runtime_apis: Option<Vec<&'a str>>,
    /// Whether custom values are included in the hash (true by default).
    include_custom_values: bool,
}
507
508impl<'a> MetadataHasher<'a> {
509    /// Create a new [`MetadataHasher`]
510    pub(crate) fn new(metadata: &'a Metadata) -> Self {
511        Self {
512            metadata,
513            specific_pallets: None,
514            specific_runtime_apis: None,
515            include_custom_values: true,
516        }
517    }
518
519    /// Only hash the provided pallets instead of hashing every pallet.
520    pub fn only_these_pallets<S: AsRef<str>>(&mut self, specific_pallets: &'a [S]) -> &mut Self {
521        self.specific_pallets = Some(specific_pallets.iter().map(|n| n.as_ref()).collect());
522        self
523    }
524
525    /// Only hash the provided runtime APIs instead of hashing every runtime API
526    pub fn only_these_runtime_apis<S: AsRef<str>>(
527        &mut self,
528        specific_runtime_apis: &'a [S],
529    ) -> &mut Self {
530        self.specific_runtime_apis =
531            Some(specific_runtime_apis.iter().map(|n| n.as_ref()).collect());
532        self
533    }
534
535    /// Do not hash the custom values
536    pub fn ignore_custom_values(&mut self) -> &mut Self {
537        self.include_custom_values = false;
538        self
539    }
540
541    /// Hash the given metadata.
542    pub fn hash(&self) -> Hash {
543        let metadata = self.metadata;
544
545        let pallet_hash = metadata.pallets().fold([0u8; HASH_LEN], |bytes, pallet| {
546            // If specific pallets are given, only include this pallet if it is in the specific pallets.
547            let should_hash = self
548                .specific_pallets
549                .as_ref()
550                .map(|specific_pallets| specific_pallets.contains(&pallet.name()))
551                .unwrap_or(true);
552            // We don't care what order the pallets are seen in, so XOR their
553            // hashes together to be order independent.
554            if should_hash {
555                xor(bytes, get_pallet_hash(pallet))
556            } else {
557                bytes
558            }
559        });
560
561        let apis_hash = metadata
562            .runtime_api_traits()
563            .fold([0u8; HASH_LEN], |bytes, api| {
564                // If specific runtime APIs are given, only include this pallet if it is in the specific runtime APIs.
565                let should_hash = self
566                    .specific_runtime_apis
567                    .as_ref()
568                    .map(|specific_runtime_apis| specific_runtime_apis.contains(&api.name()))
569                    .unwrap_or(true);
570                // We don't care what order the runtime APIs are seen in, so XOR their
571                // hashes together to be order independent.
572                if should_hash {
573                    xor(bytes, get_runtime_apis_hash(api))
574                } else {
575                    bytes
576                }
577            });
578
579        let outer_enums_hash = concat_and_hash3(
580            &get_type_hash(&metadata.types, metadata.outer_enums.call_enum_ty),
581            &get_type_hash(&metadata.types, metadata.outer_enums.event_enum_ty),
582            &get_type_hash(&metadata.types, metadata.outer_enums.error_enum_ty),
583        );
584
585        let extrinsic_hash = get_extrinsic_hash(&metadata.types, &metadata.extrinsic);
586
587        let custom_values_hash = self
588            .include_custom_values
589            .then(|| get_custom_metadata_hash(&metadata.custom()))
590            .unwrap_or_default();
591
592        concat_and_hash5(
593            &pallet_hash,
594            &apis_hash,
595            &outer_enums_hash,
596            &extrinsic_hash,
597            &custom_values_hash,
598        )
599    }
600}
601
602#[cfg(test)]
603mod tests {
604    use super::*;
605    use bitvec::{order::Lsb0, vec::BitVec};
606    use frame_metadata::v15;
607    use scale_info::{Registry, meta_type};
608
609    // Define recursive types.
610    #[allow(dead_code)]
611    #[derive(scale_info::TypeInfo)]
612    struct A {
613        pub b: Box<B>,
614    }
615
616    #[allow(dead_code)]
617    #[derive(scale_info::TypeInfo)]
618    struct B {
619        pub a: Box<A>,
620    }
621
622    // Define TypeDef supported types.
623    #[allow(dead_code)]
624    #[derive(scale_info::TypeInfo)]
625    // TypeDef::Composite with TypeDef::Array with Typedef::Primitive.
626    struct AccountId32(Hash);
627
628    #[allow(dead_code)]
629    #[derive(scale_info::TypeInfo)]
630    // TypeDef::Variant.
631    enum DigestItem {
632        PreRuntime(
633            // TypeDef::Array with primitive.
634            [::core::primitive::u8; 4usize],
635            // TypeDef::Sequence.
636            ::std::vec::Vec<::core::primitive::u8>,
637        ),
638        Other(::std::vec::Vec<::core::primitive::u8>),
639        // Nested TypeDef::Tuple.
640        RuntimeEnvironmentUpdated(((i8, i16), (u32, u64))),
641        // TypeDef::Compact.
642        Index(#[codec(compact)] ::core::primitive::u8),
643        // TypeDef::BitSequence.
644        BitSeq(BitVec<u8, Lsb0>),
645    }
646
647    #[allow(dead_code)]
648    #[derive(scale_info::TypeInfo)]
649    // Ensure recursive types and TypeDef variants are captured.
650    struct MetadataTestType {
651        recursive: A,
652        composite: AccountId32,
653        type_def: DigestItem,
654    }
655
656    #[allow(dead_code)]
657    #[derive(scale_info::TypeInfo)]
658    // Simulate a PalletCallMetadata.
659    enum Call {
660        #[codec(index = 0)]
661        FillBlock { ratio: AccountId32 },
662        #[codec(index = 1)]
663        Remark { remark: DigestItem },
664    }
665
666    fn build_default_extrinsic() -> v15::ExtrinsicMetadata {
667        v15::ExtrinsicMetadata {
668            version: 0,
669            signed_extensions: vec![],
670            address_ty: meta_type::<()>(),
671            call_ty: meta_type::<()>(),
672            signature_ty: meta_type::<()>(),
673            extra_ty: meta_type::<()>(),
674        }
675    }
676
677    fn default_pallet() -> v15::PalletMetadata {
678        v15::PalletMetadata {
679            name: "Test",
680            storage: None,
681            calls: None,
682            event: None,
683            constants: vec![],
684            error: None,
685            index: 0,
686            docs: vec![],
687        }
688    }
689
690    fn build_default_pallets() -> Vec<v15::PalletMetadata> {
691        vec![
692            v15::PalletMetadata {
693                name: "First",
694                calls: Some(v15::PalletCallMetadata {
695                    ty: meta_type::<MetadataTestType>(),
696                }),
697                ..default_pallet()
698            },
699            v15::PalletMetadata {
700                name: "Second",
701                index: 1,
702                calls: Some(v15::PalletCallMetadata {
703                    ty: meta_type::<(DigestItem, AccountId32, A)>(),
704                }),
705                ..default_pallet()
706            },
707        ]
708    }
709
710    fn pallets_to_metadata(pallets: Vec<v15::PalletMetadata>) -> Metadata {
711        v15::RuntimeMetadataV15::new(
712            pallets,
713            build_default_extrinsic(),
714            meta_type::<()>(),
715            vec![],
716            v15::OuterEnums {
717                call_enum_ty: meta_type::<()>(),
718                event_enum_ty: meta_type::<()>(),
719                error_enum_ty: meta_type::<()>(),
720            },
721            v15::CustomMetadata {
722                map: Default::default(),
723            },
724        )
725        .try_into()
726        .expect("can build valid metadata")
727    }
728
729    #[test]
730    fn different_pallet_index() {
731        let pallets = build_default_pallets();
732        let mut pallets_swap = pallets.clone();
733
734        let metadata = pallets_to_metadata(pallets);
735
736        // Change the order in which pallets are registered.
737        pallets_swap.swap(0, 1);
738        pallets_swap[0].index = 0;
739        pallets_swap[1].index = 1;
740        let metadata_swap = pallets_to_metadata(pallets_swap);
741
742        let hash = MetadataHasher::new(&metadata).hash();
743        let hash_swap = MetadataHasher::new(&metadata_swap).hash();
744
745        // Changing pallet order must still result in a deterministic unique hash.
746        assert_eq!(hash, hash_swap);
747    }
748
749    #[test]
750    fn recursive_type() {
751        let mut pallet = default_pallet();
752        pallet.calls = Some(v15::PalletCallMetadata {
753            ty: meta_type::<A>(),
754        });
755        let metadata = pallets_to_metadata(vec![pallet]);
756
757        // Check hashing algorithm finishes on a recursive type.
758        MetadataHasher::new(&metadata).hash();
759    }
760
761    #[test]
762    /// Ensure correctness of hashing when parsing the `metadata.types`.
763    ///
764    /// Having a recursive structure `A: { B }` and `B: { A }` registered in different order
765    /// `types: { { id: 0, A }, { id: 1, B } }` and `types: { { id: 0, B }, { id: 1, A } }`
766    /// must produce the same deterministic hashing value.
767    fn recursive_types_different_order() {
768        let mut pallets = build_default_pallets();
769        pallets[0].calls = Some(v15::PalletCallMetadata {
770            ty: meta_type::<A>(),
771        });
772        pallets[1].calls = Some(v15::PalletCallMetadata {
773            ty: meta_type::<B>(),
774        });
775        pallets[1].index = 1;
776        let mut pallets_swap = pallets.clone();
777        let metadata = pallets_to_metadata(pallets);
778
779        pallets_swap.swap(0, 1);
780        pallets_swap[0].index = 0;
781        pallets_swap[1].index = 1;
782        let metadata_swap = pallets_to_metadata(pallets_swap);
783
784        let hash = MetadataHasher::new(&metadata).hash();
785        let hash_swap = MetadataHasher::new(&metadata_swap).hash();
786
787        // Changing pallet order must still result in a deterministic unique hash.
788        assert_eq!(hash, hash_swap);
789    }
790
791    #[allow(dead_code)]
792    #[derive(scale_info::TypeInfo)]
793    struct Aba {
794        ab: (A, B),
795        other: A,
796    }
797
798    #[allow(dead_code)]
799    #[derive(scale_info::TypeInfo)]
800    struct Abb {
801        ab: (A, B),
802        other: B,
803    }
804
805    #[test]
806    /// Ensure ABB and ABA have a different structure:
807    fn do_not_reuse_visited_type_ids() {
808        let metadata_hash_with_type = |ty| {
809            let mut pallets = build_default_pallets();
810            pallets[0].calls = Some(v15::PalletCallMetadata { ty });
811            let metadata = pallets_to_metadata(pallets);
812            MetadataHasher::new(&metadata).hash()
813        };
814
815        let aba_hash = metadata_hash_with_type(meta_type::<Aba>());
816        let abb_hash = metadata_hash_with_type(meta_type::<Abb>());
817
818        assert_ne!(aba_hash, abb_hash);
819    }
820
821    #[test]
822    fn hash_cache_gets_filled_with_correct_hashes() {
823        let mut registry = Registry::new();
824        let a_type_id = registry.register_type(&meta_type::<A>()).id;
825        let b_type_id = registry.register_type(&meta_type::<B>()).id;
826        let registry: PortableRegistry = registry.into();
827
828        let mut cache = HashMap::new();
829
830        let a_hash = get_type_hash_recurse(&registry, a_type_id, &mut cache);
831        let a_hash2 = get_type_hash_recurse(&registry, a_type_id, &mut cache);
832        let b_hash = get_type_hash_recurse(&registry, b_type_id, &mut cache);
833
834        let CachedHash::Hash(a_cache_hash) = cache[&a_type_id] else {
835            panic!()
836        };
837        let CachedHash::Hash(b_cache_hash) = cache[&b_type_id] else {
838            panic!()
839        };
840
841        assert_eq!(a_hash, a_cache_hash);
842        assert_eq!(b_hash, b_cache_hash);
843
844        assert_eq!(a_hash, a_hash2);
845        assert_ne!(a_hash, b_hash);
846    }
847
    #[test]
    // Redundant clone clippy warning is a lie; https://github.com/rust-lang/rust-clippy/issues/10870
    #[allow(clippy::redundant_clone)]
    fn pallet_hash_correctness() {
        // Assert that two pallet definitions produce *different* metadata hashes
        // (each is wrapped in its own single-pallet metadata before hashing).
        let compare_pallets_hash = |lhs: &v15::PalletMetadata, rhs: &v15::PalletMetadata| {
            let metadata = pallets_to_metadata(vec![lhs.clone()]);
            let hash = MetadataHasher::new(&metadata).hash();

            let metadata = pallets_to_metadata(vec![rhs.clone()]);
            let new_hash = MetadataHasher::new(&metadata).hash();

            assert_ne!(hash, new_hash);
        };

        // Build metadata progressively from an empty pallet to a fully populated pallet.
        // After each addition, the pallet must hash differently from the previous step.
        let mut pallet = default_pallet();
        let pallet_lhs = pallet.clone();
        // Step 1: add a storage entry.
        pallet.storage = Some(v15::PalletStorageMetadata {
            prefix: "Storage",
            entries: vec![v15::StorageEntryMetadata {
                name: "BlockWeight",
                modifier: v15::StorageEntryModifier::Default,
                ty: v15::StorageEntryType::Plain(meta_type::<u8>()),
                default: vec![],
                docs: vec![],
            }],
        });
        compare_pallets_hash(&pallet_lhs, &pallet);

        let pallet_lhs = pallet.clone();
        // Step 2: add calls. Calls are similar to:
        //
        // ```
        // pub enum Call {
        //     call_name_01 { arg01: type },
        //     call_name_02 { arg01: type, arg02: type }
        // }
        // ```
        pallet.calls = Some(v15::PalletCallMetadata {
            ty: meta_type::<Call>(),
        });
        compare_pallets_hash(&pallet_lhs, &pallet);

        let pallet_lhs = pallet.clone();
        // Step 3: add events. Events are similar to Calls.
        pallet.event = Some(v15::PalletEventMetadata {
            ty: meta_type::<Call>(),
        });
        compare_pallets_hash(&pallet_lhs, &pallet);

        let pallet_lhs = pallet.clone();
        // Step 4: add a constant.
        pallet.constants = vec![v15::PalletConstantMetadata {
            name: "BlockHashCount",
            ty: meta_type::<u64>(),
            value: vec![96u8, 0, 0, 0],
            docs: vec![],
        }];
        compare_pallets_hash(&pallet_lhs, &pallet);

        let pallet_lhs = pallet.clone();
        // Step 5: add an error type.
        pallet.error = Some(v15::PalletErrorMetadata {
            ty: meta_type::<MetadataTestType>(),
        });
        compare_pallets_hash(&pallet_lhs, &pallet);
    }
913
914    #[test]
915    fn metadata_per_pallet_hash_correctness() {
916        let pallets = build_default_pallets();
917
918        // Build metadata with just the first pallet.
919        let metadata_one = pallets_to_metadata(vec![pallets[0].clone()]);
920        // Build metadata with both pallets.
921        let metadata_both = pallets_to_metadata(pallets);
922
923        // Hashing will ignore any non-existant pallet and return the same result.
924        let hash = MetadataHasher::new(&metadata_one)
925            .only_these_pallets(&["First", "Second"])
926            .hash();
927        let hash_rhs = MetadataHasher::new(&metadata_one)
928            .only_these_pallets(&["First"])
929            .hash();
930        assert_eq!(hash, hash_rhs, "hashing should ignore non-existant pallets");
931
932        // Hashing one pallet from metadata with 2 pallets inserted will ignore the second pallet.
933        let hash_second = MetadataHasher::new(&metadata_both)
934            .only_these_pallets(&["First"])
935            .hash();
936        assert_eq!(
937            hash_second, hash,
938            "hashing one pallet should ignore the others"
939        );
940
941        // Check hashing with all pallets.
942        let hash_second = MetadataHasher::new(&metadata_both)
943            .only_these_pallets(&["First", "Second"])
944            .hash();
945        assert_ne!(
946            hash_second, hash,
947            "hashing both pallets should produce a different result from hashing just one pallet"
948        );
949    }
950
    #[test]
    fn field_semantic_changes() {
        // Get a hash representation of the provided meta type,
        // inserted in the context of pallet metadata call.
        let to_hash = |meta_ty| {
            let pallet = v15::PalletMetadata {
                calls: Some(v15::PalletCallMetadata { ty: meta_ty }),
                ..default_pallet()
            };
            let metadata = pallets_to_metadata(vec![pallet]);
            MetadataHasher::new(&metadata).hash()
        };

        // Same variants and fields as `EnumA2` below, just declared in a
        // different order. (`//` comments only: `///` docs would be picked up
        // by the `TypeInfo` derive and change the metadata being hashed.)
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        enum EnumA1 {
            First { hi: u8, bye: String },
            Second(u32),
            Third,
        }
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        enum EnumA2 {
            Second(u32),
            Third,
            First { bye: String, hi: u8 },
        }

        // EncodeAsType and DecodeAsType only care about enum variant names
        // and not indexes or field ordering or the enum name itself.
        assert_eq!(
            to_hash(meta_type::<EnumA1>()),
            to_hash(meta_type::<EnumA2>())
        );

        // Same fields as `StructB2`, in a different order.
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        struct StructB1 {
            hello: bool,
            another: [u8; 32],
        }
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        struct StructB2 {
            another: [u8; 32],
            hello: bool,
        }

        // As with enums, struct names and field orders are irrelevant as long as
        // the field names and types are the same.
        assert_eq!(
            to_hash(meta_type::<StructB1>()),
            to_hash(meta_type::<StructB2>())
        );

        // Binary-compatible single-variant enums whose variant names differ.
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        enum EnumC1 {
            First(u8),
        }
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        enum EnumC2 {
            Second(u8),
        }

        // The enums are binary compatible, but the variants have different names, so
        // semantically they are different and should not be equal.
        assert_ne!(
            to_hash(meta_type::<EnumC1>()),
            to_hash(meta_type::<EnumC2>())
        );

        // Binary-compatible enums whose *field* names differ inside the variant.
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        enum EnumD1 {
            First { a: u8 },
        }
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        enum EnumD2 {
            First { b: u8 },
        }

        // Named fields contain a different semantic meaning ('a' and 'b') despite
        // being binary compatible, so hashes should be different.
        assert_ne!(
            to_hash(meta_type::<EnumD1>()),
            to_hash(meta_type::<EnumD2>())
        );

        // Binary-compatible structs whose field names differ.
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        struct StructE1 {
            a: u32,
        }
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        struct StructE2 {
            b: u32,
        }

        // Similar to enums, struct fields that contain a different semantic meaning
        // ('a' and 'b') despite being binary compatible will have different hashes.
        assert_ne!(
            to_hash(meta_type::<StructE1>()),
            to_hash(meta_type::<StructE2>())
        );
    }
1060
1061    use frame_metadata::v15::{
1062        PalletEventMetadata, PalletStorageMetadata, StorageEntryMetadata, StorageEntryModifier,
1063    };
1064
    /// Build a two-pallet v15 metadata instance where the outer `Events` enum
    /// has one variant per pallet, and the "First" pallet's storage entry
    /// references `Vec<Events>`. Used by `hash_comparison_trimmed_metadata`.
    fn metadata_with_pallet_events() -> v15::RuntimeMetadataV15 {
        // Event payload for the "First" pallet.
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        struct FirstEvent {
            s: String,
        }

        // Event payload for the "Second" pallet.
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        struct SecondEvent {
            n: u8,
        }

        // Outer event enum: one variant per pallet.
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        enum Events {
            First(FirstEvent),
            Second(SecondEvent),
        }

        // Outer error enum: one variant per pallet.
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        enum Errors {
            First(DispatchError),
            Second(DispatchError),
        }

        // Outer call enum: one variant per pallet.
        #[allow(dead_code)]
        #[derive(scale_info::TypeInfo)]
        enum Calls {
            First(u8),
            Second(u8),
        }

        #[allow(dead_code)]
        enum DispatchError {
            A,
            B,
            C,
        }

        // Hand-written impl so the type reports the `sp_runtime::DispatchError`
        // path with no variants. NOTE(review): presumably this mimics the real
        // runtime dispatch-error type so stripping/hashing treats it specially
        // — confirm against the StripMetadata implementation.
        impl scale_info::TypeInfo for DispatchError {
            type Identity = DispatchError;

            fn type_info() -> scale_info::Type {
                scale_info::Type {
                    path: scale_info::Path {
                        segments: vec!["sp_runtime", "DispatchError"],
                    },
                    type_params: vec![],
                    type_def: TypeDef::Variant(TypeDefVariant { variants: vec![] }),
                    docs: vec![],
                }
            }
        }

        let pallets = vec![
            v15::PalletMetadata {
                name: "First",
                index: 0,
                calls: Some(v15::PalletCallMetadata {
                    ty: meta_type::<u8>(),
                }),
                storage: Some(PalletStorageMetadata {
                    prefix: "___",
                    entries: vec![StorageEntryMetadata {
                        name: "Hello",
                        modifier: StorageEntryModifier::Optional,
                        // Note: This is the important part here:
                        // The Events type will be trimmed down and this trimming needs to be reflected
                        // when the hash of this storage item is computed.
                        ty: frame_metadata::v14::StorageEntryType::Plain(meta_type::<Vec<Events>>()),
                        default: vec![],
                        docs: vec![],
                    }],
                }),
                event: Some(PalletEventMetadata {
                    ty: meta_type::<FirstEvent>(),
                }),
                constants: vec![],
                error: None,
                docs: vec![],
            },
            v15::PalletMetadata {
                name: "Second",
                index: 1,
                calls: Some(v15::PalletCallMetadata {
                    ty: meta_type::<u64>(),
                }),
                storage: None,
                event: Some(PalletEventMetadata {
                    ty: meta_type::<SecondEvent>(),
                }),
                constants: vec![],
                error: None,
                docs: vec![],
            },
        ];

        v15::RuntimeMetadataV15::new(
            pallets,
            build_default_extrinsic(),
            meta_type::<()>(),
            vec![],
            v15::OuterEnums {
                call_enum_ty: meta_type::<Calls>(),
                event_enum_ty: meta_type::<Events>(),
                error_enum_ty: meta_type::<Errors>(),
            },
            v15::CustomMetadata {
                map: Default::default(),
            },
        )
    }
1179
1180    #[test]
1181    fn hash_comparison_trimmed_metadata() {
1182        use subxt_utils_stripmetadata::StripMetadata;
1183
1184        // trim the metadata:
1185        let metadata = metadata_with_pallet_events();
1186        let trimmed_metadata = {
1187            let mut m = metadata.clone();
1188            m.strip_metadata(|e| e == "First", |_| true);
1189            m
1190        };
1191
1192        // Now convert it into our inner repr:
1193        let metadata = Metadata::try_from(metadata).unwrap();
1194        let trimmed_metadata = Metadata::try_from(trimmed_metadata).unwrap();
1195
1196        // test that the hashes are the same:
1197        let hash = MetadataHasher::new(&metadata)
1198            .only_these_pallets(&["First"])
1199            .hash();
1200        let hash_trimmed = MetadataHasher::new(&trimmed_metadata).hash();
1201
1202        assert_eq!(hash, hash_trimmed);
1203    }
1204}