#![forbid(unsafe_code)]
#![warn(clippy::cast_possible_truncation)]
extern crate snarkvm_console as console;
#[macro_use]
extern crate tracing;
pub use snarkvm_ledger_authority as authority;
pub use snarkvm_ledger_block as block;
pub use snarkvm_ledger_committee as committee;
pub use snarkvm_ledger_narwhal as narwhal;
pub use snarkvm_ledger_puzzle as puzzle;
pub use snarkvm_ledger_query as query;
pub use snarkvm_ledger_store as store;
#[cfg(any(test, feature = "test-helpers"))]
pub mod test_helpers;
mod error;
pub use error::*;
mod helpers;
pub use helpers::*;
pub use crate::block::*;
mod check_next_block;
pub use check_next_block::{CheckBlockError, PendingBlock};
mod advance;
mod check_transaction_basic;
mod contains;
mod find;
mod get;
mod is_solution_limit_reached;
mod iterators;
#[cfg(test)]
mod tests;
use console::{
account::{Address, GraphKey, PrivateKey, ViewKey},
network::prelude::*,
program::{Ciphertext, Entry, Identifier, Literal, Plaintext, ProgramID, Record, StatePath, Value},
types::{Field, Group},
};
use snarkvm_ledger_authority::Authority;
use snarkvm_ledger_committee::Committee;
use snarkvm_ledger_narwhal::{BatchCertificate, Subdag, Transmission, TransmissionID};
use snarkvm_ledger_puzzle::{Puzzle, PuzzleSolutions, Solution, SolutionID};
use snarkvm_ledger_query::QueryTrait;
use snarkvm_ledger_store::{ConsensusStorage, ConsensusStore};
use snarkvm_synthesizer::{
program::{FinalizeGlobalState, Program},
vm::VM,
};
use aleo_std::{
StorageMode,
prelude::{finish, lap, timer},
};
use anyhow::{Context, Result};
use core::ops::Range;
use indexmap::IndexMap;
#[cfg(feature = "locktick")]
use locktick::parking_lot::{Mutex, RwLock};
use lru::LruCache;
#[cfg(not(feature = "locktick"))]
use parking_lot::{Mutex, RwLock};
use rand::{prelude::IteratorRandom, rngs::OsRng};
use std::{borrow::Cow, collections::HashSet, sync::Arc};
use time::OffsetDateTime;
#[cfg(not(feature = "serial"))]
use rayon::prelude::*;
/// A map of record entries, keyed by a field element (presumably the record
/// commitment — confirm against `find_records` callers).
pub type RecordMap<N> = IndexMap<Field<N>, Record<N, Plaintext<N>>>;

/// The maximum number of committees retained in the LRU committee cache.
const COMMITTEE_CACHE_SIZE: usize = 16;
/// A filter that controls which records are returned when scanning the ledger
/// (used by `find_records`).
#[derive(Copy, Clone, Debug)]
pub enum RecordsFilter<N: Network> {
    /// Return all records, regardless of spent status.
    All,
    /// Return only spent records.
    Spent,
    /// Return only unspent records.
    Unspent,
    /// Return spent records, using the given private key
    /// (presumably a slower, key-based spent check — confirm in `find_records`).
    SlowSpent(PrivateKey<N>),
    /// Return unspent records, using the given private key
    /// (presumably a slower, key-based spent check — confirm in `find_records`).
    SlowUnspent(PrivateKey<N>),
}
/// The ledger over the blockchain state. Cloning is cheap: the inner state is
/// shared behind an `Arc`.
#[derive(Clone)]
pub struct Ledger<N: Network, C: ConsensusStorage<N>>(Arc<InnerLedger<N, C>>);
impl<N: Network, C: ConsensusStorage<N>> Deref for Ledger<N, C> {
    type Target = InnerLedger<N, C>;

    /// Dereferences to the shared inner ledger, so `InnerLedger` methods can be
    /// called directly on a `Ledger`.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
/// The shared, internal state behind a [`Ledger`].
#[doc(hidden)]
pub struct InnerLedger<N: Network, C: ConsensusStorage<N>> {
    /// The VM state.
    vm: VM<N, C>,
    /// The genesis block.
    genesis_block: Block<N>,
    /// The cached current epoch hash (`None` until initialized in `load_unchecked`).
    current_epoch_hash: RwLock<Option<N::BlockHash>>,
    /// The cached current committee (`None` until initialized).
    current_committee: RwLock<Option<Committee<N>>>,
    /// The latest block.
    current_block: RwLock<Block<N>>,
    /// An LRU cache of committees, keyed by a `u64`
    /// (presumably the round number — confirm against callers).
    committee_cache: Mutex<LruCache<u64, Committee<N>>>,
    /// A cache mapping prover addresses to their solution count in the current epoch.
    epoch_provers_cache: Arc<RwLock<IndexMap<Address<N>, u32>>>,
}
impl<N: Network, C: ConsensusStorage<N>> Ledger<N, C> {
    /// Loads the ledger from storage, verifying that the given genesis block is
    /// present and spot-checking that a random sample of stored blocks loads.
    ///
    /// # Errors
    /// Returns an error if the stored genesis block does not match `genesis_block`,
    /// or if loading the ledger or any sampled block fails.
    pub fn load(genesis_block: Block<N>, storage_mode: StorageMode) -> Result<Self> {
        let timer = timer!("Ledger::load");

        // Retrieve the genesis hash.
        let genesis_hash = genesis_block.hash();
        // Initialize the ledger.
        let ledger = Self::load_unchecked(genesis_block, storage_mode)?;

        // Ensure the ledger contains the correct genesis block.
        if !ledger.contains_block_hash(&genesis_hash)? {
            bail!("Incorrect genesis block (run 'snarkos clean' and try again)")
        }

        // Spot-check: sample up to `NUM_BLOCKS` random heights and ensure each block loads.
        const NUM_BLOCKS: usize = 10;
        // Retrieve the latest height.
        let latest_height = ledger.current_block.read().height();
        debug_assert_eq!(latest_height, ledger.vm.block_store().max_height().unwrap(), "Mismatch in latest height");
        // Sample the heights (fewer than `NUM_BLOCKS` if the chain is short).
        let block_heights: Vec<u32> =
            (0..=latest_height).choose_multiple(&mut OsRng, (latest_height as usize).min(NUM_BLOCKS));
        cfg_into_iter!(block_heights).try_for_each(|height| {
            ledger.get_block(height)?;
            Ok::<_, Error>(())
        })?;
        lap!(timer, "Check existence of {NUM_BLOCKS} random blocks");

        finish!(timer);
        Ok(ledger)
    }

    /// Loads the ledger from storage **without** verifying the genesis block hash.
    ///
    /// If storage is empty, the given genesis block is applied first. The stored
    /// height and state root are checked against the cached block tree, and the
    /// in-memory caches (block, committee, epoch hash, epoch provers) are primed.
    ///
    /// # Errors
    /// Returns an error if the consensus store cannot be opened, if the stored
    /// state disagrees with the cached block tree, or if any cache priming fails.
    pub fn load_unchecked(genesis_block: Block<N>, storage_mode: StorageMode) -> Result<Self> {
        let timer = timer!("Ledger::load_unchecked");
        info!("Loading the ledger from storage...");

        // Retrieve the consensus store.
        let store = match ConsensusStore::<N, C>::open(storage_mode) {
            Ok(store) => store,
            Err(e) => bail!("Failed to load ledger (run 'snarkos clean' and try again)\n\n{e}\n"),
        };
        lap!(timer, "Load consensus store");

        // Initialize a new VM.
        let vm = VM::from(store)?;
        lap!(timer, "Initialize a new VM");

        // Retrieve the current committee, if one exists in storage.
        let current_committee = vm.finalize_store().committee_store().current_committee().ok();
        // Initialize the committee cache; `COMMITTEE_CACHE_SIZE` is nonzero, so `try_into` cannot fail.
        let committee_cache = Mutex::new(LruCache::new(COMMITTEE_CACHE_SIZE.try_into().unwrap()));

        // Initialize the ledger.
        let ledger = Self(Arc::new(InnerLedger {
            vm,
            genesis_block: genesis_block.clone(),
            current_epoch_hash: Default::default(),
            current_committee: RwLock::new(current_committee),
            current_block: RwLock::new(genesis_block.clone()),
            committee_cache,
            epoch_provers_cache: Default::default(),
        }));

        // If storage is empty, apply the genesis block; otherwise use the stored height.
        let max_stored_height = ledger.vm.block_store().max_height();
        let latest_height = if let Some(max_height) = max_stored_height {
            max_height
        } else {
            ledger.advance_to_next_block(&genesis_block)?;
            0
        };
        lap!(timer, "Initialize genesis");

        // Sanity check: the stored height must match the cached block tree.
        ensure!(
            latest_height == ledger.vm().block_store().current_block_height(),
            "The stored height is different than the one in the block tree; \
             please ensure that the cached block tree is valid or delete the \
             'block_tree' file from the ledger folder"
        );

        // Sanity check: the cached block tree root must match the stored state root.
        let tree_root = <N::StateRoot>::from(ledger.vm().block_store().get_block_tree_root());
        let state_root = ledger
            .vm()
            .block_store()
            .get_state_root(latest_height)?
            .ok_or_else(|| anyhow!("Missing state root in the storage"))?;
        ensure!(
            tree_root == state_root,
            // Note: the `\` continuations keep this a single-line message,
            // matching the height-mismatch message above (the original was
            // missing the first continuation and embedded a raw newline).
            "The stored state root is different than the one in the block tree; \
             please ensure that the cached block tree is valid or delete the \
             'block_tree' file from the ledger folder"
        );

        // Load the latest block and prime the in-memory caches.
        let block = ledger
            .get_block(latest_height)
            .with_context(|| format!("Failed to load block {latest_height} from the ledger"))?;
        *ledger.current_block.write() = block;
        *ledger.current_committee.write() = Some(ledger.latest_committee()?);
        *ledger.current_epoch_hash.write() = Some(ledger.get_epoch_hash(latest_height)?);
        *ledger.epoch_provers_cache.write() = ledger.load_epoch_provers();

        finish!(timer, "Initialize ledger");
        Ok(ledger)
    }
}
impl<N: Network, C: ConsensusStorage<N>> Ledger<N, C> {
    /// Backs up the underlying database to the given path.
    #[cfg(feature = "rocks")]
    pub fn backup_database<P: AsRef<std::path::Path>>(&self, path: P) -> Result<()> {
        self.vm.block_store().backup_database(path).map_err(|err| anyhow!(err))
    }

    /// Persists the current block tree to storage.
    #[cfg(feature = "rocks")]
    pub fn cache_block_tree(&self) -> Result<()> {
        self.vm.block_store().cache_block_tree()
    }

    /// Scans the blocks of the current epoch and returns, per prover address,
    /// the number of puzzle solutions it has contributed so far.
    pub fn load_epoch_provers(&self) -> IndexMap<Address<N>, u32> {
        let current_block_height = self.vm().block_store().current_block_height();
        let next_block_height = current_block_height.saturating_add(1);
        // First height of the window to scan.
        // NOTE(review): this evaluates to `epoch_start + 1` (the epoch's first
        // block is excluded) — confirm whether that offset is intentional, e.g.
        // because solutions in the epoch's first block target the prior epoch.
        let start = next_block_height.saturating_sub(current_block_height % N::NUM_BLOCKS_PER_EPOCH);
        // An empty window means there is nothing to count.
        if start > current_block_height {
            return IndexMap::new();
        }
        let existing_epoch_blocks: Vec<_> = (start..=current_block_height).collect();
        // Collect the prover address of every solution in the window; heights
        // whose solutions are missing or unreadable are skipped.
        let solution_addresses = cfg_iter!(existing_epoch_blocks)
            .flat_map(|height| match self.get_solutions(*height).as_deref() {
                Ok(Some(solutions)) => solutions.iter().map(|(_, s)| s.address()).collect::<Vec<_>>(),
                _ => vec![],
            })
            .collect::<Vec<_>>();
        // Tally the solutions per address.
        let mut epoch_provers = IndexMap::new();
        for address in solution_addresses {
            epoch_provers.entry(address).and_modify(|e| *e += 1).or_insert(1);
        }
        epoch_provers
    }

    /// Returns the VM.
    pub fn vm(&self) -> &VM<N, C> {
        &self.vm
    }

    /// Returns the puzzle.
    pub fn puzzle(&self) -> &Puzzle<N> {
        self.vm.puzzle()
    }

    /// Returns the block store's cache size, if one is tracked.
    pub fn block_cache_size(&self) -> Option<u32> {
        self.vm.block_store().cache_size()
    }

    /// Returns a shared handle to the epoch provers cache.
    pub fn epoch_provers(&self) -> Arc<RwLock<IndexMap<Address<N>, u32>>> {
        self.epoch_provers_cache.clone()
    }

    /// Returns the latest committee, falling back to the committee store when
    /// the in-memory cache is unset.
    pub fn latest_committee(&self) -> Result<Committee<N>> {
        match self.current_committee.read().as_ref() {
            Some(committee) => Ok(committee.clone()),
            None => self.vm.finalize_store().committee_store().current_committee(),
        }
    }

    /// Returns the latest state root.
    pub fn latest_state_root(&self) -> N::StateRoot {
        self.vm.block_store().current_state_root()
    }

    /// Returns the latest epoch number (latest height divided by the epoch length).
    pub fn latest_epoch_number(&self) -> u32 {
        self.current_block.read().height() / N::NUM_BLOCKS_PER_EPOCH
    }

    /// Returns the latest epoch hash, recomputing it from the latest height when
    /// the in-memory cache is unset.
    pub fn latest_epoch_hash(&self) -> Result<N::BlockHash> {
        match self.current_epoch_hash.read().as_ref() {
            Some(epoch_hash) => Ok(*epoch_hash),
            None => self.get_epoch_hash(self.latest_height()),
        }
    }

    /// Returns a clone of the latest block.
    pub fn latest_block(&self) -> Block<N> {
        self.current_block.read().clone()
    }

    /// Returns the latest round number.
    pub fn latest_round(&self) -> u64 {
        self.current_block.read().round()
    }

    /// Returns the latest block height.
    pub fn latest_height(&self) -> u32 {
        self.current_block.read().height()
    }

    /// Returns the latest block hash.
    pub fn latest_hash(&self) -> N::BlockHash {
        self.current_block.read().hash()
    }

    /// Returns a copy of the latest block header.
    pub fn latest_header(&self) -> Header<N> {
        *self.current_block.read().header()
    }

    /// Returns the latest cumulative weight.
    pub fn latest_cumulative_weight(&self) -> u128 {
        self.current_block.read().cumulative_weight()
    }

    /// Returns the latest cumulative proof target.
    pub fn latest_cumulative_proof_target(&self) -> u128 {
        self.current_block.read().cumulative_proof_target()
    }

    /// Returns the latest solutions root.
    pub fn latest_solutions_root(&self) -> Field<N> {
        self.current_block.read().header().solutions_root()
    }

    /// Returns the latest coinbase target.
    pub fn latest_coinbase_target(&self) -> u64 {
        self.current_block.read().coinbase_target()
    }

    /// Returns the latest proof target.
    pub fn latest_proof_target(&self) -> u64 {
        self.current_block.read().proof_target()
    }

    /// Returns the last coinbase target.
    pub fn last_coinbase_target(&self) -> u64 {
        self.current_block.read().last_coinbase_target()
    }

    /// Returns the last coinbase timestamp.
    pub fn last_coinbase_timestamp(&self) -> i64 {
        self.current_block.read().last_coinbase_timestamp()
    }

    /// Returns the latest block timestamp.
    pub fn latest_timestamp(&self) -> i64 {
        self.current_block.read().timestamp()
    }

    /// Returns a clone of the latest block's transactions.
    pub fn latest_transactions(&self) -> Transactions<N> {
        self.current_block.read().transactions().clone()
    }
}
impl<N: Network, C: ConsensusStorage<N>> Ledger<N, C> {
pub fn find_unspent_credits_records(&self, view_key: &ViewKey<N>) -> Result<RecordMap<N>> {
let microcredits = Identifier::from_str("microcredits")?;
Ok(self
.find_records(view_key, RecordsFilter::Unspent)?
.filter(|(_, record)| {
match record.data().get(µcredits) {
Some(Entry::Private(Plaintext::Literal(Literal::U64(amount), _))) => !amount.is_zero(),
_ => false,
}
})
.collect::<IndexMap<_, _>>())
}
pub fn create_deploy<R: Rng + CryptoRng>(
&self,
private_key: &PrivateKey<N>,
program: &Program<N>,
priority_fee_in_microcredits: u64,
query: Option<&dyn QueryTrait<N>>,
rng: &mut R,
) -> Result<Transaction<N>, CreateDeployError> {
let records = self.find_unspent_credits_records(&ViewKey::try_from(private_key)?)?;
if records.len().is_zero() {
return Err(anyhow!("The Aleo account has no records to spend.").into());
}
let mut records = records.values();
let fee_record = Some(records.next().unwrap().clone());
Ok(self.vm.deploy(private_key, program, fee_record, priority_fee_in_microcredits, query, rng)?)
}
pub fn create_transfer<R: Rng + CryptoRng>(
&self,
private_key: &PrivateKey<N>,
to: Address<N>,
amount_in_microcredits: u64,
priority_fee_in_microcredits: u64,
query: Option<&dyn QueryTrait<N>>,
rng: &mut R,
) -> Result<Transaction<N>, CreateTransferError> {
let records = self.find_unspent_credits_records(&ViewKey::try_from(private_key)?)?;
if records.len() < 2 {
return Err(anyhow!("The Aleo account does not have enough records to spend.").into());
}
let mut records = records.values();
let inputs = [
Value::Record(records.next().unwrap().clone()),
Value::from_str(&format!("{to}"))?,
Value::from_str(&format!("{amount_in_microcredits}u64"))?,
];
let fee_record = Some(records.next().unwrap().clone());
Ok(self.vm.execute(
private_key,
("credits.aleo", "transfer_private"),
inputs.iter(),
fee_record,
priority_fee_in_microcredits,
query,
rng,
)?)
}
}
// On RocksDB builds, persist the block tree when the ledger is dropped so a
// subsequent startup can reuse it instead of rebuilding.
#[cfg(feature = "rocks")]
impl<N: Network, C: ConsensusStorage<N>> Drop for InnerLedger<N, C> {
    fn drop(&mut self) {
        // Best-effort: log a failure instead of panicking inside `drop`.
        if let Err(e) = self.vm.block_store().cache_block_tree() {
            error!("Couldn't cache the block tree: {e}");
        }
    }
}
/// A convenience module that re-exports the commonly-used items of this crate.
pub mod prelude {
    pub use crate::{Ledger, authority, block, block::*, committee, helpers::*, narwhal, puzzle, query, store};
}