use crate::config::{
get_default_ledger_path, BootstrapMode, BuildConfig, Config, ConfigOverride, HookType,
Manifest, PackageManager, ProgramDeployment, ProgramWorkspace, ScriptsConfig,
SurfnetInfoResponse, SurfpoolConfig, TestValidator, ValidatorType, WithPath, SHUTDOWN_WAIT,
STARTUP_WAIT, SURFPOOL_HOST,
};
use anchor_client::Cluster;
use anchor_lang::prelude::UpgradeableLoaderState;
use anchor_lang::solana_program::bpf_loader_upgradeable;
use anchor_lang::AnchorDeserialize;
use anchor_lang_idl::convert::convert_idl;
use anchor_lang_idl::types::{Idl, IdlArrayLen, IdlDefinedFields, IdlType, IdlTypeDefTy};
use anyhow::{anyhow, bail, Context, Result};
use checks::{check_anchor_version, check_deps, check_idl_build_feature, check_overflow};
use clap::{CommandFactory, Parser};
use dirs::home_dir;
use heck::{ToKebabCase, ToLowerCamelCase, ToPascalCase, ToSnakeCase};
use regex::{Regex, RegexBuilder};
use rust_template::{ProgramTemplate, TestTemplate};
use semver::{Version, VersionReq};
use serde_json::{json, Map, Value as JsonValue};
use solana_cli_config::Config as SolanaCliConfig;
use solana_commitment_config::CommitmentConfig;
use solana_compute_budget_interface::ComputeBudgetInstruction;
use solana_instruction::Instruction;
use solana_keypair::Keypair;
use solana_pubkey::Pubkey;
use solana_pubsub_client::pubsub_client::{PubsubClient, PubsubClientSubscription};
use solana_rpc_client::rpc_client::RpcClient;
use solana_rpc_client_api::config::{RpcTransactionLogsConfig, RpcTransactionLogsFilter};
use solana_rpc_client_api::request::RpcRequest;
use solana_rpc_client_api::response::{Response as RpcResponse, RpcLogsResponse};
use solana_signer::{EncodableKey, Signer};
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::ffi::OsString;
use std::fs::{self, File};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::{Child, Stdio};
use std::string::ToString;
use std::sync::LazyLock;
mod account;
mod checks;
pub mod config;
mod keygen;
mod metadata;
mod program;
pub mod rust_template;
/// CLI version, taken from this crate's Cargo.toml at compile time.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
/// Version advertised for the Docker verifiable builder; identical to [`VERSION`].
pub const DOCKER_BUILDER_VERSION: &str = VERSION;
/// Default JSON-RPC port of a local validator.
pub const DEFAULT_RPC_PORT: u16 = 8899;
// NOTE(review): presumably added to the RPC port to derive the websocket
// port — confirm against the call sites that use it.
pub const WEBSOCKET_PORT_OFFSET: u16 = 1;
// Root directory for avm (Anchor Version Manager) data: `$AVM_HOME` when the
// environment variable is set, otherwise `~/.avm` under the user's home dir.
pub static AVM_HOME: LazyLock<PathBuf> = LazyLock::new(|| {
    match std::env::var("AVM_HOME") {
        Ok(dir) => PathBuf::from(dir),
        Err(_) => {
            let mut home = dirs::home_dir().expect("Could not find home directory");
            home.push(".avm");
            home
        }
    }
});
// Top-level CLI options. Plain `//` comments are used on purpose: `///` doc
// comments on clap-derived items become user-visible help/about text.
#[derive(Debug, Parser)]
#[clap(version = VERSION)]
pub struct Opts {
    // Global provider overrides (e.g. cluster/wallet), shared by every subcommand.
    #[clap(flatten)]
    pub cfg_override: ConfigOverride,
    // The selected subcommand.
    #[clap(subcommand)]
    pub command: Command,
}
// All `anchor` subcommands. `//` comments only — `///` would alter clap help output.
#[derive(Debug, Parser)]
pub enum Command {
    // Scaffold a brand-new workspace.
    Init {
        name: String,
        #[clap(short, long)]
        javascript: bool,
        #[clap(long)]
        no_install: bool,
        #[clap(value_enum, long, default_value = "yarn")]
        package_manager: PackageManager,
        #[clap(long)]
        no_git: bool,
        #[clap(value_enum, short, long, default_value = "multiple")]
        template: ProgramTemplate,
        #[clap(value_enum, long, default_value = "litesvm")]
        test_template: TestTemplate,
        #[clap(long, action)]
        force: bool,
        #[clap(long)]
        install_agent_skills: bool,
    },
    // Build the workspace programs (optionally verifiable via Docker).
    #[clap(name = "build", alias = "b")]
    Build {
        #[clap(long)]
        skip_lint: bool,
        #[clap(long)]
        ignore_keys: bool,
        #[clap(long)]
        no_idl: bool,
        #[clap(short, long)]
        idl: Option<String>,
        #[clap(short = 't', long)]
        idl_ts: Option<String>,
        #[clap(short, long)]
        verifiable: bool,
        #[clap(short, long)]
        program_name: Option<String>,
        #[clap(short, long)]
        solana_version: Option<String>,
        #[clap(short, long)]
        docker_image: Option<String>,
        #[clap(value_enum, short, long, default_value = "none")]
        bootstrap: BootstrapMode,
        #[clap(short, long, required = false)]
        env: Vec<String>,
        // Trailing args forwarded to cargo (after `--`).
        #[clap(required = false, last = true)]
        cargo_args: Vec<String>,
        #[clap(long)]
        no_docs: bool,
    },
    // Expand macros via cargo-expand.
    Expand {
        #[clap(short, long)]
        program_name: Option<String>,
        #[clap(required = false, last = true)]
        cargo_args: Vec<String>,
    },
    // Verify an on-chain program matches a source tree.
    Verify {
        program_id: Pubkey,
        #[clap(long, conflicts_with = "current_dir")]
        repo_url: Option<String>,
        #[clap(long, requires = "repo_url")]
        commit_hash: Option<String>,
        #[clap(long)]
        current_dir: bool,
        #[clap(long)]
        program_name: Option<String>,
        #[clap(raw = true)]
        args: Vec<String>,
    },
    // Build, deploy to a local validator, and run the test suite.
    #[clap(name = "test", alias = "t")]
    Test {
        #[clap(short, long)]
        program_name: Option<String>,
        #[clap(long)]
        skip_deploy: bool,
        #[clap(long)]
        skip_lint: bool,
        #[clap(long)]
        skip_local_validator: bool,
        #[clap(long)]
        skip_build: bool,
        #[clap(long)]
        no_idl: bool,
        #[clap(long)]
        detach: bool,
        #[clap(long)]
        run: Vec<String>,
        #[clap(value_enum, long, default_value = "surfpool")]
        validator: ValidatorType,
        args: Vec<String>,
        #[clap(short, long, required = false)]
        env: Vec<String>,
        #[clap(required = false, last = true)]
        cargo_args: Vec<String>,
    },
    // Add a new program to an existing workspace.
    New {
        name: String,
        #[clap(value_enum, short, long, default_value = "multiple")]
        template: ProgramTemplate,
        #[clap(long, action)]
        force: bool,
    },
    Idl {
        #[clap(subcommand)]
        subcmd: IdlCommand,
    },
    Clean,
    // Deprecated alias kept (hidden) for backward compatibility.
    #[clap(hide = true)]
    #[deprecated(since = "0.32.0", note = "use `anchor program deploy` instead")]
    Deploy {
        #[clap(short, long)]
        program_name: Option<String>,
        #[clap(long, requires = "program_name")]
        program_keypair: Option<String>,
        #[clap(short, long)]
        verifiable: bool,
        #[clap(long)]
        no_idl: bool,
        #[clap(required = false, last = true)]
        solana_args: Vec<String>,
    },
    Migrate,
    // Deprecated alias kept (hidden) for backward compatibility.
    #[clap(hide = true)]
    #[deprecated(since = "0.32.0", note = "use `anchor program upgrade` instead")]
    Upgrade {
        #[clap(short, long)]
        program_id: Pubkey,
        program_filepath: String,
        #[clap(long, default_value = "0")]
        max_retries: u32,
        #[clap(required = false, last = true)]
        solana_args: Vec<String>,
    },
    Airdrop {
        amount: f64,
        pubkey: Option<Pubkey>,
    },
    Cluster {
        #[clap(subcommand)]
        subcmd: ClusterCommand,
    },
    Config {
        #[clap(subcommand)]
        subcmd: ConfigCommand,
    },
    Shell,
    // Run a script defined in Anchor.toml's [scripts] section.
    Run {
        script: String,
        #[clap(required = false, last = true)]
        script_args: Vec<String>,
    },
    Keys {
        #[clap(subcommand)]
        subcmd: KeysCommand,
    },
    // Start a local validator with the workspace programs deployed.
    Localnet {
        #[clap(long)]
        skip_build: bool,
        #[clap(long)]
        skip_deploy: bool,
        #[clap(long)]
        skip_lint: bool,
        #[clap(long)]
        ignore_keys: bool,
        #[clap(value_enum, long, default_value = "surfpool")]
        validator: ValidatorType,
        #[clap(short, long, required = false)]
        env: Vec<String>,
        #[clap(required = false, last = true)]
        cargo_args: Vec<String>,
    },
    // Decode and display an on-chain account using the program IDL.
    Account {
        account_type: String,
        address: Pubkey,
        #[clap(long)]
        idl: Option<String>,
    },
    Completions {
        #[clap(value_enum)]
        shell: clap_complete::Shell,
    },
    Address,
    Balance {
        pubkey: Option<Pubkey>,
        #[clap(long)]
        lamports: bool,
    },
    Epoch,
    #[clap(name = "epoch-info")]
    EpochInfo,
    // Stream transaction logs from the cluster.
    Logs {
        #[clap(long)]
        include_votes: bool,
        #[clap(long)]
        address: Option<Vec<Pubkey>>,
    },
    ShowAccount {
        #[clap(flatten)]
        cmd: account::ShowAccountCommand,
    },
    Keygen {
        #[clap(subcommand)]
        subcmd: KeygenCommand,
    },
    Program {
        #[clap(subcommand)]
        subcmd: ProgramCommand,
    },
}
// Keypair management subcommands (mirrors `solana-keygen`).
// `//` comments only — `///` would alter clap help output.
#[derive(Debug, Parser)]
pub enum KeygenCommand {
    // Generate a new keypair file.
    New {
        #[clap(short = 'o', long)]
        outfile: Option<String>,
        #[clap(short, long)]
        force: bool,
        #[clap(long)]
        no_passphrase: bool,
        #[clap(long)]
        silent: bool,
        #[clap(short = 'w', long, default_value = "12")]
        word_count: usize,
    },
    // Print the pubkey of a keypair file.
    Pubkey {
        keypair: Option<String>,
    },
    // Recover a keypair from a seed phrase.
    Recover {
        #[clap(short = 'o', long)]
        outfile: Option<String>,
        #[clap(short, long)]
        force: bool,
        #[clap(long)]
        skip_seed_phrase_validation: bool,
        #[clap(long)]
        no_passphrase: bool,
    },
    // Check that a keypair matches a given pubkey.
    Verify {
        pubkey: Pubkey,
        keypair: Option<String>,
    },
}
// Program-key subcommands: list declared program IDs or sync them with the
// keypairs on disk. `//` comments only — `///` would alter clap help output.
#[derive(Debug, Parser)]
pub enum KeysCommand {
    List,
    Sync {
        #[clap(short, long)]
        program_name: Option<String>,
    },
}
// Program deployment/management subcommands (mirrors `solana program`).
// `//` comments only — `///` would alter clap help output.
#[derive(Debug, Parser)]
pub enum ProgramCommand {
    // Deploy a program binary (optionally also its IDL).
    Deploy {
        program_filepath: Option<String>,
        #[clap(short, long)]
        program_name: Option<String>,
        #[clap(long)]
        program_keypair: Option<String>,
        #[clap(long)]
        upgrade_authority: Option<String>,
        #[clap(long)]
        program_id: Option<Pubkey>,
        #[clap(long)]
        buffer: Option<Pubkey>,
        #[clap(long)]
        max_len: Option<usize>,
        #[clap(long)]
        no_idl: bool,
        // `final` is a Rust keyword, hence the renamed field.
        #[clap(long = "final")]
        make_final: bool,
        #[clap(required = false, last = true)]
        solana_args: Vec<String>,
    },
    // Write a program binary to an intermediate buffer account.
    WriteBuffer {
        program_filepath: Option<String>,
        #[clap(short, long)]
        program_name: Option<String>,
        #[clap(long)]
        buffer: Option<String>,
        #[clap(long)]
        buffer_authority: Option<String>,
        #[clap(long)]
        max_len: Option<usize>,
    },
    SetBufferAuthority {
        buffer: Pubkey,
        new_buffer_authority: Pubkey,
    },
    SetUpgradeAuthority {
        program_id: Pubkey,
        #[clap(long)]
        new_upgrade_authority: Option<Pubkey>,
        #[clap(long)]
        new_upgrade_authority_signer: Option<String>,
        #[clap(long)]
        skip_new_upgrade_authority_signer_check: bool,
        #[clap(long = "final")]
        make_final: bool,
        #[clap(long)]
        upgrade_authority: Option<String>,
    },
    // Show details of a program, buffer, or programdata account.
    Show {
        account: Pubkey,
        #[clap(long)]
        get_programs: bool,
        #[clap(long)]
        get_buffers: bool,
        #[clap(long)]
        all: bool,
    },
    Upgrade {
        program_id: Pubkey,
        #[clap(long)]
        program_filepath: Option<String>,
        #[clap(short, long)]
        program_name: Option<String>,
        #[clap(long)]
        buffer: Option<Pubkey>,
        #[clap(long)]
        upgrade_authority: Option<String>,
        #[clap(long, default_value = "0")]
        max_retries: u32,
        #[clap(required = false, last = true)]
        solana_args: Vec<String>,
    },
    // Dump an account's program data to a file.
    Dump {
        account: Pubkey,
        output_file: String,
    },
    // Close a program or buffer account and reclaim lamports.
    Close {
        account: Option<Pubkey>,
        #[clap(short, long)]
        program_name: Option<String>,
        #[clap(long)]
        authority: Option<String>,
        #[clap(long)]
        recipient: Option<Pubkey>,
        #[clap(long)]
        bypass_warning: bool,
    },
    // Extend a program's data account by `additional_bytes`.
    Extend {
        program_id: Option<Pubkey>,
        #[clap(short, long)]
        program_name: Option<String>,
        additional_bytes: usize,
    },
}
// IDL account management subcommands. `//` comments only — `///` would alter
// clap help output.
#[derive(Debug, Parser)]
pub enum IdlCommand {
    // Create the on-chain IDL account for a program.
    Init {
        program_id: Option<Pubkey>,
        #[clap(short, long)]
        filepath: String,
        #[clap(long)]
        priority_fee: Option<u64>,
        #[clap(long)]
        non_canonical: bool,
        // Compiled in only for IDL-on-localnet test builds.
        #[clap(long)]
        #[cfg(feature = "idl-localnet-testing")]
        allow_localnet: bool,
    },
    Upgrade {
        program_id: Option<Pubkey>,
        #[clap(short, long)]
        filepath: String,
        #[clap(long)]
        priority_fee: Option<u64>,
        #[clap(long)]
        #[cfg(feature = "idl-localnet-testing")]
        allow_localnet: bool,
    },
    // Generate the IDL from source (no chain interaction).
    #[clap(alias = "b")]
    Build {
        #[clap(short, long)]
        program_name: Option<String>,
        #[clap(short, long)]
        out: Option<String>,
        #[clap(short = 't', long)]
        out_ts: Option<String>,
        #[clap(long)]
        no_docs: bool,
        #[clap(long)]
        skip_lint: bool,
        #[clap(required = false, last = true)]
        cargo_args: Vec<String>,
    },
    // Download a program's IDL from the chain.
    Fetch {
        program_id: Pubkey,
        #[clap(short, long)]
        out: Option<String>,
        #[clap(long)]
        non_canonical: bool,
    },
    // Convert a legacy IDL file to the current format.
    Convert {
        path: String,
        #[clap(short, long)]
        out: Option<String>,
        #[clap(short, long)]
        program_id: Option<Pubkey>,
    },
    // Generate TypeScript types from an IDL file.
    Type {
        path: String,
        #[clap(short, long)]
        out: Option<String>,
    },
    Close {
        program_id: Pubkey,
        #[clap(long, default_value = "idl")]
        seed: String,
        #[clap(long)]
        priority_fee: Option<u64>,
    },
    CreateBuffer {
        #[clap(short, long)]
        filepath: String,
        #[clap(long)]
        priority_fee: Option<u64>,
    },
    SetBufferAuthority {
        buffer: Pubkey,
        #[clap(short, long)]
        new_authority: Pubkey,
        #[clap(long)]
        priority_fee: Option<u64>,
    },
    WriteBuffer {
        program_id: Pubkey,
        #[clap(short, long)]
        buffer: Pubkey,
        #[clap(long, default_value = "idl")]
        seed: String,
        #[clap(long)]
        close_buffer: bool,
        #[clap(long)]
        priority_fee: Option<u64>,
    },
}
// Cluster subcommands; currently only listing known clusters.
#[derive(Debug, Parser)]
pub enum ClusterCommand {
    List,
}
// Get/set provider configuration. `//` comments only — `///` would alter
// clap help output.
#[derive(Debug, Parser)]
pub enum ConfigCommand {
    Get,
    Set {
        #[clap(short = 'u', long = "url")]
        url: Option<String>,
        #[clap(short = 'k', long = "keypair")]
        keypair: Option<String>,
    },
}
/// Read a keypair file, replacing the opaque read error with one that names
/// the offending path.
fn get_keypair(path: &str) -> Result<Keypair> {
    match solana_keypair::read_keypair_file(path) {
        Ok(keypair) => Ok(keypair),
        Err(_) => Err(anyhow!("Unable to read keypair file ({path})")),
    }
}
fn format_sol(lamports: u64) -> String {
let sol = lamports as f64 / 1_000_000_000.0;
let formatted = format!("{:.8}", sol);
let trimmed = formatted.trim_end_matches('0').trim_end_matches('.');
format!("{} SOL", trimmed)
}
/// Resolve the cluster URL and wallet path to use.
///
/// Resolution order:
/// 1. A discoverable Anchor workspace config — its provider settings win.
/// 2. The Solana CLI config file, when present and loadable.
/// 3. Hard-coded fallbacks: mainnet-beta RPC and `~/.config/solana/id.json`.
///
/// In cases 2 and 3, an explicit `--provider.cluster` override replaces the
/// resolved cluster URL.
fn get_cluster_and_wallet(cfg_override: &ConfigOverride) -> Result<(String, String)> {
    if let Ok(Some(cfg)) = Config::discover(cfg_override) {
        return Ok((
            cfg.provider.cluster.url().to_string(),
            cfg.provider.wallet.to_string(),
        ));
    }
    // Single definition of the fallback pair (previously duplicated inline).
    let fallback = || {
        (
            "https://api.mainnet-beta.solana.com".to_string(),
            dirs::home_dir()
                .map(|home| {
                    home.join(".config/solana/id.json")
                        .to_string_lossy()
                        .to_string()
                })
                .unwrap_or_else(|| "~/.config/solana/id.json".to_string()),
        )
    };
    let (cluster_url, wallet_path) = match solana_cli_config::CONFIG_FILE.as_ref() {
        Some(config_file) => match SolanaCliConfig::load(config_file) {
            // Move the strings out of the loaded config; no clone needed.
            Ok(cli_config) => (cli_config.json_rpc_url, cli_config.keypair_path),
            Err(_) => fallback(),
        },
        None => fallback(),
    };
    let final_cluster = match &cfg_override.cluster {
        Some(cluster) => cluster.url().to_string(),
        None => cluster_url,
    };
    Ok((final_cluster, wallet_path))
}
/// Recommend a compute-unit price by taking the median of the cluster's
/// recent prioritization fees (0 when no samples are available).
pub fn get_recommended_micro_lamport_fee(client: &RpcClient) -> Result<u64> {
    let mut samples = client.get_recent_prioritization_fees(&[])?;
    if samples.is_empty() {
        return Ok(0);
    }
    samples.sort_unstable_by_key(|sample| sample.prioritization_fee);
    let mid = samples.len() / 2;
    // Even sample count: average the two middle values; odd: take the middle.
    let median = match samples.len() % 2 {
        0 => (samples[mid - 1].prioritization_fee + samples[mid].prioritization_fee) / 2,
        _ => samples[mid].prioritization_fee,
    };
    Ok(median)
}
/// Prepend a `SetComputeUnitPrice` instruction when a non-zero priority fee
/// applies.
///
/// Uses `priority_fee` when given, otherwise falls back to the median of the
/// cluster's recent prioritization fees. A resolved fee of 0 returns the
/// instructions unchanged.
pub fn prepend_compute_unit_ix(
    // Taking the Vec as `mut` avoids the previous full clone of the owned
    // instruction list just to insert one element.
    mut instructions: Vec<Instruction>,
    client: &RpcClient,
    priority_fee: Option<u64>,
) -> Result<Vec<Instruction>> {
    let priority_fee = match priority_fee {
        Some(fee) => fee,
        None => get_recommended_micro_lamport_fee(client)?,
    };
    if priority_fee > 0 {
        // Insert at the front so the budget ix precedes the payload ixs.
        instructions.insert(
            0,
            ComputeBudgetInstruction::set_compute_unit_price(priority_fee),
        );
    }
    Ok(instructions)
}
/// Top-level CLI entry point: apply any configured toolchain override, run the
/// requested command, then restore the original toolchain — even when the
/// command itself failed.
pub fn entry(opts: Opts) -> Result<()> {
    let restore_cbs = override_toolchain(&opts.cfg_override)?;
    // Deliberately not `?` here: restoration must run on failure too.
    let outcome = process_command(opts);
    restore_toolchain(restore_cbs)?;
    outcome
}
type RestoreToolchainCallbacks = Vec<Box<dyn FnOnce() -> Result<()>>>;
/// Switch the active `solana` and `anchor` toolchains to the versions pinned
/// in the workspace `[toolchain]` section when they differ from what is
/// currently running.
///
/// Returns callbacks that undo the `solana` switch; the caller must run them
/// (via `restore_toolchain`) once the command finishes.
///
/// BUGFIX: the comparisons below previously read `¤t_version` — a
/// mangled `&current_version` (`&curren` is the HTML entity for `¤`) that
/// could not compile. Restored to `&current_version`.
fn override_toolchain(cfg_override: &ConfigOverride) -> Result<RestoreToolchainCallbacks> {
    let mut restore_cbs: RestoreToolchainCallbacks = vec![];
    let cfg = Config::discover(cfg_override)?;
    if let Some(cfg) = cfg {
        // Extract the first `x.y.z…` version token from arbitrary CLI output.
        fn parse_version(text: &str) -> Option<String> {
            Some(
                Regex::new(r"(\d+\.\d+\.\S+)")
                    .unwrap()
                    .captures_iter(text)
                    .next()?
                    .get(0)?
                    .as_str()
                    .to_string(),
            )
        }
        // Run `<cmd> --version` and parse the version it reports.
        fn get_current_version(cmd_name: &str) -> Result<String> {
            let output = std::process::Command::new(cmd_name)
                .arg("--version")
                .output()?;
            if !output.status.success() {
                return Err(anyhow!("Failed to run `{cmd_name} --version`"));
            }
            let output_version = std::str::from_utf8(&output.stdout)?;
            parse_version(output_version)
                .ok_or_else(|| anyhow!("Failed to parse the version of `{cmd_name}`"))
        }
        if let Some(solana_version) = &cfg.toolchain.solana_version {
            let current_version = get_current_version("solana")?;
            if solana_version != &current_version {
                // Switch the installed `solana` toolchain; returns whether it worked.
                fn override_solana_version(version: String) -> Result<bool> {
                    // Versions below 1.18.19 predate the Agave rename and use
                    // the old installer binary and release domain.
                    let (cmd_name, domain) =
                        if Version::parse(&version)? < Version::parse("1.18.19")? {
                            ("solana-install", "solana.com")
                        } else {
                            ("agave-install", "anza.xyz")
                        };
                    if get_current_version(cmd_name).is_err() {
                        eprintln!(
                            "Command not installed: `{cmd_name}`. \
                            See https://github.com/anza-xyz/agave/wiki/Agave-Transition, \
                            installing..."
                        );
                        let install_script = std::process::Command::new("curl")
                            .args([
                                "-sSfL",
                                &format!("https://release.{domain}/v{version}/install"),
                            ])
                            .output()?;
                        let is_successful = std::process::Command::new("sh")
                            .args(["-c", std::str::from_utf8(&install_script.stdout)?])
                            .spawn()?
                            .wait_with_output()?
                            .status
                            .success();
                        if !is_successful {
                            return Err(anyhow!("Failed to install `{cmd_name}`"));
                        }
                    }
                    let output = std::process::Command::new(cmd_name).arg("list").output()?;
                    if !output.status.success() {
                        return Err(anyhow!("Failed to list installed `solana` versions"));
                    }
                    let is_installed = std::str::from_utf8(&output.stdout)?
                        .lines()
                        .filter_map(parse_version)
                        .any(|line_version| line_version == version);
                    // Switching to an already-installed version is quiet and
                    // fast; only show installer output when downloading.
                    let (stderr, stdout) = if is_installed {
                        (Stdio::null(), Stdio::null())
                    } else {
                        (Stdio::inherit(), Stdio::inherit())
                    };
                    std::process::Command::new(cmd_name)
                        .arg("init")
                        .arg(&version)
                        .stderr(stderr)
                        .stdout(stdout)
                        .spawn()?
                        .wait()
                        .map(|status| status.success())
                        .map_err(|err| anyhow!("Failed to run `{cmd_name}` command: {err}"))
                }
                match override_solana_version(solana_version.to_owned())? {
                    // Register a callback that switches back afterwards.
                    true => restore_cbs.push(Box::new(|| {
                        match override_solana_version(current_version)? {
                            true => Ok(()),
                            false => Err(anyhow!("Failed to restore `solana` version")),
                        }
                    })),
                    false => eprintln!(
                        "Failed to override `solana` version to {solana_version}, \
                        using {current_version} instead"
                    ),
                }
            }
        }
        if let Some(anchor_version) = &cfg.toolchain.anchor_version {
            const ANCHOR_BINARY_PREFIX: &str = "anchor-";
            // Derive the running binary's version from its file name
            // (`anchor-<version>` when launched through avm), defaulting to
            // this crate's version.
            let current_version = std::env::args()
                .next()
                .expect("First arg should exist")
                .parse::<PathBuf>()?
                .file_name()
                .and_then(|name| name.to_str())
                .expect("File name should be valid Unicode")
                .split_once(ANCHOR_BINARY_PREFIX)
                .map(|(_, version)| version)
                .unwrap_or(VERSION)
                .to_owned();
            if anchor_version != &current_version {
                let binary_path = home_dir()
                    .unwrap()
                    .join(".avm")
                    .join("bin")
                    .join(format!("{ANCHOR_BINARY_PREFIX}{anchor_version}"));
                if !binary_path.exists() {
                    eprintln!(
                        "`anchor` {anchor_version} is not installed with `avm`. Installing...\n"
                    );
                    if let Err(e) = install_with_avm(anchor_version, false) {
                        eprintln!(
                            "Failed to install `anchor`: {e}, using {current_version} instead"
                        );
                        return Ok(restore_cbs);
                    }
                }
                // Re-exec the pinned binary with the same arguments, restore
                // the toolchain, and forward its exit code.
                let exit_code = std::process::Command::new(binary_path)
                    .args(std::env::args_os().skip(1))
                    .spawn()?
                    .wait()?
                    .code()
                    .unwrap_or(1);
                restore_toolchain(restore_cbs)?;
                std::process::exit(exit_code);
            }
        }
    }
    Ok(restore_cbs)
}
fn install_with_avm(version: &str, verify: bool) -> Result<()> {
let mut cmd = std::process::Command::new("avm");
cmd.arg("install");
cmd.arg(version);
cmd.arg("--force");
if verify {
cmd.arg("--verify");
}
let status = cmd.status().context("running AVM")?;
if !status.success() {
bail!("failed to install `anchor` {version} with avm");
}
Ok(())
}
/// Run every restore callback, best-effort: failures are reported to stderr
/// rather than propagated, so one bad callback can't block the rest.
fn restore_toolchain(restore_cbs: RestoreToolchainCallbacks) -> Result<()> {
    restore_cbs
        .into_iter()
        .filter_map(|restore| restore().err())
        .for_each(|e| eprintln!("Toolchain error: {e}"));
    Ok(())
}
/// Ask npm for the user's configured default license
/// (`npm config get init-license`), trimmed of surrounding whitespace.
fn get_npm_init_license() -> Result<String> {
    let output = std::process::Command::new("npm")
        .args(["config", "get", "init-license"])
        .output()?;
    if !output.status.success() {
        return Err(anyhow!("Failed to get npm init license"));
    }
    Ok(String::from_utf8(output.stdout)?.trim().to_string())
}
/// Dispatch a parsed CLI invocation to its handler function. Purely
/// mechanical: destructure the subcommand and forward the fields.
fn process_command(opts: Opts) -> Result<()> {
    match opts.command {
        Command::Init {
            name,
            javascript,
            no_install,
            package_manager,
            no_git,
            template,
            test_template,
            force,
            install_agent_skills,
        } => init(
            &opts.cfg_override,
            name,
            javascript,
            no_install,
            package_manager,
            no_git,
            template,
            test_template,
            force,
            install_agent_skills,
        ),
        Command::New {
            name,
            template,
            force,
        } => new(&opts.cfg_override, name, template, force),
        Command::Build {
            no_idl,
            idl,
            idl_ts,
            verifiable,
            program_name,
            solana_version,
            docker_image,
            bootstrap,
            cargo_args,
            env,
            skip_lint,
            ignore_keys,
            no_docs,
        } => build(
            &opts.cfg_override,
            no_idl,
            idl,
            idl_ts,
            verifiable,
            skip_lint,
            ignore_keys,
            program_name,
            solana_version,
            docker_image,
            bootstrap,
            // stdout/stderr redirection: only used by internal callers.
            None,
            None,
            env,
            cargo_args,
            no_docs,
        ),
        Command::Verify {
            program_id,
            repo_url,
            commit_hash,
            current_dir,
            program_name,
            args,
        } => verify(
            program_id,
            repo_url,
            commit_hash,
            current_dir,
            program_name,
            args,
        ),
        Command::Clean => clean(&opts.cfg_override),
        // Deprecated alias for `anchor program deploy`.
        #[allow(deprecated)]
        Command::Deploy {
            program_name,
            program_keypair,
            verifiable,
            no_idl,
            solana_args,
        } => {
            eprintln!(
                "Warning: 'anchor deploy' is deprecated. Use 'anchor program deploy' instead."
            );
            deploy(
                &opts.cfg_override,
                program_name,
                program_keypair,
                verifiable,
                no_idl,
                solana_args,
            )
        }
        Command::Expand {
            program_name,
            cargo_args,
        } => expand(&opts.cfg_override, program_name, &cargo_args),
        // Deprecated alias for `anchor program upgrade`.
        #[allow(deprecated)]
        Command::Upgrade {
            program_id,
            program_filepath,
            max_retries,
            solana_args,
        } => {
            eprintln!(
                "Warning: 'anchor upgrade' is deprecated. Use 'anchor program upgrade' instead."
            );
            upgrade(
                &opts.cfg_override,
                program_id,
                program_filepath,
                max_retries,
                solana_args,
            )
        }
        Command::Idl { subcmd } => idl(&opts.cfg_override, subcmd),
        Command::Migrate => migrate(&opts.cfg_override),
        Command::Test {
            program_name,
            skip_deploy,
            skip_local_validator,
            skip_build,
            no_idl,
            detach,
            run,
            validator,
            args,
            env,
            cargo_args,
            skip_lint,
        } => test(
            &opts.cfg_override,
            program_name,
            skip_deploy,
            skip_local_validator,
            skip_build,
            skip_lint,
            no_idl,
            detach,
            run,
            validator,
            args,
            env,
            cargo_args,
        ),
        Command::Airdrop { amount, pubkey } => airdrop(&opts.cfg_override, amount, pubkey),
        Command::Cluster { subcmd } => cluster(subcmd),
        Command::Config { subcmd } => config_cmd(&opts.cfg_override, subcmd),
        Command::Shell => shell(&opts.cfg_override),
        Command::Run {
            script,
            script_args,
        } => run(&opts.cfg_override, script, script_args),
        Command::Keys { subcmd } => keys(&opts.cfg_override, subcmd),
        Command::Localnet {
            skip_build,
            skip_deploy,
            skip_lint,
            ignore_keys,
            validator,
            env,
            cargo_args,
        } => localnet(
            &opts.cfg_override,
            skip_build,
            skip_deploy,
            skip_lint,
            ignore_keys,
            validator,
            env,
            cargo_args,
        ),
        Command::Account {
            account_type,
            address,
            idl,
        } => account(&opts.cfg_override, account_type, address, idl),
        // Shell completion generation is handled inline by clap_complete.
        Command::Completions { shell } => {
            clap_complete::generate(
                shell,
                &mut Opts::command(),
                "anchor",
                &mut std::io::stdout(),
            );
            Ok(())
        }
        Command::Address => address(&opts.cfg_override),
        Command::Balance { pubkey, lamports } => balance(&opts.cfg_override, pubkey, lamports),
        Command::Epoch => epoch(&opts.cfg_override),
        Command::EpochInfo => epoch_info(&opts.cfg_override),
        Command::Logs {
            include_votes,
            address,
        } => logs_subscribe(&opts.cfg_override, include_votes, address),
        Command::ShowAccount { cmd } => account::show_account(&opts.cfg_override, cmd),
        Command::Keygen { subcmd } => keygen::keygen(&opts.cfg_override, subcmd),
        Command::Program { subcmd } => program::program(&opts.cfg_override, subcmd),
    }
}
/// Initialize a new Anchor workspace in `./<project-name>`: scaffolding,
/// Anchor.toml, a first program, migrations, JS/TS package files, node
/// modules, and a git repository.
#[allow(clippy::too_many_arguments)]
fn init(
    cfg_override: &ConfigOverride,
    name: String,
    javascript: bool,
    no_install: bool,
    package_manager: PackageManager,
    no_git: bool,
    template: ProgramTemplate,
    test_template: TestTemplate,
    force: bool,
    install_agent_skills: bool,
) -> Result<()> {
    if !force && Config::discover(cfg_override)?.is_some() {
        return Err(anyhow!("Workspace already initialized"));
    }
    // The Rust crate name is snake_case; the project directory keeps the
    // given name (kebab-cased) unless it already is valid snake_case.
    let rust_name = name.to_snake_case();
    let project_name = if name == rust_name {
        rust_name.clone()
    } else {
        name.to_kebab_case()
    };
    // Reserved words that the `syn` identifier parse does not reject on its
    // own — presumably edition-dependent keywords; rejected explicitly here.
    let extra_keywords = ["async", "await", "try"];
    if syn::parse_str::<syn::Ident>(&rust_name).is_err()
        || extra_keywords.contains(&rust_name.as_str())
    {
        return Err(anyhow!(
            "Anchor workspace name must be a valid Rust identifier. It may not be a Rust reserved word, start with a digit, or include certain disallowed characters. See https://doc.rust-lang.org/reference/identifiers.html for more detail.",
        ));
    }
    if force {
        fs::create_dir_all(&project_name)?;
    } else {
        // Without --force, an existing directory is an error.
        fs::create_dir(&project_name)?;
    }
    std::env::set_current_dir(&project_name)?;
    fs::create_dir_all("app")?;
    let mut cfg = Config::default();
    let test_script = test_template.get_test_script(javascript, &package_manager);
    cfg.scripts.insert("test".to_owned(), test_script);
    let package_manager_cmd = package_manager.to_string();
    cfg.toolchain.package_manager = Some(package_manager);
    // Register the initial program under the localnet cluster.
    let mut localnet = BTreeMap::new();
    let program_id = rust_template::get_or_create_program_id(&rust_name);
    localnet.insert(
        rust_name,
        ProgramDeployment {
            address: program_id,
            path: None,
            idl: None,
        },
    );
    cfg.programs.insert(Cluster::Localnet, localnet);
    let toml = cfg.to_string();
    fs::write("Anchor.toml", toml)?;
    fs::write(".gitignore", rust_template::git_ignore())?;
    fs::write(".prettierignore", rust_template::prettier_ignore())?;
    if force {
        // Clear any leftover program dir from a previous run. Guarded with
        // `exists()` — previously this unconditionally called remove_dir_all,
        // which fails when `--force` is used in a fresh directory.
        let program_dir = std::env::current_dir()?
            .join("programs")
            .join(&project_name);
        if program_dir.exists() {
            fs::remove_dir_all(program_dir)?;
        }
    }
    rust_template::create_program(&project_name, template, Some(&test_template))?;
    let migrations_path = Path::new("migrations");
    fs::create_dir_all(migrations_path)?;
    let license = get_npm_init_license()?;
    let jest = TestTemplate::Jest == test_template;
    if javascript {
        let mut package_json = File::create("package.json")?;
        package_json.write_all(rust_template::package_json(jest, license).as_bytes())?;
        let mut deploy = File::create(migrations_path.join("deploy.js"))?;
        deploy.write_all(rust_template::deploy_script().as_bytes())?;
    } else {
        let mut ts_config = File::create("tsconfig.json")?;
        ts_config.write_all(rust_template::ts_config(jest).as_bytes())?;
        let mut ts_package_json = File::create("package.json")?;
        ts_package_json.write_all(rust_template::ts_package_json(jest, license).as_bytes())?;
        let mut deploy = File::create(migrations_path.join("deploy.ts"))?;
        deploy.write_all(rust_template::ts_deploy_script().as_bytes())?;
    }
    test_template.create_test_files(&project_name, javascript, &program_id.to_string())?;
    if !no_install {
        let package_manager_result = install_node_modules(&package_manager_cmd)?;
        // BUGFIX: the old else-branch printed "Failed to install node modules"
        // even on success. Only act when the install actually failed: retry
        // with npm for non-npm managers, otherwise report the failure.
        if !package_manager_result.status.success() {
            if package_manager_cmd != "npm" {
                println!("Failed {package_manager_cmd} install will attempt to npm install");
                install_node_modules("npm")?;
            } else {
                eprintln!("Failed to install node modules");
            }
        }
    }
    if !no_git {
        let git_result = std::process::Command::new("git")
            .arg("init")
            .stdout(Stdio::inherit())
            .stderr(Stdio::inherit())
            .output()
            .map_err(|e| anyhow::format_err!("git init failed: {}", e))?;
        if !git_result.status.success() {
            eprintln!("Failed to automatically initialize a new git repository");
        }
    }
    if install_agent_skills {
        install_solana_skill();
    }
    println!("{project_name} initialized");
    Ok(())
}
/// Best-effort install of the Solana dev skill for coding agents via `npx`.
/// Silently does nothing when the skill already exists globally
/// (`~/.agents/skills/solana-dev`) or in the project's `.agents/skills/`.
fn install_solana_skill() {
    const SKILL_REPO: &str = "https://github.com/solana-foundation/solana-dev-skill";
    const SKILL_NAME: &str = "solana-dev";
    let global_skill = home_dir()
        .unwrap_or_default()
        .join(".agents")
        .join("skills")
        .join(SKILL_NAME);
    let project_skill = Path::new(".agents").join("skills").join(SKILL_NAME);
    if global_skill.exists() || project_skill.exists() {
        return;
    }
    println!("Installing Solana dev skill for Agents from {SKILL_REPO}");
    let npx_args = [
        "--yes",
        "skills@1.4.4",
        "add",
        SKILL_REPO,
        "--skill",
        "*",
        "-y",
    ];
    let outcome = std::process::Command::new("npx")
        .args(npx_args)
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .status();
    // Failure is non-fatal: warn and tell the user how to install manually.
    if matches!(outcome, Ok(s) if s.success()) {
        println!("Solana dev skill installed successfully");
    } else {
        eprintln!(
            "Warning: Failed to install Solana dev skill. \
            Install manually with:\n npx skills add {SKILL_REPO}"
        );
    }
}
/// Run `<cmd> install` to fetch node modules, inheriting stdout/stderr.
/// On Windows, package-manager shims must be launched through `cmd /C`.
fn install_node_modules(cmd: &str) -> Result<std::process::Output> {
    let mut command = if cfg!(target_os = "windows") {
        let mut c = std::process::Command::new("cmd");
        c.arg(format!("/C {cmd} install"));
        c
    } else {
        let mut c = std::process::Command::new(cmd);
        c.arg("install");
        c
    };
    command
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .output()
        .map_err(|e| anyhow::format_err!("{} install failed: {}", cmd, e))
}
/// Create an additional program in an existing workspace's `programs/`
/// directory and register it in Anchor.toml under the provider cluster.
/// With `--force`, an existing program of the same name is removed first.
fn new(
    cfg_override: &ConfigOverride,
    name: String,
    template: ProgramTemplate,
    force: bool,
) -> Result<()> {
    with_workspace(cfg_override, |cfg| -> Result<()> {
        match cfg.path().parent() {
            None => {
                println!("Unable to make new program");
            }
            Some(parent) => {
                // Work from the workspace root so relative paths resolve.
                std::env::set_current_dir(parent)?;
                let cluster = cfg.provider.cluster.clone();
                let programs = cfg.programs.entry(cluster).or_default();
                if programs.contains_key(&name) {
                    if !force {
                        return Err(anyhow!("Program already exists"));
                    }
                    // --force: drop the existing program directory first.
                    fs::remove_dir_all(std::env::current_dir()?.join("programs").join(&name))?;
                }
                rust_template::create_program(&name, template, None)?;
                programs.insert(
                    name.clone(),
                    ProgramDeployment {
                        address: rust_template::get_or_create_program_id(&name),
                        path: None,
                        idl: None,
                    },
                );
                // Persist the updated program table back to Anchor.toml.
                let toml = cfg.to_string();
                fs::write("Anchor.toml", toml)?;
                println!("Created new program.");
            }
        };
        Ok(())
    })?
}
/// A list of `(path, content)` pairs to materialize on disk.
pub type Files = Vec<(PathBuf, String)>;
/// Create each entry, skipping any path that already exists. Entries whose
/// path has a file extension are written as files (parents created first);
/// extension-less entries are treated as directories.
pub fn create_files(files: &Files) -> Result<()> {
    for (path, content) in files {
        // Normalize '/' separators to the platform separator.
        let normalized = path
            .display()
            .to_string()
            .replace('/', std::path::MAIN_SEPARATOR_STR);
        let target = Path::new(&normalized);
        if target.exists() {
            continue;
        }
        if target.extension().is_some() {
            fs::create_dir_all(target.parent().unwrap())?;
            fs::write(target, content)?;
        } else {
            fs::create_dir_all(target)?;
        }
    }
    Ok(())
}
/// Write each entry to disk, truncating and overwriting any file that already
/// exists; otherwise create the parent directories and the file fresh.
pub fn override_or_create_files(files: &Files) -> Result<()> {
    for (path, content) in files {
        let target = Path::new(path);
        if target.exists() {
            let mut file = fs::OpenOptions::new()
                .write(true)
                .truncate(true)
                .open(target)?;
            file.write_all(content.as_bytes())?;
            file.flush()?;
        } else {
            fs::create_dir_all(target.parent().unwrap())?;
            fs::write(target, content)?;
        }
    }
    Ok(())
}
/// `anchor expand`: run `cargo expand` for one program or every program in
/// the workspace, writing results under `.anchor/expanded-macros`.
pub fn expand(
    cfg_override: &ConfigOverride,
    program_name: Option<String>,
    cargo_args: &[String],
) -> Result<()> {
    // With an explicit program name, move into that member's directory first.
    if let Some(name) = program_name.as_ref() {
        cd_member(cfg_override, name)?;
    }
    let workspace_cfg = Config::discover(cfg_override)?
        .ok_or_else(|| anyhow!("The 'anchor expand' command requires an Anchor workspace."))?;
    let cfg_parent = workspace_cfg.path().parent().expect("Invalid Anchor.toml");
    let cargo = Manifest::discover()?;
    let expansions_path = cfg_parent.join(".anchor").join("expanded-macros");
    fs::create_dir_all(&expansions_path)?;
    // A manifest nested below the workspace root means "expand just that
    // program"; a root-level manifest (or none) means "expand everything".
    match cargo {
        Some(cargo) if cargo.path().parent() != workspace_cfg.path().parent() => expand_program(
            cargo.path().parent().unwrap().to_path_buf(),
            expansions_path,
            cargo_args,
        ),
        _ => expand_all(&workspace_cfg, expansions_path, cargo_args),
    }
}
/// Expand every Rust program in the workspace, restoring the original
/// working directory afterwards.
fn expand_all(
    workspace_cfg: &WithPath<Config>,
    expansions_path: PathBuf,
    cargo_args: &[String],
) -> Result<()> {
    let original_dir = std::env::current_dir()?;
    for program_path in workspace_cfg.get_rust_program_list()? {
        expand_program(program_path, expansions_path.clone(), cargo_args)?;
    }
    std::env::set_current_dir(original_dir)?;
    Ok(())
}
/// Run `cargo expand` for the program at `program_path`, writing the output
/// to `<expansions_path>/<package>/<package>-<version>-<timestamp>.rs`.
fn expand_program(
    program_path: PathBuf,
    expansions_path: PathBuf,
    cargo_args: &[String],
) -> Result<()> {
    let cargo = Manifest::from_path(program_path.join("Cargo.toml"))
        .map_err(|_| anyhow!("Could not find Cargo.toml for program"))?;
    // Use a dedicated target dir so expansion doesn't disturb normal builds.
    let target_dir_arg = {
        let mut arg = OsString::from("--target-dir=");
        arg.push(expansions_path.join("expand-target"));
        arg
    };
    let package_name = &cargo
        .package
        .as_ref()
        .ok_or_else(|| anyhow!("Cargo config is missing a package"))?
        .name;
    let program_expansions_path = expansions_path.join(package_name);
    fs::create_dir_all(&program_expansions_path)?;
    let exit = std::process::Command::new("cargo")
        .arg("expand")
        .arg(target_dir_arg)
        .arg(format!("--package={package_name}"))
        .args(cargo_args)
        .stderr(Stdio::inherit())
        .output()
        .map_err(|e| anyhow::format_err!("{}", e))?;
    if !exit.status.success() {
        eprintln!("'anchor expand' failed. Perhaps you have not installed 'cargo-expand'? https://github.com/dtolnay/cargo-expand#installation");
        std::process::exit(exit.status.code().unwrap_or(1));
    }
    // Timestamped file names keep successive expansions side by side.
    let version = cargo.version();
    let time = chrono::Utc::now().to_string().replace(' ', "_");
    let file_path = program_expansions_path.join(format!("{package_name}-{version}-{time}.rs"));
    fs::write(&file_path, &exit.stdout).map_err(|e| anyhow::format_err!("{}", e))?;
    println!(
        "Expanded {} into file {}\n",
        package_name,
        file_path.to_string_lossy()
    );
    Ok(())
}
/// `anchor build`: compile the workspace programs and emit IDL + TypeScript
/// type files, running any configured pre/post-build hooks.
///
/// Dispatches to `build_all` when run from the workspace root (or outside any
/// Cargo manifest), or to `build_rust_cwd` when run from inside a single
/// program's crate.
#[allow(clippy::too_many_arguments)]
pub fn build(
    cfg_override: &ConfigOverride,
    no_idl: bool,
    idl: Option<String>,
    idl_ts: Option<String>,
    verifiable: bool,
    skip_lint: bool,
    ignore_keys: bool,
    program_name: Option<String>,
    solana_version: Option<String>,
    docker_image: Option<String>,
    bootstrap: BootstrapMode,
    // Optional redirection targets for the child build process, plus extra
    // KEY=VALUE environment entries.
    stdout: Option<File>,
    stderr: Option<File>,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
    no_docs: bool,
) -> Result<()> {
    // When a program is named, run the build from that member's directory.
    if let Some(program_name) = program_name.as_ref() {
        cd_member(cfg_override, program_name)?;
    }
    let cfg = Config::discover(cfg_override)?
        .ok_or_else(|| anyhow!("The 'anchor build' command requires an Anchor workspace."))?;
    let cfg_parent = cfg.path().parent().expect("Invalid Anchor.toml");
    // Overflow checks must be enabled in the workspace Cargo.toml.
    let workspace_cargo_toml_path = cfg_parent.join("Cargo.toml");
    if workspace_cargo_toml_path.exists() {
        check_overflow(workspace_cargo_toml_path)?;
    }
    // Version/dependency checks are advisory only (errors ignored).
    check_anchor_version(&cfg).ok();
    check_deps(&cfg).ok();
    if !ignore_keys {
        check_program_id_mismatch(&cfg, program_name.clone())?;
    }
    // IDL JSON output dir: explicit path or `target/idl`.
    let idl_out = match idl {
        Some(idl) => Some(PathBuf::from(idl)),
        None => Some(cfg_parent.join("target").join("idl")),
    };
    fs::create_dir_all(idl_out.as_ref().unwrap())?;
    // IDL TypeScript output dir: explicit path or `target/types`.
    let idl_ts_out = match idl_ts {
        Some(idl_ts) => Some(PathBuf::from(idl_ts)),
        None => Some(cfg_parent.join("target").join("types")),
    };
    fs::create_dir_all(idl_ts_out.as_ref().unwrap())?;
    if !cfg.workspace.types.is_empty() {
        fs::create_dir_all(cfg_parent.join(&cfg.workspace.types))?;
    };
    cfg.run_hooks(HookType::PreBuild)?;
    let cargo = Manifest::discover()?;
    let build_config = BuildConfig {
        verifiable,
        // CLI flag wins; fall back to the toolchain-pinned solana version.
        solana_version: solana_version.or_else(|| cfg.toolchain.solana_version.clone()),
        docker_image: docker_image.unwrap_or_else(|| cfg.docker()),
        bootstrap,
    };
    match cargo {
        // No manifest in the cwd: build the whole workspace.
        None => build_all(
            &cfg,
            cfg.path(),
            no_idl,
            idl_out,
            idl_ts_out,
            &build_config,
            stdout,
            stderr,
            env_vars,
            cargo_args,
            skip_lint,
            no_docs,
        )?,
        // Manifest at the workspace root: also build everything.
        Some(cargo) if cargo.path().parent() == cfg.path().parent() => build_all(
            &cfg,
            cfg.path(),
            no_idl,
            idl_out,
            idl_ts_out,
            &build_config,
            stdout,
            stderr,
            env_vars,
            cargo_args,
            skip_lint,
            no_docs,
        )?,
        // Nested manifest: build just the program in the cwd.
        Some(cargo) => build_rust_cwd(
            &cfg,
            cargo.path().to_path_buf(),
            no_idl,
            idl_out,
            idl_ts_out,
            &build_config,
            stdout,
            stderr,
            env_vars,
            cargo_args,
            skip_lint,
            no_docs,
        )?,
    }
    cfg.run_hooks(HookType::PostBuild)?;
    set_workspace_dir_or_exit();
    Ok(())
}
/// Builds every Rust program listed in the workspace config.
///
/// Each `build_rust_cwd` call changes the process cwd; the original cwd is
/// restored afterwards. Fix: the per-program `?` used to return from this
/// function directly, skipping cwd restoration on a failed build — the loop
/// is now wrapped in an immediately-invoked closure so `set_current_dir`
/// always runs before the error propagates.
#[allow(clippy::too_many_arguments)]
fn build_all(
    cfg: &WithPath<Config>,
    cfg_path: &Path,
    no_idl: bool,
    idl_out: Option<PathBuf>,
    idl_ts_out: Option<PathBuf>,
    build_config: &BuildConfig,
    stdout: Option<File>,
    stderr: Option<File>,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
    skip_lint: bool,
    no_docs: bool,
) -> Result<()> {
    let cur_dir = std::env::current_dir()?;
    let r = match cfg_path.parent() {
        None => Err(anyhow!("Invalid Anchor.toml at {}", cfg_path.display())),
        Some(_parent) => (|| {
            for p in cfg.get_rust_program_list()? {
                build_rust_cwd(
                    cfg,
                    p.join("Cargo.toml"),
                    no_idl,
                    idl_out.clone(),
                    idl_ts_out.clone(),
                    build_config,
                    // Duplicate the log handles so each program build gets its own.
                    stdout.as_ref().map(|f| f.try_clone()).transpose()?,
                    stderr.as_ref().map(|f| f.try_clone()).transpose()?,
                    env_vars.clone(),
                    cargo_args.clone(),
                    skip_lint,
                    no_docs,
                )?;
            }
            Ok(())
        })(),
    };
    std::env::set_current_dir(cur_dir)?;
    r
}
/// Changes the process cwd to the program's crate directory and dispatches to
/// either the verifiable (Docker) build or the plain local build.
#[allow(clippy::too_many_arguments)]
fn build_rust_cwd(
    cfg: &WithPath<Config>,
    cargo_toml: PathBuf,
    no_idl: bool,
    idl_out: Option<PathBuf>,
    idl_ts_out: Option<PathBuf>,
    build_config: &BuildConfig,
    stdout: Option<File>,
    stderr: Option<File>,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
    skip_lint: bool,
    no_docs: bool,
) -> Result<()> {
    let program_dir = cargo_toml
        .parent()
        .ok_or_else(|| anyhow!("Unable to find parent"))?;
    std::env::set_current_dir(program_dir)?;

    if build_config.verifiable {
        build_cwd_verifiable(
            cfg,
            cargo_toml,
            build_config,
            stdout,
            stderr,
            skip_lint,
            env_vars,
            cargo_args,
            no_docs,
        )
    } else {
        _build_rust_cwd(
            cfg, no_idl, idl_out, idl_ts_out, skip_lint, no_docs, cargo_args,
        )
    }
}
/// Runs a verifiable (Dockerized) build for the program in the current
/// directory, then extracts its IDL and TypeScript type definitions into the
/// workspace `target/` directory.
#[allow(clippy::too_many_arguments)]
fn build_cwd_verifiable(
    cfg: &WithPath<Config>,
    cargo_toml: PathBuf,
    build_config: &BuildConfig,
    stdout: Option<File>,
    stderr: Option<File>,
    skip_lint: bool,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
    no_docs: bool,
) -> Result<()> {
    let workspace_dir = cfg.path().parent().unwrap().canonicalize()?;
    let target_dir = workspace_dir.join("target");

    // Make sure all artifact directories exist before the build starts.
    for sub in ["verifiable", "idl", "types"] {
        fs::create_dir_all(target_dir.join(sub))?;
    }
    if !cfg.workspace.types.is_empty() {
        fs::create_dir_all(workspace_dir.join(&cfg.workspace.types))?;
    }

    let result = docker_build(
        cfg,
        "anchor-program",
        cargo_toml,
        build_config,
        stdout,
        stderr,
        env_vars,
        cargo_args.clone(),
    );

    if let Err(e) = &result {
        eprintln!("Error during Docker build: {e:?}");
    } else {
        // The binary built fine; emit IDL artifacts next to it.
        println!("Extracting the IDL");
        let idl = generate_idl(cfg, skip_lint, no_docs, &cargo_args)?;

        println!("Writing the IDL file");
        let out_file = workspace_dir
            .join("target")
            .join("idl")
            .join(&idl.metadata.name)
            .with_extension("json");
        write_idl(&idl, OutFile::File(out_file))?;

        println!("Writing the .ts file");
        let ts_file = workspace_dir
            .join("target")
            .join("types")
            .join(&idl.metadata.name)
            .with_extension("ts");
        fs::write(&ts_file, idl_ts(&idl)?)?;

        // Mirror the .ts types into the user-configured types dir, if any.
        if !cfg.workspace.types.is_empty() {
            fs::copy(
                ts_file,
                workspace_dir
                    .join(&cfg.workspace.types)
                    .join(idl.metadata.name)
                    .with_extension("ts"),
            )?;
        }

        println!("Build success");
    }

    result
}
/// Runs the verifiable build inside a Docker container.
///
/// Starts a detached container with the workspace bind-mounted at `/workdir`,
/// prepares it (`docker_prep`), builds the program (`docker_build_bpf`), and
/// always tears the container down afterwards (`docker_cleanup`), regardless
/// of whether the build succeeded.
#[allow(clippy::too_many_arguments)]
fn docker_build(
    cfg: &WithPath<Config>,
    container_name: &str,
    cargo_toml: PathBuf,
    build_config: &BuildConfig,
    stdout: Option<File>,
    stderr: Option<File>,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
) -> Result<()> {
    let binary_name = Manifest::from_path(&cargo_toml)?.lib_name()?;

    let workdir = Path::new("/workdir");
    // Bind-mount the workspace root into the container at /workdir.
    let volume_mount = format!(
        "{}:{}",
        cfg.path().parent().unwrap().canonicalize()?.display(),
        workdir.to_str().unwrap(),
    );
    println!("Using image {:?}", build_config.docker_image);

    // Cargo target dir inside the mounted volume, so build artifacts are
    // visible to the host for the later copy-out step.
    let target_dir = workdir.join("docker-target");

    // Start the container detached ("-d"); all work is driven afterwards via
    // `docker exec`, and it is removed in docker_cleanup.
    println!("Run docker image");
    let exit = std::process::Command::new("docker")
        .args([
            "run",
            "-it",
            "-d",
            "--name",
            container_name,
            "--env",
            &format!(
                "CARGO_TARGET_DIR={}",
                target_dir.as_path().to_str().unwrap()
            ),
            "-v",
            &volume_mount,
            "-w",
            workdir.to_str().unwrap(),
            &build_config.docker_image,
            "bash",
        ])
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .output()
        .map_err(|e| anyhow::format_err!("Docker build failed: {}", e))?;
    if !exit.status.success() {
        return Err(anyhow!("Failed to build program"));
    }

    // Prep + build; the result is kept (not `?`-propagated) so cleanup below
    // still runs when the build fails.
    let result = docker_prep(container_name, build_config).and_then(|_| {
        let cfg_parent = cfg.path().parent().unwrap();
        docker_build_bpf(
            container_name,
            cargo_toml.as_path(),
            cfg_parent,
            target_dir.as_path(),
            binary_name,
            stdout,
            stderr,
            env_vars,
            cargo_args,
        )
    });

    // Always remove the container, even on failure.
    docker_cleanup(container_name, target_dir.as_path())?;

    result
}
/// Prepares a freshly started build container: optionally bootstraps a bare
/// Debian image with build tools and rustup, then optionally installs the
/// requested Solana release.
fn docker_prep(container_name: &str, build_config: &BuildConfig) -> Result<()> {
    if matches!(build_config.bootstrap, BootstrapMode::Debian) {
        // Install the essentials needed to compile and to fetch installers.
        docker_exec(container_name, &["apt", "update"])?;
        docker_exec(
            container_name,
            &["apt", "install", "-y", "curl", "build-essential"],
        )?;
        // Install a Rust toolchain via rustup, then drop the installer script.
        docker_exec(
            container_name,
            &["curl", "https://sh.rustup.rs", "-sfo", "rustup.sh"],
        )?;
        docker_exec(container_name, &["sh", "rustup.sh", "-y"])?;
        docker_exec(container_name, &["rm", "-f", "rustup.sh"])?;
    }

    if let Some(solana_version) = &build_config.solana_version {
        println!("Using solana version: {solana_version}");

        // Fetch and run the Anza release installer for the pinned version.
        docker_exec(
            container_name,
            &[
                "curl",
                "-sSfL",
                &format!("https://release.anza.xyz/v{solana_version}/install"),
                "-o",
                "solana_installer.sh",
            ],
        )?;
        docker_exec(container_name, &["sh", "solana_installer.sh"])?;
        docker_exec(container_name, &["rm", "-f", "solana_installer.sh"])?;
    }

    Ok(())
}
/// Builds the program inside the prepared container and copies the resulting
/// `.so` out to the host's `target/verifiable/` directory.
#[allow(clippy::too_many_arguments)]
fn docker_build_bpf(
    container_name: &str,
    cargo_toml: &Path,
    cfg_parent: &Path,
    target_dir: &Path,
    binary_name: String,
    stdout: Option<File>,
    stderr: Option<File>,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
) -> Result<()> {
    // The manifest path must be relative to the workspace root, since that is
    // what is mounted at the container's working directory.
    let manifest_path =
        pathdiff::diff_paths(cargo_toml.canonicalize()?, cfg_parent.canonicalize()?)
            .ok_or_else(|| anyhow!("Unable to diff paths"))?;
    println!(
        "Building {} manifest: {:?}",
        binary_name,
        manifest_path.display()
    );

    // Run the build inside the container, forwarding any user-supplied
    // environment variables and extra cargo arguments.
    let exit = std::process::Command::new("docker")
        .args([
            "exec",
            "--env",
            // Make the toolchains installed by docker_prep visible to cargo.
            "PATH=/root/.local/share/solana/install/active_release/bin:/root/.cargo/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
        ])
        // Each user env var becomes its own `--env VAR=VALUE` pair.
        .args(env_vars
            .iter()
            .map(|x| ["--env", x.as_str()])
            .collect::<Vec<[&str; 2]>>()
            .concat())
        .args([
            container_name,
            "cargo",
        ])
        .args(BUILD_SUBCOMMAND)
        .args([
            "--manifest-path",
            &manifest_path.display().to_string(),
        ])
        .args(cargo_args)
        // Inherit stdio unless the caller supplied log files.
        .stdout(match stdout {
            None => Stdio::inherit(),
            Some(f) => f.into(),
        })
        .stderr(match stderr {
            None => Stdio::inherit(),
            Some(f) => f.into(),
        })
        .output()
        .map_err(|e| anyhow::format_err!("Docker build failed: {}", e))?;
    if !exit.status.success() {
        return Err(anyhow!("Failed to build program"));
    }

    // Copy the built shared object out of the container into
    // <workspace>/target/verifiable/<binary_name>.so.
    println!("Copying out the build artifacts");
    let out_file = cfg_parent
        .canonicalize()?
        .join(
            Path::new("target")
                .join("verifiable")
                .join(&binary_name)
                .with_extension("so"),
        )
        .display()
        .to_string();

    // Source path inside the container: <target_dir>/deploy/<binary_name>.so.
    let mut bin_path = target_dir.join("deploy");
    bin_path.push(format!("{binary_name}.so"));
    let bin_artifact = format!(
        "{}:{}",
        container_name,
        bin_path.as_path().to_str().unwrap()
    );
    let exit = std::process::Command::new("docker")
        .args(["cp", &bin_artifact, &out_file])
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .output()
        .map_err(|e| anyhow::format_err!("{}", e))?;
    if !exit.status.success() {
        Err(anyhow!(
            "Failed to copy binary out of docker. Is the target directory set correctly?"
        ))
    } else {
        Ok(())
    }
}
/// Removes the build artifacts inside the container, then the container itself.
/// Exits the process if the container cannot be removed.
fn docker_cleanup(container_name: &str, target_dir: &Path) -> Result<()> {
    println!("Cleaning up the docker target directory");
    docker_exec(container_name, &["rm", "-rf", target_dir.to_str().unwrap()])?;

    println!("Removing the docker container");
    let output = std::process::Command::new("docker")
        .args(["rm", "-f", container_name])
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .output()
        .map_err(|e| anyhow::format_err!("{}", e))?;
    if output.status.success() {
        Ok(())
    } else {
        println!("Unable to remove the docker container");
        std::process::exit(output.status.code().unwrap_or(1));
    }
}
/// Runs `docker exec <container> <args…>` with inherited stdio, failing when
/// the command exits non-zero.
fn docker_exec(container_name: &str, args: &[&str]) -> Result<()> {
    let output = std::process::Command::new("docker")
        .arg("exec")
        .arg(container_name)
        .args(args)
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .output()
        .map_err(|e| anyhow!("Failed to run command \"{:?}\": {:?}", args, e))?;
    if output.status.success() {
        Ok(())
    } else {
        Err(anyhow!("Failed to run command: {:?}", args))
    }
}
/// Builds the program in the current directory with `cargo build-sbf` and,
/// unless `no_idl` is set, generates its IDL JSON and TypeScript type files.
///
/// Fix: the output-path arms previously wrapped an already-built `PathBuf` in
/// `PathBuf::from(&…)`, cloning it needlessly; the None/Some arms are unified
/// via `unwrap_or_else` with identical resulting paths.
#[allow(clippy::too_many_arguments)]
fn _build_rust_cwd(
    cfg: &WithPath<Config>,
    no_idl: bool,
    idl_out: Option<PathBuf>,
    idl_ts_out: Option<PathBuf>,
    skip_lint: bool,
    no_docs: bool,
    cargo_args: Vec<String>,
) -> Result<()> {
    let exit = std::process::Command::new("cargo")
        .args(BUILD_SUBCOMMAND)
        .args(cargo_args.clone())
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .output()
        .map_err(|e| anyhow::format_err!("{}", e))?;
    if !exit.status.success() {
        // Propagate the compiler's exit code through the CLI process.
        std::process::exit(exit.status.code().unwrap_or(1));
    }

    if !no_idl {
        let idl = generate_idl(cfg, skip_lint, no_docs, &cargo_args)?;

        // Default to the current directory when no output dir was supplied.
        let out = idl_out
            .unwrap_or_else(|| PathBuf::from("."))
            .join(&idl.metadata.name)
            .with_extension("json");
        let ts_out = idl_ts_out
            .unwrap_or_else(|| PathBuf::from("."))
            .join(&idl.metadata.name)
            .with_extension("ts");

        write_idl(&idl, OutFile::File(out))?;
        fs::write(&ts_out, idl_ts(&idl)?)?;

        // Mirror the .ts types into the user-configured types dir, if any.
        let cfg_parent = cfg.path().parent().expect("Invalid Anchor.toml");
        if !cfg.workspace.types.is_empty() {
            fs::copy(
                &ts_out,
                cfg_parent
                    .join(&cfg.workspace.types)
                    .join(&idl.metadata.name)
                    .with_extension("ts"),
            )?;
        }
    }

    Ok(())
}
/// Cargo subcommand (with pinned platform-tools version) used for all program builds.
const BUILD_SUBCOMMAND: &[&str] = &["build-sbf", "--tools-version", "v1.52"];
/// Verifies an on-chain program against its source repository using the
/// external `solana-verify` tool, installing it via AVM when missing.
pub fn verify(
    program_id: Pubkey,
    repo_url: Option<String>,
    commit_hash: Option<String>,
    current_dir: bool,
    program_name: Option<String>,
    args: Vec<String>,
) -> Result<()> {
    // Assemble the argument list for `solana-verify verify-from-repo`.
    let mut command_args: Vec<String> = Vec::new();
    if current_dir {
        let current_path = std::env::current_dir()?
            .to_str()
            .ok_or_else(|| anyhow!("Invalid current directory path"))?
            .to_owned();
        command_args.push(current_path);
        command_args.push("--current-dir".into());
    } else if let Some(url) = repo_url {
        command_args.push(url);
    } else {
        return Err(anyhow!(
            "You must provide either --repo-url or --current-dir"
        ));
    }

    if let Some(commit) = commit_hash {
        command_args.push("--commit-hash".into());
        command_args.push(commit);
    }
    if let Some(name) = program_name {
        command_args.push("--library-name".into());
        command_args.push(name);
    }
    command_args.push("--program-id".into());
    command_args.push(program_id.to_string());
    command_args.extend(args);

    println!("Verifying program {program_id}");

    let verify_path = AVM_HOME.join("bin").join("solana-verify");
    if !verify_path.exists() {
        // `solana-verify` ships with an AVM-installed Anchor; install on demand.
        install_with_avm(env!("CARGO_PKG_VERSION"), true)
            .context("installing Anchor with solana-verify")?;
    }

    let status = std::process::Command::new(verify_path)
        .arg("verify-from-repo")
        .args(&command_args)
        .stdout(std::process::Stdio::inherit())
        .stderr(std::process::Stdio::inherit())
        .status()
        .with_context(|| "Failed to run `solana-verify`")?;
    if !status.success() {
        return Err(anyhow!("Failed to verify program"));
    }
    Ok(())
}
/// Changes the current directory to the workspace program whose Cargo package
/// name or lib name matches `program_name`.
fn cd_member(cfg_override: &ConfigOverride, program_name: &str) -> Result<()> {
    for member in program::get_programs_from_workspace(cfg_override, None)? {
        let cargo_toml = member.path.join("Cargo.toml");
        if !cargo_toml.exists() {
            return Err(anyhow!(
                "Did not find Cargo.toml at the path: {}",
                member.path.display()
            ));
        }
        let manifest = Manifest::from_path(&cargo_toml)?;
        let pkg_name = manifest.package().name();
        if pkg_name == program_name || member.lib_name == program_name {
            std::env::set_current_dir(&member.path)?;
            return Ok(());
        }
    }
    Err(anyhow!("{} is not part of the workspace", program_name,))
}
/// Dispatches the `anchor idl` subcommands to their implementations.
///
/// The `allow_localnet` flag only exists in the CLI surface behind the
/// `idl-localnet-testing` feature; without the feature it is hard-wired to
/// `false`, so IDL writes on localnet are skipped.
fn idl(cfg_override: &ConfigOverride, subcmd: IdlCommand) -> Result<()> {
    match subcmd {
        IdlCommand::Init {
            program_id,
            filepath,
            priority_fee,
            non_canonical,
            #[cfg(feature = "idl-localnet-testing")]
            allow_localnet,
        } => {
            // Bind the feature-gated flag, defaulting to false when the
            // feature is compiled out.
            #[cfg(feature = "idl-localnet-testing")]
            let allow_localnet = allow_localnet;
            #[cfg(not(feature = "idl-localnet-testing"))]
            let allow_localnet = false;
            idl_init(
                program_id,
                cfg_override,
                filepath,
                priority_fee,
                non_canonical,
                allow_localnet,
            )
        }
        IdlCommand::Upgrade {
            program_id,
            filepath,
            priority_fee,
            #[cfg(feature = "idl-localnet-testing")]
            allow_localnet,
        } => {
            // Same feature-gated default as Init above.
            #[cfg(feature = "idl-localnet-testing")]
            let allow_localnet = allow_localnet;
            #[cfg(not(feature = "idl-localnet-testing"))]
            let allow_localnet = false;
            idl_upgrade(
                program_id,
                cfg_override,
                filepath,
                priority_fee,
                allow_localnet,
            )
        }
        IdlCommand::Build {
            program_name,
            out,
            out_ts,
            no_docs,
            skip_lint,
            cargo_args,
        } => idl_build(
            cfg_override,
            program_name,
            out,
            out_ts,
            no_docs,
            skip_lint,
            cargo_args,
        ),
        IdlCommand::Fetch {
            program_id: address,
            out,
            non_canonical,
        } => idl_fetch(cfg_override, address, out, non_canonical),
        IdlCommand::Convert {
            path,
            out,
            program_id,
        } => idl_convert(path, out, program_id),
        IdlCommand::Type { path, out } => idl_type(path, out),
        IdlCommand::Close {
            program_id,
            seed,
            priority_fee,
        } => idl_close_metadata(cfg_override, program_id, seed, priority_fee),
        IdlCommand::CreateBuffer {
            filepath,
            priority_fee,
        } => idl_create_buffer(cfg_override, filepath, priority_fee),
        IdlCommand::SetBufferAuthority {
            buffer,
            new_authority,
            priority_fee,
        } => idl_set_buffer_authority(cfg_override, buffer, new_authority, priority_fee),
        IdlCommand::WriteBuffer {
            program_id,
            buffer,
            seed,
            close_buffer,
            priority_fee,
        } => idl_write_buffer_metadata(
            cfg_override,
            program_id,
            buffer,
            seed,
            close_buffer,
            priority_fee,
        ),
    }
}
/// Writes a program's IDL on-chain for the first time.
///
/// Skipped on localnet unless explicitly allowed, since localnet deployments
/// are ephemeral.
fn idl_init(
    program_id: Option<Pubkey>,
    cfg_override: &ConfigOverride,
    idl_filepath: String,
    priority_fee: Option<u64>,
    non_canonical: bool,
    allow_localnet: bool,
) -> Result<()> {
    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;

    let is_localnet = cluster_url.contains("localhost") || cluster_url.contains("127.0.0.1");
    if is_localnet && !allow_localnet {
        #[cfg(feature = "idl-localnet-testing")]
        println!(
            "Skipping IDL initialization on localnet. To deploy on localnet, use --allow-localnet"
        );
        #[cfg(not(feature = "idl-localnet-testing"))]
        println!("Skipping IDL initialization on localnet");
        return Ok(());
    }

    // Fall back to the address recorded inside the IDL file itself.
    let program_id = match program_id {
        Some(id) => id.to_string(),
        None => convert_idl(&fs::read(&idl_filepath)?)?.address,
    };

    let command = metadata::IdlCommand::funded(
        cluster_url,
        wallet_path,
        priority_fee,
        metadata::FundedIdlSubcommand::Write {
            program_id,
            idl_filepath,
            non_canonical,
        },
    );
    if !command.status()?.success() {
        return Err(anyhow!("Failed to initialize IDL"));
    }
    println!("IDL initialized.");
    Ok(())
}
/// Upgrades a program's on-chain IDL with the contents of `idl_filepath`.
///
/// Skipped on localnet unless explicitly allowed, since localnet deployments
/// are ephemeral. Upgrades always target the canonical IDL account.
fn idl_upgrade(
    program_id: Option<Pubkey>,
    cfg_override: &ConfigOverride,
    idl_filepath: String,
    priority_fee: Option<u64>,
    allow_localnet: bool,
) -> Result<()> {
    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
    let is_localnet = cluster_url.contains("localhost") || cluster_url.contains("127.0.0.1");
    if is_localnet && !allow_localnet {
        #[cfg(feature = "idl-localnet-testing")]
        println!("Skipping IDL upgrade on localnet. To deploy on localnet, use --allow-localnet");
        #[cfg(not(feature = "idl-localnet-testing"))]
        println!("Skipping IDL upgrade on localnet");
        return Ok(());
    }
    // Fall back to the address recorded inside the IDL file itself.
    let program_id = match program_id {
        Some(id) => id.to_string(),
        _ => {
            let idl = fs::read(&idl_filepath)?;
            let idl = convert_idl(&idl)?;
            idl.address
        }
    };
    let command = metadata::IdlCommand::funded(
        cluster_url,
        wallet_path,
        priority_fee,
        metadata::FundedIdlSubcommand::Write {
            program_id,
            idl_filepath,
            non_canonical: false,
        },
    );
    if !command.status()?.success() {
        // Fix: previously said "Failed to initialize IDL" (copy-paste from idl_init).
        return Err(anyhow!("Failed to upgrade IDL"));
    }
    println!("IDL upgraded.");
    Ok(())
}
/// Builds the IDL for a single workspace program and writes it to stdout or
/// to the requested output files.
fn idl_build(
    cfg_override: &ConfigOverride,
    program_name: Option<String>,
    out: Option<String>,
    out_ts: Option<String>,
    no_docs: bool,
    skip_lint: bool,
    cargo_args: Vec<String>,
) -> Result<()> {
    let cfg = Config::discover(cfg_override)?
        .ok_or_else(|| anyhow!("The 'anchor idl build' command requires an Anchor workspace."))?;

    let current_dir = std::env::current_dir()?;

    // Pick the target program: explicit name > the only program > the
    // program whose directory we are currently in.
    let program_path = if let Some(name) = program_name {
        cfg.get_program(&name)?.path
    } else {
        let programs = cfg.read_all_programs()?;
        if programs.len() == 1 {
            programs.into_iter().next().unwrap().path
        } else {
            programs
                .into_iter()
                .find(|program| program.path == current_dir)
                .ok_or_else(|| anyhow!("Not in a program directory"))?
                .path
        }
    };

    // The IDL builder operates on the program's own crate directory; restore
    // the original cwd afterwards.
    std::env::set_current_dir(program_path)?;
    let idl = generate_idl(&cfg, skip_lint, no_docs, &cargo_args)?;
    std::env::set_current_dir(current_dir)?;

    let out = out.map_or(OutFile::Stdout, |path| OutFile::File(PathBuf::from(path)));
    write_idl(&idl, out)?;
    if let Some(path) = out_ts {
        fs::write(path, idl_ts(&idl)?)?;
    }
    Ok(())
}
/// Generates the IDL for the program crate in the current directory.
///
/// Lint checks are skipped when either the workspace config or the caller
/// requests it.
fn generate_idl(
    cfg: &WithPath<Config>,
    skip_lint: bool,
    no_docs: bool,
    cargo_args: &[String],
) -> Result<Idl> {
    // Ensure the program crate enables the `idl-build` feature before building.
    check_idl_build_feature()?;

    anchor_lang_idl::build::IdlBuilder::new()
        .resolution(cfg.features.resolution)
        .skip_lint(cfg.features.skip_lint || skip_lint)
        .no_docs(no_docs)
        .cargo_args(cargo_args.into())
        .build()
}
/// Fetches a program's on-chain IDL and writes it to `out` (or stdout).
fn idl_fetch(
    cfg_override: &ConfigOverride,
    address: Pubkey,
    out: Option<String>,
    non_canonical: bool,
) -> Result<()> {
    let (cluster_url, _) = get_cluster_and_wallet(cfg_override)?;
    let fetch = metadata::UnfundedIdlSubcommand::Fetch {
        program_id: address.to_string(),
        out,
        non_canonical,
    };
    let status = metadata::IdlCommand::unfunded(cluster_url, fetch).status()?;
    if !status.success() {
        return Err(anyhow!("Failed to fetch IDL"));
    }
    Ok(())
}
/// Converts a legacy IDL file to the current spec, optionally overriding the
/// program address recorded in its metadata.
fn idl_convert(path: String, out: Option<String>, program_id: Option<Pubkey>) -> Result<()> {
    let mut bytes = fs::read(path)?;

    if let Some(program_id) = program_id {
        // Inject the address into `metadata` before conversion so the
        // converted IDL carries the override.
        let mut value = serde_json::from_slice::<serde_json::Value>(&bytes)?;
        value
            .as_object_mut()
            .ok_or_else(|| anyhow!("IDL must be an object"))?
            .insert(
                "metadata".into(),
                serde_json::json!({ "address": program_id.to_string() }),
            );
        bytes = serde_json::to_vec(&value)?;
    }

    let idl = convert_idl(&bytes)?;
    let out = match out {
        Some(out) => OutFile::File(PathBuf::from(out)),
        None => OutFile::Stdout,
    };
    write_idl(&idl, out)
}
/// Generates the TypeScript type definition for an IDL file, writing it to
/// `out` or printing it to stdout.
fn idl_type(path: String, out: Option<String>) -> Result<()> {
    let idl = convert_idl(&fs::read(path)?)?;
    let types = idl_ts(&idl)?;
    if let Some(out) = out {
        fs::write(out, types)?;
    } else {
        println!("{types}");
    }
    Ok(())
}
/// Closes a program's on-chain IDL metadata account.
fn idl_close_metadata(
    cfg_override: &ConfigOverride,
    program_id: Pubkey,
    seed: String,
    priority_fee: Option<u64>,
) -> Result<()> {
    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
    let close = metadata::FundedIdlSubcommand::Close {
        program_id: program_id.to_string(),
        seed,
    };
    let status =
        metadata::IdlCommand::funded(cluster_url, wallet_path, priority_fee, close).status()?;
    if !status.success() {
        return Err(anyhow!("Failed to close metadata account"));
    }
    println!("Metadata account closed successfully.");
    Ok(())
}
/// Creates a standalone on-chain buffer account holding the given IDL file.
fn idl_create_buffer(
    cfg_override: &ConfigOverride,
    filepath: String,
    priority_fee: Option<u64>,
) -> Result<()> {
    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
    let create = metadata::FundedIdlSubcommand::CreateBuffer { filepath };
    let status =
        metadata::IdlCommand::funded(cluster_url, wallet_path, priority_fee, create).status()?;
    if !status.success() {
        return Err(anyhow!("Failed to create buffer"));
    }
    println!("Buffer created successfully.");
    Ok(())
}
/// Transfers authority over an IDL buffer account to a new pubkey.
fn idl_set_buffer_authority(
    cfg_override: &ConfigOverride,
    buffer: Pubkey,
    new_authority: Pubkey,
    priority_fee: Option<u64>,
) -> Result<()> {
    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
    let set_authority = metadata::FundedIdlSubcommand::SetBufferAuthority {
        buffer: buffer.to_string(),
        new_authority: new_authority.to_string(),
    };
    let status = metadata::IdlCommand::funded(cluster_url, wallet_path, priority_fee, set_authority)
        .status()?;
    if !status.success() {
        return Err(anyhow!("Failed to set buffer authority"));
    }
    println!("Buffer authority set successfully.");
    Ok(())
}
/// Writes a program's IDL metadata from a previously created buffer account,
/// optionally closing the buffer afterwards.
fn idl_write_buffer_metadata(
    cfg_override: &ConfigOverride,
    program_id: Pubkey,
    buffer: Pubkey,
    seed: String,
    close_buffer: bool,
    priority_fee: Option<u64>,
) -> Result<()> {
    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
    let write = metadata::FundedIdlSubcommand::WriteBuffer {
        program_id: program_id.to_string(),
        buffer: buffer.to_string(),
        seed,
        close_buffer,
    };
    let status =
        metadata::IdlCommand::funded(cluster_url, wallet_path, priority_fee, write).status()?;
    if !status.success() {
        return Err(anyhow!("Failed to write metadata using buffer"));
    }
    println!("Metadata written successfully using buffer.");
    Ok(())
}
/// Renders an IDL as a TypeScript type declaration (`export type … = …;`).
///
/// Identifier-like string values in the serialized JSON are converted to
/// camelCase so the type matches the casing used by the TS client.
fn idl_ts(idl: &Idl) -> Result<String> {
    let idl_name = &idl.metadata.name;
    let type_name = idl_name.to_pascal_case();
    let idl = serde_json::to_string(idl)?;

    // Convert every quoted value matched by the regex to camelCase.
    // NOTE(review): `replace` rewrites ALL occurrences of each matched string
    // anywhere in the JSON, not just the matched position — assumed safe for
    // IDL content; confirm if field names can collide with other values.
    let camel_idl = Regex::new(r#""\w+":"([\w\d]+)""#)?
        .captures_iter(&idl)
        .fold(idl.clone(), |acc, cur| {
            let name = cur.get(1).unwrap().as_str();

            // Leave pubkeys untouched: base58 strings are case-sensitive.
            if Pubkey::try_from(name).is_ok() {
                return acc;
            }

            let camel_name = name.to_lower_camel_case();
            acc.replace(&format!(r#""{name}""#), &format!(r#""{camel_name}""#))
        });

    // Re-parse to validate the rewritten JSON still deserializes as an IDL,
    // then pretty-print it for the generated .ts file.
    let camel_idl = serde_json::to_string_pretty(&serde_json::from_str::<Idl>(&camel_idl)?)?;

    Ok(format!(
        r#"/**
 * Program IDL in camelCase format in order to be used in JS/TS.
 *
 * Note that this is only a type helper and is not the actual IDL. The original
 * IDL can be found at `target/idl/{idl_name}.json`.
 */
export type {type_name} = {camel_idl};
"#
    ))
}
/// Serializes an IDL as pretty-printed JSON to stdout or to a file.
fn write_idl(idl: &Idl, out: OutFile) -> Result<()> {
    let idl_json = serde_json::to_string_pretty(idl)?;
    if let OutFile::File(path) = out {
        fs::write(path, idl_json)?;
    } else {
        println!("{idl_json}");
    }
    Ok(())
}
/// Fetches an on-chain account and prints it as JSON, deserialized via the
/// program's IDL.
///
/// `account_type` must be of the form `<program_name>.<AccountStruct>`. When
/// `idl_filepath` is not given, the IDL is looked up from the workspace's
/// built programs.
fn account(
    cfg_override: &ConfigOverride,
    account_type: String,
    address: Pubkey,
    idl_filepath: Option<String>,
) -> Result<()> {
    // Split "<program>.<Account>" and reject inputs containing more than one dot.
    let (program_name, account_type_name) = account_type
        .split_once('.') .and_then(|(x, y)| y.find('.').map_or_else(|| Some((x, y)), |_| None)) .ok_or_else(|| {
            anyhow!(
                "Please enter the account struct in the following format: <program_name>.<Account>",
            )
        })?;

    let idl = idl_filepath.map_or_else(
        // No explicit IDL file: discover the workspace and use the matching
        // program's built IDL.
        || {
            Config::discover(cfg_override)?
                .ok_or_else(|| anyhow!("The 'anchor account' command requires an Anchor workspace with Anchor.toml for IDL type generation."))?
                .read_all_programs()
                .expect("Workspace must contain atleast one program.")
                .into_iter()
                .find(|p| p.lib_name == *program_name)
                .ok_or_else(|| anyhow!("Program {program_name} not found in workspace."))
                .map(|p| p.idl)?
                .ok_or_else(|| {
                    anyhow!(
                        "IDL not found. Please build the program atleast once to generate the IDL."
                    )
                })
        },
        // Explicit IDL file: load it and require that it matches the program.
        |idl_path| {
            let idl = fs::read(idl_path)?;
            let idl = convert_idl(&idl)?;
            if idl.metadata.name != program_name {
                return Err(anyhow!("IDL does not match program {program_name}."));
            }
            Ok(idl)
        },
    )?;

    // Resolve the cluster: explicit override > workspace config > localnet.
    let cluster = match &cfg_override.cluster {
        Some(cluster) => cluster.clone(),
        None => Config::discover(cfg_override)?
            .map(|cfg| cfg.provider.cluster.clone())
            .unwrap_or(Cluster::Localnet),
    };

    let data = create_client(cluster.url()).get_account_data(&address)?;

    // Skip the account discriminator bytes before deserializing the payload.
    let disc_len = idl
        .accounts
        .iter()
        .find(|acc| acc.name == account_type_name)
        .map(|acc| acc.discriminator.len())
        .ok_or_else(|| anyhow!("Account `{account_type_name}` not found in IDL"))?;
    let mut data_view = &data[disc_len..];

    let deserialized_json =
        deserialize_idl_defined_type_to_json(&idl, account_type_name, &mut data_view)?;

    println!(
        "{}",
        serde_json::to_string_pretty(&deserialized_json).unwrap()
    );

    Ok(())
}
/// Deserializes the named IDL-defined type from `data` into JSON, advancing
/// `data` past the consumed bytes.
///
/// The type is resolved first via the IDL's account list (using the type
/// entry with the same name), then by name among the IDL's type definitions.
fn deserialize_idl_defined_type_to_json(
    idl: &Idl,
    defined_type_name: &str,
    data: &mut &[u8],
) -> Result<JsonValue, anyhow::Error> {
    let defined_type = &idl
        .accounts
        .iter()
        .find(|acc| acc.name == defined_type_name)
        .and_then(|acc| idl.types.iter().find(|ty| ty.name == acc.name))
        .or_else(|| idl.types.iter().find(|ty| ty.name == defined_type_name))
        .ok_or_else(|| anyhow!("Type `{}` not found in IDL.", defined_type_name))?
        .ty;

    let mut deserialized_fields = Map::new();

    match defined_type {
        IdlTypeDefTy::Struct { fields } => {
            if let Some(fields) = fields {
                match fields {
                    // Named struct: one JSON key per field, in declaration order.
                    IdlDefinedFields::Named(fields) => {
                        for field in fields {
                            deserialized_fields.insert(
                                field.name.clone(),
                                deserialize_idl_type_to_json(&field.ty, data, idl)?,
                            );
                        }
                    }
                    // Tuple struct: elements rendered as a JSON array keyed by
                    // the type's own name.
                    IdlDefinedFields::Tuple(fields) => {
                        let mut values = Vec::new();
                        for field in fields {
                            values.push(deserialize_idl_type_to_json(field, data, idl)?);
                        }
                        deserialized_fields
                            .insert(defined_type_name.to_owned(), JsonValue::Array(values));
                    }
                }
            }
        }
        IdlTypeDefTy::Enum { variants } => {
            // A single byte selects the variant by index.
            let repr = <u8 as AnchorDeserialize>::deserialize(data)?;

            let variant = variants
                .get(repr as usize)
                .ok_or_else(|| anyhow!("Error while deserializing enum variant {repr}"))?;

            let mut value = json!({});

            if let Some(enum_field) = &variant.fields {
                match enum_field {
                    IdlDefinedFields::Named(fields) => {
                        let mut values = Map::new();
                        for field in fields {
                            values.insert(
                                field.name.clone(),
                                deserialize_idl_type_to_json(&field.ty, data, idl)?,
                            );
                        }
                        value = JsonValue::Object(values);
                    }
                    IdlDefinedFields::Tuple(fields) => {
                        let mut values = Vec::new();
                        for field in fields {
                            values.push(deserialize_idl_type_to_json(field, data, idl)?);
                        }
                        value = JsonValue::Array(values);
                    }
                }
            }

            // Result shape: { "<VariantName>": <payload-or-empty-object> }.
            deserialized_fields.insert(variant.name.clone(), value);
        }
        // Type alias: deserialize directly as the aliased type.
        IdlTypeDefTy::Type { alias } => {
            return deserialize_idl_type_to_json(alias, data, idl);
        }
    }

    Ok(JsonValue::Object(deserialized_fields))
}
/// Deserializes a single value of `idl_type` from `data` into JSON, advancing
/// `data` past the consumed bytes.
///
/// Compound types recurse; `Defined` types are resolved through `parent_idl`.
fn deserialize_idl_type_to_json(
    idl_type: &IdlType,
    data: &mut &[u8],
    parent_idl: &Idl,
) -> Result<JsonValue, anyhow::Error> {
    if data.is_empty() {
        return Err(anyhow::anyhow!("Unable to parse from empty bytes"));
    }

    Ok(match idl_type {
        IdlType::Bool => json!(<bool as AnchorDeserialize>::deserialize(data)?),
        IdlType::U8 => {
            json!(<u8 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I8 => {
            json!(<i8 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::U16 => {
            json!(<u16 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I16 => {
            json!(<i16 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::U32 => {
            json!(<u32 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I32 => {
            json!(<i32 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::F32 => json!(<f32 as AnchorDeserialize>::deserialize(data)?),
        IdlType::U64 => {
            json!(<u64 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I64 => {
            json!(<i64 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::F64 => json!(<f64 as AnchorDeserialize>::deserialize(data)?),
        IdlType::U128 => {
            json!(<u128 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I128 => {
            json!(<i128 as AnchorDeserialize>::deserialize(data)?)
        }
        // 256-bit integers are not representable until the IDL standard lands.
        IdlType::U256 => todo!("Upon completion of u256 IDL standard"),
        IdlType::I256 => todo!("Upon completion of i256 IDL standard"),
        // `bytes` is rendered as a JSON array of numbers.
        IdlType::Bytes => JsonValue::Array(
            <Vec<u8> as AnchorDeserialize>::deserialize(data)?
                .iter()
                .map(|i| json!(*i))
                .collect(),
        ),
        IdlType::String => json!(<String as AnchorDeserialize>::deserialize(data)?),
        IdlType::Pubkey => {
            json!(<Pubkey as AnchorDeserialize>::deserialize(data)?.to_string())
        }
        // Fixed-size array: the element count comes from the IDL, not the data.
        IdlType::Array(ty, size) => match size {
            IdlArrayLen::Value(size) => {
                let mut array_data: Vec<JsonValue> = Vec::with_capacity(*size);

                for _ in 0..*size {
                    array_data.push(deserialize_idl_type_to_json(ty, data, parent_idl)?);
                }

                JsonValue::Array(array_data)
            }
            IdlArrayLen::Generic(_) => unimplemented!("Generic array length is not yet supported"),
        },
        // Option: a one-byte presence flag followed by the value when present.
        IdlType::Option(ty) => {
            let is_present = <u8 as AnchorDeserialize>::deserialize(data)?;

            if is_present == 0 {
                JsonValue::String("None".to_string())
            } else {
                deserialize_idl_type_to_json(ty, data, parent_idl)?
            }
        }
        // Vec: a u32 length prefix followed by that many elements.
        IdlType::Vec(ty) => {
            let size: usize = <u32 as AnchorDeserialize>::deserialize(data)?
                .try_into()
                .unwrap();

            let mut vec_data: Vec<JsonValue> = Vec::with_capacity(size);

            for _ in 0..size {
                vec_data.push(deserialize_idl_type_to_json(ty, data, parent_idl)?);
            }

            JsonValue::Array(vec_data)
        }
        // User-defined type: resolve by name against the parent IDL.
        IdlType::Defined {
            name,
            generics: _generics,
        } => {
            deserialize_idl_defined_type_to_json(parent_idl, name, data)?
        }
        IdlType::Generic(generic) => json!(generic),
        _ => unimplemented!("{idl_type:?}"),
    })
}
/// Destination for emitted IDL / type output.
enum OutFile {
    /// Print to standard output.
    Stdout,
    /// Write to the given file path.
    File(PathBuf),
}
/// Runs the workspace test flow: optional build, optional deploy, optional
/// local validator startup, then each configured test suite in turn.
#[allow(clippy::too_many_arguments)]
fn test(
    cfg_override: &ConfigOverride,
    program_name: Option<String>,
    skip_deploy: bool,
    skip_local_validator: bool,
    skip_build: bool,
    skip_lint: bool,
    no_idl: bool,
    detach: bool,
    tests_to_run: Vec<String>,
    validator_type: ValidatorType,
    extra_args: Vec<String>,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
) -> Result<()> {
    // Canonicalize the requested test paths up front so bad paths fail early.
    let test_paths = tests_to_run
        .iter()
        .map(|path| {
            PathBuf::from(path)
                .canonicalize()
                .map_err(|_| anyhow!("Wrong path {}", path))
        })
        .collect::<Result<Vec<_>, _>>()?;

    with_workspace(cfg_override, |cfg| -> Result<()> {
        cfg.validator = Some(validator_type);
        // Build everything (or the named program) first unless skipped.
        if !skip_build {
            build(
                cfg_override,
                no_idl,
                None,
                None,
                false,
                skip_lint,
                true,
                program_name.clone(),
                None,
                None,
                BootstrapMode::None,
                None,
                None,
                env_vars,
                cargo_args,
                false,
            )?;
        }

        let root = cfg.path().parent().unwrap().to_owned();
        cfg.add_test_config(root, test_paths)?;

        // Deploy when targeting a remote cluster, or when the caller manages
        // their own local validator.
        let is_localnet = cfg.provider.cluster == Cluster::Localnet;
        if (!is_localnet || skip_local_validator) && !skip_deploy {
            deploy(cfg_override, None, None, false, true, vec![])?;
        }

        cfg.run_hooks(HookType::PreTest)?;

        let mut is_first_suite = true;
        if let Some(test_script) = cfg.scripts.get_mut("test") {
            is_first_suite = false;

            match program_name {
                Some(program_name) => {
                    // When a single program is requested, rewrite the last
                    // `tests/<file>.(js|ts)` reference in the script to point
                    // at that program's test file instead.
                    if let Some((from, to)) = Regex::new("\\s(tests/\\S+\\.(js|ts))")
                        .unwrap()
                        .captures_iter(&test_script.clone())
                        .last()
                        .and_then(|c| c.get(1).and_then(|mtch| c.get(2).map(|ext| (mtch, ext))))
                        .map(|(mtch, ext)| {
                            (
                                mtch.as_str(),
                                format!("tests/{program_name}.{}", ext.as_str()),
                            )
                        })
                    {
                        println!("\nRunning tests of program `{program_name}`!");
                        *test_script = test_script.replace(from, &to);
                    }
                }
                _ => println!(
                    "\nFound a 'test' script in the Anchor.toml. Running it as a test suite!"
                ),
            }

            run_test_suite(
                cfg,
                cfg.path(),
                is_localnet,
                skip_local_validator,
                skip_deploy,
                detach,
                validator_type,
                &cfg.test_validator,
                &cfg.scripts,
                &extra_args,
                &cfg.surfpool_config,
            )?;
        }
        if let Some(test_config) = &cfg.test_config {
            for test_suite in test_config.iter() {
                // Give the previous suite's validator time to shut down
                // before starting the next one.
                if !is_first_suite {
                    std::thread::sleep(std::time::Duration::from_millis(
                        test_suite
                            .1
                            .test
                            .as_ref()
                            .map(|val| val.shutdown_wait)
                            .unwrap_or(SHUTDOWN_WAIT) as u64,
                    ));
                } else {
                    is_first_suite = false;
                }

                run_test_suite(
                    cfg,
                    test_suite.0,
                    is_localnet,
                    skip_local_validator,
                    skip_deploy,
                    detach,
                    validator_type,
                    &test_suite.1.test,
                    &test_suite.1.scripts,
                    &extra_args,
                    &cfg.surfpool_config,
                )?;
            }
        }
        cfg.run_hooks(HookType::PostTest)?;
        Ok(())
    })?
}
/// Run a single test suite end to end.
///
/// Optionally spins up a local validator (surfpool or the legacy
/// `solana-test-validator`), starts best-effort program log streaming, then
/// executes the suite's `test` script through `bash -c` with
/// `ANCHOR_PROVIDER_URL`, `ANCHOR_WALLET` and `NODE_OPTIONS` set. The
/// validator and log streams are always torn down before the test result is
/// inspected.
///
/// Exits the whole process with the script's status code when the tests fail;
/// returns `Err` only when the script could not be run at all.
#[allow(clippy::too_many_arguments)]
fn run_test_suite(
    cfg: &WithPath<Config>,
    test_suite_path: impl AsRef<Path>,
    is_localnet: bool,
    skip_local_validator: bool,
    skip_deploy: bool,
    detach: bool,
    validator_type: ValidatorType,
    test_validator: &Option<TestValidator>,
    scripts: &ScriptsConfig,
    extra_args: &[String],
    surfpool_config: &Option<SurfpoolConfig>,
) -> Result<()> {
    println!("\nRunning test suite: {:#?}\n", test_suite_path.as_ref());

    let mut validator_handle = None;
    if is_localnet && !skip_local_validator {
        match validator_type {
            ValidatorType::Surfpool => {
                // `anchor test` never runs surfpool in full simnet (TUI) mode.
                let full_simnet_mode = false;
                let flags = Some(surfpool_flags(
                    cfg,
                    surfpool_config,
                    full_simnet_mode,
                    skip_deploy,
                    Some(test_suite_path.as_ref()),
                )?);
                validator_handle = Some(start_surfpool_validator(
                    flags,
                    surfpool_config,
                    full_simnet_mode,
                )?);
            }
            ValidatorType::Legacy => {
                let flags = match skip_deploy {
                    true => None,
                    false => Some(validator_flags(cfg, test_validator)?),
                };
                validator_handle = Some(start_solana_test_validator(
                    cfg,
                    test_validator,
                    flags,
                    true,
                )?);
            }
        }
    }

    let url = cluster_url(cfg, test_validator, surfpool_config);

    // Preserve any user-provided NODE_OPTIONS; error out on non-unicode
    // values rather than silently dropping them.
    let node_options = format!(
        "{} {}",
        match std::env::var_os("NODE_OPTIONS") {
            Some(value) => value
                .into_string()
                .map_err(std::env::VarError::NotUnicode)?,
            None => "".to_owned(),
        },
        get_node_dns_option()?,
    );

    // Log streaming is best effort: failure only prints a warning.
    let log_streams = match stream_logs(cfg, &url) {
        Ok(streams) => Some(streams),
        Err(e) => {
            eprintln!("Warning: Failed to setup program log streaming: {:#}", e);
            eprintln!("Program logs will still be visible in the test output.");
            None
        }
    };

    let test_result = {
        let cmd = scripts
            .get("test")
            .expect("Not able to find script for `test`")
            .clone();
        let script_args = format!("{cmd} {}", extra_args.join(" "));
        std::process::Command::new("bash")
            .arg("-c")
            .arg(script_args)
            .env("ANCHOR_PROVIDER_URL", url)
            .env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
            .env("NODE_OPTIONS", node_options)
            .stdout(Stdio::inherit())
            .stderr(Stdio::inherit())
            .output()
            .map_err(anyhow::Error::from)
            .context(cmd)
    };

    // In detach mode keep the validator alive until the user hits enter.
    if test_result.is_ok() && detach {
        println!("Local validator still running. Press Ctrl + C to quit.");
        std::io::stdin().lock().lines().next().unwrap().unwrap();
    }

    if let Some(mut child) = validator_handle {
        if let Err(err) = child.kill() {
            println!("Failed to kill subprocess {}: {}", child.id(), err);
        }
    }
    if let Some(log_streams) = log_streams {
        for handle in log_streams {
            handle.shutdown();
        }
    }

    match test_result {
        Ok(exit) => {
            if !exit.status.success() {
                // `code()` is `None` when the script was killed by a signal;
                // default to 1 instead of panicking (matches `run`).
                std::process::exit(exit.status.code().unwrap_or(1));
            }
        }
        Err(err) => {
            println!("Failed to run test: {err:#}");
            return Err(err);
        }
    }
    Ok(())
}
/// Build the command-line flags for `solana-test-validator` from the
/// workspace configuration: workspace program deployments, `[[test.genesis]]`
/// entries, cloned accounts, deactivated features, and any remaining
/// `[test.validator]` options passed through generically.
fn validator_flags(
    cfg: &WithPath<Config>,
    test_validator: &Option<TestValidator>,
) -> Result<Vec<String>> {
    let programs = cfg.programs.get(&Cluster::Localnet);
    // When `[test] upgradeable = true`, every workspace program is loaded as
    // an upgradeable program with the wallet as upgrade authority.
    let test_upgradeable_program = test_validator
        .as_ref()
        .map(|test_validator| test_validator.upgradeable)
        .unwrap_or(false);
    let mut flags = Vec::new();
    for mut program in cfg.read_all_programs()? {
        let verifiable = false;
        let binary_path = program.binary_path(verifiable).display().to_string();
        // Prefer the address configured under `[programs.localnet]`; fall
        // back to the program's own keypair.
        let address = programs
            .and_then(|m| m.get(&program.lib_name))
            .map(|deployment| Ok(deployment.address.to_string()))
            .unwrap_or_else(|| program.pubkey().map(|p| p.to_string()))?;
        if test_upgradeable_program {
            flags.push("--upgradeable-program".to_string());
            flags.push(address.clone());
            flags.push(binary_path);
            flags.push(cfg.wallet_kp()?.pubkey().to_string());
        } else {
            flags.push("--bpf-program".to_string());
            flags.push(address.clone());
            flags.push(binary_path);
        }
        // Rewrite the on-disk IDL with the resolved address so test clients
        // pick up the right program id.
        if let Some(idl) = program.idl.as_mut() {
            idl.address = address;
            let idl_out = Path::new("target")
                .join("idl")
                .join(&idl.metadata.name)
                .with_extension("json");
            write_idl(idl, OutFile::File(idl_out))?;
        }
    }
    if let Some(test) = test_validator.as_ref() {
        // Extra programs listed under `[[test.genesis]]`.
        if let Some(genesis) = &test.genesis {
            for entry in genesis {
                let program_path = Path::new(&entry.program);
                if !program_path.exists() {
                    return Err(anyhow!(
                        "Program in genesis configuration does not exist at path: {}",
                        program_path.display()
                    ));
                }
                if entry.upgradeable.unwrap_or(false) {
                    flags.push("--upgradeable-program".to_string());
                    flags.push(entry.address.clone());
                    flags.push(entry.program.clone());
                    flags.push(cfg.wallet_kp()?.pubkey().to_string());
                } else {
                    flags.push("--bpf-program".to_string());
                    flags.push(entry.address.clone());
                    flags.push(entry.program.clone());
                }
            }
        }
        if let Some(validator) = &test.validator {
            // Serialize `[test.validator]` to JSON and translate each field
            // into CLI flags; a few keys need special handling below.
            let entries = serde_json::to_value(validator)?;
            for (key, value) in entries.as_object().unwrap() {
                if key == "ledger" {
                    // The ledger path is handled by `test_validator_file_paths`.
                    continue;
                };
                if key == "account" {
                    for entry in value.as_array().unwrap() {
                        flags.push("--account".to_string());
                        flags.push(entry["address"].as_str().unwrap().to_string());
                        flags.push(entry["filename"].as_str().unwrap().to_string());
                    }
                } else if key == "account_dir" {
                    for entry in value.as_array().unwrap() {
                        flags.push("--account-dir".to_string());
                        flags.push(entry["directory"].as_str().unwrap().to_string());
                    }
                } else if key == "clone" {
                    // Cloning needs an upstream RPC endpoint (`url` key) to
                    // fetch the accounts from.
                    let client = if let Some(url) = entries["url"].as_str() {
                        create_client(url)
                    } else {
                        return Err(anyhow!(
                            "Validator url for Solana's JSON RPC should be provided in order to clone accounts from it"
                        ));
                    };
                    // Collect into a HashSet first to de-duplicate addresses.
                    let pubkeys = value
                        .as_array()
                        .unwrap()
                        .iter()
                        .map(|entry| {
                            let address = entry["address"].as_str().unwrap();
                            Pubkey::try_from(address)
                                .map_err(|_| anyhow!("Invalid pubkey {}", address))
                        })
                        .collect::<Result<HashSet<Pubkey>>>()?
                        .into_iter()
                        .collect::<Vec<_>>();
                    let accounts = client.get_multiple_accounts(&pubkeys)?;
                    for (pubkey, account) in pubkeys.into_iter().zip(accounts) {
                        match account {
                            Some(account) => {
                                // Accounts owned by the upgradeable loader that
                                // deserialize as `Program` get the dedicated
                                // clone flag; everything else uses `--clone`.
                                if account.owner == bpf_loader_upgradeable::id()
                                    && matches!(
                                        account.deserialize_data::<UpgradeableLoaderState>()?,
                                        UpgradeableLoaderState::Program { .. }
                                    )
                                {
                                    flags.push("--clone-upgradeable-program".to_string());
                                    flags.push(pubkey.to_string());
                                } else {
                                    flags.push("--clone".to_string());
                                    flags.push(pubkey.to_string());
                                }
                            }
                            _ => return Err(anyhow!("Account {} not found", pubkey)),
                        }
                    }
                } else if key == "deactivate_feature" {
                    // Validate each feature flag is a well-formed pubkey
                    // before emitting any flags.
                    let pubkeys_result: Result<Vec<Pubkey>, _> = value
                        .as_array()
                        .unwrap()
                        .iter()
                        .map(|entry| {
                            let feature_flag = entry.as_str().unwrap();
                            Pubkey::try_from(feature_flag).map_err(|_| {
                                anyhow!("Invalid pubkey (feature flag) {}", feature_flag)
                            })
                        })
                        .collect();
                    let features = pubkeys_result?;
                    for feature in features {
                        flags.push("--deactivate-feature".to_string());
                        flags.push(feature.to_string());
                    }
                } else {
                    // Generic passthrough: `snake_case = v` becomes `--snake-case v`.
                    flags.push(format!("--{}", key.replace('_', "-")));
                    if let serde_json::Value::String(v) = value {
                        // Use the raw string to avoid the surrounding quotes
                        // that `Value::to_string` would add.
                        flags.push(v.to_string());
                    } else {
                        flags.push(value.to_string());
                    }
                }
            }
        }
    }
    Ok(flags)
}
/// Build the command-line flags for `surfpool start` from the workspace and
/// `[surfpool]` configuration.
///
/// Also rewrites each program's on-disk IDL with its resolved address so
/// test clients pick up the right program id.
fn surfpool_flags(
    cfg: &WithPath<Config>,
    surfpool_config: &Option<SurfpoolConfig>,
    full_simnet_mode: bool,
    skip_deploy: bool,
    test_suite_path: Option<&Path>,
) -> Result<Vec<String>> {
    let programs = cfg.programs.get(&Cluster::Localnet);
    let mut flags = Vec::new();

    for mut program in cfg.read_all_programs()? {
        // Prefer the address configured under `[programs.localnet]`; fall
        // back to the program's own keypair.
        let address = programs
            .and_then(|m| m.get(&program.lib_name))
            .map(|deployment| Ok(deployment.address.to_string()))
            .unwrap_or_else(|| program.pubkey().map(|p| p.to_string()))?;
        if let Some(idl) = program.idl.as_mut() {
            idl.address = address;
            let idl_out = Path::new("target")
                .join("idl")
                .join(&idl.metadata.name)
                .with_extension("json");
            write_idl(idl, OutFile::File(idl_out))?;
        }
    }

    if let Some(config) = surfpool_config {
        if let Some(airdrop_addresses) = &config.airdrop_addresses {
            for address in airdrop_addresses {
                flags.push("--airdrop".to_string());
                flags.push(address.to_string());
            }
        }
        if let Some(datasource_rpc_url) = &config.datasource_rpc_url {
            flags.push("--rpc-url".to_string());
            flags.push(datasource_rpc_url.to_string());
        }
        flags.push("--host".to_string());
        flags.push(config.host.to_string());
        flags.push("--port".to_string());
        flags.push(config.rpc_port.to_string());
        if let Some(ws_port) = &config.ws_port {
            flags.push("--ws-port".to_string());
            flags.push(ws_port.to_string());
        }
        if let Some(manifest_file_path) = &config.manifest_file_path {
            flags.push("--manifest-file-path".to_string());
            flags.push(manifest_file_path.to_string());
        }
        if let Some(runbooks) = &config.runbooks {
            for runbook in runbooks {
                flags.push("--runbook".to_string());
                flags.push(runbook.to_string());
            }
        }
        if let Some(slot_time) = &config.slot_time {
            flags.push("--slot-time".to_string());
            flags.push(slot_time.to_string());
        }
    }

    // Default to offline mode unless `online = true` is configured.
    let online = surfpool_config
        .as_ref()
        .and_then(|c| c.online)
        .unwrap_or(false);
    if !online {
        flags.push("--offline".to_string());
    }

    // `unwrap_or_else` avoids allocating the default strings when the value
    // is configured (clippy::or_fun_call).
    let block_production_mode = surfpool_config
        .as_ref()
        .and_then(|c| c.block_production_mode.clone())
        .unwrap_or_else(|| "transaction".into());
    flags.push("--block-production-mode".to_string());
    flags.push(block_production_mode);

    flags.push("--log-level".to_string());
    flags.push(
        surfpool_config
            .as_ref()
            .and_then(|c| c.log_level.clone())
            .unwrap_or_else(|| "none".into()),
    );

    // Outside full simnet mode, run headless with profiling and studio off.
    if !full_simnet_mode {
        flags.push("--no-tui".to_string());
        flags.push("--disable-instruction-profiling".to_string());
        flags.push("--max-profiles".to_string());
        flags.push("1".to_string());
        flags.push("--no-studio".to_string());
    }

    match skip_deploy {
        true => flags.push("--no-deploy".to_string()),
        false => {
            flags.push("--legacy-anchor-compatibility".to_string());
            if let Some(test_suite_path) = test_suite_path {
                flags.push("--anchor-test-config-path".to_string());
                flags.push(test_suite_path.display().to_string());
            }
        }
    }
    Ok(flags)
}
/// Handle for one active program-log websocket subscription.
struct LogStreamHandle {
    subscription: PubsubClientSubscription<RpcResponse<RpcLogsResponse>>,
}
impl LogStreamHandle {
    /// Unsubscribe in a background thread so shutdown never blocks the CLI;
    /// the unsubscribe result is intentionally ignored (best effort).
    fn shutdown(self) {
        std::thread::spawn(move || {
            let _ = self.subscription.send_unsubscribe();
        });
    }
}
/// Spawn a background thread that drains `receiver` and appends each log
/// notification to `log_file_path` in a human-readable format.
///
/// File creation failure is reported to stderr; individual write errors are
/// silently ignored (logging is best effort).
fn spawn_log_receiver_thread<R>(receiver: R, log_file_path: PathBuf)
where
    R: IntoIterator<Item = RpcResponse<RpcLogsResponse>> + Send + 'static,
{
    std::thread::spawn(move || {
        let mut file = match File::create(&log_file_path) {
            Ok(file) => file,
            Err(_) => {
                eprintln!("Failed to create log file: {:?}", log_file_path);
                return;
            }
        };
        for notification in receiver {
            let slot = notification.context.slot;
            let value = notification.value;
            let status = match value.err {
                Some(err) => err.to_string(),
                None => "Ok".to_string(),
            };
            let _ = writeln!(file, "Transaction executed in slot {}:", slot);
            let _ = writeln!(file, " Signature: {}", value.signature);
            let _ = writeln!(file, " Status: {}", status);
            let _ = writeln!(file, " Log Messages:");
            for log in value.logs {
                let _ = writeln!(file, " {}", log);
            }
            let _ = writeln!(file);
            // Flush per entry so the file can be tailed while tests run.
            let _ = file.flush();
        }
    });
}
/// Start program log streaming appropriate for the configured validator.
///
/// Surfpool writes its own logs, so there is nothing to subscribe to; the
/// legacy validator (or no configured validator) uses websocket streaming.
fn stream_logs(config: &WithPath<Config>, rpc_url: &str) -> Result<Vec<LogStreamHandle>> {
    if let Some(ValidatorType::Surfpool) = &config.validator {
        let logging_enabled = config
            .surfpool_config
            .as_ref()
            .and_then(|s| s.log_level.as_ref())
            .map(|level| level.to_ascii_lowercase() != "none")
            .unwrap_or(false);
        if logging_enabled {
            println!("Surfpool validator logs: .surfpool/logs/ directory");
        }
        return Ok(Vec::new());
    }
    stream_solana_logs(config, rpc_url)
}
/// Subscribe over websocket to on-chain logs for every workspace program and
/// every `[[test.genesis]]` program, writing each one's logs to a file under
/// `.anchor/program-logs/`.
///
/// Per-program subscription failures only print a warning so tests can still
/// run without log streaming.
fn stream_solana_logs(config: &WithPath<Config>, rpc_url: &str) -> Result<Vec<LogStreamHandle>> {
    // Start each run from a clean log directory.
    let program_logs_dir = Path::new(".anchor").join("program-logs");
    if program_logs_dir.exists() {
        fs::remove_dir_all(&program_logs_dir)?;
    }
    fs::create_dir_all(&program_logs_dir)?;
    // Derive the websocket endpoint: local validators use the RPC port plus
    // the conventional offset; remote endpoints just swap the URL scheme.
    let ws_url = if rpc_url.contains("127.0.0.1") || rpc_url.contains("localhost") {
        let rpc_port = rpc_url
            .rsplit_once(':')
            .and_then(|(_, port)| port.parse::<u16>().ok())
            .unwrap_or(DEFAULT_RPC_PORT);
        let ws_port = rpc_port + WEBSOCKET_PORT_OFFSET;
        let url = format!("ws://127.0.0.1:{}", ws_port);
        url
    } else {
        rpc_url
            .replace("https://", "wss://")
            .replace("http://", "ws://")
    };
    // Give the validator's websocket endpoint a moment to come up before
    // subscribing.
    std::thread::sleep(std::time::Duration::from_millis(1500));
    let mut handles = vec![];
    for program in config.read_all_programs()? {
        // The deployed address is read from the generated IDL on disk.
        let idl_path = Path::new("target")
            .join("idl")
            .join(&program.lib_name)
            .with_extension("json");
        let idl = fs::read(&idl_path)?;
        let idl = convert_idl(&idl)?;
        let log_file_path =
            program_logs_dir.join(format!("{}.{}.log", idl.address, program.lib_name));
        let program_address = idl.address.clone();
        let (client, receiver) = match PubsubClient::logs_subscribe(
            &ws_url,
            RpcTransactionLogsFilter::Mentions(vec![program_address.clone()]),
            RpcTransactionLogsConfig {
                commitment: Some(CommitmentConfig::confirmed()),
            },
        ) {
            Ok(result) => result,
            Err(e) => {
                // Warn and keep going: one failed subscription should not
                // abort log streaming for the other programs.
                eprintln!(
                    "Warning: Failed to subscribe to logs for program {}: {}",
                    program.lib_name, e
                );
                continue;
            }
        };
        spawn_log_receiver_thread(receiver, log_file_path);
        handles.push(LogStreamHandle {
            subscription: client,
        });
    }
    // Also stream logs for extra programs from `[[test.genesis]]`.
    if let Some(test) = config.test_validator.as_ref() {
        if let Some(genesis) = &test.genesis {
            for entry in genesis {
                let log_file_path = program_logs_dir.join(&entry.address).with_extension("log");
                let address = entry.address.clone();
                let (client, receiver) = match PubsubClient::logs_subscribe(
                    &ws_url,
                    RpcTransactionLogsFilter::Mentions(vec![address.clone()]),
                    RpcTransactionLogsConfig {
                        commitment: Some(CommitmentConfig::confirmed()),
                    },
                ) {
                    Ok(result) => result,
                    Err(e) => {
                        eprintln!(
                            "Warning: Failed to subscribe to logs for genesis program {}: {}",
                            &entry.address, e
                        );
                        continue;
                    }
                };
                spawn_log_receiver_thread(receiver, log_file_path);
                handles.push(LogStreamHandle {
                    subscription: client,
                });
            }
        }
    }
    Ok(handles)
}
/// Spawn `surfpool start` and block until its RPC endpoint answers and all
/// runbook executions report completion.
///
/// Exits the process when the validator does not come up within the
/// configured startup wait.
fn start_surfpool_validator(
    flags: Option<Vec<String>>,
    surfpool_config: &Option<SurfpoolConfig>,
    full_simnet_mode: bool,
) -> Result<Child> {
    let rpc_url = surfpool_rpc_url(surfpool_config);
    // In full simnet mode surfpool owns the terminal; otherwise silence it.
    let (test_validator_stdout, test_validator_stderr) = match full_simnet_mode {
        true => (Stdio::inherit(), Stdio::inherit()),
        false => (Stdio::null(), Stdio::null()),
    };
    let mut validator_handle = std::process::Command::new("surfpool")
        .arg("start")
        .args(flags.unwrap_or_default())
        .stdout(test_validator_stdout)
        .stderr(test_validator_stderr)
        .spawn()
        .map_err(|e| anyhow!("Failed to spawn `surfpool`: {e}"))?;

    // `rpc_url` is not used again, so move it instead of cloning.
    let client = create_client(rpc_url);
    // Poll every 100ms until the RPC endpoint responds or the wait expires.
    let mut count = 0;
    let ms_wait = surfpool_config
        .as_ref()
        .map(|surfpool| surfpool.startup_wait)
        .unwrap_or(STARTUP_WAIT);
    while count < ms_wait {
        if client.get_latest_blockhash().is_ok() {
            break;
        }
        std::thread::sleep(std::time::Duration::from_millis(100));
        count += 100;
    }
    if count >= ms_wait {
        eprintln!(
            "Unable to get latest blockhash. Surfpool validator does not look started. \
            Check .surfpool/logs/ directory for errors. Consider increasing [surfpool.startup_wait] in Anchor.toml."
        );
        validator_handle.kill()?;
        std::process::exit(1);
    }

    // Wait until every runbook execution has completed before handing the
    // validator back to the caller.
    loop {
        let resp = match client.send::<RpcResponse<SurfnetInfoResponse>>(
            RpcRequest::Custom {
                method: "surfnet_getSurfnetInfo",
            },
            serde_json::Value::Null,
        ) {
            Ok(resp) => resp.value,
            Err(err) => {
                // Don't leak the spawned validator process when the info
                // request fails; kill it before propagating the error.
                let _ = validator_handle.kill();
                return Err(err.into());
            }
        };
        if resp
            .runbook_executions
            .iter()
            .all(|ex| ex.completed_at.is_some())
        {
            break;
        }
        std::thread::sleep(std::time::Duration::from_millis(500));
    }
    Ok(validator_handle)
}
/// Spawn `solana-test-validator` with the workspace ledger, the wallet as
/// mint authority, and the given extra flags, then block until its RPC
/// endpoint answers.
///
/// When `test_log_stdout` is true the validator's stdout/stderr are
/// redirected to the ledger's log file; otherwise they are inherited.
///
/// Exits the process when the validator does not come up within the
/// configured startup wait.
fn start_solana_test_validator(
    cfg: &Config,
    test_validator: &Option<TestValidator>,
    flags: Option<Vec<String>>,
    test_log_stdout: bool,
) -> Result<Child> {
    // Resolves (and recreates) the ledger directory and its log file path.
    let (test_ledger_directory, test_ledger_log_filename) =
        test_validator_file_paths(test_validator)?;
    let (test_validator_stdout, test_validator_stderr) = match test_log_stdout {
        true => {
            let test_validator_stdout_file =
                File::create(&test_ledger_log_filename).with_context(|| {
                    format!(
                        "Failed to create validator log file {}",
                        test_ledger_log_filename.display()
                    )
                })?;
            // stderr shares the same underlying file handle as stdout.
            let test_validator_sterr_file = test_validator_stdout_file.try_clone()?;
            (
                Stdio::from(test_validator_stdout_file),
                Stdio::from(test_validator_sterr_file),
            )
        }
        false => (Stdio::inherit(), Stdio::inherit()),
    };
    let rpc_url = test_validator_rpc_url(test_validator);
    // Fail fast, before spawning, when the RPC or faucet port is taken.
    let rpc_port = cfg
        .test_validator
        .as_ref()
        .and_then(|test| test.validator.as_ref().map(|v| v.rpc_port))
        .unwrap_or(DEFAULT_RPC_PORT);
    if !portpicker::is_free(rpc_port) {
        return Err(anyhow!(
            "Your configured rpc port: {rpc_port} is already in use"
        ));
    }
    let faucet_port = cfg
        .test_validator
        .as_ref()
        .and_then(|test| test.validator.as_ref().and_then(|v| v.faucet_port))
        .unwrap_or(solana_faucet::faucet::FAUCET_PORT);
    if !portpicker::is_free(faucet_port) {
        return Err(anyhow!(
            "Your configured faucet port: {faucet_port} is already in use"
        ));
    }
    let mut validator_handle = std::process::Command::new("solana-test-validator")
        .arg("--ledger")
        .arg(test_ledger_directory)
        .arg("--mint")
        .arg(cfg.wallet_kp()?.pubkey().to_string())
        .args(flags.unwrap_or_default())
        .stdout(test_validator_stdout)
        .stderr(test_validator_stderr)
        .spawn()
        .map_err(|e| anyhow!("Failed to spawn `solana-test-validator`: {e}"))?;
    // Poll every 100ms until the RPC endpoint responds or the wait expires.
    let client = create_client(rpc_url);
    let mut count = 0;
    let ms_wait = test_validator
        .as_ref()
        .map(|test| test.startup_wait)
        .unwrap_or(STARTUP_WAIT);
    while count < ms_wait {
        let r = client.get_latest_blockhash();
        if r.is_ok() {
            break;
        }
        std::thread::sleep(std::time::Duration::from_millis(100));
        count += 100;
    }
    if count >= ms_wait {
        eprintln!(
            "Unable to get latest blockhash. Test validator does not look started. \
            Check {test_ledger_log_filename:?} for errors. Consider increasing [test.startup_wait] in Anchor.toml."
        );
        validator_handle.kill()?;
        std::process::exit(1);
    }
    Ok(validator_handle)
}
fn test_validator_rpc_url(test_validator: &Option<TestValidator>) -> String {
match test_validator {
Some(TestValidator {
validator: Some(validator),
..
}) => format!("http://{}:{}", validator.bind_address, validator.rpc_port),
_ => "http://127.0.0.1:8899".to_string(),
}
}
/// RPC URL of the surfpool validator, falling back to the default host/port
/// when no `[surfpool]` section is configured.
fn surfpool_rpc_url(surfpool_config: &Option<SurfpoolConfig>) -> String {
    if let Some(SurfpoolConfig { host, rpc_port, .. }) = surfpool_config {
        format!("http://{}:{}", host, rpc_port)
    } else {
        format!("http://{}:{}", SURFPOOL_HOST, DEFAULT_RPC_PORT)
    }
}
fn test_validator_file_paths(test_validator: &Option<TestValidator>) -> Result<(PathBuf, PathBuf)> {
let ledger_path = match test_validator {
Some(TestValidator {
validator: Some(validator),
..
}) => PathBuf::from(&validator.ledger),
_ => get_default_ledger_path(),
};
if !ledger_path.is_relative() {
eprintln!("Ledger directory {ledger_path:?} must be relative");
std::process::exit(1);
}
if ledger_path.exists() {
fs::remove_dir_all(&ledger_path).with_context(|| {
format!(
"Failed to remove ledger directory {}",
ledger_path.display()
)
})?;
}
fs::create_dir_all(&ledger_path).with_context(|| {
format!(
"Failed to create ledger directory {}",
ledger_path.display()
)
})?;
let log_path = ledger_path.join("test-ledger-log.txt");
Ok((ledger_path, log_path))
}
/// Resolve the RPC URL the current command should talk to: a local validator
/// endpoint for localnet, otherwise the configured cluster's endpoint.
fn cluster_url(
    cfg: &Config,
    test_validator: &Option<TestValidator>,
    surfpool_config: &Option<SurfpoolConfig>,
) -> String {
    if cfg.provider.cluster != Cluster::Localnet {
        return cfg.provider.cluster.url().to_string();
    }
    match &cfg.validator {
        Some(ValidatorType::Surfpool) => surfpool_rpc_url(surfpool_config),
        Some(ValidatorType::Legacy) | None => test_validator_rpc_url(test_validator),
    }
}
/// Remove build artifacts: the `.anchor` directory and everything under
/// `target/` except `target/deploy`, where only the `*.json` program keypair
/// files are kept.
fn clean(cfg_override: &ConfigOverride) -> Result<()> {
    // Prefer the workspace root derived from Anchor.toml; fall back to cwd.
    let workspace_root = if let Ok(Some(cfg)) = Config::discover(cfg_override) {
        cfg.path()
            .parent()
            .expect("Invalid Anchor.toml")
            .to_path_buf()
    } else {
        std::env::current_dir()?
    };
    let dot_anchor_dir = workspace_root.join(".anchor");
    let target_dir = workspace_root.join("target");
    let deploy_dir = target_dir.join("deploy");

    if dot_anchor_dir.exists() {
        fs::remove_dir_all(&dot_anchor_dir)
            .map_err(|e| anyhow!("Could not remove directory {:?}: {}", dot_anchor_dir, e))?;
    }

    if target_dir.exists() {
        // Delete everything in `target/` except the deploy dir, which is
        // selectively cleaned below.
        for entry in fs::read_dir(target_dir)? {
            let path = entry?.path();
            if path.is_dir() && path != deploy_dir {
                fs::remove_dir_all(&path)
                    .map_err(|e| anyhow!("Could not remove directory {}: {}", path.display(), e))?;
            } else if path.is_file() {
                fs::remove_file(&path)
                    .map_err(|e| anyhow!("Could not remove file {}: {}", path.display(), e))?;
            }
        }
    } else {
        println!("skipping target directory: not found")
    }

    if deploy_dir.exists() {
        // Keep only the program keypair files (`*.json`). The allocation-free
        // extension check replaces an `OsString::from` per file.
        for file in fs::read_dir(deploy_dir)? {
            let path = file?.path();
            if !path.extension().is_some_and(|ext| ext == "json") {
                fs::remove_file(&path)
                    .map_err(|e| anyhow!("Could not remove file {}: {}", path.display(), e))?;
            }
        }
    } else {
        println!("skipping deploy directory: not found")
    }
    Ok(())
}
/// Deploy every program in the workspace (or just `program_name` when given)
/// to the configured cluster, running the pre/post deploy hooks around it.
fn deploy(
    cfg_override: &ConfigOverride,
    program_name: Option<String>,
    program_keypair: Option<String>,
    verifiable: bool,
    no_idl: bool,
    solana_args: Vec<String>,
) -> Result<()> {
    with_workspace(cfg_override, |cfg| -> Result<()> {
        let url = cluster_url(cfg, &cfg.test_validator, &cfg.surfpool_config);
        let keypair = cfg.provider.wallet.to_string();
        let client = create_client(&url);
        let solana_args = add_recommended_deployment_solana_args(&client, solana_args)?;

        cfg.run_hooks(HookType::PreDeploy)?;
        println!("Deploying cluster: {url}");
        println!("Upgrade authority: {keypair}");

        for program in cfg.get_programs(program_name)? {
            let binary_path = program.binary_path(verifiable).display().to_string();
            println!("Deploying program {:?}...", program.lib_name);
            println!("Program path: {binary_path}...");

            // Use the explicitly provided keypair for every program, or each
            // program's own keypair file otherwise.
            let program_keypair_filepath = match &program_keypair {
                Some(path) => path.clone(),
                None => program.keypair_file()?.path().display().to_string(),
            };

            program::program_deploy(
                cfg_override,
                Some(strip_workspace_prefix(binary_path)),
                None,
                Some(strip_workspace_prefix(program_keypair_filepath)),
                None,
                None,
                None,
                None,
                no_idl,
                false,
                solana_args.clone(),
            )?;
        }

        println!("Deploy success");
        cfg.run_hooks(HookType::PostDeploy)?;
        Ok(())
    })?
}
/// Upgrade an already-deployed program in place.
///
/// Thin wrapper over `program::program_upgrade`; options this command does
/// not expose are passed as `None`.
fn upgrade(
    cfg_override: &ConfigOverride,
    program_id: Pubkey,
    program_filepath: String,
    max_retries: u32,
    solana_args: Vec<String>,
) -> Result<()> {
    program::program_upgrade(
        cfg_override,
        program_id,
        Some(program_filepath),
        None,
        None,
        None,
        max_retries,
        solana_args,
    )
}
/// Run the workspace migration script (`migrations/deploy.ts` when a
/// tsconfig.json exists, otherwise `migrations/deploy.js`) against the
/// configured cluster.
///
/// A small host script is generated under `.anchor/` and executed there with
/// ts-node (via the configured package manager) or node. Note that the
/// process's working directory is switched to `.anchor` and not restored.
///
/// Exits the process with the script's status code on failure.
fn migrate(cfg_override: &ConfigOverride) -> Result<()> {
    with_workspace(cfg_override, |cfg| -> Result<()> {
        println!("Running migration deploy script");
        let url = cluster_url(cfg, &cfg.test_validator, &cfg.surfpool_config);
        let cur_dir = std::env::current_dir()?;
        let migrations_dir = cur_dir.join("migrations");
        let deploy_ts = Path::new("deploy.ts");
        // TypeScript only when both tsconfig.json and migrations/deploy.ts exist.
        let use_ts = Path::new("tsconfig.json").exists() && migrations_dir.join(deploy_ts).exists();
        if !Path::new(".anchor").exists() {
            fs::create_dir(".anchor")?;
        }
        // The generated host scripts are written to and run from `.anchor`.
        std::env::set_current_dir(".anchor")?;
        let exit = if use_ts {
            let module_path = migrations_dir.join(deploy_ts);
            let deploy_script_host_str =
                rust_template::deploy_ts_script_host(&url, &module_path.display().to_string());
            fs::write(deploy_ts, deploy_script_host_str)?;
            // Run ts-node through the configured package manager (or the default).
            let pkg_manager_cmd = match &cfg.toolchain.package_manager {
                Some(pkg_manager) => pkg_manager.to_string(),
                None => PackageManager::default().to_string(),
            };
            std::process::Command::new(pkg_manager_cmd)
                .args([
                    "run",
                    "ts-node",
                    &fs::canonicalize(deploy_ts)?.to_string_lossy(),
                ])
                .env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
                .stdout(Stdio::inherit())
                .stderr(Stdio::inherit())
                .output()?
        } else {
            let deploy_js = deploy_ts.with_extension("js");
            let module_path = migrations_dir.join(&deploy_js);
            let deploy_script_host_str =
                rust_template::deploy_js_script_host(&url, &module_path.display().to_string());
            fs::write(&deploy_js, deploy_script_host_str)?;
            std::process::Command::new("node")
                .arg(&deploy_js)
                .env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
                .stdout(Stdio::inherit())
                .stderr(Stdio::inherit())
                .output()?
        };
        if !exit.status.success() {
            eprintln!("Deploy failed.");
            std::process::exit(exit.status.code().unwrap());
        }
        println!("Deploy complete.");
        Ok(())
    })?
}
/// Change the working directory to the workspace root, or exit.
///
/// With an Anchor.toml, cd to its parent directory. Without one, accept the
/// current directory only if it is a Cargo workspace containing Solana
/// programs; otherwise print an error and exit.
fn set_workspace_dir_or_exit() {
    const NOT_IN_WORKSPACE_MSG: &str = "Not in a Solana workspace. This command requires either Anchor.toml or a Cargo workspace with Solana programs.";

    let discovered = match Config::discover(&ConfigOverride::default()) {
        Ok(discovered) => discovered,
        Err(err) => {
            println!("Workspace configuration error: {err}");
            std::process::exit(1);
        }
    };

    if let Some(cfg) = discovered {
        // Anchor workspace: cd to the directory containing Anchor.toml.
        match cfg.path().parent() {
            None => println!("Unable to make new program"),
            Some(parent) => {
                if std::env::set_current_dir(parent).is_err() {
                    println!("{NOT_IN_WORKSPACE_MSG}");
                    std::process::exit(1);
                }
            }
        }
        return;
    }

    // No Anchor.toml: require a Cargo workspace with Solana programs.
    let current_dir = match std::env::current_dir() {
        Ok(dir) => dir,
        Err(_) => {
            println!("Unable to determine current directory");
            std::process::exit(1);
        }
    };
    if !current_dir.join("Cargo.toml").exists() {
        println!("{NOT_IN_WORKSPACE_MSG}");
        std::process::exit(1);
    }
    match program::discover_solana_programs(None) {
        Ok(programs) if !programs.is_empty() => {}
        _ => {
            println!("{NOT_IN_WORKSPACE_MSG}");
            std::process::exit(1);
        }
    }
}
/// Request an airdrop of `amount` SOL to `pubkey` (or to the configured
/// wallet when no pubkey is given), wait until the transaction is confirmed,
/// then print the resulting balance.
fn airdrop(cfg_override: &ConfigOverride, amount: f64, pubkey: Option<Pubkey>) -> Result<()> {
    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
    let client = RpcClient::new(cluster_url);

    let recipient_pubkey = if let Some(pubkey) = pubkey {
        pubkey
    } else {
        let keypair = Keypair::read_from_file(&wallet_path)
            .map_err(|e| anyhow!("Failed to read keypair from {}: {}", wallet_path, e))?;
        keypair.pubkey()
    };

    // 1 SOL = 1_000_000_000 lamports.
    let lamports = (amount * 1_000_000_000.0) as u64;
    println!("Requesting airdrop of {} SOL...", amount);
    let signature = client
        .request_airdrop(&recipient_pubkey, lamports)
        .map_err(|e| anyhow!("Airdrop request failed: {}", e))?;
    println!("Signature: {}", signature);

    println!("Waiting for confirmation...");
    // `confirm_transaction` performs a single status check and returns whether
    // the transaction has been confirmed yet. Previously the boolean was
    // ignored, so the balance below could be printed before the airdrop
    // landed; poll until confirmed (or time out after ~30s).
    let mut confirmed = false;
    for _ in 0..60 {
        confirmed = client
            .confirm_transaction(&signature)
            .map_err(|e| anyhow!("Transaction confirmation failed: {}", e))?;
        if confirmed {
            break;
        }
        std::thread::sleep(std::time::Duration::from_millis(500));
    }
    if !confirmed {
        return Err(anyhow!(
            "Airdrop transaction {} was not confirmed in time",
            signature
        ));
    }

    let balance = client.get_balance(&recipient_pubkey)?;
    println!("{}", format_sol(balance));
    Ok(())
}
/// Print the well-known public cluster RPC endpoints.
fn cluster(_cmd: ClusterCommand) -> Result<()> {
    println!("Cluster Endpoints:\n");
    let endpoints = [
        ("Mainnet", "https://api.mainnet-beta.solana.com"),
        ("Devnet", "https://api.devnet.solana.com"),
        ("Testnet", "https://api.testnet.solana.com"),
    ];
    for (name, url) in endpoints {
        println!("* {name} - {url}");
    }
    Ok(())
}
/// Dispatch `anchor config` subcommands to their handlers.
fn config_cmd(cfg_override: &ConfigOverride, cmd: ConfigCommand) -> Result<()> {
    match cmd {
        ConfigCommand::Get => config_get(cfg_override),
        ConfigCommand::Set { url, keypair } => config_set(cfg_override, url, keypair),
    }
}
/// Print the effective provider configuration (cluster URL and wallet path).
fn config_get(cfg_override: &ConfigOverride) -> Result<()> {
    with_workspace(cfg_override, |cfg| -> Result<()> {
        println!(
            "Anchor Configuration:\n\nCluster: {}\nWallet: {}",
            cfg.provider.cluster.url(),
            cfg.provider.wallet
        );
        Ok(())
    })?
}
fn config_set(
cfg_override: &ConfigOverride,
url: Option<String>,
keypair: Option<String>,
) -> Result<()> {
let anchor_toml_path = match Config::discover(cfg_override)? {
Some(cfg) => cfg.path().parent().unwrap().join("Anchor.toml"),
None => bail!("Not in an Anchor workspace"),
};
let mut toml_content =
fs::read_to_string(&anchor_toml_path).context("Failed to read Anchor.toml")?;
let mut toml_doc: toml::Value =
toml::from_str(&toml_content).context("Failed to parse Anchor.toml")?;
let mut updated = false;
if let Some(cluster_url) = url {
let expanded_url = match cluster_url.as_str() {
"m" => "https://api.mainnet-beta.solana.com".to_string(),
"d" => "https://api.devnet.solana.com".to_string(),
"t" => "https://api.testnet.solana.com".to_string(),
"l" => "http://127.0.0.1:8899".to_string(),
_ => cluster_url,
};
if let Some(provider) = toml_doc.get_mut("provider").and_then(|v| v.as_table_mut()) {
provider.insert(
"cluster".to_string(),
toml::Value::String(expanded_url.clone()),
);
println!("Updated cluster to: {}", expanded_url);
updated = true;
}
}
if let Some(keypair_path) = keypair {
let expanded_path = shellexpand::tilde(&keypair_path).to_string();
if !Path::new(&expanded_path).exists() {
eprintln!("Warning: Wallet file does not exist: {}", expanded_path);
}
if let Some(provider) = toml_doc.get_mut("provider").and_then(|v| v.as_table_mut()) {
provider.insert(
"wallet".to_string(),
toml::Value::String(expanded_path.clone()),
);
println!("Updated wallet to: {}", expanded_path);
updated = true;
}
}
if updated {
toml_content =
toml::to_string_pretty(&toml_doc).context("Failed to serialize Anchor.toml")?;
fs::write(&anchor_toml_path, toml_content).context("Failed to write Anchor.toml")?;
println!("\nConfiguration updated successfully!");
} else {
println!("No changes made. Use --url or --keypair to update settings.");
}
Ok(())
}
fn shell(cfg_override: &ConfigOverride) -> Result<()> {
with_workspace(cfg_override, |cfg| -> Result<()> {
let programs = {
let mut idls: HashMap<String, Idl> = cfg
.read_all_programs()?
.iter()
.filter(|program| program.idl.is_some())
.map(|program| {
(
program.idl.as_ref().unwrap().metadata.name.clone(),
program.idl.clone().unwrap(),
)
})
.collect();
if let Some(programs) = cfg.programs.get(&cfg.provider.cluster) {
let _ = programs
.iter()
.map(|(name, pd)| {
if let Some(idl_fp) = &pd.idl {
let file_str =
fs::read_to_string(idl_fp).expect("Unable to read IDL file");
let idl = serde_json::from_str(&file_str).expect("Idl not readable");
idls.insert(name.clone(), idl);
}
})
.collect::<Vec<_>>();
}
match cfg.programs.get(&cfg.provider.cluster) {
None => Vec::new(),
Some(programs) => programs
.iter()
.filter_map(|(name, program_deployment)| {
Some(ProgramWorkspace {
name: name.to_string(),
program_id: program_deployment.address,
idl: match idls.get(name) {
None => return None,
Some(idl) => idl.clone(),
},
})
})
.collect::<Vec<ProgramWorkspace>>(),
}
};
let url = cluster_url(cfg, &cfg.test_validator, &cfg.surfpool_config);
let js_code = rust_template::node_shell(&url, &cfg.provider.wallet.to_string(), programs)?;
let mut child = std::process::Command::new("node")
.args(["-e", &js_code, "-i", "--experimental-repl-await"])
.stdout(Stdio::inherit())
.stderr(Stdio::inherit())
.spawn()
.map_err(|e| anyhow::format_err!("{}", e))?;
if !child.wait()?.success() {
println!("Error running node shell");
return Ok(());
}
Ok(())
})?
}
fn run(cfg_override: &ConfigOverride, script: String, script_args: Vec<String>) -> Result<()> {
with_workspace(cfg_override, |cfg| -> Result<()> {
let url = cluster_url(cfg, &cfg.test_validator, &cfg.surfpool_config);
let script = cfg
.scripts
.get(&script)
.ok_or_else(|| anyhow!("Unable to find script"))?;
let script_with_args = format!("{script} {}", script_args.join(" "));
let exit = std::process::Command::new("bash")
.arg("-c")
.arg(&script_with_args)
.env("ANCHOR_PROVIDER_URL", url)
.env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
.stdout(Stdio::inherit())
.stderr(Stdio::inherit())
.output()
.unwrap();
if !exit.status.success() {
std::process::exit(exit.status.code().unwrap_or(1));
}
Ok(())
})?
}
/// Dispatch `anchor keys` subcommands to their handlers.
fn keys(cfg_override: &ConfigOverride, cmd: KeysCommand) -> Result<()> {
    match cmd {
        KeysCommand::List => keys_list(cfg_override),
        KeysCommand::Sync { program_name } => keys_sync(cfg_override, program_name),
    }
}
/// Print `<lib_name>: <pubkey>` for every program in the workspace.
fn keys_list(cfg_override: &ConfigOverride) -> Result<()> {
    with_workspace(cfg_override, |cfg| -> Result<()> {
        cfg.read_all_programs()?
            .into_iter()
            .try_for_each(|program| {
                program
                    .pubkey()
                    .map(|pubkey| println!("{}: {}", program.lib_name, pubkey))
            })
    })?
}
/// Sync program id declarations with each program's actual keypair: the
/// `declare_id!` in `src/lib.rs`/`src/id.rs` and the matching entry under
/// `[programs.<cluster>]` in Anchor.toml for the configured cluster.
fn keys_sync(cfg_override: &ConfigOverride, program_name: Option<String>) -> Result<()> {
    with_workspace(cfg_override, |cfg| -> Result<()> {
        // Capture group 3 holds the declared program id string.
        let declare_id_regex = RegexBuilder::new(r#"^(([\w]+::)*)declare_id!\("(\w*)"\)"#)
            .multi_line(true)
            .build()
            .unwrap();
        let cfg_cluster = cfg.provider.cluster.to_owned();
        println!("Syncing program ids for the configured cluster ({cfg_cluster})\n");
        let mut changed_src = false;
        for program in cfg.get_programs(program_name)? {
            let actual_program_id = program.pubkey()?.to_string();
            // Only these two files are scanned for a `declare_id!`.
            let src_path = program.path.join("src");
            let files_to_check = vec![src_path.join("lib.rs"), src_path.join("id.rs")];
            for path in files_to_check {
                let mut content = match fs::read_to_string(&path) {
                    Ok(content) => content,
                    Err(_) => continue,
                };
                // `Some` only when a declaration exists AND differs from the
                // actual program id.
                let incorrect_program_id = declare_id_regex
                    .captures(&content)
                    .and_then(|captures| captures.get(3))
                    .filter(|program_id_match| program_id_match.as_str() != actual_program_id);
                if let Some(program_id_match) = incorrect_program_id {
                    println!("Found incorrect program id declaration in {path:?}");
                    content.replace_range(program_id_match.range(), &actual_program_id);
                    fs::write(&path, content)?;
                    changed_src = true;
                    println!("Updated to {actual_program_id}\n");
                    // Stop after the first fixed declaration for this program.
                    break;
                }
            }
            // Also sync the `[programs.<cluster>]` entry in Anchor.toml.
            'outer: for (cluster, programs) in &mut cfg.programs {
                if cluster != &cfg_cluster {
                    continue;
                }
                for (name, deployment) in programs {
                    if name != &program.lib_name {
                        continue;
                    }
                    if deployment.address.to_string() != actual_program_id {
                        println!("Found incorrect program id declaration in Anchor.toml for the program `{name}`");
                        deployment.address = Pubkey::try_from(actual_program_id.as_str()).unwrap();
                        // Rewrite Anchor.toml from the updated in-memory config.
                        fs::write(cfg.path(), cfg.to_string())?;
                        println!("Updated to {actual_program_id}\n");
                        break 'outer;
                    }
                }
            }
        }
        println!("All program id declarations are synced.");
        if changed_src {
            println!("Please rebuild the program to update the generated artifacts.")
        }
        Ok(())
    })?
}
/// Verifies that each program's `declare_id!` in its source matches the id
/// derived from the program keypair; errors on the first mismatch found.
///
/// Checks `src/lib.rs` and `src/id.rs` for every selected program; files that
/// cannot be read are skipped.
fn check_program_id_mismatch(cfg: &WithPath<Config>, program_name: Option<String>) -> Result<()> {
    // Capture group 3 is the base58 id inside `declare_id!("...")`.
    let declare_id_regex = RegexBuilder::new(r#"^(([\w]+::)*)declare_id!\("(\w*)"\)"#)
        .multi_line(true)
        .build()
        .unwrap();
    for program in cfg.get_programs(program_name)? {
        let keypair_program_id = program.pubkey()?.to_string();
        let src_dir = program.path.join("src");
        for file_name in ["lib.rs", "id.rs"] {
            let path = src_dir.join(file_name);
            let Ok(content) = fs::read_to_string(&path) else {
                continue;
            };
            // First `declare_id!` occurrence only, kept when it disagrees
            // with the keypair-derived id.
            let mismatch = declare_id_regex
                .captures(&content)
                .and_then(|captures| captures.get(3))
                .map(|m| m.as_str())
                .filter(|declared| *declared != keypair_program_id);
            if let Some(declared_id) = mismatch {
                return Err(anyhow!(
                    "Program ID mismatch detected for program '{}':\n \
                     Keypair file has: {}\n \
                     Source code has: {}\n\n\
                     Please run 'anchor keys sync' to update the program ID in your source code or use the '--ignore-keys' flag to skip this check.",
                    program.lib_name,
                    keypair_program_id,
                    declared_id
                ));
            }
        }
    }
    Ok(())
}
/// Runs a local development network: optionally builds the workspace, starts
/// the selected validator (Surfpool or the legacy solana-test-validator),
/// streams program logs, then blocks until the user presses Enter before
/// tearing everything down.
#[allow(clippy::too_many_arguments)]
fn localnet(
    cfg_override: &ConfigOverride,
    skip_build: bool,
    skip_deploy: bool,
    skip_lint: bool,
    ignore_keys: bool,
    validator_type: ValidatorType,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
) -> Result<()> {
    with_workspace(cfg_override, |cfg| -> Result<()> {
        // Build first so deployable artifacts exist before the validator
        // starts (unless the caller opted out).
        if !skip_build {
            build(
                cfg_override,
                false,
                None,
                None,
                false,
                skip_lint,
                ignore_keys,
                None,
                None,
                None,
                BootstrapMode::None,
                None,
                None,
                env_vars,
                cargo_args,
                false,
            )?;
        }
        let validator_handle: Option<Child> = match validator_type {
            ValidatorType::Surfpool => {
                let full_simnet_mode = true;
                // Surfpool flags are always computed; `skip_deploy` is passed
                // through so the flag builder can omit program deployment.
                let flags = Some(surfpool_flags(
                    cfg,
                    &cfg.surfpool_config,
                    full_simnet_mode,
                    skip_deploy,
                    None,
                )?);
                Some(start_surfpool_validator(
                    flags,
                    &cfg.surfpool_config,
                    full_simnet_mode,
                )?)
            }
            ValidatorType::Legacy => {
                // For the legacy validator, deployment is driven entirely by
                // the presence of flags: no flags means nothing is deployed.
                let flags = match skip_deploy {
                    true => None,
                    false => Some(validator_flags(cfg, &cfg.test_validator)?),
                };
                Some(start_solana_test_validator(
                    cfg,
                    &cfg.test_validator,
                    flags,
                    false,
                )?)
            }
        };
        // NOTE(review): the log-stream URL is derived from the test_validator
        // config even when the Surfpool validator was started — confirm both
        // validators listen on the same RPC endpoint.
        let url = test_validator_rpc_url(&cfg.test_validator);
        // Log streaming is best-effort: a failure here only degrades output,
        // it does not abort localnet.
        let log_streams = match stream_logs(cfg, &url) {
            Ok(streams) => {
                println!(
                    "Log streams set up successfully ({} streams)",
                    streams.len()
                );
                Some(streams)
            }
            Err(e) => {
                eprintln!("Warning: Failed to setup program log streaming: {:#}", e);
                eprintln!("         Program logs will still be visible in the validator output.");
                None
            }
        };
        // Block until the user presses Enter, then tear down.
        std::io::stdin().lock().lines().next().unwrap().unwrap();
        if let Some(mut handle) = validator_handle {
            if let Err(err) = handle.kill() {
                println!("Failed to kill subprocess {}: {}", handle.id(), err);
            }
        }
        if let Some(log_streams) = log_streams {
            for handle in log_streams {
                handle.shutdown();
            }
        }
        Ok(())
    })?
}
fn with_workspace<R>(
cfg_override: &ConfigOverride,
f: impl FnOnce(&mut WithPath<Config>) -> R,
) -> Result<R> {
set_workspace_dir_or_exit();
let mut cfg = Config::discover(cfg_override)
.map_err(|e| anyhow!("Workspace configuration error: {}", e))?
.ok_or_else(|| anyhow!("This command requires an Anchor workspace."))?;
let r = f(&mut cfg);
set_workspace_dir_or_exit();
Ok(r)
}
/// Returns true for directory entries that should be skipped during workspace
/// walks: dot-entries (anything starting with '.') and `target` build output.
/// Entries whose names are not valid UTF-8 are treated as not hidden.
fn is_hidden(entry: &walkdir::DirEntry) -> bool {
    match entry.file_name().to_str() {
        // The `== "."` check is subsumed by `starts_with('.')` but kept for
        // behavioral parity with the original logic.
        Some(name) => name == "." || name.starts_with('.') || name == "target",
        None => false,
    }
}
fn get_node_version() -> Result<Version> {
let node_version = std::process::Command::new("node")
.arg("--version")
.stderr(Stdio::inherit())
.output()
.map_err(|e| anyhow::format_err!("node failed: {}", e))?;
let output = std::str::from_utf8(&node_version.stdout)?
.strip_prefix('v')
.unwrap()
.trim();
Version::parse(output).map_err(Into::into)
}
/// Augments `solana program deploy`-style args with recommended defaults when
/// the caller did not already supply them:
/// - `--with-compute-unit-price <recommended fee>` (queried from the cluster),
/// - `--max-sign-attempts 30`,
/// - `--buffer <temp keypair>` so interrupted deploys can be resumed.
///
/// Previously the whole `Vec` was cloned up front just so the original could
/// be inspected; since every value pushed here is distinct from the flags
/// checked afterwards, mutating the owned `args` in place is equivalent.
///
/// # Errors
/// Fails when the priority fee cannot be fetched or the temporary buffer
/// keypair cannot be written.
fn add_recommended_deployment_solana_args(
    client: &RpcClient,
    mut args: Vec<String>,
) -> Result<Vec<String>> {
    if !args.contains(&"--with-compute-unit-price".to_string()) {
        let priority_fee = get_recommended_micro_lamport_fee(client)?;
        args.push("--with-compute-unit-price".to_string());
        args.push(priority_fee.to_string());
    }
    const DEFAULT_MAX_SIGN_ATTEMPTS: u8 = 30;
    if !args.contains(&"--max-sign-attempts".to_string()) {
        args.push("--max-sign-attempts".to_string());
        args.push(DEFAULT_MAX_SIGN_ATTEMPTS.to_string());
    }
    if !args.contains(&"--buffer".to_owned()) {
        // Reuse a stable temp keypair path across runs so a failed deploy can
        // resume from the same buffer account.
        let tmp_keypair_path = std::env::temp_dir().join("anchor-upgrade-buffer.json");
        if !tmp_keypair_path.exists() {
            if let Err(err) = Keypair::new().write_to_file(&tmp_keypair_path) {
                return Err(anyhow!(
                    "Error creating keypair for buffer account, {:?}",
                    err
                ));
            }
        }
        args.push("--buffer".to_owned());
        args.push(tmp_keypair_path.to_string_lossy().to_string());
    }
    Ok(args)
}
/// Returns the Node.js CLI flag forcing IPv4-first DNS resolution when the
/// installed node version supports it (>= 16.4.0), or an empty string
/// otherwise.
fn get_node_dns_option() -> Result<&'static str> {
    let version = get_node_version()?;
    // `--dns-result-order` was introduced in Node 16.4.0.
    let supports_dns_order = VersionReq::parse(">=16.4.0").unwrap().matches(&version);
    if supports_dns_order {
        Ok("--dns-result-order=ipv4first")
    } else {
        Ok("")
    }
}
/// Converts an absolute path into a path relative to the current working
/// directory when it lives underneath it; otherwise returns the input
/// unchanged.
fn strip_workspace_prefix(absolute_path: String) -> String {
    let mut workspace_prefix = std::env::current_dir().unwrap().display().to_string();
    workspace_prefix.push_str(std::path::MAIN_SEPARATOR_STR);
    match absolute_path.strip_prefix(&workspace_prefix) {
        Some(relative) => relative.to_string(),
        None => absolute_path,
    }
}
/// Builds an RPC client for `url` pinned to `confirmed` commitment, the
/// commitment level used throughout the CLI.
fn create_client<U: ToString>(url: U) -> RpcClient {
    let commitment = CommitmentConfig::confirmed();
    RpcClient::new_with_commitment(url, commitment)
}
/// Prints the public key of the configured wallet's keypair.
fn address(cfg_override: &ConfigOverride) -> Result<()> {
    let (_cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
    match Keypair::read_from_file(&wallet_path) {
        Ok(keypair) => {
            println!("{}", keypair.pubkey());
            Ok(())
        }
        Err(e) => Err(anyhow!("Failed to read keypair from {}: {}", wallet_path, e)),
    }
}
/// Prints the balance of `pubkey` (or of the configured wallet when no pubkey
/// is given), in lamports when `lamports` is set and in SOL otherwise.
fn balance(cfg_override: &ConfigOverride, pubkey: Option<Pubkey>, lamports: bool) -> Result<()> {
    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
    let client = RpcClient::new(cluster_url);
    // Fall back to the wallet's own pubkey when none was supplied.
    let account_pubkey = match pubkey {
        Some(explicit) => explicit,
        None => Keypair::read_from_file(&wallet_path)
            .map_err(|e| anyhow!("Failed to read keypair from {}: {}", wallet_path, e))?
            .pubkey(),
    };
    let balance = client.get_balance(&account_pubkey)?;
    match lamports {
        true => println!("{}", balance),
        false => println!("{}", format_sol(balance)),
    }
    Ok(())
}
/// Prints the current epoch number of the configured cluster.
fn epoch(cfg_override: &ConfigOverride) -> Result<()> {
    let (cluster_url, _wallet) = get_cluster_and_wallet(cfg_override)?;
    let info = RpcClient::new(cluster_url).get_epoch_info()?;
    println!("{}", info.epoch);
    Ok(())
}
/// Prints detailed information about the current epoch: block height, slot,
/// epoch number, slot range, completion percentage, and — when recent
/// performance samples are available — a best-effort elapsed/remaining time
/// estimate (estimated values are marked with `*`).
fn epoch_info(cfg_override: &ConfigOverride) -> Result<()> {
    let (cluster_url, _wallet_path) = get_cluster_and_wallet(cfg_override)?;
    let client = RpcClient::new(cluster_url);
    let epoch_info = client.get_epoch_info()?;
    // `slot_index` is the current slot's offset within the epoch, so the
    // epoch's first absolute slot is the current slot minus that offset.
    let first_slot_in_epoch = epoch_info.absolute_slot - epoch_info.slot_index;
    let last_slot_in_epoch = first_slot_in_epoch + epoch_info.slots_in_epoch;
    let epoch_completed_percent =
        epoch_info.slot_index as f64 / epoch_info.slots_in_epoch as f64 * 100.0;
    let remaining_slots = epoch_info.slots_in_epoch - epoch_info.slot_index;
    println!("Block height: {}", epoch_info.block_height);
    println!("Slot: {}", epoch_info.absolute_slot);
    println!("Epoch: {}", epoch_info.epoch);
    if let Some(tx_count) = epoch_info.transaction_count {
        println!("Transaction Count: {}", tx_count);
    }
    println!(
        "Epoch Slot Range: [{}..{})",
        first_slot_in_epoch, last_slot_in_epoch
    );
    println!("Epoch Completed Percent: {:>3.3}%", epoch_completed_percent);
    println!(
        "Epoch Completed Slots: {}/{} ({} remaining)",
        epoch_info.slot_index, epoch_info.slots_in_epoch, remaining_slots
    );
    // Time estimation is best-effort: derive an average slot duration from up
    // to 60 recent performance samples and extrapolate from it. Skipped
    // silently when the RPC call fails.
    if let Ok(samples) = client.get_recent_performance_samples(Some(60)) {
        // Sum slots and seconds across all samples (saturating to avoid
        // overflow on pathological inputs).
        let (total_slots, total_secs) =
            samples.iter().fold((0u64, 0u64), |(slots, secs), sample| {
                (
                    slots.saturating_add(sample.num_slots),
                    secs.saturating_add(sample.sample_period_secs as u64),
                )
            });
        if total_slots > 0 {
            let avg_slot_time_ms = (total_secs * 1000) / total_slots;
            let remaining_secs = (remaining_slots * avg_slot_time_ms) / 1000;
            // Wall-clock time of the epoch's start. The first slot may have
            // been skipped, so take the first *actual* block at or after it
            // and back-adjust by the average slot time for any gap.
            let start_block_time = client
                .get_blocks_with_limit(first_slot_in_epoch, 1)
                .ok()
                .and_then(|slots| slots.first().cloned())
                .and_then(|first_actual_block| {
                    client.get_block_time(first_actual_block).ok().map(|time| {
                        let slot_diff = first_actual_block.saturating_sub(first_slot_in_epoch);
                        let time_adjustment = (slot_diff * avg_slot_time_ms / 1000) as i64;
                        time.saturating_sub(time_adjustment)
                    })
                });
            let current_block_time = client.get_block_time(epoch_info.absolute_slot).ok();
            // Prefer real block timestamps; fall back to a pure slot-count
            // estimate (flagged `is_estimated`) when either lookup fails.
            let (elapsed_secs, is_estimated) = if let (Some(start_time), Some(current_time)) =
                (start_block_time, current_block_time)
            {
                ((current_time - start_time) as u64, false)
            } else {
                ((epoch_info.slot_index * avg_slot_time_ms) / 1000, true)
            };
            let total_secs = elapsed_secs + remaining_secs;
            let estimated_marker = if is_estimated { "*" } else { "" };
            println!(
                "Epoch Completed Time: {}{}/{} ({} remaining)",
                format_duration_secs(elapsed_secs),
                estimated_marker,
                format_duration_secs(total_secs),
                format_duration_secs(remaining_secs)
            );
        }
    }
    Ok(())
}
/// Formats a duration in seconds as a compact human-readable string, e.g.
/// `1day 2h 3m 4s`. Zero-valued units are omitted, except that `0s` is
/// printed for a zero duration so the output is never empty.
fn format_duration_secs(total_seconds: u64) -> String {
    let days = total_seconds / 86_400;
    let hours = (total_seconds / 3_600) % 24;
    let minutes = (total_seconds / 60) % 60;
    let seconds = total_seconds % 60;

    let mut parts: Vec<String> = [(days, "day"), (hours, "h"), (minutes, "m")]
        .into_iter()
        .filter(|&(value, _)| value > 0)
        .map(|(value, unit)| format!("{value}{unit}"))
        .collect();

    // Always emit a seconds component when it is non-zero, or when nothing
    // else was emitted (so "0s" is returned for a zero duration).
    if seconds > 0 || parts.is_empty() {
        parts.push(format!("{seconds}s"));
    }

    parts.join(" ")
}
/// Subscribes to transaction logs over the cluster's websocket endpoint and
/// prints each notification (slot, signature, status, log lines). Loops
/// forever on a healthy connection; returns an error once the subscription
/// channel disconnects.
fn logs_subscribe(
    cfg_override: &ConfigOverride,
    include_votes: bool,
    address: Option<Vec<Pubkey>>,
) -> Result<()> {
    let (cluster_url, _wallet_path) = get_cluster_and_wallet(cfg_override)?;
    // Derive the websocket URL from the RPC URL. For local validators the
    // websocket listens one port above the default RPC port (8899 -> 8900);
    // for remote clusters only the scheme changes.
    let ws_url = if cluster_url.contains("localhost") || cluster_url.contains("127.0.0.1") {
        cluster_url
            .replace("https://", "wss://")
            .replace("http://", "ws://")
            .replace(":8899", ":8900")
    } else {
        cluster_url
            .replace("https://", "wss://")
            .replace("http://", "ws://")
    };
    println!("Connecting to {}", ws_url);
    // NOTE(review): when addresses are given, the `Mentions` filter is used
    // regardless of `include_votes` — the (true, Some) and (false, Some) arms
    // are identical. Confirm vote inclusion is intentionally ignored for
    // address-scoped subscriptions.
    let filter = match (include_votes, address) {
        (true, Some(address)) => {
            RpcTransactionLogsFilter::Mentions(address.iter().map(|p| p.to_string()).collect())
        }
        (true, None) => RpcTransactionLogsFilter::AllWithVotes,
        (false, Some(address)) => {
            RpcTransactionLogsFilter::Mentions(address.iter().map(|p| p.to_string()).collect())
        }
        (false, None) => RpcTransactionLogsFilter::All,
    };
    let (_client, receiver) = PubsubClient::logs_subscribe(
        &ws_url,
        filter,
        RpcTransactionLogsConfig {
            // Use the CLI commitment override when given; otherwise the
            // server-side default applies.
            commitment: cfg_override.commitment.map(|c| CommitmentConfig {
                commitment: c.into(),
            }),
        },
    )?;
    // Drain notifications until the websocket drops.
    loop {
        match receiver.recv() {
            Ok(logs) => {
                println!("Transaction executed in slot {}:", logs.context.slot);
                println!("  Signature: {}", logs.value.signature);
                println!(
                    "  Status: {}",
                    logs.value
                        .err
                        .map(|err| err.to_string())
                        .unwrap_or_else(|| "Ok".to_string())
                );
                println!("  Log Messages:");
                for log in logs.value.logs {
                    println!("    {log}");
                }
            }
            Err(err) => {
                return Err(anyhow!("Disconnected: {err}"));
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Invokes `init` with the given workspace `name` and default options for
    /// everything else, `unwrap`ing the result so a rejected name panics —
    /// which the `#[should_panic]` tests below rely on.
    ///
    /// Extracted to remove the identical 20-line `init` call that was
    /// duplicated across all three tests.
    fn init_with_name(name: &str) {
        init(
            &ConfigOverride {
                cluster: None,
                wallet: None,
                commitment: None,
            },
            name.to_string(),
            true,
            true,
            PackageManager::default(),
            false,
            ProgramTemplate::default(),
            TestTemplate::default(),
            false,
            true,
        )
        .unwrap();
    }

    // Rust reserved words are not valid workspace names.
    #[test]
    #[should_panic(expected = "Anchor workspace name must be a valid Rust identifier.")]
    fn test_init_reserved_word() {
        init_with_name("await");
    }

    // Keywords recognized via `syn` are rejected too.
    #[test]
    #[should_panic(expected = "Anchor workspace name must be a valid Rust identifier.")]
    fn test_init_reserved_word_from_syn() {
        init_with_name("fn");
    }

    // Identifiers cannot start with a digit.
    #[test]
    #[should_panic(expected = "Anchor workspace name must be a valid Rust identifier.")]
    fn test_init_starting_with_digit() {
        init_with_name("1project");
    }
}