// anchor_cli — lib.rs

1use {
2    crate::config::{
3        get_default_ledger_path, BootstrapMode, BuildConfig, Config, ConfigOverride, HookType,
4        Manifest, PackageManager, ProgramDeployment, ProgramWorkspace, ScriptsConfig,
5        SurfnetInfoResponse, SurfpoolConfig, TestValidator, ValidatorType, WithPath, SHUTDOWN_WAIT,
6        STARTUP_WAIT, SURFPOOL_HOST,
7    },
8    anchor_client::Cluster,
9    anchor_lang::{
10        prelude::UpgradeableLoaderState, solana_program::bpf_loader_upgradeable, AnchorDeserialize,
11    },
12    anchor_lang_idl::{
13        convert::convert_idl,
14        types::{Idl, IdlArrayLen, IdlDefinedFields, IdlType, IdlTypeDefTy},
15    },
16    anyhow::{anyhow, bail, Context, Result},
17    checks::{check_anchor_version, check_deps, check_idl_build_feature, check_overflow},
18    clap::{CommandFactory, Parser},
19    dirs::home_dir,
20    heck::{ToKebabCase, ToLowerCamelCase, ToPascalCase, ToSnakeCase},
21    regex::{Regex, RegexBuilder},
22    rust_template::{ProgramTemplate, TestTemplate},
23    semver::{Version, VersionReq},
24    serde_json::{json, Map, Value as JsonValue},
25    solana_cli_config::Config as SolanaCliConfig,
26    solana_commitment_config::CommitmentConfig,
27    solana_compute_budget_interface::ComputeBudgetInstruction,
28    solana_instruction::Instruction,
29    solana_keypair::Keypair,
30    solana_pubkey::Pubkey,
31    solana_pubsub_client::pubsub_client::{PubsubClient, PubsubClientSubscription},
32    solana_rpc_client::rpc_client::RpcClient,
33    solana_rpc_client_api::{
34        config::{RpcTransactionLogsConfig, RpcTransactionLogsFilter},
35        request::RpcRequest,
36        response::{Response as RpcResponse, RpcLogsResponse},
37    },
38    solana_signer::{EncodableKey, Signer},
39    std::{
40        collections::{BTreeMap, HashMap, HashSet},
41        ffi::OsString,
42        fs::{self, File},
43        io::prelude::*,
44        path::{Path, PathBuf},
45        process::{Child, Stdio},
46        string::ToString,
47        sync::LazyLock,
48    },
49};
50
51mod account;
52mod checks;
53pub mod config;
54mod keygen;
55mod metadata;
56mod program;
57pub mod rust_template;
58
// Version of this CLI crate, taken from Cargo.toml at compile time.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
// Tag of the docker builder image used for verifiable builds; kept in
// lockstep with the CLI version.
pub const DOCKER_BUILDER_VERSION: &str = VERSION;
/// Default RPC port
pub const DEFAULT_RPC_PORT: u16 = 8899;

/// WebSocket port offset for solana-test-validator (RPC port + 1)
pub const WEBSOCKET_PORT_OFFSET: u16 = 1;
67
68pub static AVM_HOME: LazyLock<PathBuf> = LazyLock::new(|| {
69    if let Ok(avm_home) = std::env::var("AVM_HOME") {
70        PathBuf::from(avm_home)
71    } else {
72        let mut user_home = dirs::home_dir().expect("Could not find home directory");
73        user_home.push(".avm");
74        user_home
75    }
76});
77
// Top-level CLI options: global config overrides (e.g. cluster/wallet flags)
// flattened alongside the selected subcommand.
//
// NOTE: `///` doc comments are intentionally avoided on these fields — clap
// would surface them as user-visible help text.
#[derive(Debug, Parser)]
#[clap(version = VERSION)]
pub struct Opts {
    #[clap(flatten)]
    pub cfg_override: ConfigOverride,
    #[clap(subcommand)]
    pub command: Command,
}
86
87#[derive(Debug, Parser)]
88pub enum Command {
89    /// Initializes a workspace.
90    Init {
91        /// Workspace name
92        name: String,
93        /// Use JavaScript instead of TypeScript
94        #[clap(short, long)]
95        javascript: bool,
96        /// Don't install JavaScript dependencies
97        #[clap(long)]
98        no_install: bool,
99        /// Package Manager to use
100        #[clap(value_enum, long, default_value = "yarn")]
101        package_manager: PackageManager,
102        /// Don't initialize git
103        #[clap(long)]
104        no_git: bool,
105        /// Rust program template to use
106        #[clap(value_enum, short, long, default_value = "multiple")]
107        template: ProgramTemplate,
108        /// Test template to use
109        #[clap(value_enum, long, default_value = "litesvm")]
110        test_template: TestTemplate,
111        /// Initialize even if there are files
112        #[clap(long, action)]
113        force: bool,
114        /// Install Solana agent skills
115        #[clap(long)]
116        install_agent_skills: bool,
117    },
118    /// Builds the workspace.
119    #[clap(name = "build", alias = "b")]
120    Build {
121        /// True if the build should not fail even if there are no "CHECK" comments
122        #[clap(long)]
123        skip_lint: bool,
124        /// Skip checking for program ID mismatch between keypair and declare_id
125        #[clap(long)]
126        ignore_keys: bool,
127        /// Do not build the IDL
128        #[clap(long)]
129        no_idl: bool,
130        /// Output directory for the IDL.
131        #[clap(short, long)]
132        idl: Option<String>,
133        /// Output directory for the TypeScript IDL.
134        #[clap(short = 't', long)]
135        idl_ts: Option<String>,
136        /// True if the build artifact needs to be deterministic and verifiable.
137        #[clap(short, long)]
138        verifiable: bool,
139        /// Name of the program to build
140        #[clap(short, long)]
141        program_name: Option<String>,
142        /// Version of the Solana toolchain to use. For --verifiable builds
143        /// only.
144        #[clap(short, long)]
145        solana_version: Option<String>,
146        /// Docker image to use. For --verifiable builds only.
147        #[clap(short, long)]
148        docker_image: Option<String>,
149        /// Bootstrap docker image from scratch, installing all requirements for
150        /// verifiable builds. Only works for debian-based images.
151        #[clap(value_enum, short, long, default_value = "none")]
152        bootstrap: BootstrapMode,
153        /// Environment variables to pass into the docker container
154        #[clap(short, long, required = false)]
155        env: Vec<String>,
156        /// Arguments to pass to the underlying `cargo build-sbf` command
157        #[clap(required = false, last = true)]
158        cargo_args: Vec<String>,
159        /// Suppress doc strings in IDL output
160        #[clap(long)]
161        no_docs: bool,
162    },
163    /// Expands macros (wrapper around cargo expand)
164    ///
165    /// Use it in a program folder to expand program
166    ///
167    /// Use it in a workspace but outside a program
168    /// folder to expand the entire workspace
169    Expand {
170        /// Expand only this program
171        #[clap(short, long)]
172        program_name: Option<String>,
173        /// Arguments to pass to the underlying `cargo expand` command
174        #[clap(required = false, last = true)]
175        cargo_args: Vec<String>,
176    },
177    /// Verifies the on-chain bytecode matches the locally compiled artifact.
178    /// Run this command inside a program subdirectory, i.e., in the dir
179    /// containing the program's Cargo.toml.
180    Verify {
181        /// The program ID to verify.
182        program_id: Pubkey,
183        /// The URL of the repository to verify against. Conflicts with `--current-dir`.
184        #[clap(long, conflicts_with = "current_dir")]
185        repo_url: Option<String>,
186        /// The commit hash to verify against. Requires `--repo-url`.
187        #[clap(long, requires = "repo_url")]
188        commit_hash: Option<String>,
189        /// Verify against the source code in the current directory. Conflicts with `--repo-url`.
190        #[clap(long)]
191        current_dir: bool,
192        /// Name of the program to run the command on. Defaults to the package name.
193        #[clap(long)]
194        program_name: Option<String>,
195        /// Any additional arguments to pass to `solana-verify`.
196        #[clap(raw = true)]
197        args: Vec<String>,
198    },
199    #[clap(name = "test", alias = "t")]
200    /// Runs integration tests.
201    Test {
202        /// Build and test only this program
203        #[clap(short, long)]
204        program_name: Option<String>,
205        /// Use this flag if you want to run tests against previously deployed
206        /// programs.
207        #[clap(long)]
208        skip_deploy: bool,
209        /// True if the build should not fail even if there are
210        /// no "CHECK" comments where normally required
211        #[clap(long)]
212        skip_lint: bool,
213        /// Flag to skip starting a local validator, if the configured cluster
214        /// url is a localnet.
215        #[clap(long)]
216        skip_local_validator: bool,
217        /// Flag to skip building the program in the workspace,
218        /// use this to save time when running test and the program code is not altered.
219        #[clap(long)]
220        skip_build: bool,
221        /// Do not build the IDL
222        #[clap(long)]
223        no_idl: bool,
224        /// Flag to keep the local validator running after tests
225        /// to be able to check the transactions.
226        #[clap(long)]
227        detach: bool,
228        /// Run the test suites under the specified path
229        #[clap(long)]
230        run: Vec<String>,
231        /// Validator type to use for local testing
232        #[clap(value_enum, long, default_value = "surfpool")]
233        validator: ValidatorType,
234        args: Vec<String>,
235        /// Environment variables to pass into the docker container
236        #[clap(short, long, required = false)]
237        env: Vec<String>,
238        /// Arguments to pass to the underlying `cargo build-sbf` command.
239        #[clap(required = false, last = true)]
240        cargo_args: Vec<String>,
241    },
242    /// Creates a new program.
243    New {
244        /// Program name
245        name: String,
246        /// Rust program template to use
247        #[clap(value_enum, short, long, default_value = "multiple")]
248        template: ProgramTemplate,
249        /// Create new program even if there is already one
250        #[clap(long, action)]
251        force: bool,
252    },
253    /// Commands for interacting with interface definitions.
254    Idl {
255        #[clap(subcommand)]
256        subcmd: IdlCommand,
257    },
258    /// Remove all artifacts from the generated directories except program keypairs.
259    Clean,
260    /// Deploys each program in the workspace.
261    #[clap(hide = true)]
262    #[deprecated(since = "0.32.0", note = "use `anchor program deploy` instead")]
263    Deploy {
264        /// Only deploy this program
265        #[clap(short, long)]
266        program_name: Option<String>,
267        /// Keypair of the program (filepath) (requires program-name)
268        #[clap(long, requires = "program_name")]
269        program_keypair: Option<String>,
270        /// If true, deploy from path target/verifiable
271        #[clap(short, long)]
272        verifiable: bool,
273        /// Don't upload IDL during deployment (IDL is uploaded by default)
274        #[clap(long)]
275        no_idl: bool,
276        /// Arguments to pass to the underlying `solana program deploy` command.
277        #[clap(required = false, last = true)]
278        solana_args: Vec<String>,
279    },
280    /// Runs the deploy migration script.
281    Migrate,
282    /// Deploys, initializes an IDL, and migrates all in one command.
283    /// Upgrades a single program. The configured wallet must be the upgrade
284    /// authority.
285    #[clap(hide = true)]
286    #[deprecated(since = "0.32.0", note = "use `anchor program upgrade` instead")]
287    Upgrade {
288        /// The program to upgrade.
289        #[clap(short, long)]
290        program_id: Pubkey,
291        /// Filepath to the new program binary.
292        program_filepath: String,
293        /// Max times to retry on failure.
294        #[clap(long, default_value = "0")]
295        max_retries: u32,
296        /// Arguments to pass to the underlying `solana program deploy` command.
297        #[clap(required = false, last = true)]
298        solana_args: Vec<String>,
299    },
300    /// Request an airdrop of SOL
301    Airdrop {
302        /// Amount of SOL to airdrop
303        amount: f64,
304        /// Recipient address (defaults to configured wallet)
305        pubkey: Option<Pubkey>,
306    },
307    /// Cluster commands.
308    Cluster {
309        #[clap(subcommand)]
310        subcmd: ClusterCommand,
311    },
312    /// Configuration management commands.
313    Config {
314        #[clap(subcommand)]
315        subcmd: ConfigCommand,
316    },
317    /// Starts a node shell with an Anchor client setup according to the local
318    /// config.
319    Shell,
320    /// Runs the script defined by the current workspace's Anchor.toml.
321    Run {
322        /// The name of the script to run.
323        script: String,
324        /// Argument to pass to the underlying script.
325        #[clap(required = false, last = true)]
326        script_args: Vec<String>,
327    },
328    /// Program keypair commands.
329    Keys {
330        #[clap(subcommand)]
331        subcmd: KeysCommand,
332    },
333    /// Localnet commands.
334    Localnet {
335        /// Flag to skip building the program in the workspace,
336        /// use this to save time when running test and the program code is not altered.
337        #[clap(long)]
338        skip_build: bool,
339        /// Use this flag if you want to run tests against previously deployed
340        /// programs.
341        #[clap(long)]
342        skip_deploy: bool,
343        /// True if the build should not fail even if there are
344        /// no "CHECK" comments where normally required
345        #[clap(long)]
346        skip_lint: bool,
347        /// Skip checking for program ID mismatch between keypair and declare_id
348        #[clap(long)]
349        ignore_keys: bool,
350        /// Validator type to use for local testing
351        #[clap(value_enum, long, default_value = "surfpool")]
352        validator: ValidatorType,
353        /// Environment variables to pass into the docker container
354        #[clap(short, long, required = false)]
355        env: Vec<String>,
356        /// Arguments to pass to the underlying `cargo build-sbf` command.
357        #[clap(required = false, last = true)]
358        cargo_args: Vec<String>,
359    },
360    /// Fetch and deserialize an account using the IDL provided.
361    Account {
362        /// Account struct to deserialize (format: <program_name>.<Account>)
363        account_type: String,
364        /// Address of the account to deserialize
365        address: Pubkey,
366        /// IDL to use (defaults to workspace IDL)
367        #[clap(long)]
368        idl: Option<String>,
369    },
370    /// Generates shell completions.
371    Completions {
372        #[clap(value_enum)]
373        shell: clap_complete::Shell,
374    },
375    /// Get your public key
376    Address,
377    /// Get your balance
378    Balance {
379        /// Account to check balance for (defaults to configured wallet)
380        pubkey: Option<Pubkey>,
381        /// Display balance in lamports instead of SOL
382        #[clap(long)]
383        lamports: bool,
384    },
385    /// Get current epoch
386    Epoch,
387    /// Get information about the current epoch
388    #[clap(name = "epoch-info")]
389    EpochInfo,
390    /// Stream transaction logs
391    Logs {
392        /// Include vote transactions when monitoring all transactions
393        #[clap(long)]
394        include_votes: bool,
395        /// Addresses to filter logs by
396        #[clap(long)]
397        address: Option<Vec<Pubkey>>,
398    },
399    /// Show the contents of an account
400    ShowAccount {
401        #[clap(flatten)]
402        cmd: account::ShowAccountCommand,
403    },
404    /// Keypair generation and management
405    Keygen {
406        #[clap(subcommand)]
407        subcmd: KeygenCommand,
408    },
409    /// Program deployment and management commands
410    Program {
411        #[clap(subcommand)]
412        subcmd: ProgramCommand,
413    },
414}
415
// Subcommands for `anchor keygen`, mirroring the standalone `solana-keygen`
// tool. `///` doc comments below are clap help text and part of CLI behavior.
#[derive(Debug, Parser)]
pub enum KeygenCommand {
    /// Generate a new keypair
    New {
        /// Path to generated keypair file
        #[clap(short = 'o', long)]
        outfile: Option<String>,
        /// Overwrite the output file if it exists
        #[clap(short, long)]
        force: bool,
        /// Do not prompt for a passphrase
        #[clap(long)]
        no_passphrase: bool,
        /// Do not display the generated pubkey
        #[clap(long)]
        silent: bool,
        /// Number of words in the mnemonic phrase [possible values: 12, 15, 18, 21, 24]
        // NOTE(review): the listed values are not enforced by clap here — any
        // usize parses; presumably validated downstream. TODO confirm.
        #[clap(short = 'w', long, default_value = "12")]
        word_count: usize,
    },
    /// Display the pubkey for a given keypair
    Pubkey {
        /// Keypair filepath
        keypair: Option<String>,
    },
    /// Recover a keypair from a seed phrase
    Recover {
        /// Path to recovered keypair file
        #[clap(short = 'o', long)]
        outfile: Option<String>,
        /// Overwrite the output file if it exists
        #[clap(short, long)]
        force: bool,
        /// Skip seed phrase validation
        #[clap(long)]
        skip_seed_phrase_validation: bool,
        /// Do not prompt for a passphrase
        #[clap(long)]
        no_passphrase: bool,
    },
    /// Verify a keypair can sign and verify a message
    Verify {
        /// Public key to verify
        pubkey: Pubkey,
        /// Keypair filepath (defaults to configured wallet)
        keypair: Option<String>,
    },
}
464
// Subcommands for `anchor keys`: inspecting and syncing program keypairs with
// the `declare_id!` macros in each program's source.
#[derive(Debug, Parser)]
pub enum KeysCommand {
    /// List all of the program keys.
    List,
    /// Sync program `declare_id!` pubkeys with the program's actual pubkey.
    Sync {
        /// Only sync the given program instead of all programs
        #[clap(short, long)]
        program_name: Option<String>,
    },
}
476
477#[derive(Debug, Parser)]
478pub enum ProgramCommand {
479    /// Deploy an upgradeable program
480    Deploy {
481        /// Program filepath (e.g., target/deploy/my_program.so).
482        /// If not provided, discovers programs from workspace
483        program_filepath: Option<String>,
484        /// Program name to deploy (from workspace). Used when program_filepath is not provided
485        #[clap(short, long)]
486        program_name: Option<String>,
487        /// Program keypair filepath (defaults to target/deploy/{program_name}-keypair.json)
488        #[clap(long)]
489        program_keypair: Option<String>,
490        /// Upgrade authority keypair (defaults to configured wallet)
491        #[clap(long)]
492        upgrade_authority: Option<String>,
493        /// Program id to deploy to (derived from program-keypair if not specified)
494        #[clap(long)]
495        program_id: Option<Pubkey>,
496        /// Buffer account to use for deployment
497        #[clap(long)]
498        buffer: Option<Pubkey>,
499        /// Maximum transaction length (BPF loader upgradeable limit)
500        #[clap(long)]
501        max_len: Option<usize>,
502        /// Don't upload IDL during deployment (IDL is uploaded by default)
503        #[clap(long)]
504        no_idl: bool,
505        /// Make the program immutable after deployment (cannot be upgraded)
506        #[clap(long = "final")]
507        make_final: bool,
508        /// Additional arguments to configure deployment (e.g., --with-compute-unit-price 1000)
509        #[clap(required = false, last = true)]
510        solana_args: Vec<String>,
511    },
512    /// Write a program into a buffer account
513    WriteBuffer {
514        /// Program filepath (e.g., target/deploy/my_program.so).
515        /// If not provided, discovers program from workspace using program_name
516        program_filepath: Option<String>,
517        /// Program name to write (from workspace). Used when program_filepath is not provided
518        #[clap(short, long)]
519        program_name: Option<String>,
520        /// Buffer account keypair (defaults to new keypair)
521        #[clap(long)]
522        buffer: Option<String>,
523        /// Buffer authority (defaults to configured wallet)
524        #[clap(long)]
525        buffer_authority: Option<String>,
526        /// Maximum transaction length
527        #[clap(long)]
528        max_len: Option<usize>,
529    },
530    /// Set a new buffer authority
531    SetBufferAuthority {
532        /// Buffer account address
533        buffer: Pubkey,
534        /// New buffer authority
535        new_buffer_authority: Pubkey,
536    },
537    /// Set a new program authority
538    SetUpgradeAuthority {
539        /// Program id
540        program_id: Pubkey,
541        /// New upgrade authority pubkey
542        #[clap(long)]
543        new_upgrade_authority: Option<Pubkey>,
544        /// New upgrade authority signer (keypair file). Required unless --skip-new-upgrade-authority-signer-check is used.
545        /// When provided, both current and new authority will sign (checked mode, recommended)
546        #[clap(long)]
547        new_upgrade_authority_signer: Option<String>,
548        /// Skip new upgrade authority signer check. Allows setting authority with only current authority signature.
549        /// WARNING: Less safe - use only if you're confident the pubkey is correct
550        #[clap(long)]
551        skip_new_upgrade_authority_signer_check: bool,
552        /// Make the program immutable (cannot be upgraded)
553        #[clap(long = "final")]
554        make_final: bool,
555        /// Current upgrade authority keypair (defaults to configured wallet)
556        #[clap(long)]
557        upgrade_authority: Option<String>,
558    },
559    /// Display information about a buffer or program
560    Show {
561        /// Account address (buffer or program)
562        account: Pubkey,
563        /// Get account information from the Solana config file
564        #[clap(long)]
565        get_programs: bool,
566        /// Get account information from the Solana config file
567        #[clap(long)]
568        get_buffers: bool,
569        /// Show all accounts
570        #[clap(long)]
571        all: bool,
572    },
573    /// Upgrade an upgradeable program
574    Upgrade {
575        /// Program id to upgrade
576        program_id: Pubkey,
577        /// Program filepath (e.g., target/deploy/my_program.so). If not provided, discovers from workspace
578        #[clap(long)]
579        program_filepath: Option<String>,
580        /// Program name to upgrade (from workspace). Used when program_filepath is not provided
581        #[clap(short, long)]
582        program_name: Option<String>,
583        /// Existing buffer account to upgrade from. If not provided, auto-discovers program from workspace
584        #[clap(long)]
585        buffer: Option<Pubkey>,
586        /// Upgrade authority (defaults to configured wallet)
587        #[clap(long)]
588        upgrade_authority: Option<String>,
589        /// Max times to retry on failure
590        #[clap(long, default_value = "0")]
591        max_retries: u32,
592        /// Additional arguments to configure deployment (e.g., --with-compute-unit-price 1000)
593        #[clap(required = false, last = true)]
594        solana_args: Vec<String>,
595    },
596    /// Write the program data to a file
597    Dump {
598        /// Program account address
599        account: Pubkey,
600        /// Output file path
601        output_file: String,
602    },
603    /// Close a program or buffer account and withdraw all lamports
604    Close {
605        /// Account address to close (buffer or program).
606        /// If not provided, discovers program from workspace using program_name
607        account: Option<Pubkey>,
608        /// Program name to close (from workspace). Used when account is not provided
609        #[clap(short, long)]
610        program_name: Option<String>,
611        /// Authority keypair (defaults to configured wallet)
612        #[clap(long)]
613        authority: Option<String>,
614        /// Recipient address for reclaimed lamports (defaults to authority)
615        #[clap(long)]
616        recipient: Option<Pubkey>,
617        /// Bypass warning prompts
618        #[clap(long)]
619        bypass_warning: bool,
620    },
621    /// Extend the length of an upgradeable program
622    Extend {
623        /// Program id to extend.
624        /// If not provided, discovers program from workspace using program_name
625        program_id: Option<Pubkey>,
626        /// Program name to extend (from workspace). Used when program_id is not provided
627        #[clap(short, long)]
628        program_name: Option<String>,
629        /// Additional bytes to allocate
630        additional_bytes: usize,
631    },
632}
633
634#[derive(Debug, Parser)]
635pub enum IdlCommand {
636    /// Initializes a program's IDL account. Can only be run once.
637    Init {
638        /// Program id to initialize IDL for.
639        /// If not provided, discovers program ID from IDL.
640        program_id: Option<Pubkey>,
641        #[clap(short, long)]
642        filepath: String,
643        #[clap(long)]
644        priority_fee: Option<u64>,
645        /// Create non-canonical metadata account (third-party metadata)
646        #[clap(long)]
647        non_canonical: bool,
648        /// Allow running against a localnet cluster (disabled by default)
649        #[clap(long)]
650        #[cfg(feature = "idl-localnet-testing")]
651        allow_localnet: bool,
652    },
653    /// Upgrades the IDL to the new file. An alias for first writing and then
654    /// then setting the idl buffer account.
655    Upgrade {
656        /// Program id to upgrade IDL for.
657        /// If not provided, discovers program ID from IDL.
658        program_id: Option<Pubkey>,
659        #[clap(short, long)]
660        filepath: String,
661        #[clap(long)]
662        priority_fee: Option<u64>,
663        /// Allow running against a localnet cluster (disabled by default)
664        #[clap(long)]
665        #[cfg(feature = "idl-localnet-testing")]
666        allow_localnet: bool,
667    },
668    /// Generates the IDL for the program using the compilation method.
669    #[clap(alias = "b")]
670    Build {
671        // Program name to build the IDL of(current dir's program if not specified)
672        #[clap(short, long)]
673        program_name: Option<String>,
674        /// Output file for the IDL (stdout if not specified)
675        #[clap(short, long)]
676        out: Option<String>,
677        /// Output file for the TypeScript IDL
678        #[clap(short = 't', long)]
679        out_ts: Option<String>,
680        /// Suppress doc strings in output
681        #[clap(long)]
682        no_docs: bool,
683        /// Do not check for safety comments
684        #[clap(long)]
685        skip_lint: bool,
686        /// Arguments to pass to the underlying `cargo test` command
687        #[clap(required = false, last = true)]
688        cargo_args: Vec<String>,
689    },
690    /// Fetches an IDL for the given program from a cluster.
691    Fetch {
692        program_id: Pubkey,
693        /// Output file for the IDL (stdout if not specified).
694        #[clap(short, long)]
695        out: Option<String>,
696        /// Fetch non-canonical metadata account (third-party metadata)
697        #[clap(long)]
698        non_canonical: bool,
699    },
700    /// Convert legacy IDLs (pre Anchor 0.30) to the new IDL spec
701    Convert {
702        /// Path to the IDL file
703        path: String,
704        /// Output file for the IDL (stdout if not specified)
705        #[clap(short, long)]
706        out: Option<String>,
707        /// Program id to initialize IDL for.
708        /// If not provided, discovers program ID from IDL.
709        #[clap(short, long)]
710        program_id: Option<Pubkey>,
711    },
712    /// Generate TypeScript type for the IDL
713    Type {
714        /// Path to the IDL file
715        path: String,
716        /// Output file for the IDL (stdout if not specified)
717        #[clap(short, long)]
718        out: Option<String>,
719    },
720    /// Close a metadata account and recover rent
721    Close {
722        /// The program ID
723        program_id: Pubkey,
724        /// The seed used for the metadata account (default: "idl")
725        #[clap(long, default_value = "idl")]
726        seed: String,
727        /// Priority fees in micro-lamports per compute unit
728        #[clap(long)]
729        priority_fee: Option<u64>,
730    },
731    /// Create a buffer account for metadata
732    CreateBuffer {
733        /// Path to the metadata file
734        #[clap(short, long)]
735        filepath: String,
736        /// Priority fees in micro-lamports per compute unit
737        #[clap(long)]
738        priority_fee: Option<u64>,
739    },
740    /// Set a new authority on a buffer account
741    SetBufferAuthority {
742        /// The buffer account address
743        buffer: Pubkey,
744        /// The new authority
745        #[clap(short, long)]
746        new_authority: Pubkey,
747        /// Priority fees in micro-lamports per compute unit
748        #[clap(long)]
749        priority_fee: Option<u64>,
750    },
751    /// Write metadata using a buffer account
752    WriteBuffer {
753        /// The program ID
754        program_id: Pubkey,
755        /// The buffer account address
756        #[clap(short, long)]
757        buffer: Pubkey,
758        /// The seed to use for the metadata account (default: "idl")
759        #[clap(long, default_value = "idl")]
760        seed: String,
761        /// Close the buffer after writing
762        #[clap(long)]
763        close_buffer: bool,
764        /// Priority fees in micro-lamports per compute unit
765        #[clap(long)]
766        priority_fee: Option<u64>,
767    },
768}
769
// Subcommands for `anchor cluster`.
#[derive(Debug, Parser)]
pub enum ClusterCommand {
    /// Prints common cluster urls.
    List,
}
775
// Subcommands for `anchor config`: read/write provider settings in the local
// Anchor.toml. `///` doc comments below are clap help text.
#[derive(Debug, Parser)]
pub enum ConfigCommand {
    /// Get configuration settings from the local Anchor.toml
    Get,
    /// Set configuration settings in the local Anchor.toml
    Set {
        /// Cluster to connect to (custom URL). Use -um, -ud, -ut, -ul for standard clusters
        // -um/-ud/-ut/-ul presumably come from ConfigOverride's flattened
        // flags rather than this argument — TODO confirm in config.rs.
        #[clap(short = 'u', long = "url")]
        url: Option<String>,
        /// Path to wallet keypair file to update the Anchor.toml file with
        #[clap(short = 'k', long = "keypair")]
        keypair: Option<String>,
    },
}
790
791fn get_keypair(path: &str) -> Result<Keypair> {
792    solana_keypair::read_keypair_file(path)
793        .map_err(|_| anyhow!("Unable to read keypair file ({path})"))
794}
795
/// Format lamports as SOL with trailing zeros removed.
///
/// 1 SOL == 1_000_000_000 lamports, so nine fractional digits are required
/// for an exact rendering. The previous implementation converted through
/// `f64` and printed only eight decimals, so 1 lamport displayed as "0 SOL"
/// and 999_999_999 lamports rounded up to "1 SOL"; it also lost precision
/// for balances above 2^53 lamports. Integer arithmetic avoids all of that.
fn format_sol(lamports: u64) -> String {
    const LAMPORTS_PER_SOL: u64 = 1_000_000_000;
    let whole = lamports / LAMPORTS_PER_SOL;
    let frac = lamports % LAMPORTS_PER_SOL;
    if frac == 0 {
        // No fractional part: print the integer alone ("0 SOL", "3 SOL").
        format!("{whole} SOL")
    } else {
        // Zero-pad to 9 digits so e.g. 1 lamport is 0.000000001, then strip
        // trailing zeros (the fraction is nonzero, so digits remain).
        let padded = format!("{frac:09}");
        let trimmed = padded.trim_end_matches('0');
        format!("{whole}.{trimmed} SOL")
    }
}
805
806/// Get cluster URL and wallet path from Anchor config, CLI overrides, or Solana CLI config
807fn get_cluster_and_wallet(cfg_override: &ConfigOverride) -> Result<(String, String)> {
808    // Try to get from Anchor workspace config first
809    if let Ok(Some(cfg)) = Config::discover(cfg_override) {
810        return Ok((
811            cfg.provider.cluster.url().to_string(),
812            cfg.provider.wallet.to_string(),
813        ));
814    }
815
816    // Try to load Solana CLI config
817    let (cluster_url, wallet_path) =
818        if let Some(config_file) = solana_cli_config::CONFIG_FILE.as_ref() {
819            match SolanaCliConfig::load(config_file) {
820                Ok(cli_config) => (
821                    cli_config.json_rpc_url.clone(),
822                    cli_config.keypair_path.clone(),
823                ),
824                Err(_) => {
825                    // Fallback to defaults if Solana CLI config doesn't exist
826                    (
827                        "https://api.mainnet-beta.solana.com".to_string(),
828                        dirs::home_dir()
829                            .map(|home| {
830                                home.join(".config/solana/id.json")
831                                    .to_string_lossy()
832                                    .to_string()
833                            })
834                            .unwrap_or_else(|| "~/.config/solana/id.json".to_string()),
835                    )
836                }
837            }
838        } else {
839            // If CONFIG_FILE is None, use defaults
840            (
841                "https://api.mainnet-beta.solana.com".to_string(),
842                dirs::home_dir()
843                    .map(|home| {
844                        home.join(".config/solana/id.json")
845                            .to_string_lossy()
846                            .to_string()
847                    })
848                    .unwrap_or_else(|| "~/.config/solana/id.json".to_string()),
849            )
850        };
851
852    // Apply cluster override if provided
853    let final_cluster = if let Some(cluster) = &cfg_override.cluster {
854        cluster.url().to_string()
855    } else {
856        cluster_url
857    };
858
859    Ok((final_cluster, wallet_path))
860}
861
862/// Get the recommended priority fee from the RPC client
863pub fn get_recommended_micro_lamport_fee(client: &RpcClient) -> Result<u64> {
864    let mut fees = client.get_recent_prioritization_fees(&[])?;
865    if fees.is_empty() {
866        // Fees may be empty, e.g. on localnet
867        return Ok(0);
868    }
869
870    // Get the median fee from the most recent 150 slots' prioritization fee
871    fees.sort_unstable_by_key(|fee| fee.prioritization_fee);
872    let median_index = fees.len() / 2;
873
874    let median_priority_fee = if fees.len() % 2 == 0 {
875        (fees[median_index - 1].prioritization_fee + fees[median_index].prioritization_fee) / 2
876    } else {
877        fees[median_index].prioritization_fee
878    };
879
880    Ok(median_priority_fee)
881}
882
883/// Prepend a compute unit ix, if the priority fee is greater than 0.
884pub fn prepend_compute_unit_ix(
885    instructions: Vec<Instruction>,
886    client: &RpcClient,
887    priority_fee: Option<u64>,
888) -> Result<Vec<Instruction>> {
889    let priority_fee = match priority_fee {
890        Some(fee) => fee,
891        None => get_recommended_micro_lamport_fee(client)?,
892    };
893
894    if priority_fee > 0 {
895        let mut instructions_appended = instructions.clone();
896        instructions_appended.insert(
897            0,
898            ComputeBudgetInstruction::set_compute_unit_price(priority_fee),
899        );
900        Ok(instructions_appended)
901    } else {
902        Ok(instructions)
903    }
904}
905
906pub fn entry(opts: Opts) -> Result<()> {
907    let restore_cbs = override_toolchain(&opts.cfg_override)?;
908    let result = process_command(opts);
909    restore_toolchain(restore_cbs)?;
910
911    result
912}
913
/// Functions to restore toolchain entries
// Each callback undoes one toolchain override (e.g. switches `solana` back to
// the version that was active before the command ran).
type RestoreToolchainCallbacks = Vec<Box<dyn FnOnce() -> Result<()>>>;
916
/// Override the toolchain from `Anchor.toml`.
///
/// Returns the previous versions to restore back to.
fn override_toolchain(cfg_override: &ConfigOverride) -> Result<RestoreToolchainCallbacks> {
    let mut restore_cbs: RestoreToolchainCallbacks = vec![];

    let cfg = Config::discover(cfg_override)?;
    if let Some(cfg) = cfg {
        // Extract the first `x.y.z`-style version number from arbitrary text
        // (e.g. `solana --version` output).
        fn parse_version(text: &str) -> Option<String> {
            Some(
                Regex::new(r"(\d+\.\d+\.\S+)")
                    .unwrap()
                    .captures_iter(text)
                    .next()?
                    .get(0)?
                    .as_str()
                    .to_string(),
            )
        }

        // Run `<cmd> --version` and parse the version out of its stdout.
        fn get_current_version(cmd_name: &str) -> Result<String> {
            let output = std::process::Command::new(cmd_name)
                .arg("--version")
                .output()?;
            if !output.status.success() {
                return Err(anyhow!("Failed to run `{cmd_name} --version`"));
            }

            let output_version = std::str::from_utf8(&output.stdout)?;
            parse_version(output_version)
                .ok_or_else(|| anyhow!("Failed to parse the version of `{cmd_name}`"))
        }

        if let Some(solana_version) = &cfg.toolchain.solana_version {
            let current_version = get_current_version("solana")?;
            if solana_version != &current_version {
                // We are overriding with `solana-install` command instead of using the binaries
                // from `~/.local/share/solana/install/releases` because we use multiple Solana
                // binaries in various commands.
                fn override_solana_version(version: String) -> Result<bool> {
                    // There is a deprecation warning message starting with `1.18.19` which causes
                    // parsing problems https://github.com/solana-foundation/anchor/issues/3147
                    let (cmd_name, domain) =
                        if Version::parse(&version)? < Version::parse("1.18.19")? {
                            ("solana-install", "solana.com")
                        } else {
                            ("agave-install", "anza.xyz")
                        };

                    // Install the command if it's not installed
                    if get_current_version(cmd_name).is_err() {
                        // `solana-install` and `agave-install` are not usable at the same time i.e.
                        // using one of them makes the other unusable with the default installation,
                        // causing the installation process to run each time users switch between
                        // `agave` supported versions. For example, if the user's active Solana
                        // version is `1.18.17`, and he specifies `solana_version = "2.0.6"`, this
                        // code path will run each time an Anchor command gets executed.
                        eprintln!(
                            "Command not installed: `{cmd_name}`. \
                            See https://github.com/anza-xyz/agave/wiki/Agave-Transition, \
                            installing..."
                        );
                        // Fetch the installer script and pipe it through `sh`.
                        let install_script = std::process::Command::new("curl")
                            .args([
                                "-sSfL",
                                &format!("https://release.{domain}/v{version}/install"),
                            ])
                            .output()?;
                        let is_successful = std::process::Command::new("sh")
                            .args(["-c", std::str::from_utf8(&install_script.stdout)?])
                            .spawn()?
                            .wait_with_output()?
                            .status
                            .success();
                        if !is_successful {
                            return Err(anyhow!("Failed to install `{cmd_name}`"));
                        }
                    }

                    let output = std::process::Command::new(cmd_name).arg("list").output()?;
                    if !output.status.success() {
                        return Err(anyhow!("Failed to list installed `solana` versions"));
                    }

                    // Hide the installation progress if the version is already installed
                    let is_installed = std::str::from_utf8(&output.stdout)?
                        .lines()
                        .filter_map(parse_version)
                        .any(|line_version| line_version == version);
                    let (stderr, stdout) = if is_installed {
                        (Stdio::null(), Stdio::null())
                    } else {
                        (Stdio::inherit(), Stdio::inherit())
                    };

                    // Activate the requested version; `Ok(false)` means the
                    // switch command ran but did not succeed.
                    std::process::Command::new(cmd_name)
                        .arg("init")
                        .arg(&version)
                        .stderr(stderr)
                        .stdout(stdout)
                        .spawn()?
                        .wait()
                        .map(|status| status.success())
                        .map_err(|err| anyhow!("Failed to run `{cmd_name}` command: {err}"))
                }

                match override_solana_version(solana_version.to_owned())? {
                    true => restore_cbs.push(Box::new(|| {
                        match override_solana_version(current_version)? {
                            true => Ok(()),
                            false => Err(anyhow!("Failed to restore `solana` version")),
                        }
                    })),
                    false => eprintln!(
                        "Failed to override `solana` version to {solana_version}, using \
                         {current_version} instead"
                    ),
                }
            }
        }

        // Anchor version override should be handled last
        if let Some(anchor_version) = &cfg.toolchain.anchor_version {
            // Anchor binary name prefix(applies to binaries that are installed via `avm`)
            const ANCHOR_BINARY_PREFIX: &str = "anchor-";

            // Get the current version from the executing binary name if possible because commit
            // based toolchain overrides do not have version information.
            let current_version = std::env::args()
                .next()
                .expect("First arg should exist")
                .parse::<PathBuf>()?
                .file_name()
                .and_then(|name| name.to_str())
                .expect("File name should be valid Unicode")
                .split_once(ANCHOR_BINARY_PREFIX)
                .map(|(_, version)| version)
                .unwrap_or(VERSION)
                .to_owned();
            if anchor_version != &current_version {
                let binary_path = home_dir()
                    .unwrap()
                    .join(".avm")
                    .join("bin")
                    .join(format!("{ANCHOR_BINARY_PREFIX}{anchor_version}"));

                if !binary_path.exists() {
                    eprintln!(
                        "`anchor` {anchor_version} is not installed with `avm`. Installing...\n"
                    );

                    if let Err(e) = install_with_avm(anchor_version, false) {
                        eprintln!(
                            "Failed to install `anchor`: {e}, using {current_version} instead"
                        );
                        return Ok(restore_cbs);
                    }
                }

                // Delegate the whole invocation to the requested `anchor`
                // binary, restore the toolchain, and exit with its exit code.
                let exit_code = std::process::Command::new(binary_path)
                    .args(std::env::args_os().skip(1))
                    .spawn()?
                    .wait()?
                    .code()
                    .unwrap_or(1);
                restore_toolchain(restore_cbs)?;
                std::process::exit(exit_code);
            }
        }
    }

    Ok(restore_cbs)
}
1090
1091/// Installs Anchor using AVM, passing `--force` (and optionally) installing
1092/// `solana-verify`.
1093fn install_with_avm(version: &str, verify: bool) -> Result<()> {
1094    let mut cmd = std::process::Command::new("avm");
1095    cmd.arg("install");
1096    cmd.arg(version);
1097    cmd.arg("--force");
1098    if verify {
1099        cmd.arg("--verify");
1100    }
1101    let status = cmd.status().context("running AVM")?;
1102    if !status.success() {
1103        bail!("failed to install `anchor` {version} with avm");
1104    }
1105    Ok(())
1106}
1107
1108/// Restore toolchain to how it was before the command was run.
1109fn restore_toolchain(restore_cbs: RestoreToolchainCallbacks) -> Result<()> {
1110    for restore_toolchain in restore_cbs {
1111        if let Err(e) = restore_toolchain() {
1112            eprintln!("Toolchain error: {e}");
1113        }
1114    }
1115
1116    Ok(())
1117}
1118
1119/// Get the system's default license - what 'npm init' would use.
1120fn get_npm_init_license() -> Result<String> {
1121    let npm_init_license_output = std::process::Command::new("npm")
1122        .arg("config")
1123        .arg("get")
1124        .arg("init-license")
1125        .output()?;
1126
1127    if !npm_init_license_output.status.success() {
1128        return Err(anyhow!("Failed to get npm init license"));
1129    }
1130
1131    let license = String::from_utf8(npm_init_license_output.stdout)?;
1132    Ok(license.trim().to_string())
1133}
1134
/// Dispatch the parsed top-level CLI command to its handler function.
///
/// Pure dispatcher: each arm unpacks the command's fields and forwards them,
/// passing `opts.cfg_override` through to handlers that need provider config.
fn process_command(opts: Opts) -> Result<()> {
    match opts.command {
        Command::Init {
            name,
            javascript,
            no_install,
            package_manager,
            no_git,
            template,
            test_template,
            force,
            install_agent_skills,
        } => init(
            &opts.cfg_override,
            name,
            javascript,
            no_install,
            package_manager,
            no_git,
            template,
            test_template,
            force,
            install_agent_skills,
        ),
        Command::New {
            name,
            template,
            force,
        } => new(&opts.cfg_override, name, template, force),
        Command::Build {
            no_idl,
            idl,
            idl_ts,
            verifiable,
            program_name,
            solana_version,
            docker_image,
            bootstrap,
            cargo_args,
            env,
            skip_lint,
            ignore_keys,
            no_docs,
        } => build(
            &opts.cfg_override,
            no_idl,
            idl,
            idl_ts,
            verifiable,
            skip_lint,
            ignore_keys,
            program_name,
            solana_version,
            docker_image,
            bootstrap,
            None,
            None,
            env,
            cargo_args,
            no_docs,
        ),
        Command::Verify {
            program_id,
            repo_url,
            commit_hash,
            current_dir,
            program_name,
            args,
        } => verify(
            program_id,
            repo_url,
            commit_hash,
            current_dir,
            program_name,
            args,
        ),
        Command::Clean => clean(&opts.cfg_override),
        // Deprecated alias for `anchor program deploy`; warns, then forwards.
        #[allow(deprecated)]
        Command::Deploy {
            program_name,
            program_keypair,
            verifiable,
            no_idl,
            solana_args,
        } => {
            eprintln!(
                "Warning: 'anchor deploy' is deprecated. Use 'anchor program deploy' instead."
            );
            deploy(
                &opts.cfg_override,
                program_name,
                program_keypair,
                verifiable,
                no_idl,
                solana_args,
            )
        }
        Command::Expand {
            program_name,
            cargo_args,
        } => expand(&opts.cfg_override, program_name, &cargo_args),
        // Deprecated alias for `anchor program upgrade`; warns, then forwards.
        #[allow(deprecated)]
        Command::Upgrade {
            program_id,
            program_filepath,
            max_retries,
            solana_args,
        } => {
            eprintln!(
                "Warning: 'anchor upgrade' is deprecated. Use 'anchor program upgrade' instead."
            );
            upgrade(
                &opts.cfg_override,
                program_id,
                program_filepath,
                max_retries,
                solana_args,
            )
        }
        Command::Idl { subcmd } => idl(&opts.cfg_override, subcmd),
        Command::Migrate => migrate(&opts.cfg_override),
        Command::Test {
            program_name,
            skip_deploy,
            skip_local_validator,
            skip_build,
            no_idl,
            detach,
            run,
            validator,
            args,
            env,
            cargo_args,
            skip_lint,
        } => test(
            &opts.cfg_override,
            program_name,
            skip_deploy,
            skip_local_validator,
            skip_build,
            skip_lint,
            no_idl,
            detach,
            run,
            validator,
            args,
            env,
            cargo_args,
        ),
        Command::Airdrop { amount, pubkey } => airdrop(&opts.cfg_override, amount, pubkey),
        Command::Cluster { subcmd } => cluster(subcmd),
        Command::Config { subcmd } => config_cmd(&opts.cfg_override, subcmd),
        Command::Shell => shell(&opts.cfg_override),
        Command::Run {
            script,
            script_args,
        } => run(&opts.cfg_override, script, script_args),
        Command::Keys { subcmd } => keys(&opts.cfg_override, subcmd),
        Command::Localnet {
            skip_build,
            skip_deploy,
            skip_lint,
            ignore_keys,
            validator,
            env,
            cargo_args,
        } => localnet(
            &opts.cfg_override,
            skip_build,
            skip_deploy,
            skip_lint,
            ignore_keys,
            validator,
            env,
            cargo_args,
        ),
        Command::Account {
            account_type,
            address,
            idl,
        } => account(&opts.cfg_override, account_type, address, idl),
        Command::Completions { shell } => {
            clap_complete::generate(
                shell,
                &mut Opts::command(),
                "anchor",
                &mut std::io::stdout(),
            );
            Ok(())
        }
        Command::Address => address(&opts.cfg_override),
        Command::Balance { pubkey, lamports } => balance(&opts.cfg_override, pubkey, lamports),
        Command::Epoch => epoch(&opts.cfg_override),
        Command::EpochInfo => epoch_info(&opts.cfg_override),
        Command::Logs {
            include_votes,
            address,
        } => logs_subscribe(&opts.cfg_override, include_votes, address),
        Command::ShowAccount { cmd } => account::show_account(&opts.cfg_override, cmd),
        Command::Keygen { subcmd } => keygen::keygen(&opts.cfg_override, subcmd),
        Command::Program { subcmd } => program::program(&opts.cfg_override, subcmd),
    }
}
1338
1339#[allow(clippy::too_many_arguments)]
1340fn init(
1341    cfg_override: &ConfigOverride,
1342    name: String,
1343    javascript: bool,
1344    no_install: bool,
1345    package_manager: PackageManager,
1346    no_git: bool,
1347    template: ProgramTemplate,
1348    test_template: TestTemplate,
1349    force: bool,
1350    install_agent_skills: bool,
1351) -> Result<()> {
1352    if !force && Config::discover(cfg_override)?.is_some() {
1353        return Err(anyhow!("Workspace already initialized"));
1354    }
1355
1356    // We need to format different cases for the dir and the name
1357    let rust_name = name.to_snake_case();
1358    let project_name = if name == rust_name {
1359        rust_name.clone()
1360    } else {
1361        name.to_kebab_case()
1362    };
1363
1364    // Additional keywords that have not been added to the `syn` crate as reserved words
1365    // https://github.com/dtolnay/syn/pull/1098
1366    let extra_keywords = ["async", "await", "try"];
1367    // Anchor converts to snake case before writing the program name
1368    if syn::parse_str::<syn::Ident>(&rust_name).is_err()
1369        || extra_keywords.contains(&rust_name.as_str())
1370    {
1371        return Err(anyhow!(
1372            "Anchor workspace name must be a valid Rust identifier. It may not be a Rust reserved word, start with a digit, or include certain disallowed characters. See https://doc.rust-lang.org/reference/identifiers.html for more detail.",
1373        ));
1374    }
1375
1376    if force {
1377        fs::create_dir_all(&project_name)?;
1378    } else {
1379        fs::create_dir(&project_name)?;
1380    }
1381    std::env::set_current_dir(&project_name)?;
1382    fs::create_dir_all("app")?;
1383
1384    let mut cfg = Config::default();
1385
1386    let test_script = test_template.get_test_script(javascript, &package_manager);
1387    cfg.scripts.insert("test".to_owned(), test_script);
1388
1389    let package_manager_cmd = package_manager.to_string();
1390    cfg.toolchain.package_manager = Some(package_manager);
1391
1392    let mut localnet = BTreeMap::new();
1393    let program_id = rust_template::get_or_create_program_id(&rust_name);
1394    localnet.insert(
1395        rust_name,
1396        ProgramDeployment {
1397            address: program_id,
1398            path: None,
1399            idl: None,
1400        },
1401    );
1402    cfg.programs.insert(Cluster::Localnet, localnet);
1403    let toml = cfg.to_string();
1404    fs::write("Anchor.toml", toml)?;
1405
1406    // Initialize .gitignore file
1407    fs::write(".gitignore", rust_template::git_ignore())?;
1408
1409    // Initialize .prettierignore file
1410    fs::write(".prettierignore", rust_template::prettier_ignore())?;
1411
1412    // Remove the default program if `--force` is passed
1413    if force {
1414        fs::remove_dir_all(
1415            std::env::current_dir()?
1416                .join("programs")
1417                .join(&project_name),
1418        )?;
1419    }
1420
1421    // Build the program.
1422    rust_template::create_program(&project_name, template, Some(&test_template))?;
1423
1424    // Build the migrations directory.
1425    let migrations_path = Path::new("migrations");
1426    fs::create_dir_all(migrations_path)?;
1427
1428    let license = get_npm_init_license()?;
1429
1430    let jest = TestTemplate::Jest == test_template;
1431    if javascript {
1432        // Build javascript config
1433        let mut package_json = File::create("package.json")?;
1434        package_json.write_all(rust_template::package_json(jest, license).as_bytes())?;
1435
1436        let mut deploy = File::create(migrations_path.join("deploy.js"))?;
1437        deploy.write_all(rust_template::deploy_script().as_bytes())?;
1438    } else {
1439        // Build typescript config
1440        let mut ts_config = File::create("tsconfig.json")?;
1441        ts_config.write_all(rust_template::ts_config(jest).as_bytes())?;
1442
1443        let mut ts_package_json = File::create("package.json")?;
1444        ts_package_json.write_all(rust_template::ts_package_json(jest, license).as_bytes())?;
1445
1446        let mut deploy = File::create(migrations_path.join("deploy.ts"))?;
1447        deploy.write_all(rust_template::ts_deploy_script().as_bytes())?;
1448    }
1449
1450    test_template.create_test_files(&project_name, javascript, &program_id.to_string())?;
1451
1452    if !no_install {
1453        let package_manager_result = install_node_modules(&package_manager_cmd)?;
1454
1455        if !package_manager_result.status.success() && package_manager_cmd != "npm" {
1456            println!("Failed {package_manager_cmd} install will attempt to npm install");
1457            install_node_modules("npm")?;
1458        } else {
1459            eprintln!("Failed to install node modules");
1460        }
1461    }
1462
1463    if !no_git {
1464        let git_result = std::process::Command::new("git")
1465            .arg("init")
1466            .stdout(Stdio::inherit())
1467            .stderr(Stdio::inherit())
1468            .output()
1469            .map_err(|e| anyhow::format_err!("git init failed: {}", e))?;
1470        if !git_result.status.success() {
1471            eprintln!("Failed to automatically initialize a new git repository");
1472        }
1473    }
1474
1475    if install_agent_skills {
1476        install_solana_skill();
1477    }
1478
1479    println!("{project_name} initialized");
1480
1481    Ok(())
1482}
1483
1484fn install_solana_skill() {
1485    const SKILL_REPO: &str = "https://github.com/solana-foundation/solana-dev-skill";
1486    const SKILL_NAME: &str = "solana-dev";
1487
1488    // Skip if globally installed (active across all projects already)
1489    let global_path = home_dir()
1490        .unwrap_or_default()
1491        .join(".agents")
1492        .join("skills")
1493        .join(SKILL_NAME);
1494    if global_path.exists() {
1495        return;
1496    }
1497
1498    // Skip if already project-scoped (could be anchor init --force on existing folder)
1499    let project_path = Path::new(".agents").join("skills").join(SKILL_NAME);
1500    if project_path.exists() {
1501        return;
1502    }
1503
1504    println!("Installing Solana dev skill for Agents from {SKILL_REPO}");
1505
1506    let status = std::process::Command::new("npx")
1507        .args([
1508            "--yes",
1509            "skills@1.4.4",
1510            "add",
1511            SKILL_REPO,
1512            "--skill",
1513            "*",
1514            "-y",
1515        ])
1516        .stdout(Stdio::inherit())
1517        .stderr(Stdio::inherit())
1518        .status();
1519
1520    match status {
1521        Ok(s) if s.success() => {
1522            println!("Solana dev skill installed successfully");
1523        }
1524        _ => {
1525            eprintln!(
1526                "Warning: Failed to install Solana dev skill. Install manually with:\n  npx \
1527                 skills add {SKILL_REPO}"
1528            );
1529        }
1530    }
1531}
1532
1533fn install_node_modules(cmd: &str) -> Result<std::process::Output> {
1534    if cfg!(target_os = "windows") {
1535        std::process::Command::new("cmd")
1536            .arg(format!("/C {cmd} install"))
1537            .stdout(Stdio::inherit())
1538            .stderr(Stdio::inherit())
1539            .output()
1540            .map_err(|e| anyhow::format_err!("{} install failed: {}", cmd, e))
1541    } else {
1542        std::process::Command::new(cmd)
1543            .arg("install")
1544            .stdout(Stdio::inherit())
1545            .stderr(Stdio::inherit())
1546            .output()
1547            .map_err(|e| anyhow::format_err!("{} install failed: {}", cmd, e))
1548    }
1549}
1550
// Creates a new program crate in the `programs/<name>` directory.
//
// The program is generated from `template` and registered in `Anchor.toml`
// under the currently configured cluster. If a program with the same name is
// already registered, `force` must be set; its directory is deleted first.
fn new(
    cfg_override: &ConfigOverride,
    name: String,
    template: ProgramTemplate,
    force: bool,
) -> Result<()> {
    with_workspace(cfg_override, |cfg| -> Result<()> {
        match cfg.path().parent() {
            None => {
                println!("Unable to make new program");
            }
            Some(parent) => {
                // Run from the workspace root so relative paths resolve.
                std::env::set_current_dir(parent)?;

                let cluster = cfg.provider.cluster.clone();
                let programs = cfg.programs.entry(cluster).or_default();
                if programs.contains_key(&name) {
                    if !force {
                        return Err(anyhow!("Program already exists"));
                    }

                    // Delete all files within the program folder
                    fs::remove_dir_all(std::env::current_dir()?.join("programs").join(&name))?;
                }

                rust_template::create_program(&name, template, None)?;

                programs.insert(
                    name.clone(),
                    ProgramDeployment {
                        address: rust_template::get_or_create_program_id(&name),
                        path: None,
                        idl: None,
                    },
                );

                // Persist the updated program list back to Anchor.toml.
                let toml = cfg.to_string();
                fs::write("Anchor.toml", toml)?;

                println!("Created new program.");
            }
        };
        Ok(())
    })?
}
1597
/// Array of (path, content) tuples describing files to create on disk.
pub type Files = Vec<(PathBuf, String)>;
1600
1601/// Create files from the given (path, content) tuple array.
1602///
1603/// # Example
1604///
1605/// ```ignore
1606/// crate_files(vec![("programs/my_program/src/lib.rs".into(), "// Content".into())])?;
1607/// ```
1608pub fn create_files(files: &Files) -> Result<()> {
1609    for (path, content) in files {
1610        let path = path
1611            .display()
1612            .to_string()
1613            .replace('/', std::path::MAIN_SEPARATOR_STR);
1614        let path = Path::new(&path);
1615        if path.exists() {
1616            continue;
1617        }
1618
1619        match path.extension() {
1620            Some(_) => {
1621                fs::create_dir_all(path.parent().unwrap())?;
1622                fs::write(path, content)?;
1623            }
1624            None => fs::create_dir_all(path)?,
1625        }
1626    }
1627
1628    Ok(())
1629}
1630
1631/// Override or create files from the given (path, content) tuple array.
1632///
1633/// # Example
1634///
1635/// ```ignore
1636/// override_or_create_files(vec![("programs/my_program/src/lib.rs".into(), "// Content".into())])?;
1637/// ```
1638pub fn override_or_create_files(files: &Files) -> Result<()> {
1639    for (path, content) in files {
1640        let path = Path::new(path);
1641        if path.exists() {
1642            let mut f = fs::OpenOptions::new()
1643                .write(true)
1644                .truncate(true)
1645                .open(path)?;
1646            f.write_all(content.as_bytes())?;
1647            f.flush()?;
1648        } else {
1649            fs::create_dir_all(path.parent().unwrap())?;
1650            fs::write(path, content)?;
1651        }
1652    }
1653
1654    Ok(())
1655}
1656
/// Expand macros for one program or the whole workspace, writing the output
/// under `.anchor/expanded-macros`.
///
/// Whether the whole workspace or a single package is expanded depends on
/// where (and whether) a `Cargo.toml` is discovered relative to `Anchor.toml`.
pub fn expand(
    cfg_override: &ConfigOverride,
    program_name: Option<String>,
    cargo_args: &[String],
) -> Result<()> {
    // Change to the workspace member directory, if needed.
    if let Some(program_name) = program_name.as_ref() {
        cd_member(cfg_override, program_name)?;
    }

    let workspace_cfg = Config::discover(cfg_override)?
        .ok_or_else(|| anyhow!("The 'anchor expand' command requires an Anchor workspace."))?;
    let cfg_parent = workspace_cfg.path().parent().expect("Invalid Anchor.toml");
    let cargo = Manifest::discover()?;

    let expansions_path = cfg_parent.join(".anchor").join("expanded-macros");
    fs::create_dir_all(&expansions_path)?;

    match cargo {
        // No Cargo.toml found, expand entire workspace
        None => expand_all(&workspace_cfg, expansions_path, cargo_args),
        // Cargo.toml is at root of workspace, expand entire workspace
        Some(cargo) if cargo.path().parent() == workspace_cfg.path().parent() => {
            expand_all(&workspace_cfg, expansions_path, cargo_args)
        }
        // Reaching this arm means Cargo.toml belongs to a single package. Expand it.
        Some(cargo) => expand_program(
            // If we found Cargo.toml, it must be in a directory so unwrap is safe
            cargo.path().parent().unwrap().to_path_buf(),
            expansions_path,
            cargo_args,
        ),
    }
}
1691
1692fn expand_all(
1693    workspace_cfg: &WithPath<Config>,
1694    expansions_path: PathBuf,
1695    cargo_args: &[String],
1696) -> Result<()> {
1697    let cur_dir = std::env::current_dir()?;
1698    for p in workspace_cfg.get_rust_program_list()? {
1699        expand_program(p, expansions_path.clone(), cargo_args)?;
1700    }
1701    std::env::set_current_dir(cur_dir)?;
1702    Ok(())
1703}
1704
/// Run `cargo expand` on a single package and write the expanded source to a
/// timestamped `.rs` file under `expansions_path/<package>/`.
///
/// Exits the whole process with the child's exit code when `cargo expand`
/// fails (e.g. when the `cargo-expand` subcommand is not installed).
fn expand_program(
    program_path: PathBuf,
    expansions_path: PathBuf,
    cargo_args: &[String],
) -> Result<()> {
    let cargo = Manifest::from_path(program_path.join("Cargo.toml"))
        .map_err(|_| anyhow!("Could not find Cargo.toml for program"))?;

    // Use a dedicated target dir so expansion artifacts don't clobber the
    // normal build cache.
    let target_dir_arg = {
        let mut target_dir_arg = OsString::from("--target-dir=");
        target_dir_arg.push(expansions_path.join("expand-target"));
        target_dir_arg
    };

    let package_name = &cargo
        .package
        .as_ref()
        .ok_or_else(|| anyhow!("Cargo config is missing a package"))?
        .name;
    let program_expansions_path = expansions_path.join(package_name);
    fs::create_dir_all(&program_expansions_path)?;

    // Requires the `cargo-expand` subcommand to be installed on the host.
    let exit = std::process::Command::new("cargo")
        .arg("expand")
        .arg(target_dir_arg)
        .arg(format!("--package={package_name}"))
        .args(cargo_args)
        .stderr(Stdio::inherit())
        .output()
        .map_err(|e| anyhow::format_err!("{}", e))?;
    if !exit.status.success() {
        eprintln!("'anchor expand' failed. Perhaps you have not installed 'cargo-expand'? https://github.com/dtolnay/cargo-expand#installation");
        // Mirror the child's exit code so wrapping scripts observe the failure.
        std::process::exit(exit.status.code().unwrap_or(1));
    }

    // Timestamped filename keeps successive expansions side by side;
    // underscores keep the name free of spaces.
    let version = cargo.version();
    let time = chrono::Utc::now().to_string().replace(' ', "_");
    let file_path = program_expansions_path.join(format!("{package_name}-{version}-{time}.rs"));
    fs::write(&file_path, &exit.stdout).map_err(|e| anyhow::format_err!("{}", e))?;

    println!(
        "Expanded {} into file {}\n",
        package_name,
        file_path.to_string_lossy()
    );
    Ok(())
}
1752
#[allow(clippy::too_many_arguments)]
/// Build the Anchor workspace, or a single program within it.
///
/// Depending on where a `Cargo.toml` is discovered relative to `Anchor.toml`,
/// this builds either the entire workspace or only the package the caller is
/// currently in. Runs the configured pre/post-build hooks around the build
/// and prepares the `target/idl` and `target/types` output directories.
///
/// * `verifiable` — build inside Docker for a reproducible artifact.
/// * `ignore_keys` — skip the program-id mismatch check (always skipped by
///   `anchor test`).
/// * `stdout`/`stderr` — optional redirection, used by the package registry
///   server.
pub fn build(
    cfg_override: &ConfigOverride,
    no_idl: bool,
    idl: Option<String>,
    idl_ts: Option<String>,
    verifiable: bool,
    skip_lint: bool,
    ignore_keys: bool,
    program_name: Option<String>,
    solana_version: Option<String>,
    docker_image: Option<String>,
    bootstrap: BootstrapMode,
    stdout: Option<File>, // Used for the package registry server.
    stderr: Option<File>, // Used for the package registry server.
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
    no_docs: bool,
) -> Result<()> {
    // Change to the workspace member directory, if needed.
    if let Some(program_name) = program_name.as_ref() {
        cd_member(cfg_override, program_name)?;
    }
    let cfg = Config::discover(cfg_override)?
        .ok_or_else(|| anyhow!("The 'anchor build' command requires an Anchor workspace."))?;
    let cfg_parent = cfg.path().parent().expect("Invalid Anchor.toml");

    // Require overflow checks
    let workspace_cargo_toml_path = cfg_parent.join("Cargo.toml");
    if workspace_cargo_toml_path.exists() {
        check_overflow(workspace_cargo_toml_path)?;
    }

    // Check whether there is a mismatch between CLI and crate/package versions
    // (`.ok()` — these checks are advisory and must not abort the build).
    check_anchor_version(&cfg).ok();
    check_deps(&cfg).ok();

    // Check for program ID mismatches before building (skip if --ignore-keys is used), Always skipped in anchor test
    if !ignore_keys {
        check_program_id_mismatch(&cfg, program_name.clone())?;
    }

    // Resolve and create the IDL JSON output directory (default: target/idl).
    let idl_out = match idl {
        Some(idl) => Some(PathBuf::from(idl)),
        None => Some(cfg_parent.join("target").join("idl")),
    };
    fs::create_dir_all(idl_out.as_ref().unwrap())?;

    // Resolve and create the TypeScript types output directory
    // (default: target/types).
    let idl_ts_out = match idl_ts {
        Some(idl_ts) => Some(PathBuf::from(idl_ts)),
        None => Some(cfg_parent.join("target").join("types")),
    };
    fs::create_dir_all(idl_ts_out.as_ref().unwrap())?;

    // Optional extra copy destination for generated TypeScript types.
    if !cfg.workspace.types.is_empty() {
        fs::create_dir_all(cfg_parent.join(&cfg.workspace.types))?;
    };

    cfg.run_hooks(HookType::PreBuild)?;

    let cargo = Manifest::discover()?;
    let build_config = BuildConfig {
        verifiable,
        // CLI flags win over Anchor.toml toolchain settings.
        solana_version: solana_version.or_else(|| cfg.toolchain.solana_version.clone()),
        docker_image: docker_image.unwrap_or_else(|| cfg.docker()),
        bootstrap,
    };
    match cargo {
        // No Cargo.toml so build the entire workspace.
        None => build_all(
            &cfg,
            cfg.path(),
            no_idl,
            idl_out,
            idl_ts_out,
            &build_config,
            stdout,
            stderr,
            env_vars,
            cargo_args,
            skip_lint,
            no_docs,
        )?,
        // If the Cargo.toml is at the root, build the entire workspace.
        Some(cargo) if cargo.path().parent() == cfg.path().parent() => build_all(
            &cfg,
            cfg.path(),
            no_idl,
            idl_out,
            idl_ts_out,
            &build_config,
            stdout,
            stderr,
            env_vars,
            cargo_args,
            skip_lint,
            no_docs,
        )?,
        // Cargo.toml represents a single package. Build it.
        Some(cargo) => build_rust_cwd(
            &cfg,
            cargo.path().to_path_buf(),
            no_idl,
            idl_out,
            idl_ts_out,
            &build_config,
            stdout,
            stderr,
            env_vars,
            cargo_args,
            skip_lint,
            no_docs,
        )?,
    }
    cfg.run_hooks(HookType::PostBuild)?;

    set_workspace_dir_or_exit();

    Ok(())
}
1873
1874#[allow(clippy::too_many_arguments)]
1875fn build_all(
1876    cfg: &WithPath<Config>,
1877    cfg_path: &Path,
1878    no_idl: bool,
1879    idl_out: Option<PathBuf>,
1880    idl_ts_out: Option<PathBuf>,
1881    build_config: &BuildConfig,
1882    stdout: Option<File>, // Used for the package registry server.
1883    stderr: Option<File>, // Used for the package registry server.
1884    env_vars: Vec<String>,
1885    cargo_args: Vec<String>,
1886    skip_lint: bool,
1887    no_docs: bool,
1888) -> Result<()> {
1889    let cur_dir = std::env::current_dir()?;
1890    let r = match cfg_path.parent() {
1891        None => Err(anyhow!("Invalid Anchor.toml at {}", cfg_path.display())),
1892        Some(_parent) => {
1893            for p in cfg.get_rust_program_list()? {
1894                build_rust_cwd(
1895                    cfg,
1896                    p.join("Cargo.toml"),
1897                    no_idl,
1898                    idl_out.clone(),
1899                    idl_ts_out.clone(),
1900                    build_config,
1901                    stdout.as_ref().map(|f| f.try_clone()).transpose()?,
1902                    stderr.as_ref().map(|f| f.try_clone()).transpose()?,
1903                    env_vars.clone(),
1904                    cargo_args.clone(),
1905                    skip_lint,
1906                    no_docs,
1907                )?;
1908            }
1909            Ok(())
1910        }
1911    };
1912    std::env::set_current_dir(cur_dir)?;
1913    r
1914}
1915
1916// Runs the build command outside of a workspace.
1917#[allow(clippy::too_many_arguments)]
1918fn build_rust_cwd(
1919    cfg: &WithPath<Config>,
1920    cargo_toml: PathBuf,
1921    no_idl: bool,
1922    idl_out: Option<PathBuf>,
1923    idl_ts_out: Option<PathBuf>,
1924    build_config: &BuildConfig,
1925    stdout: Option<File>,
1926    stderr: Option<File>,
1927    env_vars: Vec<String>,
1928    cargo_args: Vec<String>,
1929    skip_lint: bool,
1930    no_docs: bool,
1931) -> Result<()> {
1932    match cargo_toml.parent() {
1933        None => return Err(anyhow!("Unable to find parent")),
1934        Some(p) => std::env::set_current_dir(p)?,
1935    };
1936    match build_config.verifiable {
1937        false => _build_rust_cwd(
1938            cfg, no_idl, idl_out, idl_ts_out, skip_lint, no_docs, cargo_args,
1939        ),
1940        true => build_cwd_verifiable(
1941            cfg,
1942            cargo_toml,
1943            build_config,
1944            stdout,
1945            stderr,
1946            skip_lint,
1947            env_vars,
1948            cargo_args,
1949            no_docs,
1950        ),
1951    }
1952}
1953
// Builds an anchor program in a docker image and copies the build artifacts
// into the `target/` directory.
//
// On a successful container build, the IDL and its TypeScript types are
// generated on the host and written under target/idl and target/types. The
// Docker build result is returned either way.
#[allow(clippy::too_many_arguments)]
fn build_cwd_verifiable(
    cfg: &WithPath<Config>,
    cargo_toml: PathBuf,
    build_config: &BuildConfig,
    stdout: Option<File>,
    stderr: Option<File>,
    skip_lint: bool,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
    no_docs: bool,
) -> Result<()> {
    // Create output dirs.
    let workspace_dir = cfg.path().parent().unwrap().canonicalize()?;
    let target_dir = workspace_dir.join("target");
    fs::create_dir_all(target_dir.join("verifiable"))?;
    fs::create_dir_all(target_dir.join("idl"))?;
    fs::create_dir_all(target_dir.join("types"))?;
    if !&cfg.workspace.types.is_empty() {
        fs::create_dir_all(workspace_dir.join(&cfg.workspace.types))?;
    }

    let container_name = "anchor-program";

    // Build the binary in docker.
    let result = docker_build(
        cfg,
        container_name,
        cargo_toml,
        build_config,
        stdout,
        stderr,
        env_vars,
        cargo_args.clone(),
    );

    match &result {
        Err(e) => {
            eprintln!("Error during Docker build: {e:?}");
        }
        Ok(_) => {
            // Build the idl.
            println!("Extracting the IDL");
            let idl = generate_idl(cfg, skip_lint, no_docs, &cargo_args)?;
            // Write out the JSON file.
            println!("Writing the IDL file");
            let out_file = workspace_dir
                .join("target")
                .join("idl")
                .join(&idl.metadata.name)
                .with_extension("json");
            write_idl(&idl, OutFile::File(out_file))?;

            // Write out the TypeScript type.
            println!("Writing the .ts file");
            let ts_file = workspace_dir
                .join("target")
                .join("types")
                .join(&idl.metadata.name)
                .with_extension("ts");
            fs::write(&ts_file, idl_ts(&idl)?)?;

            // Copy out the TypeScript type.
            // Optional extra destination configured via `workspace.types`.
            if !&cfg.workspace.types.is_empty() {
                fs::copy(
                    ts_file,
                    workspace_dir
                        .join(&cfg.workspace.types)
                        .join(idl.metadata.name)
                        .with_extension("ts"),
                )?;
            }

            println!("Build success");
        }
    }

    result
}
2035
#[allow(clippy::too_many_arguments)]
/// Run a verifiable build inside a detached Docker container.
///
/// Starts the container with the workspace mounted at `/workdir`, prepares
/// its toolchain, builds the program, and always removes the container again
/// before returning the build result.
fn docker_build(
    cfg: &WithPath<Config>,
    container_name: &str,
    cargo_toml: PathBuf,
    build_config: &BuildConfig,
    stdout: Option<File>,
    stderr: Option<File>,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
) -> Result<()> {
    let binary_name = Manifest::from_path(&cargo_toml)?.lib_name()?;

    // Docker vars.
    // Mount the workspace root at /workdir inside the container.
    let workdir = Path::new("/workdir");
    let volume_mount = format!(
        "{}:{}",
        cfg.path().parent().unwrap().canonicalize()?.display(),
        workdir.to_str().unwrap(),
    );
    println!("Using image {:?}", build_config.docker_image);

    // Start the docker image running detached in the background.
    // CARGO_TARGET_DIR points at a container-local directory so build
    // artifacts don't pollute the host's `target/`.
    let target_dir = workdir.join("docker-target");
    println!("Run docker image");
    let exit = std::process::Command::new("docker")
        .args([
            "run",
            "-it",
            "-d",
            "--name",
            container_name,
            "--env",
            &format!(
                "CARGO_TARGET_DIR={}",
                target_dir.as_path().to_str().unwrap()
            ),
            "-v",
            &volume_mount,
            "-w",
            workdir.to_str().unwrap(),
            &build_config.docker_image,
            "bash",
        ])
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .output()
        .map_err(|e| anyhow::format_err!("Docker build failed: {}", e))?;
    if !exit.status.success() {
        return Err(anyhow!("Failed to build program"));
    }

    // Chain prep and build into one result so the cleanup below runs for a
    // failure in either step.
    let result = docker_prep(container_name, build_config).and_then(|_| {
        let cfg_parent = cfg.path().parent().unwrap();
        docker_build_bpf(
            container_name,
            cargo_toml.as_path(),
            cfg_parent,
            target_dir.as_path(),
            binary_name,
            stdout,
            stderr,
            env_vars,
            cargo_args,
        )
    });

    // Cleanup regardless of errors
    docker_cleanup(container_name, target_dir.as_path())?;

    // Done.
    result
}
2109
2110fn docker_prep(container_name: &str, build_config: &BuildConfig) -> Result<()> {
2111    // Set the solana version in the container, if given. Otherwise use the
2112    // default.
2113    match build_config.bootstrap {
2114        BootstrapMode::Debian => {
2115            // Install build requirements
2116            docker_exec(container_name, &["apt", "update"])?;
2117            docker_exec(
2118                container_name,
2119                &["apt", "install", "-y", "curl", "build-essential"],
2120            )?;
2121
2122            // Install Rust
2123            docker_exec(
2124                container_name,
2125                &["curl", "https://sh.rustup.rs", "-sfo", "rustup.sh"],
2126            )?;
2127            docker_exec(container_name, &["sh", "rustup.sh", "-y"])?;
2128            docker_exec(container_name, &["rm", "-f", "rustup.sh"])?;
2129        }
2130        BootstrapMode::None => {}
2131    }
2132
2133    if let Some(solana_version) = &build_config.solana_version {
2134        println!("Using solana version: {solana_version}");
2135
2136        // Install Solana CLI
2137        docker_exec(
2138            container_name,
2139            &[
2140                "curl",
2141                "-sSfL",
2142                &format!("https://release.anza.xyz/v{solana_version}/install",),
2143                "-o",
2144                "solana_installer.sh",
2145            ],
2146        )?;
2147        docker_exec(container_name, &["sh", "solana_installer.sh"])?;
2148        docker_exec(container_name, &["rm", "-f", "solana_installer.sh"])?;
2149    }
2150    Ok(())
2151}
2152
#[allow(clippy::too_many_arguments)]
/// Build the program inside the running container and copy the produced `.so`
/// out to `target/verifiable/<binary_name>.so` on the host.
fn docker_build_bpf(
    container_name: &str,
    cargo_toml: &Path,
    cfg_parent: &Path,
    target_dir: &Path,
    binary_name: String,
    stdout: Option<File>,
    stderr: Option<File>,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
) -> Result<()> {
    // The manifest path must be relative to the workspace root, because the
    // workspace root is what is mounted as the container's workdir.
    let manifest_path =
        pathdiff::diff_paths(cargo_toml.canonicalize()?, cfg_parent.canonicalize()?)
            .ok_or_else(|| anyhow!("Unable to diff paths"))?;
    println!(
        "Building {} manifest: {:?}",
        binary_name,
        manifest_path.display()
    );

    // Execute the build.
    let exit = std::process::Command::new("docker")
        .args([
            "exec",
            "--env",
            // Make the container's Solana and cargo toolchains visible.
            "PATH=/root/.local/share/solana/install/active_release/bin:/root/.cargo/bin:/usr/\
             local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
        ])
        .args(
            // Forward caller-supplied environment variables into the container.
            env_vars
                .iter()
                .map(|x| ["--env", x.as_str()])
                .collect::<Vec<[&str; 2]>>()
                .concat(),
        )
        .args([container_name, "cargo"])
        .args(BUILD_SUBCOMMAND)
        .args(["--manifest-path", &manifest_path.display().to_string()])
        .args(cargo_args)
        .stdout(match stdout {
            None => Stdio::inherit(),
            Some(f) => f.into(),
        })
        .stderr(match stderr {
            None => Stdio::inherit(),
            Some(f) => f.into(),
        })
        .output()
        .map_err(|e| anyhow::format_err!("Docker build failed: {}", e))?;
    if !exit.status.success() {
        return Err(anyhow!("Failed to build program"));
    }

    // Copy the binary out of the docker image.
    println!("Copying out the build artifacts");
    let out_file = cfg_parent
        .canonicalize()?
        .join(
            Path::new("target")
                .join("verifiable")
                .join(&binary_name)
                .with_extension("so"),
        )
        .display()
        .to_string();

    // This requires the target directory of any built program to be located at
    // the root of the workspace.
    let mut bin_path = target_dir.join("deploy");
    bin_path.push(format!("{binary_name}.so"));
    // `docker cp` source spec: <container>:<path-in-container>.
    let bin_artifact = format!(
        "{}:{}",
        container_name,
        bin_path.as_path().to_str().unwrap()
    );
    let exit = std::process::Command::new("docker")
        .args(["cp", &bin_artifact, &out_file])
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .output()
        .map_err(|e| anyhow::format_err!("{}", e))?;
    if !exit.status.success() {
        Err(anyhow!(
            "Failed to copy binary out of docker. Is the target directory set correctly?"
        ))
    } else {
        Ok(())
    }
}
2243
2244fn docker_cleanup(container_name: &str, target_dir: &Path) -> Result<()> {
2245    // Wipe the generated docker-target dir.
2246    println!("Cleaning up the docker target directory");
2247    docker_exec(container_name, &["rm", "-rf", target_dir.to_str().unwrap()])?;
2248
2249    // Remove the docker image.
2250    println!("Removing the docker container");
2251    let exit = std::process::Command::new("docker")
2252        .args(["rm", "-f", container_name])
2253        .stdout(Stdio::inherit())
2254        .stderr(Stdio::inherit())
2255        .output()
2256        .map_err(|e| anyhow::format_err!("{}", e))?;
2257    if !exit.status.success() {
2258        println!("Unable to remove the docker container");
2259        std::process::exit(exit.status.code().unwrap_or(1));
2260    }
2261    Ok(())
2262}
2263
2264fn docker_exec(container_name: &str, args: &[&str]) -> Result<()> {
2265    let exit = std::process::Command::new("docker")
2266        .args([&["exec", container_name], args].concat())
2267        .stdout(Stdio::inherit())
2268        .stderr(Stdio::inherit())
2269        .output()
2270        .map_err(|e| anyhow!("Failed to run command \"{:?}\": {:?}", args, e))?;
2271    if !exit.status.success() {
2272        Err(anyhow!("Failed to run command: {:?}", args))
2273    } else {
2274        Ok(())
2275    }
2276}
2277
2278#[allow(clippy::too_many_arguments)]
2279fn _build_rust_cwd(
2280    cfg: &WithPath<Config>,
2281    no_idl: bool,
2282    idl_out: Option<PathBuf>,
2283    idl_ts_out: Option<PathBuf>,
2284    skip_lint: bool,
2285    no_docs: bool,
2286    cargo_args: Vec<String>,
2287) -> Result<()> {
2288    let exit = std::process::Command::new("cargo")
2289        .args(BUILD_SUBCOMMAND)
2290        .args(cargo_args.clone())
2291        .stdout(Stdio::inherit())
2292        .stderr(Stdio::inherit())
2293        .output()
2294        .map_err(|e| anyhow::format_err!("{}", e))?;
2295    if !exit.status.success() {
2296        std::process::exit(exit.status.code().unwrap_or(1));
2297    }
2298
2299    // Generate IDL
2300    if !no_idl {
2301        let idl = generate_idl(cfg, skip_lint, no_docs, &cargo_args)?;
2302
2303        // JSON out path.
2304        let out = match idl_out {
2305            None => PathBuf::from(".")
2306                .join(&idl.metadata.name)
2307                .with_extension("json"),
2308            Some(o) => PathBuf::from(&o.join(&idl.metadata.name).with_extension("json")),
2309        };
2310        // TS out path.
2311        let ts_out = match idl_ts_out {
2312            None => PathBuf::from(".")
2313                .join(&idl.metadata.name)
2314                .with_extension("ts"),
2315            Some(o) => PathBuf::from(&o.join(&idl.metadata.name).with_extension("ts")),
2316        };
2317
2318        // Write out the JSON file.
2319        write_idl(&idl, OutFile::File(out))?;
2320        // Write out the TypeScript type.
2321        fs::write(&ts_out, idl_ts(&idl)?)?;
2322
2323        // Copy out the TypeScript type.
2324        let cfg_parent = cfg.path().parent().expect("Invalid Anchor.toml");
2325        if !&cfg.workspace.types.is_empty() {
2326            fs::copy(
2327                &ts_out,
2328                cfg_parent
2329                    .join(&cfg.workspace.types)
2330                    .join(&idl.metadata.name)
2331                    .with_extension("ts"),
2332            )?;
2333        }
2334    }
2335
2336    Ok(())
2337}
2338
/// Subcommand and any arguments to be passed to cargo
// NOTE(review): `--tools-version v1.52` presumably pins the platform-tools
// release used by `cargo build-sbf` — confirm against `cargo build-sbf --help`
// before bumping.
const BUILD_SUBCOMMAND: &[&str] = &["build-sbf", "--tools-version", "v1.52"];
2341
2342pub fn verify(
2343    program_id: Pubkey,
2344    repo_url: Option<String>,
2345    commit_hash: Option<String>,
2346    current_dir: bool,
2347    program_name: Option<String>,
2348    args: Vec<String>,
2349) -> Result<()> {
2350    let mut command_args = Vec::new();
2351
2352    match (current_dir, repo_url) {
2353        (true, _) => {
2354            let current_path = std::env::current_dir()?
2355                .to_str()
2356                .ok_or_else(|| anyhow!("Invalid current directory path"))?
2357                .to_owned();
2358            command_args.push(current_path);
2359            command_args.push("--current-dir".into());
2360        }
2361        (false, Some(url)) => {
2362            command_args.push(url);
2363        }
2364        (false, None) => {
2365            return Err(anyhow!(
2366                "You must provide either --repo-url or --current-dir"
2367            ));
2368        }
2369    }
2370
2371    if let Some(commit) = commit_hash {
2372        command_args.push("--commit-hash".into());
2373        command_args.push(commit);
2374    }
2375
2376    if let Some(name) = program_name {
2377        command_args.push("--library-name".into());
2378        command_args.push(name);
2379    }
2380
2381    command_args.push("--program-id".into());
2382    command_args.push(program_id.to_string());
2383
2384    command_args.extend(args);
2385
2386    println!("Verifying program {program_id}");
2387    let verify_path = AVM_HOME.join("bin").join("solana-verify");
2388    if !verify_path.exists() {
2389        install_with_avm(env!("CARGO_PKG_VERSION"), true)
2390            .context("installing Anchor with solana-verify")?;
2391    }
2392
2393    let status = std::process::Command::new(verify_path)
2394        .arg("verify-from-repo")
2395        .args(&command_args)
2396        .stdout(std::process::Stdio::inherit())
2397        .stderr(std::process::Stdio::inherit())
2398        .status()
2399        .with_context(|| "Failed to run `solana-verify`")?;
2400
2401    if !status.success() {
2402        return Err(anyhow!("Failed to verify program"));
2403    }
2404
2405    Ok(())
2406}
2407
/// Change the current directory to the workspace program whose Cargo package
/// name or library name equals `program_name`.
fn cd_member(cfg_override: &ConfigOverride, program_name: &str) -> Result<()> {
    // Change directories to the given `program_name`, using either Anchor or Cargo workspace
    let programs = program::get_programs_from_workspace(cfg_override, None)?;

    for program in programs {
        let cargo_toml = program.path.join("Cargo.toml");
        if !cargo_toml.exists() {
            // NOTE(review): this errors on the *first* program missing a
            // Cargo.toml, even when a later program would have matched
            // `program_name` — confirm the strictness is intentional.
            return Err(anyhow!(
                "Did not find Cargo.toml at the path: {}",
                program.path.display()
            ));
        }

        let manifest = Manifest::from_path(&cargo_toml)?;
        let pkg_name = manifest.package().name();
        // Match either the Cargo package name or the program's lib name.
        if program_name == pkg_name || program_name == program.lib_name {
            std::env::set_current_dir(&program.path)?;
            return Ok(());
        }
    }

    Err(anyhow!("{} is not part of the workspace", program_name,))
}
2431
/// Dispatch an `anchor idl` subcommand to its handler.
fn idl(cfg_override: &ConfigOverride, subcmd: IdlCommand) -> Result<()> {
    match subcmd {
        IdlCommand::Init {
            program_id,
            filepath,
            priority_fee,
            non_canonical,
            #[cfg(feature = "idl-localnet-testing")]
            allow_localnet,
        } => {
            // `allow_localnet` only exists as a CLI flag behind the
            // `idl-localnet-testing` feature; otherwise it defaults to false.
            #[cfg(feature = "idl-localnet-testing")]
            let allow_localnet = allow_localnet;
            #[cfg(not(feature = "idl-localnet-testing"))]
            let allow_localnet = false;
            idl_init(
                program_id,
                cfg_override,
                filepath,
                priority_fee,
                non_canonical,
                allow_localnet,
            )
        }
        IdlCommand::Upgrade {
            program_id,
            filepath,
            priority_fee,
            #[cfg(feature = "idl-localnet-testing")]
            allow_localnet,
        } => {
            // Same feature-gated default as in the Init arm above.
            #[cfg(feature = "idl-localnet-testing")]
            let allow_localnet = allow_localnet;
            #[cfg(not(feature = "idl-localnet-testing"))]
            let allow_localnet = false;
            idl_upgrade(
                program_id,
                cfg_override,
                filepath,
                priority_fee,
                allow_localnet,
            )
        }
        IdlCommand::Build {
            program_name,
            out,
            out_ts,
            no_docs,
            skip_lint,
            cargo_args,
        } => idl_build(
            cfg_override,
            program_name,
            out,
            out_ts,
            no_docs,
            skip_lint,
            cargo_args,
        ),
        IdlCommand::Fetch {
            program_id: address,
            out,
            non_canonical,
        } => idl_fetch(cfg_override, address, out, non_canonical),
        IdlCommand::Convert {
            path,
            out,
            program_id,
        } => idl_convert(path, out, program_id),
        IdlCommand::Type { path, out } => idl_type(path, out),
        IdlCommand::Close {
            program_id,
            seed,
            priority_fee,
        } => idl_close_metadata(cfg_override, program_id, seed, priority_fee),
        IdlCommand::CreateBuffer {
            filepath,
            priority_fee,
        } => idl_create_buffer(cfg_override, filepath, priority_fee),
        IdlCommand::SetBufferAuthority {
            buffer,
            new_authority,
            priority_fee,
        } => idl_set_buffer_authority(cfg_override, buffer, new_authority, priority_fee),
        IdlCommand::WriteBuffer {
            program_id,
            buffer,
            seed,
            close_buffer,
            priority_fee,
        } => idl_write_buffer_metadata(
            cfg_override,
            program_id,
            buffer,
            seed,
            close_buffer,
            priority_fee,
        ),
    }
}
2531
2532fn idl_init(
2533    program_id: Option<Pubkey>,
2534    cfg_override: &ConfigOverride,
2535    idl_filepath: String,
2536    priority_fee: Option<u64>,
2537    non_canonical: bool,
2538    allow_localnet: bool,
2539) -> Result<()> {
2540    // Get cluster URL and wallet path from Anchor config
2541    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
2542
2543    let is_localnet = cluster_url.contains("localhost") || cluster_url.contains("127.0.0.1");
2544    if is_localnet && !allow_localnet {
2545        #[cfg(feature = "idl-localnet-testing")]
2546        println!(
2547            "Skipping IDL initialization on localnet. To deploy on localnet, use --allow-localnet"
2548        );
2549        #[cfg(not(feature = "idl-localnet-testing"))]
2550        println!("Skipping IDL initialization on localnet");
2551        return Ok(());
2552    }
2553
2554    let program_id = match program_id {
2555        Some(id) => id.to_string(),
2556        _ => {
2557            let idl = fs::read(&idl_filepath)?;
2558            let idl = convert_idl(&idl)?;
2559            idl.address
2560        }
2561    };
2562
2563    let command = metadata::IdlCommand::funded(
2564        cluster_url,
2565        wallet_path,
2566        priority_fee,
2567        metadata::FundedIdlSubcommand::Write {
2568            program_id,
2569            idl_filepath,
2570            non_canonical,
2571        },
2572    );
2573
2574    if !command.status()?.success() {
2575        return Err(anyhow!("Failed to initialize IDL"));
2576    }
2577
2578    println!("IDL initialized.");
2579    Ok(())
2580}
2581
2582// Currently identical to `idl_init`, other than not accepting `non_canonical`
2583fn idl_upgrade(
2584    program_id: Option<Pubkey>,
2585    cfg_override: &ConfigOverride,
2586    idl_filepath: String,
2587    priority_fee: Option<u64>,
2588    allow_localnet: bool,
2589) -> Result<()> {
2590    // Get cluster URL and wallet path from Anchor config
2591    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
2592
2593    let is_localnet = cluster_url.contains("localhost") || cluster_url.contains("127.0.0.1");
2594    if is_localnet && !allow_localnet {
2595        #[cfg(feature = "idl-localnet-testing")]
2596        println!("Skipping IDL upgrade on localnet. To deploy on localnet, use --allow-localnet");
2597        #[cfg(not(feature = "idl-localnet-testing"))]
2598        println!("Skipping IDL upgrade on localnet");
2599        return Ok(());
2600    }
2601
2602    let program_id = match program_id {
2603        Some(id) => id.to_string(),
2604        _ => {
2605            let idl = fs::read(&idl_filepath)?;
2606            let idl = convert_idl(&idl)?;
2607            idl.address
2608        }
2609    };
2610
2611    let command = metadata::IdlCommand::funded(
2612        cluster_url,
2613        wallet_path,
2614        priority_fee,
2615        metadata::FundedIdlSubcommand::Write {
2616            program_id,
2617            idl_filepath,
2618            non_canonical: false,
2619        },
2620    );
2621    if !command.status()?.success() {
2622        return Err(anyhow!("Failed to initialize IDL"));
2623    }
2624
2625    println!("IDL upgraded.");
2626    Ok(())
2627}
2628
2629fn idl_build(
2630    cfg_override: &ConfigOverride,
2631    program_name: Option<String>,
2632    out: Option<String>,
2633    out_ts: Option<String>,
2634    no_docs: bool,
2635    skip_lint: bool,
2636    cargo_args: Vec<String>,
2637) -> Result<()> {
2638    let cfg = Config::discover(cfg_override)?
2639        .ok_or_else(|| anyhow!("The 'anchor idl build' command requires an Anchor workspace."))?;
2640    let current_dir = std::env::current_dir()?;
2641    let program_path = match program_name {
2642        Some(name) => cfg.get_program(&name)?.path,
2643        None => {
2644            let programs = cfg.read_all_programs()?;
2645            if programs.len() == 1 {
2646                programs.into_iter().next().unwrap().path
2647            } else {
2648                programs
2649                    .into_iter()
2650                    .find(|program| program.path == current_dir)
2651                    .ok_or_else(|| anyhow!("Not in a program directory"))?
2652                    .path
2653            }
2654        }
2655    };
2656    std::env::set_current_dir(program_path)?;
2657    let idl = generate_idl(&cfg, skip_lint, no_docs, &cargo_args)?;
2658    std::env::set_current_dir(current_dir)?;
2659
2660    let out = match out {
2661        Some(path) => OutFile::File(PathBuf::from(path)),
2662        None => OutFile::Stdout,
2663    };
2664    write_idl(&idl, out)?;
2665
2666    if let Some(path) = out_ts {
2667        fs::write(path, idl_ts(&idl)?)?;
2668    }
2669
2670    Ok(())
2671}
2672
2673/// Generate IDL with method decided by whether manifest file has `idl-build` feature or not.
2674fn generate_idl(
2675    cfg: &WithPath<Config>,
2676    skip_lint: bool,
2677    no_docs: bool,
2678    cargo_args: &[String],
2679) -> Result<Idl> {
2680    check_idl_build_feature()?;
2681
2682    anchor_lang_idl::build::IdlBuilder::new()
2683        .resolution(cfg.features.resolution)
2684        .skip_lint(cfg.features.skip_lint || skip_lint)
2685        .no_docs(no_docs)
2686        .cargo_args(cargo_args.into())
2687        .build()
2688}
2689
2690fn idl_fetch(
2691    cfg_override: &ConfigOverride,
2692    address: Pubkey,
2693    out: Option<String>,
2694    non_canonical: bool,
2695) -> Result<()> {
2696    let (cluster_url, _) = get_cluster_and_wallet(cfg_override)?;
2697    let command = metadata::IdlCommand::unfunded(
2698        cluster_url,
2699        metadata::UnfundedIdlSubcommand::Fetch {
2700            program_id: address.to_string(),
2701            out,
2702            non_canonical,
2703        },
2704    );
2705
2706    if !command.status()?.success() {
2707        return Err(anyhow!("Failed to fetch IDL"));
2708    }
2709    Ok(())
2710}
2711
2712fn idl_convert(path: String, out: Option<String>, program_id: Option<Pubkey>) -> Result<()> {
2713    let idl = fs::read(path)?;
2714
2715    // Set the `metadata.address` field based on the given `program_id`
2716    let idl = match program_id {
2717        Some(program_id) => {
2718            let mut idl = serde_json::from_slice::<serde_json::Value>(&idl)?;
2719            idl.as_object_mut()
2720                .ok_or_else(|| anyhow!("IDL must be an object"))?
2721                .insert(
2722                    "metadata".into(),
2723                    serde_json::json!({ "address": program_id.to_string() }),
2724                );
2725            serde_json::to_vec(&idl)?
2726        }
2727        _ => idl,
2728    };
2729
2730    let idl = convert_idl(&idl)?;
2731    let out = match out {
2732        None => OutFile::Stdout,
2733        Some(out) => OutFile::File(PathBuf::from(out)),
2734    };
2735    write_idl(&idl, out)
2736}
2737
2738fn idl_type(path: String, out: Option<String>) -> Result<()> {
2739    let idl = fs::read(path)?;
2740    let idl = convert_idl(&idl)?;
2741    let types = idl_ts(&idl)?;
2742    match out {
2743        Some(out) => fs::write(out, types)?,
2744        _ => println!("{types}"),
2745    };
2746    Ok(())
2747}
2748
2749fn idl_close_metadata(
2750    cfg_override: &ConfigOverride,
2751    program_id: Pubkey,
2752    seed: String,
2753    priority_fee: Option<u64>,
2754) -> Result<()> {
2755    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
2756    let command = metadata::IdlCommand::funded(
2757        cluster_url,
2758        wallet_path,
2759        priority_fee,
2760        metadata::FundedIdlSubcommand::Close {
2761            program_id: program_id.to_string(),
2762            seed,
2763        },
2764    );
2765
2766    if !command.status()?.success() {
2767        return Err(anyhow!("Failed to close metadata account"));
2768    }
2769
2770    println!("Metadata account closed successfully.");
2771    Ok(())
2772}
2773
2774fn idl_create_buffer(
2775    cfg_override: &ConfigOverride,
2776    filepath: String,
2777    priority_fee: Option<u64>,
2778) -> Result<()> {
2779    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
2780    let command = metadata::IdlCommand::funded(
2781        cluster_url,
2782        wallet_path,
2783        priority_fee,
2784        metadata::FundedIdlSubcommand::CreateBuffer { filepath },
2785    );
2786
2787    if !command.status()?.success() {
2788        return Err(anyhow!("Failed to create buffer"));
2789    }
2790
2791    println!("Buffer created successfully.");
2792    Ok(())
2793}
2794
2795fn idl_set_buffer_authority(
2796    cfg_override: &ConfigOverride,
2797    buffer: Pubkey,
2798    new_authority: Pubkey,
2799    priority_fee: Option<u64>,
2800) -> Result<()> {
2801    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
2802    let command = metadata::IdlCommand::funded(
2803        cluster_url,
2804        wallet_path,
2805        priority_fee,
2806        metadata::FundedIdlSubcommand::SetBufferAuthority {
2807            buffer: buffer.to_string(),
2808            new_authority: new_authority.to_string(),
2809        },
2810    );
2811
2812    if !command.status()?.success() {
2813        return Err(anyhow!("Failed to set buffer authority"));
2814    }
2815
2816    println!("Buffer authority set successfully.");
2817    Ok(())
2818}
2819
2820fn idl_write_buffer_metadata(
2821    cfg_override: &ConfigOverride,
2822    program_id: Pubkey,
2823    buffer: Pubkey,
2824    seed: String,
2825    close_buffer: bool,
2826    priority_fee: Option<u64>,
2827) -> Result<()> {
2828    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
2829    let command = metadata::IdlCommand::funded(
2830        cluster_url,
2831        wallet_path,
2832        priority_fee,
2833        metadata::FundedIdlSubcommand::WriteBuffer {
2834            program_id: program_id.to_string(),
2835            buffer: buffer.to_string(),
2836            seed,
2837            close_buffer,
2838        },
2839    );
2840
2841    if !command.status()?.success() {
2842        return Err(anyhow!("Failed to write metadata using buffer"));
2843    }
2844
2845    println!("Metadata written successfully using buffer.");
2846    Ok(())
2847}
2848
/// Renders `idl` as a TypeScript `export type` declaration with all field
/// names converted to camelCase for JS/TS consumption.
fn idl_ts(idl: &Idl) -> Result<String> {
    let idl_name = &idl.metadata.name;
    let type_name = idl_name.to_pascal_case();
    let idl = serde_json::to_string(idl)?;

    // Convert every field of the IDL to camelCase
    // NOTE(review): this is a textual find/replace over the serialized JSON.
    // Each captured name is replaced *globally* in the accumulator, so every
    // occurrence of the same quoted name is renamed at once.
    let camel_idl = Regex::new(r#""\w+":"([\w\d]+)""#)?
        .captures_iter(&idl)
        .fold(idl.clone(), |acc, cur| {
            let name = cur.get(1).unwrap().as_str();

            // Do not modify pubkeys
            if Pubkey::try_from(name).is_ok() {
                return acc;
            }

            let camel_name = name.to_lower_camel_case();
            acc.replace(&format!(r#""{name}""#), &format!(r#""{camel_name}""#))
        });

    // Pretty format
    // Round-tripping through `Idl` both validates the rewritten JSON and
    // produces the pretty-printed output embedded below.
    let camel_idl = serde_json::to_string_pretty(&serde_json::from_str::<Idl>(&camel_idl)?)?;

    Ok(format!(
        r#"/**
 * Program IDL in camelCase format in order to be used in JS/TS.
 *
 * Note that this is only a type helper and is not the actual IDL. The original
 * IDL can be found at `target/idl/{idl_name}.json`.
 */
export type {type_name} = {camel_idl};
"#
    ))
}
2883
2884fn write_idl(idl: &Idl, out: OutFile) -> Result<()> {
2885    let idl_json = serde_json::to_string_pretty(idl)?;
2886    match out {
2887        OutFile::Stdout => println!("{idl_json}"),
2888        OutFile::File(out) => fs::write(out, idl_json)?,
2889    };
2890
2891    Ok(())
2892}
2893fn account(
2894    cfg_override: &ConfigOverride,
2895    account_type: String,
2896    address: Pubkey,
2897    idl_filepath: Option<String>,
2898) -> Result<()> {
2899    let (program_name, account_type_name) = account_type
2900        .split_once('.') // Split at first occurrence of dot
2901        .and_then(|(x, y)| y.find('.').map_or_else(|| Some((x, y)), |_| None)) // ensures no dots in second substring
2902        .ok_or_else(|| {
2903            anyhow!(
2904                "Please enter the account struct in the following format: <program_name>.<Account>",
2905            )
2906        })?;
2907
2908    let idl = idl_filepath.map_or_else(
2909        || {
2910            Config::discover(cfg_override)?
2911                .ok_or_else(|| {
2912                    anyhow!(
2913                        "The 'anchor account' command requires an Anchor workspace with \
2914                         Anchor.toml for IDL type generation."
2915                    )
2916                })?
2917                .read_all_programs()
2918                .expect("Workspace must contain atleast one program.")
2919                .into_iter()
2920                .find(|p| p.lib_name == *program_name)
2921                .ok_or_else(|| anyhow!("Program {program_name} not found in workspace."))
2922                .map(|p| p.idl)?
2923                .ok_or_else(|| {
2924                    anyhow!(
2925                        "IDL not found. Please build the program atleast once to generate the IDL."
2926                    )
2927                })
2928        },
2929        |idl_path| {
2930            let idl = fs::read(idl_path)?;
2931            let idl = convert_idl(&idl)?;
2932            if idl.metadata.name != program_name {
2933                return Err(anyhow!("IDL does not match program {program_name}."));
2934            }
2935
2936            Ok(idl)
2937        },
2938    )?;
2939
2940    let cluster = match &cfg_override.cluster {
2941        Some(cluster) => cluster.clone(),
2942        None => Config::discover(cfg_override)?
2943            .map(|cfg| cfg.provider.cluster.clone())
2944            .unwrap_or(Cluster::Localnet),
2945    };
2946
2947    let data = create_client(cluster.url()).get_account_data(&address)?;
2948    let disc_len = idl
2949        .accounts
2950        .iter()
2951        .find(|acc| acc.name == account_type_name)
2952        .map(|acc| acc.discriminator.len())
2953        .ok_or_else(|| anyhow!("Account `{account_type_name}` not found in IDL"))?;
2954    let mut data_view = &data[disc_len..];
2955
2956    let deserialized_json =
2957        deserialize_idl_defined_type_to_json(&idl, account_type_name, &mut data_view)?;
2958
2959    println!(
2960        "{}",
2961        serde_json::to_string_pretty(&deserialized_json).unwrap()
2962    );
2963
2964    Ok(())
2965}
2966
// Deserializes user defined IDL types by munching the account data(recursively).
//
// `data` is advanced past the bytes consumed for this type; nested
// fields recurse through `deserialize_idl_type_to_json`, which in turn
// calls back here for `IdlType::Defined` fields.
fn deserialize_idl_defined_type_to_json(
    idl: &Idl,
    defined_type_name: &str,
    data: &mut &[u8],
) -> Result<JsonValue, anyhow::Error> {
    // Resolve the type definition: prefer the type entry matching an account
    // of this name, then fall back to a plain type lookup by name.
    let defined_type = &idl
        .accounts
        .iter()
        .find(|acc| acc.name == defined_type_name)
        .and_then(|acc| idl.types.iter().find(|ty| ty.name == acc.name))
        .or_else(|| idl.types.iter().find(|ty| ty.name == defined_type_name))
        .ok_or_else(|| anyhow!("Type `{}` not found in IDL.", defined_type_name))?
        .ty;

    let mut deserialized_fields = Map::new();

    match defined_type {
        IdlTypeDefTy::Struct { fields } => {
            if let Some(fields) = fields {
                match fields {
                    // Named struct: each field becomes a JSON object entry.
                    IdlDefinedFields::Named(fields) => {
                        for field in fields {
                            deserialized_fields.insert(
                                field.name.clone(),
                                deserialize_idl_type_to_json(&field.ty, data, idl)?,
                            );
                        }
                    }
                    // Tuple struct: rendered as a JSON array keyed by the type name.
                    IdlDefinedFields::Tuple(fields) => {
                        let mut values = Vec::new();
                        for field in fields {
                            values.push(deserialize_idl_type_to_json(field, data, idl)?);
                        }
                        deserialized_fields
                            .insert(defined_type_name.to_owned(), JsonValue::Array(values));
                    }
                }
            }
        }
        IdlTypeDefTy::Enum { variants } => {
            // The variant discriminant is a single leading byte (borsh layout).
            let repr = <u8 as AnchorDeserialize>::deserialize(data)?;

            let variant = variants
                .get(repr as usize)
                .ok_or_else(|| anyhow!("Error while deserializing enum variant {repr}"))?;

            // Unit variants keep an empty object as their payload.
            let mut value = json!({});

            if let Some(enum_field) = &variant.fields {
                match enum_field {
                    IdlDefinedFields::Named(fields) => {
                        let mut values = Map::new();
                        for field in fields {
                            values.insert(
                                field.name.clone(),
                                deserialize_idl_type_to_json(&field.ty, data, idl)?,
                            );
                        }
                        value = JsonValue::Object(values);
                    }
                    IdlDefinedFields::Tuple(fields) => {
                        let mut values = Vec::new();
                        for field in fields {
                            values.push(deserialize_idl_type_to_json(field, data, idl)?);
                        }
                        value = JsonValue::Array(values);
                    }
                }
            }

            // Output shape: { "<VariantName>": <payload> }.
            deserialized_fields.insert(variant.name.clone(), value);
        }
        IdlTypeDefTy::Type { alias } => {
            // Type alias: deserialize directly as the aliased type.
            return deserialize_idl_type_to_json(alias, data, idl);
        }
    }

    Ok(JsonValue::Object(deserialized_fields))
}
3047
// Deserializes a primitive type using AnchorDeserialize
//
// `data` is advanced past the bytes consumed. User-defined types recurse
// into `deserialize_idl_defined_type_to_json` via the `Defined` arm.
fn deserialize_idl_type_to_json(
    idl_type: &IdlType,
    data: &mut &[u8],
    parent_idl: &Idl,
) -> Result<JsonValue, anyhow::Error> {
    // Give a clearer error than the deserializer would when data has run out.
    if data.is_empty() {
        return Err(anyhow::anyhow!("Unable to parse from empty bytes"));
    }

    Ok(match idl_type {
        IdlType::Bool => json!(<bool as AnchorDeserialize>::deserialize(data)?),
        IdlType::U8 => {
            json!(<u8 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I8 => {
            json!(<i8 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::U16 => {
            json!(<u16 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I16 => {
            json!(<i16 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::U32 => {
            json!(<u32 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I32 => {
            json!(<i32 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::F32 => json!(<f32 as AnchorDeserialize>::deserialize(data)?),
        IdlType::U64 => {
            json!(<u64 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I64 => {
            json!(<i64 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::F64 => json!(<f64 as AnchorDeserialize>::deserialize(data)?),
        IdlType::U128 => {
            json!(<u128 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I128 => {
            json!(<i128 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::U256 => todo!("Upon completion of u256 IDL standard"),
        IdlType::I256 => todo!("Upon completion of i256 IDL standard"),
        // `bytes` is rendered as a JSON array of numbers.
        IdlType::Bytes => JsonValue::Array(
            <Vec<u8> as AnchorDeserialize>::deserialize(data)?
                .iter()
                .map(|i| json!(*i))
                .collect(),
        ),
        IdlType::String => json!(<String as AnchorDeserialize>::deserialize(data)?),
        // Pubkeys are rendered as their base58 string form.
        IdlType::Pubkey => {
            json!(<Pubkey as AnchorDeserialize>::deserialize(data)?.to_string())
        }
        IdlType::Array(ty, size) => match size {
            IdlArrayLen::Value(size) => {
                let mut array_data: Vec<JsonValue> = Vec::with_capacity(*size);

                for _ in 0..*size {
                    array_data.push(deserialize_idl_type_to_json(ty, data, parent_idl)?);
                }

                JsonValue::Array(array_data)
            }
            // TODO:
            IdlArrayLen::Generic(_) => unimplemented!("Generic array length is not yet supported"),
        },
        IdlType::Option(ty) => {
            // Leading byte is the presence flag; any nonzero value is Some.
            let is_present = <u8 as AnchorDeserialize>::deserialize(data)?;

            if is_present == 0 {
                // NOTE: `None` is rendered as the literal string "None".
                JsonValue::String("None".to_string())
            } else {
                deserialize_idl_type_to_json(ty, data, parent_idl)?
            }
        }
        IdlType::Vec(ty) => {
            // Vec length is a little-endian u32 prefix.
            let size: usize = <u32 as AnchorDeserialize>::deserialize(data)?
                .try_into()
                .unwrap();

            let mut vec_data: Vec<JsonValue> = Vec::with_capacity(size);

            for _ in 0..size {
                vec_data.push(deserialize_idl_type_to_json(ty, data, parent_idl)?);
            }

            JsonValue::Array(vec_data)
        }
        IdlType::Defined {
            name,
            generics: _generics,
        } => {
            // TODO: Generics
            deserialize_idl_defined_type_to_json(parent_idl, name, data)?
        }
        IdlType::Generic(generic) => json!(generic),
        _ => unimplemented!("{idl_type:?}"),
    })
}
3150
/// Destination for serialized IDL output.
enum OutFile {
    // Print to standard output.
    Stdout,
    // Write to the given file path.
    File(PathBuf),
}
3155
// Builds, deploys, and tests all workspace programs in a single command.
//
// High-level flow:
//   1. Build the workspace (unless `skip_build`).
//   2. Deploy when the cluster is remote, or when it is localnet but no
//      local validator will be booted (unless `skip_deploy`).
//   3. Run pre-test hooks, then the Anchor.toml `test` script (if any),
//      then every suite from the discovered test configuration, and
//      finally the post-test hooks.
#[allow(clippy::too_many_arguments)]
fn test(
    cfg_override: &ConfigOverride,
    program_name: Option<String>,
    skip_deploy: bool,
    skip_local_validator: bool,
    skip_build: bool,
    skip_lint: bool,
    no_idl: bool,
    detach: bool,
    tests_to_run: Vec<String>,
    validator_type: ValidatorType,
    extra_args: Vec<String>,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
) -> Result<()> {
    // Canonicalize every requested test path up front so a bad path fails
    // fast, before any build/deploy work happens.
    let test_paths = tests_to_run
        .iter()
        .map(|path| {
            PathBuf::from(path)
                .canonicalize()
                .map_err(|_| anyhow!("Wrong path {}", path))
        })
        .collect::<Result<Vec<_>, _>>()?;

    with_workspace(cfg_override, |cfg| -> Result<()> {
        // Set validator type based on CLI choice
        cfg.validator = Some(validator_type);

        // Build if needed.
        if !skip_build {
            build(
                cfg_override,
                no_idl,
                None,
                None,
                false,
                skip_lint,
                true,
                program_name.clone(),
                None,
                None,
                BootstrapMode::None,
                None,
                None,
                env_vars,
                cargo_args,
                false,
            )?;
        }

        let root = cfg.path().parent().unwrap().to_owned();
        cfg.add_test_config(root, test_paths)?;

        // Run the deploy against the cluster in two cases:
        //
        // 1. The cluster is not localnet.
        // 2. The cluster is localnet, but we're not booting a local validator.
        //
        // In either case, skip the deploy if the user specifies.
        let is_localnet = cfg.provider.cluster == Cluster::Localnet;
        if (!is_localnet || skip_local_validator) && !skip_deploy {
            deploy(cfg_override, None, None, false, true, vec![])?;
        }

        cfg.run_hooks(HookType::PreTest)?;

        let mut is_first_suite = true;
        if let Some(test_script) = cfg.scripts.get_mut("test") {
            is_first_suite = false;

            match program_name {
                Some(program_name) => {
                    // When a single program is requested, rewrite the last
                    // `tests/<file>.(js|ts)` occurrence in the script to point
                    // at that program's test file.
                    if let Some((from, to)) = Regex::new("\\s(tests/\\S+\\.(js|ts))")
                        .unwrap()
                        .captures_iter(&test_script.clone())
                        .last()
                        .and_then(|c| c.get(1).and_then(|mtch| c.get(2).map(|ext| (mtch, ext))))
                        .map(|(mtch, ext)| {
                            (
                                mtch.as_str(),
                                format!("tests/{program_name}.{}", ext.as_str()),
                            )
                        })
                    {
                        println!("\nRunning tests of program `{program_name}`!");
                        // Replace the last path to the program name's path
                        *test_script = test_script.replace(from, &to);
                    }
                }
                _ => println!(
                    "\nFound a 'test' script in the Anchor.toml. Running it as a test suite!"
                ),
            }

            run_test_suite(
                cfg,
                cfg.path(),
                is_localnet,
                skip_local_validator,
                skip_deploy,
                detach,
                validator_type,
                &cfg.test_validator,
                &cfg.scripts,
                &extra_args,
                &cfg.surfpool_config,
            )?;
        }
        if let Some(test_config) = &cfg.test_config {
            for test_suite in test_config.iter() {
                // Give the previous suite's validator time to shut down
                // before starting the next one.
                if !is_first_suite {
                    std::thread::sleep(std::time::Duration::from_millis(
                        test_suite
                            .1
                            .test
                            .as_ref()
                            .map(|val| val.shutdown_wait)
                            .unwrap_or(SHUTDOWN_WAIT) as u64,
                    ));
                } else {
                    is_first_suite = false;
                }

                run_test_suite(
                    cfg,
                    test_suite.0,
                    is_localnet,
                    skip_local_validator,
                    skip_deploy,
                    detach,
                    validator_type,
                    &test_suite.1.test,
                    &test_suite.1.scripts,
                    &extra_args,
                    &cfg.surfpool_config,
                )?;
            }
        }
        cfg.run_hooks(HookType::PostTest)?;
        Ok(())
    })?
}
3300
3301#[allow(clippy::too_many_arguments)]
3302fn run_test_suite(
3303    cfg: &WithPath<Config>,
3304    test_suite_path: impl AsRef<Path>,
3305    is_localnet: bool,
3306    skip_local_validator: bool,
3307    skip_deploy: bool,
3308    detach: bool,
3309    validator_type: ValidatorType,
3310    test_validator: &Option<TestValidator>,
3311    scripts: &ScriptsConfig,
3312    extra_args: &[String],
3313    surfpool_config: &Option<SurfpoolConfig>,
3314) -> Result<()> {
3315    println!("\nRunning test suite: {:#?}\n", test_suite_path.as_ref());
3316    let mut validator_handle = None;
3317    if is_localnet && !skip_local_validator {
3318        match validator_type {
3319            ValidatorType::Surfpool => {
3320                let full_simnet_mode = false;
3321                let flags = Some(surfpool_flags(
3322                    cfg,
3323                    surfpool_config,
3324                    full_simnet_mode,
3325                    skip_deploy,
3326                    Some(test_suite_path.as_ref()),
3327                )?);
3328                validator_handle = Some(start_surfpool_validator(
3329                    flags,
3330                    surfpool_config,
3331                    full_simnet_mode,
3332                )?);
3333            }
3334            ValidatorType::Legacy => {
3335                let flags = match skip_deploy {
3336                    true => None,
3337                    false => Some(validator_flags(cfg, test_validator)?),
3338                };
3339                validator_handle = Some(start_solana_test_validator(
3340                    cfg,
3341                    test_validator,
3342                    flags,
3343                    true,
3344                )?);
3345            }
3346        }
3347    }
3348    let url = cluster_url(cfg, test_validator, surfpool_config);
3349
3350    let node_options = format!(
3351        "{} {}",
3352        match std::env::var_os("NODE_OPTIONS") {
3353            Some(value) => value
3354                .into_string()
3355                .map_err(std::env::VarError::NotUnicode)?,
3356            None => "".to_owned(),
3357        },
3358        get_node_dns_option()?,
3359    );
3360
3361    // Setup log reader - kept alive until end of scope
3362    let log_streams = match stream_logs(cfg, &url) {
3363        Ok(streams) => Some(streams),
3364        Err(e) => {
3365            eprintln!("Warning: Failed to setup program log streaming: {:#}", e);
3366            eprintln!("Program logs will still be visible in the test output.");
3367            None
3368        }
3369    };
3370
3371    // Run the tests.
3372    let test_result = {
3373        let cmd = scripts
3374            .get("test")
3375            .expect("Not able to find script for `test`")
3376            .clone();
3377        let script_args = format!("{cmd} {}", extra_args.join(" "));
3378
3379        std::process::Command::new("bash")
3380            .arg("-c")
3381            .arg(script_args)
3382            .env("ANCHOR_PROVIDER_URL", url)
3383            .env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
3384            .env("NODE_OPTIONS", node_options)
3385            .stdout(Stdio::inherit())
3386            .stderr(Stdio::inherit())
3387            .output()
3388            .map_err(anyhow::Error::from)
3389            .context(cmd)
3390    };
3391
3392    // Keep validator running if needed.
3393    if test_result.is_ok() && detach {
3394        println!("Local validator still running. Press Ctrl + C quit.");
3395        std::io::stdin().lock().lines().next().unwrap().unwrap();
3396    }
3397
3398    // Check all errors and shut down.
3399    if let Some(mut child) = validator_handle {
3400        if let Err(err) = child.kill() {
3401            println!("Failed to kill subprocess {}: {}", child.id(), err);
3402        }
3403    }
3404
3405    // Explicitly shutdown log streams - closes WebSocket subscriptions
3406    if let Some(log_streams) = log_streams {
3407        for handle in log_streams {
3408            handle.shutdown();
3409        }
3410    }
3411
3412    // Must exist *after* shutting down the validator and log streams.
3413    match test_result {
3414        Ok(exit) => {
3415            if !exit.status.success() {
3416                std::process::exit(exit.status.code().unwrap());
3417            }
3418        }
3419        Err(err) => {
3420            println!("Failed to run test: {err:#}");
3421            return Err(err);
3422        }
3423    }
3424
3425    Ok(())
3426}
3427
/// Returns the solana-test-validator flags. This will embed the workspace
/// programs in the genesis block so we don't have to deploy every time. It also
/// allows control of other solana-test-validator features.
///
/// Flag sources, in order:
/// 1. Workspace programs (`--bpf-program` / `--upgradeable-program` per program),
///    also persisting each program's IDL (with address) to `target/idl/`.
/// 2. Extra programs from `[[test.genesis]]`.
/// 3. Every field of `[test.validator]`, mapped generically to `--key value`
///    flags, with special handling for `ledger`, `account`, `account_dir`,
///    `clone` and `deactivate_feature`.
fn validator_flags(
    cfg: &WithPath<Config>,
    test_validator: &Option<TestValidator>,
) -> Result<Vec<String>> {
    let programs = cfg.programs.get(&Cluster::Localnet);

    // [test.upgradeable]: when set, workspace programs are embedded with
    // `--upgradeable-program` (wallet as upgrade authority) instead of
    // `--bpf-program`.
    let test_upgradeable_program = test_validator
        .as_ref()
        .map(|test_validator| test_validator.upgradeable)
        .unwrap_or(false);

    let mut flags = Vec::new();
    for mut program in cfg.read_all_programs()? {
        let verifiable = false;
        let binary_path = program.binary_path(verifiable).display().to_string();
        // Use the [programs.cluster] override and fallback to the keypair
        // files if no override is given.
        let address = programs
            .and_then(|m| m.get(&program.lib_name))
            .map(|deployment| Ok(deployment.address.to_string()))
            .unwrap_or_else(|| program.pubkey().map(|p| p.to_string()))?;

        if test_upgradeable_program {
            flags.push("--upgradeable-program".to_string());
            flags.push(address.clone());
            flags.push(binary_path);
            flags.push(cfg.wallet_kp()?.pubkey().to_string());
        } else {
            flags.push("--bpf-program".to_string());
            flags.push(address.clone());
            flags.push(binary_path);
        }

        if let Some(idl) = program.idl.as_mut() {
            // Add program address to the IDL.
            idl.address = address;

            // Persist it.
            let idl_out = Path::new("target")
                .join("idl")
                .join(&idl.metadata.name)
                .with_extension("json");
            write_idl(idl, OutFile::File(idl_out))?;
        }
    }

    if let Some(test) = test_validator.as_ref() {
        // Extra programs declared in [[test.genesis]].
        if let Some(genesis) = &test.genesis {
            for entry in genesis {
                let program_path = Path::new(&entry.program);
                if !program_path.exists() {
                    return Err(anyhow!(
                        "Program in genesis configuration does not exist at path: {}",
                        program_path.display()
                    ));
                }
                if entry.upgradeable.unwrap_or(false) {
                    flags.push("--upgradeable-program".to_string());
                    flags.push(entry.address.clone());
                    flags.push(entry.program.clone());
                    flags.push(cfg.wallet_kp()?.pubkey().to_string());
                } else {
                    flags.push("--bpf-program".to_string());
                    flags.push(entry.address.clone());
                    flags.push(entry.program.clone());
                }
            }
        }
        if let Some(validator) = &test.validator {
            // Round-trip [test.validator] through serde_json so each field can
            // be mapped generically to a `--key value` CLI flag.
            // NOTE(review): the `as_object()`/`as_array()`/`as_str()` unwraps
            // below assume the serialized shape of the config struct; a
            // mismatched shape would panic here — confirm the config types
            // guarantee it.
            let entries = serde_json::to_value(validator)?;
            for (key, value) in entries.as_object().unwrap() {
                if key == "ledger" {
                    // Ledger flag is a special case as it is passed separately to the rest of
                    // these validator flags.
                    continue;
                };
                if key == "account" {
                    for entry in value.as_array().unwrap() {
                        // Push the account flag for each array entry
                        flags.push("--account".to_string());
                        flags.push(entry["address"].as_str().unwrap().to_string());
                        flags.push(entry["filename"].as_str().unwrap().to_string());
                    }
                } else if key == "account_dir" {
                    for entry in value.as_array().unwrap() {
                        flags.push("--account-dir".to_string());
                        flags.push(entry["directory"].as_str().unwrap().to_string());
                    }
                } else if key == "clone" {
                    // Client for fetching accounts data
                    let client = if let Some(url) = entries["url"].as_str() {
                        create_client(url)
                    } else {
                        return Err(anyhow!(
                            "Validator url for Solana's JSON RPC should be provided in order to \
                             clone accounts from it"
                        ));
                    };

                    // Deduplicate requested addresses via a HashSet, then fetch
                    // them all in a single batched RPC call.
                    let pubkeys = value
                        .as_array()
                        .unwrap()
                        .iter()
                        .map(|entry| {
                            let address = entry["address"].as_str().unwrap();
                            Pubkey::try_from(address)
                                .map_err(|_| anyhow!("Invalid pubkey {}", address))
                        })
                        .collect::<Result<HashSet<Pubkey>>>()?
                        .into_iter()
                        .collect::<Vec<_>>();
                    let accounts = client.get_multiple_accounts(&pubkeys)?;

                    for (pubkey, account) in pubkeys.into_iter().zip(accounts) {
                        match account {
                            Some(account) => {
                                // Use a different flag for program accounts to fix the problem
                                // described in https://github.com/anza-xyz/agave/issues/522
                                if account.owner == bpf_loader_upgradeable::id()
                                    // Only programs are supported with `--clone-upgradeable-program`
                                    && matches!(
                                        account.deserialize_data::<UpgradeableLoaderState>()?,
                                        UpgradeableLoaderState::Program { .. }
                                    )
                                {
                                    flags.push("--clone-upgradeable-program".to_string());
                                    flags.push(pubkey.to_string());
                                } else {
                                    flags.push("--clone".to_string());
                                    flags.push(pubkey.to_string());
                                }
                            }
                            _ => return Err(anyhow!("Account {} not found", pubkey)),
                        }
                    }
                } else if key == "deactivate_feature" {
                    // Verify that the feature flags are valid pubkeys
                    let pubkeys_result: Result<Vec<Pubkey>, _> = value
                        .as_array()
                        .unwrap()
                        .iter()
                        .map(|entry| {
                            let feature_flag = entry.as_str().unwrap();
                            Pubkey::try_from(feature_flag).map_err(|_| {
                                anyhow!("Invalid pubkey (feature flag) {}", feature_flag)
                            })
                        })
                        .collect();
                    let features = pubkeys_result?;
                    for feature in features {
                        flags.push("--deactivate-feature".to_string());
                        flags.push(feature.to_string());
                    }
                } else {
                    // Remaining validator flags are non-array types
                    flags.push(format!("--{}", key.replace('_', "-")));
                    if let serde_json::Value::String(v) = value {
                        // Push the raw string; `value.to_string()` would add quotes.
                        flags.push(v.to_string());
                    } else {
                        flags.push(value.to_string());
                    }
                }
            }
        }
    }

    Ok(flags)
}
3599
3600// Returns Surfpool flags.
3601// This flags will be passed to the Surfpool, it allows to configure the validator.
3602fn surfpool_flags(
3603    cfg: &WithPath<Config>,
3604    surfpool_config: &Option<SurfpoolConfig>,
3605    full_simnet_mode: bool,
3606    skip_deploy: bool,
3607    test_suite_path: Option<&Path>,
3608) -> Result<Vec<String>> {
3609    let programs = cfg.programs.get(&Cluster::Localnet);
3610    let mut flags = Vec::new();
3611
3612    for mut program in cfg.read_all_programs()? {
3613        let address = programs
3614            .and_then(|m| m.get(&program.lib_name))
3615            .map(|deployment| Ok(deployment.address.to_string()))
3616            .unwrap_or_else(|| program.pubkey().map(|p| p.to_string()))?;
3617        if let Some(idl) = program.idl.as_mut() {
3618            // Creating the idl files
3619            idl.address = address;
3620            let idl_out = Path::new("target")
3621                .join("idl")
3622                .join(&idl.metadata.name)
3623                .with_extension("json");
3624            write_idl(idl, OutFile::File(idl_out))?;
3625        }
3626    }
3627
3628    if let Some(config) = &surfpool_config {
3629        if let Some(airdrop_addresses) = &config.airdrop_addresses {
3630            for address in airdrop_addresses {
3631                flags.push("--airdrop".to_string());
3632                flags.push(address.to_string());
3633            }
3634        }
3635        if let Some(datasource_rpc_url) = &config.datasource_rpc_url {
3636            flags.push("--rpc-url".to_string());
3637            flags.push(datasource_rpc_url.to_string());
3638        }
3639
3640        let host = &config.host;
3641        flags.push("--host".to_string());
3642        flags.push(host.to_string());
3643
3644        let rpc_port = &config.rpc_port;
3645        flags.push("--port".to_string());
3646        flags.push(rpc_port.to_string());
3647
3648        if let Some(ws_port) = &config.ws_port {
3649            flags.push("--ws-port".to_string());
3650            flags.push(ws_port.to_string());
3651        }
3652
3653        if let Some(manifest_file_path) = &config.manifest_file_path {
3654            flags.push("--manifest-file-path".to_string());
3655            flags.push(manifest_file_path.to_string());
3656        }
3657
3658        if let Some(runbooks) = &config.runbooks {
3659            for runbook in runbooks {
3660                flags.push("--runbook".to_string());
3661                flags.push(runbook.to_string());
3662            }
3663        }
3664
3665        if let Some(slot_time) = &config.slot_time {
3666            flags.push("--slot-time".to_string());
3667            flags.push(slot_time.to_string());
3668        }
3669    }
3670
3671    let online = surfpool_config
3672        .as_ref()
3673        .and_then(|c| c.online)
3674        .unwrap_or(false);
3675    if !online {
3676        flags.push("--offline".to_string());
3677    }
3678
3679    let block_production_mode = surfpool_config
3680        .as_ref()
3681        .and_then(|c| c.block_production_mode.clone())
3682        .unwrap_or("transaction".into());
3683    flags.push("--block-production-mode".to_string());
3684    flags.push(block_production_mode);
3685
3686    flags.push("--log-level".to_string());
3687    flags.push(
3688        surfpool_config
3689            .as_ref()
3690            .and_then(|c| c.log_level.clone())
3691            .unwrap_or("none".into()),
3692    );
3693
3694    if !full_simnet_mode {
3695        flags.push("--no-tui".to_string());
3696        flags.push("--disable-instruction-profiling".to_string());
3697        flags.push("--max-profiles".to_string());
3698        flags.push("1".to_string());
3699        flags.push("--no-studio".to_string());
3700    }
3701
3702    match skip_deploy {
3703        true => flags.push("--no-deploy".to_string()),
3704        false => {
3705            // automatically generate in-memory runbooks
3706            flags.push("--legacy-anchor-compatibility".to_string());
3707            if let Some(test_suite_path) = test_suite_path {
3708                flags.push("--anchor-test-config-path".to_string());
3709                flags.push(test_suite_path.display().to_string());
3710            }
3711        }
3712    }
3713
3714    Ok(flags)
3715}
3716
/// Handle for a log streaming thread.
///
/// Manages a WebSocket subscription and its associated receiver thread.
/// Call `shutdown()` to cleanly stop the thread.
struct LogStreamHandle {
    // Active logs subscription; unsubscribing closes it, which in turn lets
    // the receiver thread spawned by `spawn_log_receiver_thread` exit.
    subscription: PubsubClientSubscription<RpcResponse<RpcLogsResponse>>,
}
3724
3725impl LogStreamHandle {
3726    /// Explicitly shutdown the log stream
3727    fn shutdown(self) {
3728        // Send unsubscribe in a background thread to avoid blocking
3729        // PubsubClientSubscription::send_unsubscribe() can block indefinitely if WebSocket is stuck
3730        // The receiver threads will exit when the subscription closes
3731        std::thread::spawn(move || {
3732            let _ = self.subscription.send_unsubscribe();
3733        });
3734    }
3735}
3736
3737/// Spawns a thread to receive logs from a subscription and write them to a file
3738fn spawn_log_receiver_thread<R>(receiver: R, log_file_path: PathBuf)
3739where
3740    R: IntoIterator<Item = RpcResponse<RpcLogsResponse>> + Send + 'static,
3741{
3742    std::thread::spawn(move || {
3743        if let Ok(mut file) = File::create(&log_file_path) {
3744            for response in receiver {
3745                let _ = writeln!(
3746                    file,
3747                    "Transaction executed in slot {}:",
3748                    response.context.slot
3749                );
3750                let _ = writeln!(file, "  Signature: {}", response.value.signature);
3751                let _ = writeln!(
3752                    file,
3753                    "  Status: {}",
3754                    response
3755                        .value
3756                        .err
3757                        .map(|err| err.to_string())
3758                        .unwrap_or_else(|| "Ok".to_string())
3759                );
3760                let _ = writeln!(file, "  Log Messages:");
3761                for log in response.value.logs {
3762                    let _ = writeln!(file, "    {}", log);
3763                }
3764                let _ = writeln!(file); // Empty line between transactions
3765                let _ = file.flush();
3766            }
3767        } else {
3768            eprintln!("Failed to create log file: {:?}", log_file_path);
3769        }
3770    });
3771}
3772
3773fn stream_logs(config: &WithPath<Config>, rpc_url: &str) -> Result<Vec<LogStreamHandle>> {
3774    // Determine validator type to use appropriate logging
3775    match &config.validator {
3776        Some(ValidatorType::Surfpool) => {
3777            // For Surfpool, we don't need to stream logs via external commands
3778            // Surfpool handles its own logging to .surfpool/logs/ directory
3779            if config
3780                .surfpool_config
3781                .as_ref()
3782                .and_then(|s| {
3783                    s.log_level
3784                        .as_ref()
3785                        .map(|l| l.to_ascii_lowercase().ne("none"))
3786                })
3787                .unwrap_or(false)
3788            {
3789                println!("Surfpool validator logs: .surfpool/logs/ directory");
3790            }
3791            Ok(vec![])
3792        }
3793        Some(ValidatorType::Legacy) | None => stream_solana_logs(config, rpc_url),
3794    }
3795}
3796
3797fn stream_solana_logs(config: &WithPath<Config>, rpc_url: &str) -> Result<Vec<LogStreamHandle>> {
3798    let program_logs_dir = Path::new(".anchor").join("program-logs");
3799    if program_logs_dir.exists() {
3800        fs::remove_dir_all(&program_logs_dir)?;
3801    }
3802    fs::create_dir_all(&program_logs_dir)?;
3803
3804    // For solana-test-validator, the WebSocket port is RPC port + WEBSOCKET_PORT_OFFSET
3805    // Extract port from rpc_url and construct WebSocket URL
3806    let ws_url = if rpc_url.contains("127.0.0.1") || rpc_url.contains("localhost") {
3807        // Local validator: increment port by 1 for WebSocket
3808        let rpc_port = rpc_url
3809            .rsplit_once(':')
3810            .and_then(|(_, port)| port.parse::<u16>().ok())
3811            .unwrap_or(DEFAULT_RPC_PORT);
3812
3813        let ws_port = rpc_port + WEBSOCKET_PORT_OFFSET;
3814        let url = format!("ws://127.0.0.1:{}", ws_port);
3815        url
3816    } else {
3817        // Remote cluster: use same URL but replace http(s) with ws(s)
3818        rpc_url
3819            .replace("https://", "wss://")
3820            .replace("http://", "ws://")
3821    };
3822
3823    // Give the WebSocket endpoint a moment to be ready (especially for local validators)
3824    std::thread::sleep(std::time::Duration::from_millis(1500));
3825
3826    let mut handles = vec![];
3827
3828    // Subscribe to logs for all workspace programs
3829    for program in config.read_all_programs()? {
3830        let idl_path = Path::new("target")
3831            .join("idl")
3832            .join(&program.lib_name)
3833            .with_extension("json");
3834        let idl = fs::read(&idl_path)?;
3835        let idl = convert_idl(&idl)?;
3836
3837        let log_file_path =
3838            program_logs_dir.join(format!("{}.{}.log", idl.address, program.lib_name));
3839        let program_address = idl.address.clone();
3840
3841        // Subscribe to logs using PubsubClient
3842        let (client, receiver) = match PubsubClient::logs_subscribe(
3843            &ws_url,
3844            RpcTransactionLogsFilter::Mentions(vec![program_address.clone()]),
3845            RpcTransactionLogsConfig {
3846                commitment: Some(CommitmentConfig::confirmed()),
3847            },
3848        ) {
3849            Ok(result) => result,
3850            Err(e) => {
3851                eprintln!(
3852                    "Warning: Failed to subscribe to logs for program {}: {}",
3853                    program.lib_name, e
3854                );
3855                continue;
3856            }
3857        };
3858
3859        // Spawn thread to write logs to file
3860        spawn_log_receiver_thread(receiver, log_file_path);
3861
3862        handles.push(LogStreamHandle {
3863            subscription: client,
3864        });
3865    }
3866
3867    // Also subscribe to logs for genesis programs
3868    if let Some(test) = config.test_validator.as_ref() {
3869        if let Some(genesis) = &test.genesis {
3870            for entry in genesis {
3871                let log_file_path = program_logs_dir.join(&entry.address).with_extension("log");
3872                let address = entry.address.clone();
3873
3874                // Subscribe to logs using PubsubClient
3875                let (client, receiver) = match PubsubClient::logs_subscribe(
3876                    &ws_url,
3877                    RpcTransactionLogsFilter::Mentions(vec![address.clone()]),
3878                    RpcTransactionLogsConfig {
3879                        commitment: Some(CommitmentConfig::confirmed()),
3880                    },
3881                ) {
3882                    Ok(result) => result,
3883                    Err(e) => {
3884                        eprintln!(
3885                            "Warning: Failed to subscribe to logs for genesis program {}: {}",
3886                            &entry.address, e
3887                        );
3888                        continue;
3889                    }
3890                };
3891
3892                // Spawn thread to write logs to file
3893                spawn_log_receiver_thread(receiver, log_file_path);
3894
3895                handles.push(LogStreamHandle {
3896                    subscription: client,
3897                });
3898            }
3899        }
3900    }
3901
3902    Ok(handles)
3903}
3904
/// Spawns `surfpool start` and waits until it is ready.
///
/// Readiness is two-phased: first poll `get_latest_blockhash` until the RPC
/// endpoint answers (bounded by `[surfpool.startup_wait]` milliseconds,
/// defaulting to `STARTUP_WAIT`), then poll the custom
/// `surfnet_getSurfnetInfo` RPC until every runbook execution reports a
/// completion timestamp.
///
/// In `full_simnet_mode` the child inherits stdout/stderr (interactive use);
/// otherwise its output is discarded. Exits the whole process (status 1) if
/// the RPC endpoint never comes up within the startup budget.
fn start_surfpool_validator(
    flags: Option<Vec<String>>,
    surfpool_config: &Option<SurfpoolConfig>,
    full_simnet_mode: bool,
) -> Result<Child> {
    let rpc_url = surfpool_rpc_url(surfpool_config);

    let (test_validator_stdout, test_validator_stderr) = match full_simnet_mode {
        true => (Stdio::inherit(), Stdio::inherit()),
        false => (Stdio::null(), Stdio::null()),
    };

    let mut validator_handle = std::process::Command::new("surfpool")
        .arg("start")
        .args(flags.unwrap_or_default())
        .stdout(test_validator_stdout)
        .stderr(test_validator_stderr)
        .spawn()
        .map_err(|e| anyhow!("Failed to spawn `surfpool`: {e}"))?;

    let client = create_client(rpc_url.clone());

    // Poll every 100ms until the RPC endpoint responds or the startup budget
    // (in milliseconds) is exhausted.
    let mut count = 0;

    let ms_wait = surfpool_config
        .as_ref()
        .map(|surfpool| surfpool.startup_wait)
        .unwrap_or(STARTUP_WAIT);

    while count < ms_wait {
        let r = client.get_latest_blockhash();
        if r.is_ok() {
            break;
        }
        std::thread::sleep(std::time::Duration::from_millis(100));
        count += 100;
    }

    if count >= ms_wait {
        eprintln!(
            "Unable to get latest blockhash. Surfpool validator does not look started. Check \
             .surfpool/logs/ directory for errors. Consider increasing [surfpool.startup_wait] in \
             Anchor.toml."
        );
        validator_handle.kill()?;
        std::process::exit(1);
    }

    // Wait for all runbooks to finish executing before handing back the child.
    // NOTE(review): this loop has no timeout, and an RPC error here (`?`)
    // returns without killing the spawned child — confirm both are intended.
    loop {
        let resp = client
            .send::<RpcResponse<SurfnetInfoResponse>>(
                RpcRequest::Custom {
                    method: "surfnet_getSurfnetInfo",
                },
                serde_json::Value::Null,
            )?
            .value;

        // break out if all runbooks are completed
        if resp
            .runbook_executions
            .iter()
            .all(|ex| ex.completed_at.is_some())
        {
            break;
        }
        std::thread::sleep(std::time::Duration::from_millis(500));
    }
    Ok(validator_handle)
}
3975
/// Spawns `solana-test-validator` with the given extra flags and waits until
/// its RPC endpoint responds.
///
/// The ledger directory is (re)created via `test_validator_file_paths`. When
/// `test_log_stdout` is true, the validator's stdout/stderr are redirected to
/// the ledger log file; otherwise they are inherited from this process.
///
/// Fails early if the configured RPC or faucet port is already in use. Exits
/// the whole process (status 1) if the validator does not come up within
/// `[test.startup_wait]` milliseconds (default `STARTUP_WAIT`).
fn start_solana_test_validator(
    cfg: &Config,
    test_validator: &Option<TestValidator>,
    flags: Option<Vec<String>>,
    test_log_stdout: bool,
) -> Result<Child> {
    let (test_ledger_directory, test_ledger_log_filename) =
        test_validator_file_paths(test_validator)?;

    // Start a validator for testing.
    let (test_validator_stdout, test_validator_stderr) = match test_log_stdout {
        true => {
            let test_validator_stdout_file =
                File::create(&test_ledger_log_filename).with_context(|| {
                    format!(
                        "Failed to create validator log file {}",
                        test_ledger_log_filename.display()
                    )
                })?;
            // Both streams share the same underlying file handle.
            let test_validator_sterr_file = test_validator_stdout_file.try_clone()?;
            (
                Stdio::from(test_validator_stdout_file),
                Stdio::from(test_validator_sterr_file),
            )
        }
        false => (Stdio::inherit(), Stdio::inherit()),
    };

    let rpc_url = test_validator_rpc_url(test_validator);

    // Verify both ports are free before spawning so the failure mode is a
    // clear error instead of a validator that silently fails to bind.
    let rpc_port = cfg
        .test_validator
        .as_ref()
        .and_then(|test| test.validator.as_ref().map(|v| v.rpc_port))
        .unwrap_or(DEFAULT_RPC_PORT);
    if !portpicker::is_free(rpc_port) {
        return Err(anyhow!(
            "Your configured rpc port: {rpc_port} is already in use"
        ));
    }
    let faucet_port = cfg
        .test_validator
        .as_ref()
        .and_then(|test| test.validator.as_ref().and_then(|v| v.faucet_port))
        .unwrap_or(solana_faucet::faucet::FAUCET_PORT);
    if !portpicker::is_free(faucet_port) {
        return Err(anyhow!(
            "Your configured faucet port: {faucet_port} is already in use"
        ));
    }

    let mut validator_handle = std::process::Command::new("solana-test-validator")
        .arg("--ledger")
        .arg(test_ledger_directory)
        .arg("--mint")
        .arg(cfg.wallet_kp()?.pubkey().to_string())
        .args(flags.unwrap_or_default())
        .stdout(test_validator_stdout)
        .stderr(test_validator_stderr)
        .spawn()
        .map_err(|e| anyhow!("Failed to spawn `solana-test-validator`: {e}"))?;

    // Wait for the validator to be ready: poll every 100ms until the RPC
    // endpoint answers or the startup budget (milliseconds) runs out.
    let client = create_client(rpc_url);
    let mut count = 0;
    let ms_wait = test_validator
        .as_ref()
        .map(|test| test.startup_wait)
        .unwrap_or(STARTUP_WAIT);
    while count < ms_wait {
        let r = client.get_latest_blockhash();
        if r.is_ok() {
            break;
        }
        std::thread::sleep(std::time::Duration::from_millis(100));
        count += 100;
    }
    if count >= ms_wait {
        eprintln!(
            "Unable to get latest blockhash. Test validator does not look started. Check \
             {test_ledger_log_filename:?} for errors. Consider increasing [test.startup_wait] in \
             Anchor.toml."
        );
        validator_handle.kill()?;
        std::process::exit(1);
    }
    Ok(validator_handle)
}
4064
4065// Return the URL that solana-test-validator should be running on given the
4066// configuration
4067fn test_validator_rpc_url(test_validator: &Option<TestValidator>) -> String {
4068    match test_validator {
4069        Some(TestValidator {
4070            validator: Some(validator),
4071            ..
4072        }) => format!("http://{}:{}", validator.bind_address, validator.rpc_port),
4073        _ => "http://127.0.0.1:8899".to_string(),
4074    }
4075}
4076
4077// Returns the URL that surfpool should be running for the given configuration
4078fn surfpool_rpc_url(surfpool_config: &Option<SurfpoolConfig>) -> String {
4079    match surfpool_config {
4080        Some(SurfpoolConfig { host, rpc_port, .. }) => format!("http://{}:{}", host, rpc_port),
4081        _ => format!("http://{}:{}", SURFPOOL_HOST, DEFAULT_RPC_PORT),
4082    }
4083}
4084
4085// Setup and return paths to the solana-test-validator ledger directory and log
4086// files given the configuration
4087fn test_validator_file_paths(test_validator: &Option<TestValidator>) -> Result<(PathBuf, PathBuf)> {
4088    let ledger_path = match test_validator {
4089        Some(TestValidator {
4090            validator: Some(validator),
4091            ..
4092        }) => PathBuf::from(&validator.ledger),
4093        _ => get_default_ledger_path(),
4094    };
4095
4096    if !ledger_path.is_relative() {
4097        // Prevent absolute paths to avoid someone using / or similar, as the
4098        // directory gets removed
4099        eprintln!("Ledger directory {ledger_path:?} must be relative");
4100        std::process::exit(1);
4101    }
4102    if ledger_path.exists() {
4103        fs::remove_dir_all(&ledger_path).with_context(|| {
4104            format!(
4105                "Failed to remove ledger directory {}",
4106                ledger_path.display()
4107            )
4108        })?;
4109    }
4110
4111    fs::create_dir_all(&ledger_path).with_context(|| {
4112        format!(
4113            "Failed to create ledger directory {}",
4114            ledger_path.display()
4115        )
4116    })?;
4117
4118    let log_path = ledger_path.join("test-ledger-log.txt");
4119    Ok((ledger_path, log_path))
4120}
4121
4122fn cluster_url(
4123    cfg: &Config,
4124    test_validator: &Option<TestValidator>,
4125    surfpool_config: &Option<SurfpoolConfig>,
4126) -> String {
4127    let is_localnet = cfg.provider.cluster == Cluster::Localnet;
4128    match is_localnet {
4129        // Cluster is Localnet, determine which validator to use
4130        true => match &cfg.validator {
4131            Some(ValidatorType::Surfpool) => surfpool_rpc_url(surfpool_config),
4132            Some(ValidatorType::Legacy) | None => test_validator_rpc_url(test_validator),
4133        },
4134        false => cfg.provider.cluster.url().to_string(),
4135    }
4136}
4137
4138fn clean(cfg_override: &ConfigOverride) -> Result<()> {
4139    // Get workspace root - either from Anchor.toml or use current directory
4140    let workspace_root = if let Ok(Some(cfg)) = Config::discover(cfg_override) {
4141        cfg.path()
4142            .parent()
4143            .expect("Invalid Anchor.toml")
4144            .to_path_buf()
4145    } else {
4146        // No Anchor.toml - use current directory for Cargo workspace
4147        std::env::current_dir()?
4148    };
4149
4150    let dot_anchor_dir = workspace_root.join(".anchor");
4151    let target_dir = workspace_root.join("target");
4152    let deploy_dir = target_dir.join("deploy");
4153
4154    if dot_anchor_dir.exists() {
4155        fs::remove_dir_all(&dot_anchor_dir)
4156            .map_err(|e| anyhow!("Could not remove directory {:?}: {}", dot_anchor_dir, e))?;
4157    }
4158
4159    if target_dir.exists() {
4160        for entry in fs::read_dir(target_dir)? {
4161            let path = entry?.path();
4162            if path.is_dir() && path != deploy_dir {
4163                fs::remove_dir_all(&path)
4164                    .map_err(|e| anyhow!("Could not remove directory {}: {}", path.display(), e))?;
4165            } else if path.is_file() {
4166                fs::remove_file(&path)
4167                    .map_err(|e| anyhow!("Could not remove file {}: {}", path.display(), e))?;
4168            }
4169        }
4170    } else {
4171        println!("skipping target directory: not found")
4172    }
4173
4174    if deploy_dir.exists() {
4175        for file in fs::read_dir(deploy_dir)? {
4176            let path = file?.path();
4177            if path.extension() != Some(&OsString::from("json")) {
4178                fs::remove_file(&path)
4179                    .map_err(|e| anyhow!("Could not remove file {}: {}", path.display(), e))?;
4180            }
4181        }
4182    } else {
4183        println!("skipping deploy directory: not found")
4184    }
4185
4186    Ok(())
4187}
4188
/// Deploys the requested workspace program(s) to the configured cluster.
///
/// Runs inside the workspace via `with_workspace`, firing the `PreDeploy` and
/// `PostDeploy` hooks around the deployment. Each program is deployed through
/// the native `program::program_deploy` implementation, using the configured
/// wallet as upgrade authority.
///
/// - `program_name`: when given, restricts deployment to that program (see
///   `cfg.get_programs`).
/// - `program_keypair`: optional keypair file path override, applied to every
///   deployed program.
/// - `verifiable`: selects the verifiable build's binary path.
/// - `no_idl`: forwarded to `program_deploy` to skip IDL handling.
/// - `solana_args`: extra args, augmented by
///   `add_recommended_deployment_solana_args` before use.
fn deploy(
    cfg_override: &ConfigOverride,
    program_name: Option<String>,
    program_keypair: Option<String>,
    verifiable: bool,
    no_idl: bool,
    solana_args: Vec<String>,
) -> Result<()> {
    // Execute the code within the workspace
    with_workspace(cfg_override, |cfg| -> Result<()> {
        let url = cluster_url(cfg, &cfg.test_validator, &cfg.surfpool_config);
        let keypair = cfg.provider.wallet.to_string();

        // Augment the given solana args with recommended defaults.
        let client = create_client(&url);
        let solana_args = add_recommended_deployment_solana_args(&client, solana_args)?;

        cfg.run_hooks(HookType::PreDeploy)?;
        // Deploy the programs.
        println!("Deploying cluster: {url}");
        println!("Upgrade authority: {keypair}");

        for program in cfg.get_programs(program_name)? {
            let binary_path = program.binary_path(verifiable).display().to_string();

            println!("Deploying program {:?}...", program.lib_name);
            println!("Program path: {binary_path}...");

            // The explicit keypair override takes precedence over the
            // program's own keypair file.
            let program_keypair_filepath = match &program_keypair {
                Some(path) => path.clone(),
                None => program.keypair_file()?.path().display().to_string(),
            };

            // Deploy using our native implementation
            program::program_deploy(
                cfg_override,
                Some(strip_workspace_prefix(binary_path)),
                None, // program_name - not needed since we have filepath
                Some(strip_workspace_prefix(program_keypair_filepath)),
                None, // upgrade_authority - uses wallet from config
                None, // program_id - derived from program_keypair
                None, // buffer
                None, // max_len
                no_idl,
                false, // make_final
                solana_args.clone(),
            )?;
        }

        println!("Deploy success");
        cfg.run_hooks(HookType::PostDeploy)?;

        Ok(())
    })?
}
4244
/// Upgrades an already-deployed program in place.
///
/// Thin wrapper that forwards to the native `program::program_upgrade`
/// implementation, passing the program binary by file path and `max_retries`
/// through unchanged; the upgrade authority comes from the configured wallet.
fn upgrade(
    cfg_override: &ConfigOverride,
    program_id: Pubkey,
    program_filepath: String,
    max_retries: u32,
    solana_args: Vec<String>,
) -> Result<()> {
    // Use our native upgrade implementation
    program::program_upgrade(
        cfg_override,
        program_id,
        Some(program_filepath),
        None, // program_name - not needed since we have filepath
        None, // buffer
        None, // upgrade_authority - uses wallet from config
        max_retries,
        solana_args,
    )
}
4264
4265fn migrate(cfg_override: &ConfigOverride) -> Result<()> {
4266    with_workspace(cfg_override, |cfg| -> Result<()> {
4267        println!("Running migration deploy script");
4268
4269        let url = cluster_url(cfg, &cfg.test_validator, &cfg.surfpool_config);
4270        let cur_dir = std::env::current_dir()?;
4271        let migrations_dir = cur_dir.join("migrations");
4272        let deploy_ts = Path::new("deploy.ts");
4273
4274        let use_ts = Path::new("tsconfig.json").exists() && migrations_dir.join(deploy_ts).exists();
4275
4276        if !Path::new(".anchor").exists() {
4277            fs::create_dir(".anchor")?;
4278        }
4279        std::env::set_current_dir(".anchor")?;
4280
4281        let exit = if use_ts {
4282            let module_path = migrations_dir.join(deploy_ts);
4283            let deploy_script_host_str =
4284                rust_template::deploy_ts_script_host(&url, &module_path.display().to_string());
4285            fs::write(deploy_ts, deploy_script_host_str)?;
4286
4287            let pkg_manager_cmd = match &cfg.toolchain.package_manager {
4288                Some(pkg_manager) => pkg_manager.to_string(),
4289                None => PackageManager::default().to_string(),
4290            };
4291
4292            std::process::Command::new(pkg_manager_cmd)
4293                .args([
4294                    "run",
4295                    "ts-node",
4296                    &fs::canonicalize(deploy_ts)?.to_string_lossy(),
4297                ])
4298                .env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
4299                .stdout(Stdio::inherit())
4300                .stderr(Stdio::inherit())
4301                .output()?
4302        } else {
4303            let deploy_js = deploy_ts.with_extension("js");
4304            let module_path = migrations_dir.join(&deploy_js);
4305            let deploy_script_host_str =
4306                rust_template::deploy_js_script_host(&url, &module_path.display().to_string());
4307            fs::write(&deploy_js, deploy_script_host_str)?;
4308
4309            std::process::Command::new("node")
4310                .arg(&deploy_js)
4311                .env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
4312                .stdout(Stdio::inherit())
4313                .stderr(Stdio::inherit())
4314                .output()?
4315        };
4316
4317        if !exit.status.success() {
4318            eprintln!("Deploy failed.");
4319            std::process::exit(exit.status.code().unwrap());
4320        }
4321
4322        println!("Deploy complete.");
4323        Ok(())
4324    })?
4325}
4326
4327fn set_workspace_dir_or_exit() {
4328    // First try to find Anchor workspace
4329    let d = match Config::discover(&ConfigOverride::default()) {
4330        Err(err) => {
4331            println!("Workspace configuration error: {err}");
4332            std::process::exit(1);
4333        }
4334        Ok(d) => d,
4335    };
4336
4337    match d {
4338        None => {
4339            // No Anchor.toml found - check for Cargo workspace with Solana programs
4340            let current_dir = match std::env::current_dir() {
4341                Ok(dir) => dir,
4342                Err(_) => {
4343                    println!("Unable to determine current directory");
4344                    std::process::exit(1);
4345                }
4346            };
4347
4348            let cargo_toml_path = current_dir.join("Cargo.toml");
4349            if !cargo_toml_path.exists() {
4350                println!(
4351                    "Not in a Solana workspace. This command requires either Anchor.toml or a \
4352                     Cargo workspace with Solana programs."
4353                );
4354                std::process::exit(1);
4355            }
4356
4357            // Check if this is a workspace and has Solana programs
4358            match program::discover_solana_programs(None) {
4359                Ok(programs) if !programs.is_empty() => {
4360                    // Found Solana programs in Cargo workspace - stay in current directory
4361                    // (already in the right place)
4362                }
4363                _ => {
4364                    println!(
4365                        "Not in a Solana workspace. This command requires either Anchor.toml or a \
4366                         Cargo workspace with Solana programs."
4367                    );
4368                    std::process::exit(1);
4369                }
4370            }
4371        }
4372        Some(cfg) => {
4373            // Found Anchor.toml - change to workspace root
4374            match cfg.path().parent() {
4375                None => {
4376                    println!("Unable to make new program");
4377                }
4378                Some(parent) => {
4379                    if std::env::set_current_dir(parent).is_err() {
4380                        println!(
4381                            "Not in a Solana workspace. This command requires either Anchor.toml \
4382                             or a Cargo workspace with Solana programs."
4383                        );
4384                        std::process::exit(1);
4385                    }
4386                }
4387            };
4388        }
4389    }
4390}
4391
4392fn airdrop(cfg_override: &ConfigOverride, amount: f64, pubkey: Option<Pubkey>) -> Result<()> {
4393    // Get cluster URL and wallet path
4394    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
4395
4396    // Create RPC client
4397    let client = RpcClient::new(cluster_url);
4398
4399    // Determine recipient
4400    let recipient_pubkey = if let Some(pubkey) = pubkey {
4401        pubkey
4402    } else {
4403        // Load keypair from wallet path and get pubkey
4404        let keypair = Keypair::read_from_file(&wallet_path)
4405            .map_err(|e| anyhow!("Failed to read keypair from {}: {}", wallet_path, e))?;
4406        keypair.pubkey()
4407    };
4408
4409    // Convert SOL to lamports
4410    let lamports = (amount * 1_000_000_000.0) as u64;
4411
4412    // Request airdrop
4413    println!("Requesting airdrop of {} SOL...", amount);
4414    let signature = client
4415        .request_airdrop(&recipient_pubkey, lamports)
4416        .map_err(|e| anyhow!("Airdrop request failed: {}", e))?;
4417
4418    println!("Signature: {}", signature);
4419    println!("Waiting for confirmation...");
4420
4421    // Wait for confirmation
4422    client
4423        .confirm_transaction(&signature)
4424        .map_err(|e| anyhow!("Transaction confirmation failed: {}", e))?;
4425
4426    // Get and display the new balance
4427    let balance = client.get_balance(&recipient_pubkey)?;
4428    println!("{}", format_sol(balance));
4429
4430    Ok(())
4431}
4432
4433fn cluster(_cmd: ClusterCommand) -> Result<()> {
4434    println!("Cluster Endpoints:\n");
4435    println!("* Mainnet - https://api.mainnet-beta.solana.com");
4436    println!("* Devnet  - https://api.devnet.solana.com");
4437    println!("* Testnet - https://api.testnet.solana.com");
4438    Ok(())
4439}
4440
/// Dispatch `anchor config` subcommands to their implementations.
fn config_cmd(cfg_override: &ConfigOverride, cmd: ConfigCommand) -> Result<()> {
    match cmd {
        ConfigCommand::Get => config_get(cfg_override),
        ConfigCommand::Set { url, keypair } => config_set(cfg_override, url, keypair),
    }
}
4447
4448fn config_get(cfg_override: &ConfigOverride) -> Result<()> {
4449    with_workspace(cfg_override, |cfg| -> Result<()> {
4450        println!("Anchor Configuration:");
4451        println!();
4452        println!("Cluster: {}", cfg.provider.cluster.url());
4453        println!("Wallet:  {}", cfg.provider.wallet);
4454        Ok(())
4455    })?
4456}
4457
4458fn config_set(
4459    cfg_override: &ConfigOverride,
4460    url: Option<String>,
4461    keypair: Option<String>,
4462) -> Result<()> {
4463    // Find the Anchor.toml file
4464    let anchor_toml_path = match Config::discover(cfg_override)? {
4465        Some(cfg) => cfg.path().parent().unwrap().join("Anchor.toml"),
4466        None => bail!("Not in an Anchor workspace"),
4467    };
4468
4469    // Read the current Anchor.toml
4470    let mut toml_content =
4471        fs::read_to_string(&anchor_toml_path).context("Failed to read Anchor.toml")?;
4472    let mut toml_doc: toml::Value =
4473        toml::from_str(&toml_content).context("Failed to parse Anchor.toml")?;
4474
4475    let mut updated = false;
4476
4477    // Update cluster URL if provided
4478    if let Some(cluster_url) = url {
4479        let expanded_url = match cluster_url.as_str() {
4480            "m" => "https://api.mainnet-beta.solana.com".to_string(),
4481            "d" => "https://api.devnet.solana.com".to_string(),
4482            "t" => "https://api.testnet.solana.com".to_string(),
4483            "l" => "http://127.0.0.1:8899".to_string(),
4484            _ => cluster_url,
4485        };
4486
4487        if let Some(provider) = toml_doc.get_mut("provider").and_then(|v| v.as_table_mut()) {
4488            provider.insert(
4489                "cluster".to_string(),
4490                toml::Value::String(expanded_url.clone()),
4491            );
4492            println!("Updated cluster to: {}", expanded_url);
4493            updated = true;
4494        }
4495    }
4496
4497    // Update wallet path if provided
4498    if let Some(keypair_path) = keypair {
4499        let expanded_path = shellexpand::tilde(&keypair_path).to_string();
4500
4501        // Check if the wallet file exists
4502        if !Path::new(&expanded_path).exists() {
4503            eprintln!("Warning: Wallet file does not exist: {}", expanded_path);
4504        }
4505
4506        if let Some(provider) = toml_doc.get_mut("provider").and_then(|v| v.as_table_mut()) {
4507            provider.insert(
4508                "wallet".to_string(),
4509                toml::Value::String(expanded_path.clone()),
4510            );
4511            println!("Updated wallet to: {}", expanded_path);
4512            updated = true;
4513        }
4514    }
4515
4516    if updated {
4517        // Write the updated config back to Anchor.toml
4518        toml_content =
4519            toml::to_string_pretty(&toml_doc).context("Failed to serialize Anchor.toml")?;
4520        fs::write(&anchor_toml_path, toml_content).context("Failed to write Anchor.toml")?;
4521        println!("\nConfiguration updated successfully!");
4522    } else {
4523        println!("No changes made. Use --url or --keypair to update settings.");
4524    }
4525
4526    Ok(())
4527}
4528
4529fn shell(cfg_override: &ConfigOverride) -> Result<()> {
4530    with_workspace(cfg_override, |cfg| -> Result<()> {
4531        let programs = {
4532            // Create idl map from all workspace programs.
4533            let mut idls: HashMap<String, Idl> = cfg
4534                .read_all_programs()?
4535                .iter()
4536                .filter(|program| program.idl.is_some())
4537                .map(|program| {
4538                    (
4539                        program.idl.as_ref().unwrap().metadata.name.clone(),
4540                        program.idl.clone().unwrap(),
4541                    )
4542                })
4543                .collect();
4544            // Insert all manually specified idls into the idl map.
4545            if let Some(programs) = cfg.programs.get(&cfg.provider.cluster) {
4546                let _ = programs
4547                    .iter()
4548                    .map(|(name, pd)| {
4549                        if let Some(idl_fp) = &pd.idl {
4550                            let file_str =
4551                                fs::read_to_string(idl_fp).expect("Unable to read IDL file");
4552                            let idl = serde_json::from_str(&file_str).expect("Idl not readable");
4553                            idls.insert(name.clone(), idl);
4554                        }
4555                    })
4556                    .collect::<Vec<_>>();
4557            }
4558
4559            // Finalize program list with all programs with IDLs.
4560            match cfg.programs.get(&cfg.provider.cluster) {
4561                None => Vec::new(),
4562                Some(programs) => programs
4563                    .iter()
4564                    .filter_map(|(name, program_deployment)| {
4565                        Some(ProgramWorkspace {
4566                            name: name.to_string(),
4567                            program_id: program_deployment.address,
4568                            idl: match idls.get(name) {
4569                                None => return None,
4570                                Some(idl) => idl.clone(),
4571                            },
4572                        })
4573                    })
4574                    .collect::<Vec<ProgramWorkspace>>(),
4575            }
4576        };
4577        let url = cluster_url(cfg, &cfg.test_validator, &cfg.surfpool_config);
4578        let js_code = rust_template::node_shell(&url, &cfg.provider.wallet.to_string(), programs)?;
4579        let mut child = std::process::Command::new("node")
4580            .args(["-e", &js_code, "-i", "--experimental-repl-await"])
4581            .stdout(Stdio::inherit())
4582            .stderr(Stdio::inherit())
4583            .spawn()
4584            .map_err(|e| anyhow::format_err!("{}", e))?;
4585
4586        if !child.wait()?.success() {
4587            println!("Error running node shell");
4588            return Ok(());
4589        }
4590        Ok(())
4591    })?
4592}
4593
4594fn run(cfg_override: &ConfigOverride, script: String, script_args: Vec<String>) -> Result<()> {
4595    with_workspace(cfg_override, |cfg| -> Result<()> {
4596        let url = cluster_url(cfg, &cfg.test_validator, &cfg.surfpool_config);
4597        let script = cfg
4598            .scripts
4599            .get(&script)
4600            .ok_or_else(|| anyhow!("Unable to find script"))?;
4601        let script_with_args = format!("{script} {}", script_args.join(" "));
4602        let exit = std::process::Command::new("bash")
4603            .arg("-c")
4604            .arg(&script_with_args)
4605            .env("ANCHOR_PROVIDER_URL", url)
4606            .env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
4607            .stdout(Stdio::inherit())
4608            .stderr(Stdio::inherit())
4609            .output()
4610            .unwrap();
4611        if !exit.status.success() {
4612            std::process::exit(exit.status.code().unwrap_or(1));
4613        }
4614        Ok(())
4615    })?
4616}
4617
/// Dispatch `anchor keys` subcommands (`list`, `sync`) to their implementations.
fn keys(cfg_override: &ConfigOverride, cmd: KeysCommand) -> Result<()> {
    match cmd {
        KeysCommand::List => keys_list(cfg_override),
        KeysCommand::Sync { program_name } => keys_sync(cfg_override, program_name),
    }
}
4624
4625fn keys_list(cfg_override: &ConfigOverride) -> Result<()> {
4626    with_workspace(cfg_override, |cfg| -> Result<()> {
4627        for program in cfg.read_all_programs()? {
4628            let pubkey = program.pubkey()?;
4629            println!("{}: {}", program.lib_name, pubkey);
4630        }
4631        Ok(())
4632    })?
4633}
4634
/// Sync program `declare_id!` pubkeys with the pubkey from `target/deploy/<KEYPAIR>.json`.
///
/// For each selected program this updates, when out of date:
/// - the `declare_id!("...")` literal in `src/lib.rs` or `src/id.rs`, and
/// - the matching program entry in Anchor.toml, but only for the currently
///   configured cluster.
fn keys_sync(cfg_override: &ConfigOverride, program_name: Option<String>) -> Result<()> {
    with_workspace(cfg_override, |cfg| -> Result<()> {
        // Capture group 3 is the pubkey string inside declare_id!("...").
        let declare_id_regex = RegexBuilder::new(r#"^(([\w]+::)*)declare_id!\("(\w*)"\)"#)
            .multi_line(true)
            .build()
            .unwrap();

        let cfg_cluster = cfg.provider.cluster.to_owned();
        println!("Syncing program ids for the configured cluster ({cfg_cluster})\n");

        let mut changed_src = false;
        for program in cfg.get_programs(program_name)? {
            // Get the pubkey from the keypair file; it is the source of truth.
            let actual_program_id = program.pubkey()?.to_string();

            // Handle declaration in program files
            let src_path = program.path.join("src");
            let files_to_check = vec![src_path.join("lib.rs"), src_path.join("id.rs")];

            for path in files_to_check {
                // Missing files (e.g. no id.rs) are simply skipped.
                let mut content = match fs::read_to_string(&path) {
                    Ok(content) => content,
                    Err(_) => continue,
                };

                // Only the first declare_id! occurrence in the file is
                // considered, and only when it differs from the keypair's id.
                let incorrect_program_id = declare_id_regex
                    .captures(&content)
                    .and_then(|captures| captures.get(3))
                    .filter(|program_id_match| program_id_match.as_str() != actual_program_id);
                if let Some(program_id_match) = incorrect_program_id {
                    println!("Found incorrect program id declaration in {path:?}");

                    // Update the program id in place via the match's byte range.
                    content.replace_range(program_id_match.range(), &actual_program_id);
                    fs::write(&path, content)?;

                    changed_src = true;
                    println!("Updated to {actual_program_id}\n");
                    // Stop after fixing one file for this program.
                    break;
                }
            }

            // Handle declaration in Anchor.toml
            'outer: for (cluster, programs) in &mut cfg.programs {
                // Only change if the configured cluster matches the program's cluster
                if cluster != &cfg_cluster {
                    continue;
                }

                for (name, deployment) in programs {
                    // Skip other programs
                    if name != &program.lib_name {
                        continue;
                    }

                    if deployment.address.to_string() != actual_program_id {
                        println!(
                            "Found incorrect program id declaration in Anchor.toml for the \
                             program `{name}`"
                        );

                        // Update the program id and persist the whole config file.
                        deployment.address = Pubkey::try_from(actual_program_id.as_str()).unwrap();
                        fs::write(cfg.path(), cfg.to_string())?;

                        println!("Updated to {actual_program_id}\n");
                        // At most one Anchor.toml entry matches; stop scanning.
                        break 'outer;
                    }
                }
            }
        }

        println!("All program id declarations are synced.");
        if changed_src {
            println!("Please rebuild the program to update the generated artifacts.")
        }

        Ok(())
    })?
}
4716
4717/// Check if there's a mismatch between the program keypair and the `declare_id!` in the source code.
4718/// Returns an error if a mismatch is detected, prompting the user to run `anchor keys sync`.
4719fn check_program_id_mismatch(cfg: &WithPath<Config>, program_name: Option<String>) -> Result<()> {
4720    let declare_id_regex = RegexBuilder::new(r#"^(([\w]+::)*)declare_id!\("(\w*)"\)"#)
4721        .multi_line(true)
4722        .build()
4723        .unwrap();
4724
4725    for program in cfg.get_programs(program_name)? {
4726        // Get the pubkey from the keypair file
4727        let actual_program_id = program.pubkey()?.to_string();
4728
4729        // Check declaration in program files
4730        let src_path = program.path.join("src");
4731        let files_to_check = vec![src_path.join("lib.rs"), src_path.join("id.rs")];
4732
4733        for path in files_to_check {
4734            let content = match fs::read_to_string(&path) {
4735                Ok(content) => content,
4736                Err(_) => continue,
4737            };
4738
4739            let incorrect_program_id = declare_id_regex
4740                .captures(&content)
4741                .and_then(|captures| captures.get(3))
4742                .filter(|program_id_match| program_id_match.as_str() != actual_program_id);
4743
4744            if let Some(program_id_match) = incorrect_program_id {
4745                let declared_id = program_id_match.as_str();
4746                return Err(anyhow!(
4747                    "Program ID mismatch detected for program '{}':\n  Keypair file has: {}\n  \
4748                     Source code has:  {}\n\nPlease run 'anchor keys sync' to update the program \
4749                     ID in your source code or use the '--ignore-keys' flag to skip this check.",
4750                    program.lib_name,
4751                    actual_program_id,
4752                    declared_id
4753                ));
4754            }
4755        }
4756    }
4757
4758    Ok(())
4759}
4760
4761#[allow(clippy::too_many_arguments)]
4762fn localnet(
4763    cfg_override: &ConfigOverride,
4764    skip_build: bool,
4765    skip_deploy: bool,
4766    skip_lint: bool,
4767    ignore_keys: bool,
4768    validator_type: ValidatorType,
4769    env_vars: Vec<String>,
4770    cargo_args: Vec<String>,
4771) -> Result<()> {
4772    with_workspace(cfg_override, |cfg| -> Result<()> {
4773        // Build if needed.
4774        if !skip_build {
4775            build(
4776                cfg_override,
4777                false,
4778                None,
4779                None,
4780                false,
4781                skip_lint,
4782                ignore_keys,
4783                None,
4784                None,
4785                None,
4786                BootstrapMode::None,
4787                None,
4788                None,
4789                env_vars,
4790                cargo_args,
4791                false,
4792            )?;
4793        }
4794
4795        let validator_handle: Option<Child> = match validator_type {
4796            ValidatorType::Surfpool => {
4797                let full_simnet_mode = true;
4798                let flags = Some(surfpool_flags(
4799                    cfg,
4800                    &cfg.surfpool_config,
4801                    full_simnet_mode,
4802                    skip_deploy,
4803                    None,
4804                )?);
4805                Some(start_surfpool_validator(
4806                    flags,
4807                    &cfg.surfpool_config,
4808                    full_simnet_mode,
4809                )?)
4810            }
4811            ValidatorType::Legacy => {
4812                let flags = match skip_deploy {
4813                    true => None,
4814                    false => Some(validator_flags(cfg, &cfg.test_validator)?),
4815                };
4816                Some(start_solana_test_validator(
4817                    cfg,
4818                    &cfg.test_validator,
4819                    flags,
4820                    false,
4821                )?)
4822            }
4823        };
4824
4825        // Setup log reader.
4826        let url = test_validator_rpc_url(&cfg.test_validator);
4827        let log_streams = match stream_logs(cfg, &url) {
4828            Ok(streams) => {
4829                println!(
4830                    "Log streams set up successfully ({} streams)",
4831                    streams.len()
4832                );
4833                Some(streams)
4834            }
4835            Err(e) => {
4836                eprintln!("Warning: Failed to setup program log streaming: {:#}", e);
4837                eprintln!("  Program logs will still be visible in the validator output.");
4838                None
4839            }
4840        };
4841
4842        std::io::stdin().lock().lines().next().unwrap().unwrap();
4843
4844        // Check all errors and shut down.
4845        if let Some(mut handle) = validator_handle {
4846            if let Err(err) = handle.kill() {
4847                println!("Failed to kill subprocess {}: {}", handle.id(), err);
4848            }
4849        }
4850
4851        // Explicitly shutdown log streams - closes WebSocket subscriptions
4852        if let Some(log_streams) = log_streams {
4853            for handle in log_streams {
4854                handle.shutdown();
4855            }
4856        }
4857
4858        Ok(())
4859    })?
4860}
4861
4862// with_workspace ensures the current working directory is always the top level
4863// workspace directory, i.e., where the `Anchor.toml` file is located, before
4864// and after the closure invocation.
4865//
4866// The closure passed into this function must never change the working directory
4867// to be outside the workspace. Doing so will have undefined behavior.
4868fn with_workspace<R>(
4869    cfg_override: &ConfigOverride,
4870    f: impl FnOnce(&mut WithPath<Config>) -> R,
4871) -> Result<R> {
4872    set_workspace_dir_or_exit();
4873
4874    let mut cfg = Config::discover(cfg_override)
4875        .map_err(|e| anyhow!("Workspace configuration error: {}", e))?
4876        .ok_or_else(|| anyhow!("This command requires an Anchor workspace."))?;
4877
4878    let r = f(&mut cfg);
4879
4880    set_workspace_dir_or_exit();
4881
4882    Ok(r)
4883}
4884
4885fn is_hidden(entry: &walkdir::DirEntry) -> bool {
4886    entry
4887        .file_name()
4888        .to_str()
4889        .map(|s| s == "." || s.starts_with('.') || s == "target")
4890        .unwrap_or(false)
4891}
4892
4893fn get_node_version() -> Result<Version> {
4894    let node_version = std::process::Command::new("node")
4895        .arg("--version")
4896        .stderr(Stdio::inherit())
4897        .output()
4898        .map_err(|e| anyhow::format_err!("node failed: {}", e))?;
4899    let output = std::str::from_utf8(&node_version.stdout)?
4900        .strip_prefix('v')
4901        .unwrap()
4902        .trim();
4903    Version::parse(output).map_err(Into::into)
4904}
4905
4906fn add_recommended_deployment_solana_args(
4907    client: &RpcClient,
4908    args: Vec<String>,
4909) -> Result<Vec<String>> {
4910    let mut augmented_args = args.clone();
4911
4912    // If no priority fee is provided, calculate a recommended fee based on recent txs.
4913    if !args.contains(&"--with-compute-unit-price".to_string()) {
4914        let priority_fee = get_recommended_micro_lamport_fee(client)?;
4915        augmented_args.push("--with-compute-unit-price".to_string());
4916        augmented_args.push(priority_fee.to_string());
4917    }
4918
4919    const DEFAULT_MAX_SIGN_ATTEMPTS: u8 = 30;
4920    if !args.contains(&"--max-sign-attempts".to_string()) {
4921        augmented_args.push("--max-sign-attempts".to_string());
4922        augmented_args.push(DEFAULT_MAX_SIGN_ATTEMPTS.to_string());
4923    }
4924
4925    // If no buffer keypair is provided, create a temporary one to reuse across deployments.
4926    // This is particularly useful for upgrading larger programs, which suffer from an increased
4927    // likelihood of some write transactions failing during any single deployment.
4928    if !args.contains(&"--buffer".to_owned()) {
4929        let tmp_keypair_path = std::env::temp_dir().join("anchor-upgrade-buffer.json");
4930        if !tmp_keypair_path.exists() {
4931            if let Err(err) = Keypair::new().write_to_file(&tmp_keypair_path) {
4932                return Err(anyhow!(
4933                    "Error creating keypair for buffer account, {:?}",
4934                    err
4935                ));
4936            }
4937        }
4938
4939        augmented_args.push("--buffer".to_owned());
4940        augmented_args.push(tmp_keypair_path.to_string_lossy().to_string());
4941    }
4942
4943    Ok(augmented_args)
4944}
4945
4946fn get_node_dns_option() -> Result<&'static str> {
4947    let version = get_node_version()?;
4948    let req = VersionReq::parse(">=16.4.0").unwrap();
4949    let option = match req.matches(&version) {
4950        true => "--dns-result-order=ipv4first",
4951        false => "",
4952    };
4953    Ok(option)
4954}
4955
// Remove the current workspace directory if it prefixes a string.
// This is used as a workaround for the Solana CLI using the uriparse crate to
// parse args but not handling percent encoding/decoding when using the path as
// a local filesystem path. Removing the workspace prefix handles most/all cases
// of spaces in keypair/binary paths, but this should be fixed in the Solana CLI
// and removed here.
fn strip_workspace_prefix(absolute_path: String) -> String {
    let mut workspace_prefix = std::env::current_dir().unwrap().display().to_string();
    workspace_prefix.push_str(std::path::MAIN_SEPARATOR_STR);
    match absolute_path.strip_prefix(&workspace_prefix) {
        Some(relative) => relative.to_string(),
        None => absolute_path,
    }
}
4970
/// Create a new [`RpcClient`] with `confirmed` commitment level instead of the
/// default (`finalized`).
fn create_client<U: ToString>(url: U) -> RpcClient {
    RpcClient::new_with_commitment(url, CommitmentConfig::confirmed())
}
4975
4976fn address(cfg_override: &ConfigOverride) -> Result<()> {
4977    let (_cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
4978
4979    // Load keypair and get pubkey
4980    let keypair = Keypair::read_from_file(&wallet_path)
4981        .map_err(|e| anyhow!("Failed to read keypair from {}: {}", wallet_path, e))?;
4982
4983    // Print the public key
4984    println!("{}", keypair.pubkey());
4985
4986    Ok(())
4987}
4988
4989fn balance(cfg_override: &ConfigOverride, pubkey: Option<Pubkey>, lamports: bool) -> Result<()> {
4990    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
4991
4992    // Create RPC client
4993    let client = RpcClient::new(cluster_url);
4994
4995    // Determine which account to check
4996    let account_pubkey = if let Some(pubkey) = pubkey {
4997        pubkey
4998    } else {
4999        // Load keypair from wallet path and get pubkey
5000        let keypair = Keypair::read_from_file(&wallet_path)
5001            .map_err(|e| anyhow!("Failed to read keypair from {}: {}", wallet_path, e))?;
5002        keypair.pubkey()
5003    };
5004
5005    // Get balance
5006    let balance = client.get_balance(&account_pubkey)?;
5007
5008    // Format and display output
5009    if lamports {
5010        println!("{}", balance);
5011    } else {
5012        println!("{}", format_sol(balance));
5013    }
5014
5015    Ok(())
5016}
5017
5018fn epoch(cfg_override: &ConfigOverride) -> Result<()> {
5019    let (cluster_url, _wallet_path) = get_cluster_and_wallet(cfg_override)?;
5020
5021    // Create RPC client
5022    let client = RpcClient::new(cluster_url);
5023
5024    // Get epoch info
5025    let epoch_info = client.get_epoch_info()?;
5026
5027    // Print just the epoch number
5028    println!("{}", epoch_info.epoch);
5029
5030    Ok(())
5031}
5032
5033fn epoch_info(cfg_override: &ConfigOverride) -> Result<()> {
5034    let (cluster_url, _wallet_path) = get_cluster_and_wallet(cfg_override)?;
5035
5036    // Create RPC client
5037    let client = RpcClient::new(cluster_url);
5038
5039    // Get epoch info
5040    let epoch_info = client.get_epoch_info()?;
5041
5042    // Calculate epoch slot range
5043    let first_slot_in_epoch = epoch_info.absolute_slot - epoch_info.slot_index;
5044    let last_slot_in_epoch = first_slot_in_epoch + epoch_info.slots_in_epoch;
5045
5046    // Calculate completion stats
5047    let epoch_completed_percent =
5048        epoch_info.slot_index as f64 / epoch_info.slots_in_epoch as f64 * 100.0;
5049    let remaining_slots = epoch_info.slots_in_epoch - epoch_info.slot_index;
5050
5051    // Display epoch information (matching Solana CLI format)
5052    println!("Block height: {}", epoch_info.block_height);
5053    println!("Slot: {}", epoch_info.absolute_slot);
5054    println!("Epoch: {}", epoch_info.epoch);
5055
5056    if let Some(tx_count) = epoch_info.transaction_count {
5057        println!("Transaction Count: {}", tx_count);
5058    }
5059
5060    println!(
5061        "Epoch Slot Range: [{}..{})",
5062        first_slot_in_epoch, last_slot_in_epoch
5063    );
5064    println!("Epoch Completed Percent: {:>3.3}%", epoch_completed_percent);
5065    println!(
5066        "Epoch Completed Slots: {}/{} ({} remaining)",
5067        epoch_info.slot_index, epoch_info.slots_in_epoch, remaining_slots
5068    );
5069
5070    // Try to calculate epoch completed time
5071    // Get average slot time from performance samples (aggregate up to 60 samples)
5072    if let Ok(samples) = client.get_recent_performance_samples(Some(60)) {
5073        // Aggregate all samples to calculate average slot time
5074        let (total_slots, total_secs) =
5075            samples.iter().fold((0u64, 0u64), |(slots, secs), sample| {
5076                (
5077                    slots.saturating_add(sample.num_slots),
5078                    secs.saturating_add(sample.sample_period_secs as u64),
5079                )
5080            });
5081
5082        if total_slots > 0 {
5083            let avg_slot_time_ms = (total_secs * 1000) / total_slots;
5084
5085            // Calculate time_remaining using average slot time (always estimated)
5086            let remaining_secs = (remaining_slots * avg_slot_time_ms) / 1000;
5087
5088            // Calculate time_elapsed - try actual block times first, then estimate
5089            // Get the first actual block in the epoch and adjust for slot differences
5090            let start_block_time = client
5091                .get_blocks_with_limit(first_slot_in_epoch, 1)
5092                .ok()
5093                .and_then(|slots| slots.first().cloned())
5094                .and_then(|first_actual_block| {
5095                    client.get_block_time(first_actual_block).ok().map(|time| {
5096                        // Adjust backwards if first actual block is after expected start
5097                        let slot_diff = first_actual_block.saturating_sub(first_slot_in_epoch);
5098                        let time_adjustment = (slot_diff * avg_slot_time_ms / 1000) as i64;
5099                        time.saturating_sub(time_adjustment)
5100                    })
5101                });
5102
5103            let current_block_time = client.get_block_time(epoch_info.absolute_slot).ok();
5104
5105            let (elapsed_secs, is_estimated) = if let (Some(start_time), Some(current_time)) =
5106                (start_block_time, current_block_time)
5107            {
5108                // Use actual block times for elapsed
5109                ((current_time - start_time) as u64, false)
5110            } else {
5111                // Estimate elapsed using average slot time
5112                ((epoch_info.slot_index * avg_slot_time_ms) / 1000, true)
5113            };
5114
5115            // Total time = elapsed + remaining
5116            let total_secs = elapsed_secs + remaining_secs;
5117
5118            let estimated_marker = if is_estimated { "*" } else { "" };
5119            println!(
5120                "Epoch Completed Time: {}{}/{} ({} remaining)",
5121                format_duration_secs(elapsed_secs),
5122                estimated_marker,
5123                format_duration_secs(total_secs),
5124                format_duration_secs(remaining_secs)
5125            );
5126        }
5127    }
5128
5129    Ok(())
5130}
5131
5132/// Format seconds into human-readable duration (e.g., "1day 5h 49m 8s")
5133fn format_duration_secs(total_seconds: u64) -> String {
5134    let seconds = total_seconds % 60;
5135    let total_minutes = total_seconds / 60;
5136    let minutes = total_minutes % 60;
5137    let total_hours = total_minutes / 60;
5138    let hours = total_hours % 24;
5139    let days = total_hours / 24;
5140
5141    let mut parts = Vec::new();
5142    if days > 0 {
5143        parts.push(format!("{}day", days));
5144    }
5145    if hours > 0 {
5146        parts.push(format!("{}h", hours));
5147    }
5148    if minutes > 0 {
5149        parts.push(format!("{}m", minutes));
5150    }
5151    if seconds > 0 || parts.is_empty() {
5152        parts.push(format!("{}s", seconds));
5153    }
5154
5155    parts.join(" ")
5156}
5157
5158fn logs_subscribe(
5159    cfg_override: &ConfigOverride,
5160    include_votes: bool,
5161    address: Option<Vec<Pubkey>>,
5162) -> Result<()> {
5163    let (cluster_url, _wallet_path) = get_cluster_and_wallet(cfg_override)?;
5164
5165    // Convert HTTP(S) URL to WebSocket URL
5166    let ws_url = if cluster_url.contains("localhost") || cluster_url.contains("127.0.0.1") {
5167        // Parse the URL to extract and increment the port
5168        cluster_url
5169            .replace("https://", "wss://")
5170            .replace("http://", "ws://")
5171            .replace(":8899", ":8900") // Default test validator ports
5172    } else {
5173        cluster_url
5174            .replace("https://", "wss://")
5175            .replace("http://", "ws://")
5176    };
5177
5178    println!("Connecting to {}", ws_url);
5179
5180    let filter = match (include_votes, address) {
5181        (true, Some(address)) => {
5182            RpcTransactionLogsFilter::Mentions(address.iter().map(|p| p.to_string()).collect())
5183        }
5184        (true, None) => RpcTransactionLogsFilter::AllWithVotes,
5185        (false, Some(address)) => {
5186            RpcTransactionLogsFilter::Mentions(address.iter().map(|p| p.to_string()).collect())
5187        }
5188        (false, None) => RpcTransactionLogsFilter::All,
5189    };
5190
5191    let (_client, receiver) = PubsubClient::logs_subscribe(
5192        &ws_url,
5193        filter,
5194        RpcTransactionLogsConfig {
5195            commitment: cfg_override.commitment.map(|c| CommitmentConfig {
5196                commitment: c.into(),
5197            }),
5198        },
5199    )?;
5200
5201    loop {
5202        match receiver.recv() {
5203            Ok(logs) => {
5204                println!("Transaction executed in slot {}:", logs.context.slot);
5205                println!("  Signature: {}", logs.value.signature);
5206                println!(
5207                    "  Status: {}",
5208                    logs.value
5209                        .err
5210                        .map(|err| err.to_string())
5211                        .unwrap_or_else(|| "Ok".to_string())
5212                );
5213                println!("  Log Messages:");
5214                for log in logs.value.logs {
5215                    println!("    {log}");
5216                }
5217            }
5218            Err(err) => {
5219                return Err(anyhow!("Disconnected: {err}"));
5220            }
5221        }
5222    }
5223}
5224
#[cfg(test)]
mod tests {
    use super::*;

    /// Run `init` with default options and the given workspace name.
    /// Used to exercise workspace-name validation; panics (via `unwrap`)
    /// when the name is rejected.
    fn init_with_name(name: &str) {
        init(
            &ConfigOverride {
                cluster: None,
                wallet: None,
                commitment: None,
            },
            name.to_string(),
            true,
            true,
            PackageManager::default(),
            false,
            ProgramTemplate::default(),
            TestTemplate::default(),
            false,
            true,
        )
        .unwrap();
    }

    #[test]
    #[should_panic(expected = "Anchor workspace name must be a valid Rust identifier.")]
    fn test_init_reserved_word() {
        init_with_name("await");
    }

    #[test]
    #[should_panic(expected = "Anchor workspace name must be a valid Rust identifier.")]
    fn test_init_reserved_word_from_syn() {
        init_with_name("fn");
    }

    #[test]
    #[should_panic(expected = "Anchor workspace name must be a valid Rust identifier.")]
    fn test_init_starting_with_digit() {
        init_with_name("1project");
    }
}