Skip to main content

anchor_cli/
lib.rs

1use {
2    crate::config::{
3        get_default_ledger_path, BootstrapMode, BuildConfig, Config, ConfigOverride, HookType,
4        Manifest, PackageManager, ProgramDeployment, ProgramWorkspace, ScriptsConfig,
5        SurfnetInfoResponse, SurfpoolConfig, TestValidator, ValidatorType, WithPath, SHUTDOWN_WAIT,
6        STARTUP_WAIT, SURFPOOL_HOST,
7    },
8    anchor_client::Cluster,
9    anchor_lang::{
10        prelude::UpgradeableLoaderState, solana_program::bpf_loader_upgradeable, AnchorDeserialize,
11    },
12    anchor_lang_idl::{
13        convert::convert_idl,
14        types::{Idl, IdlArrayLen, IdlDefinedFields, IdlType, IdlTypeDefTy},
15    },
16    anyhow::{anyhow, bail, Context, Result},
17    checks::{check_anchor_version, check_deps, check_idl_build_feature, check_overflow},
18    clap::{CommandFactory, Parser},
19    dirs::home_dir,
20    heck::{ToKebabCase, ToLowerCamelCase, ToPascalCase, ToSnakeCase},
21    regex::{Regex, RegexBuilder},
22    rust_template::{ProgramTemplate, TestTemplate},
23    semver::{Version, VersionReq},
24    serde_json::{json, Map, Value as JsonValue},
25    solana_cli_config::Config as SolanaCliConfig,
26    solana_commitment_config::CommitmentConfig,
27    solana_compute_budget_interface::ComputeBudgetInstruction,
28    solana_instruction::Instruction,
29    solana_keypair::Keypair,
30    solana_pubkey::Pubkey,
31    solana_pubsub_client::pubsub_client::{PubsubClient, PubsubClientSubscription},
32    solana_rpc_client::rpc_client::RpcClient,
33    solana_rpc_client_api::{
34        config::{RpcTransactionLogsConfig, RpcTransactionLogsFilter},
35        request::RpcRequest,
36        response::{Response as RpcResponse, RpcLogsResponse},
37    },
38    solana_signer::{EncodableKey, Signer},
39    std::{
40        collections::{BTreeMap, HashMap, HashSet},
41        ffi::OsString,
42        fs::{self, File},
43        io::prelude::*,
44        path::{Path, PathBuf},
45        process::{Child, Stdio},
46        string::ToString,
47        sync::LazyLock,
48    },
49};
50
51mod account;
52mod checks;
53pub mod config;
54mod keygen;
55mod metadata;
56mod program;
57pub mod rust_template;
58
/// Version of this CLI crate, taken from `Cargo.toml` at compile time.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
/// Version of the docker image used for verifiable builds (kept in
/// lockstep with the CLI version).
pub const DOCKER_BUILDER_VERSION: &str = VERSION;
/// Default RPC port
pub const DEFAULT_RPC_PORT: u16 = 8899;

/// WebSocket port offset for solana-test-validator (RPC port + 1)
pub const WEBSOCKET_PORT_OFFSET: u16 = 1;
67
68pub static AVM_HOME: LazyLock<PathBuf> = LazyLock::new(|| {
69    if let Ok(avm_home) = std::env::var("AVM_HOME") {
70        PathBuf::from(avm_home)
71    } else {
72        let mut user_home = dirs::home_dir().expect("Could not find home directory");
73        user_home.push(".avm");
74        user_home
75    }
76});
77
// Top-level CLI entry: global configuration overrides plus the subcommand
// to execute. Parsed by clap via `Opts::parse()`.
#[derive(Debug, Parser)]
#[clap(version = VERSION)]
pub struct Opts {
    // Global overrides (e.g. provider cluster/wallet) shared by every
    // subcommand; flattened so its flags appear at the top level.
    #[clap(flatten)]
    pub cfg_override: ConfigOverride,
    // The subcommand selected on the command line.
    #[clap(subcommand)]
    pub command: Command,
}
86
// All top-level `anchor` subcommands. Doc comments on variants and fields
// become the CLI help text via clap's derive.
#[derive(Debug, Parser)]
pub enum Command {
    /// Initializes a workspace.
    Init {
        /// Workspace name
        name: String,
        /// Use JavaScript instead of TypeScript
        #[clap(short, long)]
        javascript: bool,
        /// Don't install JavaScript dependencies
        #[clap(long)]
        no_install: bool,
        /// Package Manager to use
        #[clap(value_enum, long, default_value = "yarn")]
        package_manager: PackageManager,
        /// Don't initialize git
        #[clap(long)]
        no_git: bool,
        /// Rust program template to use
        #[clap(value_enum, short, long, default_value = "multiple")]
        template: ProgramTemplate,
        /// Test template to use
        #[clap(value_enum, long, default_value = "litesvm")]
        test_template: TestTemplate,
        /// Initialize even if there are files
        #[clap(long, action)]
        force: bool,
        /// Install Solana agent skills
        #[clap(long)]
        install_agent_skills: bool,
    },
    /// Builds the workspace.
    #[clap(name = "build", alias = "b")]
    Build {
        /// True if the build should not fail even if there are no "CHECK" comments
        #[clap(long)]
        skip_lint: bool,
        /// Skip checking for program ID mismatch between keypair and declare_id
        #[clap(long)]
        ignore_keys: bool,
        /// Do not build the IDL
        #[clap(long)]
        no_idl: bool,
        /// Output directory for the IDL.
        #[clap(short, long)]
        idl: Option<String>,
        /// Output directory for the TypeScript IDL.
        #[clap(short = 't', long)]
        idl_ts: Option<String>,
        /// True if the build artifact needs to be deterministic and verifiable.
        #[clap(short, long)]
        verifiable: bool,
        /// Name of the program to build
        #[clap(short, long)]
        program_name: Option<String>,
        /// Version of the Solana toolchain to use. For --verifiable builds
        /// only.
        #[clap(short, long)]
        solana_version: Option<String>,
        /// Docker image to use. For --verifiable builds only.
        #[clap(short, long)]
        docker_image: Option<String>,
        /// Bootstrap docker image from scratch, installing all requirements for
        /// verifiable builds. Only works for debian-based images.
        #[clap(value_enum, short, long, default_value = "none")]
        bootstrap: BootstrapMode,
        /// Environment variables to pass into the docker container
        #[clap(short, long, required = false)]
        env: Vec<String>,
        /// Arguments to pass to the underlying `cargo build-sbf` command
        #[clap(required = false, last = true)]
        cargo_args: Vec<String>,
        /// Suppress doc strings in IDL output
        #[clap(long)]
        no_docs: bool,
    },
    /// Expands macros (wrapper around cargo expand)
    ///
    /// Use it in a program folder to expand program
    ///
    /// Use it in a workspace but outside a program
    /// folder to expand the entire workspace
    Expand {
        /// Expand only this program
        #[clap(short, long)]
        program_name: Option<String>,
        /// Arguments to pass to the underlying `cargo expand` command
        #[clap(required = false, last = true)]
        cargo_args: Vec<String>,
    },
    /// Verifies the on-chain bytecode matches the locally compiled artifact.
    /// Run this command inside a program subdirectory, i.e., in the dir
    /// containing the program's Cargo.toml.
    Verify {
        /// The program ID to verify.
        program_id: Pubkey,
        /// The URL of the repository to verify against. Conflicts with `--current-dir`.
        #[clap(long, conflicts_with = "current_dir")]
        repo_url: Option<String>,
        /// The commit hash to verify against. Requires `--repo-url`.
        #[clap(long, requires = "repo_url")]
        commit_hash: Option<String>,
        /// Verify against the source code in the current directory. Conflicts with `--repo-url`.
        #[clap(long)]
        current_dir: bool,
        /// Name of the program to run the command on. Defaults to the package name.
        #[clap(long)]
        program_name: Option<String>,
        /// Any additional arguments to pass to `solana-verify`.
        #[clap(raw = true)]
        args: Vec<String>,
    },
    #[clap(name = "test", alias = "t")]
    /// Runs integration tests.
    Test {
        /// Build and test only this program
        #[clap(short, long)]
        program_name: Option<String>,
        /// Use this flag if you want to run tests against previously deployed
        /// programs.
        #[clap(long)]
        skip_deploy: bool,
        /// True if the build should not fail even if there are
        /// no "CHECK" comments where normally required
        #[clap(long)]
        skip_lint: bool,
        /// Flag to skip starting a local validator, if the configured cluster
        /// url is a localnet.
        #[clap(long)]
        skip_local_validator: bool,
        /// Flag to skip building the program in the workspace,
        /// use this to save time when running tests and the program code is not altered.
        #[clap(long)]
        skip_build: bool,
        /// Do not build the IDL
        #[clap(long)]
        no_idl: bool,
        /// Flag to keep the local validator running after tests
        /// to be able to check the transactions.
        #[clap(long)]
        detach: bool,
        /// Run the test suites under the specified path
        #[clap(long)]
        run: Vec<String>,
        /// Validator type to use for local testing
        #[clap(value_enum, long, default_value = "surfpool")]
        validator: ValidatorType,
        // Positional arguments captured verbatim; presumably forwarded to the
        // configured test script — NOTE(review): field has no help text
        // upstream, confirm the intended consumer before documenting in help.
        args: Vec<String>,
        /// Environment variables to pass into the docker container
        #[clap(short, long, required = false)]
        env: Vec<String>,
        /// Arguments to pass to the underlying `cargo build-sbf` command.
        #[clap(required = false, last = true)]
        cargo_args: Vec<String>,
    },
    /// Creates a new program.
    New {
        /// Program name
        name: String,
        /// Rust program template to use
        #[clap(value_enum, short, long, default_value = "multiple")]
        template: ProgramTemplate,
        /// Create new program even if there is already one
        #[clap(long, action)]
        force: bool,
    },
    /// Commands for interacting with interface definitions.
    Idl {
        #[clap(subcommand)]
        subcmd: IdlCommand,
    },
    /// Remove all artifacts from the generated directories except program keypairs.
    Clean,
    /// Deploys each program in the workspace.
    #[clap(hide = true)]
    #[deprecated(since = "0.32.0", note = "use `anchor program deploy` instead")]
    Deploy {
        /// Only deploy this program
        #[clap(short, long)]
        program_name: Option<String>,
        /// Keypair of the program (filepath) (requires program-name)
        #[clap(long, requires = "program_name")]
        program_keypair: Option<String>,
        /// If true, deploy from path target/verifiable
        #[clap(short, long)]
        verifiable: bool,
        /// Don't upload IDL during deployment (IDL is uploaded by default)
        #[clap(long)]
        no_idl: bool,
        /// Arguments to pass to the underlying `solana program deploy` command.
        #[clap(required = false, last = true)]
        solana_args: Vec<String>,
    },
    /// Runs the deploy migration script.
    Migrate,
    /// Upgrades a single program. The configured wallet must be the upgrade
    /// authority.
    #[clap(hide = true)]
    #[deprecated(since = "0.32.0", note = "use `anchor program upgrade` instead")]
    Upgrade {
        /// The program to upgrade.
        #[clap(short, long)]
        program_id: Pubkey,
        /// Filepath to the new program binary.
        program_filepath: String,
        /// Max times to retry on failure.
        #[clap(long, default_value = "0")]
        max_retries: u32,
        /// Arguments to pass to the underlying `solana program deploy` command.
        #[clap(required = false, last = true)]
        solana_args: Vec<String>,
    },
    /// Request an airdrop of SOL
    Airdrop {
        /// Amount of SOL to airdrop
        amount: f64,
        /// Recipient address (defaults to configured wallet)
        pubkey: Option<Pubkey>,
    },
    /// Cluster commands.
    Cluster {
        #[clap(subcommand)]
        subcmd: ClusterCommand,
    },
    /// Configuration management commands.
    Config {
        #[clap(subcommand)]
        subcmd: ConfigCommand,
    },
    /// Starts a node shell with an Anchor client setup according to the local
    /// config.
    Shell,
    /// Runs the script defined by the current workspace's Anchor.toml.
    Run {
        /// The name of the script to run.
        script: String,
        /// Argument to pass to the underlying script.
        #[clap(required = false, last = true)]
        script_args: Vec<String>,
    },
    /// Program keypair commands.
    Keys {
        #[clap(subcommand)]
        subcmd: KeysCommand,
    },
    /// Localnet commands.
    Localnet {
        /// Flag to skip building the program in the workspace,
        /// use this to save time when running tests and the program code is not altered.
        #[clap(long)]
        skip_build: bool,
        /// Use this flag if you want to run tests against previously deployed
        /// programs.
        #[clap(long)]
        skip_deploy: bool,
        /// True if the build should not fail even if there are
        /// no "CHECK" comments where normally required
        #[clap(long)]
        skip_lint: bool,
        /// Skip checking for program ID mismatch between keypair and declare_id
        #[clap(long)]
        ignore_keys: bool,
        /// Validator type to use for local testing
        #[clap(value_enum, long, default_value = "surfpool")]
        validator: ValidatorType,
        /// Environment variables to pass into the docker container
        #[clap(short, long, required = false)]
        env: Vec<String>,
        /// Arguments to pass to the underlying `cargo build-sbf` command.
        #[clap(required = false, last = true)]
        cargo_args: Vec<String>,
    },
    /// Fetch and deserialize an account using the IDL provided.
    Account {
        /// Account struct to deserialize (format: <program_name>.<Account>)
        account_type: String,
        /// Address of the account to deserialize
        address: Pubkey,
        /// IDL to use (defaults to workspace IDL)
        #[clap(long)]
        idl: Option<String>,
    },
    /// Generates shell completions.
    Completions {
        #[clap(value_enum)]
        shell: clap_complete::Shell,
    },
    /// Get your public key
    Address,
    /// Get your balance
    Balance {
        /// Account to check balance for (defaults to configured wallet)
        pubkey: Option<Pubkey>,
        /// Display balance in lamports instead of SOL
        #[clap(long)]
        lamports: bool,
    },
    /// Get current epoch
    Epoch,
    /// Get information about the current epoch
    #[clap(name = "epoch-info")]
    EpochInfo,
    /// Stream transaction logs
    Logs {
        /// Include vote transactions when monitoring all transactions
        #[clap(long)]
        include_votes: bool,
        /// Addresses to filter logs by
        #[clap(long)]
        address: Option<Vec<Pubkey>>,
    },
    /// Show the contents of an account
    ShowAccount {
        #[clap(flatten)]
        cmd: account::ShowAccountCommand,
    },
    /// Keypair generation and management
    Keygen {
        #[clap(subcommand)]
        subcmd: KeygenCommand,
    },
    /// Program deployment and management commands
    Program {
        #[clap(subcommand)]
        subcmd: ProgramCommand,
    },
}
415
// Subcommands under `anchor keygen`: create, recover, and inspect keypairs.
#[derive(Debug, Parser)]
pub enum KeygenCommand {
    /// Generate a new keypair
    New {
        /// Path to generated keypair file
        #[clap(short = 'o', long)]
        outfile: Option<String>,
        /// Overwrite the output file if it exists
        #[clap(short, long)]
        force: bool,
        /// Do not prompt for a passphrase
        #[clap(long)]
        no_passphrase: bool,
        /// Do not display the generated pubkey
        #[clap(long)]
        silent: bool,
        /// Number of words in the mnemonic phrase [possible values: 12, 15, 18, 21, 24]
        #[clap(short = 'w', long, default_value = "12")]
        word_count: usize,
    },
    /// Display the pubkey for a given keypair
    Pubkey {
        /// Keypair filepath
        keypair: Option<String>,
    },
    /// Recover a keypair from a seed phrase
    Recover {
        /// Path to recovered keypair file
        #[clap(short = 'o', long)]
        outfile: Option<String>,
        /// Overwrite the output file if it exists
        #[clap(short, long)]
        force: bool,
        /// Skip seed phrase validation
        #[clap(long)]
        skip_seed_phrase_validation: bool,
        /// Do not prompt for a passphrase
        #[clap(long)]
        no_passphrase: bool,
    },
    /// Verify a keypair can sign and verify a message
    Verify {
        /// Public key to verify
        pubkey: Pubkey,
        /// Keypair filepath (defaults to configured wallet)
        keypair: Option<String>,
    },
}
464
// Subcommands under `anchor keys`: program keypair utilities.
#[derive(Debug, Parser)]
pub enum KeysCommand {
    /// List all of the program keys.
    List,
    /// Sync program `declare_id!` pubkeys with the program's actual pubkey.
    Sync {
        /// Only sync the given program instead of all programs
        #[clap(short, long)]
        program_name: Option<String>,
    },
}
476
// Subcommands under `anchor program`: upgradeable-loader deployment and
// account management.
#[derive(Debug, Parser)]
pub enum ProgramCommand {
    /// Deploy an upgradeable program
    Deploy {
        /// Program filepath (e.g., target/deploy/my_program.so).
        /// If not provided, discovers programs from workspace
        program_filepath: Option<String>,
        /// Program name to deploy (from workspace). Used when program_filepath is not provided
        #[clap(short, long)]
        program_name: Option<String>,
        /// Program keypair filepath (defaults to target/deploy/{program_name}-keypair.json)
        #[clap(long)]
        program_keypair: Option<String>,
        /// Upgrade authority keypair (defaults to configured wallet)
        #[clap(long)]
        upgrade_authority: Option<String>,
        /// Program id to deploy to (derived from program-keypair if not specified)
        #[clap(long)]
        program_id: Option<Pubkey>,
        /// Buffer account to use for deployment
        #[clap(long)]
        buffer: Option<Pubkey>,
        /// Maximum transaction length (BPF loader upgradeable limit)
        #[clap(long)]
        max_len: Option<usize>,
        /// Don't upload IDL during deployment (IDL is uploaded by default)
        #[clap(long)]
        no_idl: bool,
        /// Make the program immutable after deployment (cannot be upgraded)
        #[clap(long = "final")]
        make_final: bool,
        /// Additional arguments to configure deployment (e.g., --with-compute-unit-price 1000)
        #[clap(required = false, last = true)]
        solana_args: Vec<String>,
    },
    /// Write a program into a buffer account
    WriteBuffer {
        /// Program filepath (e.g., target/deploy/my_program.so).
        /// If not provided, discovers program from workspace using program_name
        program_filepath: Option<String>,
        /// Program name to write (from workspace). Used when program_filepath is not provided
        #[clap(short, long)]
        program_name: Option<String>,
        /// Buffer account keypair (defaults to new keypair)
        #[clap(long)]
        buffer: Option<String>,
        /// Buffer authority (defaults to configured wallet)
        #[clap(long)]
        buffer_authority: Option<String>,
        /// Maximum transaction length
        #[clap(long)]
        max_len: Option<usize>,
    },
    /// Set a new buffer authority
    SetBufferAuthority {
        /// Buffer account address
        buffer: Pubkey,
        /// New buffer authority
        new_buffer_authority: Pubkey,
    },
    /// Set a new program authority
    SetUpgradeAuthority {
        /// Program id
        program_id: Pubkey,
        /// New upgrade authority pubkey
        #[clap(long)]
        new_upgrade_authority: Option<Pubkey>,
        /// New upgrade authority signer (keypair file). Required unless --skip-new-upgrade-authority-signer-check is used.
        /// When provided, both current and new authority will sign (checked mode, recommended)
        #[clap(long)]
        new_upgrade_authority_signer: Option<String>,
        /// Skip new upgrade authority signer check. Allows setting authority with only current authority signature.
        /// WARNING: Less safe - use only if you're confident the pubkey is correct
        #[clap(long)]
        skip_new_upgrade_authority_signer_check: bool,
        /// Make the program immutable (cannot be upgraded)
        #[clap(long = "final")]
        make_final: bool,
        /// Current upgrade authority keypair (defaults to configured wallet)
        #[clap(long)]
        upgrade_authority: Option<String>,
    },
    /// Display information about a buffer or program
    Show {
        /// Account address (buffer or program)
        account: Pubkey,
        /// List all program accounts
        // NOTE(review): the original help here duplicated the --get-buffers
        // text verbatim; confirm the intended wording against the handler.
        #[clap(long)]
        get_programs: bool,
        /// List all buffer accounts
        #[clap(long)]
        get_buffers: bool,
        /// Show all accounts
        #[clap(long)]
        all: bool,
    },
    /// Upgrade an upgradeable program
    Upgrade {
        /// Program id to upgrade
        program_id: Pubkey,
        /// Program filepath (e.g., target/deploy/my_program.so). If not provided, discovers from workspace
        #[clap(long)]
        program_filepath: Option<String>,
        /// Program name to upgrade (from workspace). Used when program_filepath is not provided
        #[clap(short, long)]
        program_name: Option<String>,
        /// Existing buffer account to upgrade from. If not provided, auto-discovers program from workspace
        #[clap(long)]
        buffer: Option<Pubkey>,
        /// Upgrade authority (defaults to configured wallet)
        #[clap(long)]
        upgrade_authority: Option<String>,
        /// Max times to retry on failure
        #[clap(long, default_value = "0")]
        max_retries: u32,
        /// Additional arguments to configure deployment (e.g., --with-compute-unit-price 1000)
        #[clap(required = false, last = true)]
        solana_args: Vec<String>,
    },
    /// Write the program data to a file
    Dump {
        /// Program account address
        account: Pubkey,
        /// Output file path
        output_file: String,
    },
    /// Close a program or buffer account and withdraw all lamports
    Close {
        /// Account address to close (buffer or program).
        /// If not provided, discovers program from workspace using program_name
        account: Option<Pubkey>,
        /// Program name to close (from workspace). Used when account is not provided
        #[clap(short, long)]
        program_name: Option<String>,
        /// Authority keypair (defaults to configured wallet)
        #[clap(long)]
        authority: Option<String>,
        /// Recipient address for reclaimed lamports (defaults to authority)
        #[clap(long)]
        recipient: Option<Pubkey>,
        /// Bypass warning prompts
        #[clap(long)]
        bypass_warning: bool,
    },
    /// Extend the length of an upgradeable program
    Extend {
        /// Program id to extend.
        /// If not provided, discovers program from workspace using program_name
        program_id: Option<Pubkey>,
        /// Program name to extend (from workspace). Used when program_id is not provided
        #[clap(short, long)]
        program_name: Option<String>,
        /// Additional bytes to allocate
        additional_bytes: usize,
    },
}
633
634#[derive(Debug, Parser)]
635pub enum IdlCommand {
636    /// Initializes a program's IDL account. Can only be run once.
637    Init {
638        /// Program id to initialize IDL for.
639        /// If not provided, discovers program ID from IDL.
640        program_id: Option<Pubkey>,
641        #[clap(short, long)]
642        filepath: String,
643        #[clap(long)]
644        priority_fee: Option<u64>,
645        /// Create non-canonical metadata account (third-party metadata)
646        #[clap(long)]
647        non_canonical: bool,
648        /// Allow running against a localnet cluster (disabled by default)
649        #[clap(long)]
650        #[cfg(feature = "idl-localnet-testing")]
651        allow_localnet: bool,
652    },
653    /// Upgrades the IDL to the new file. An alias for first writing and then
654    /// then setting the idl buffer account.
655    Upgrade {
656        /// Program id to upgrade IDL for.
657        /// If not provided, discovers program ID from IDL.
658        program_id: Option<Pubkey>,
659        #[clap(short, long)]
660        filepath: String,
661        #[clap(long)]
662        priority_fee: Option<u64>,
663        /// Allow running against a localnet cluster (disabled by default)
664        #[clap(long)]
665        #[cfg(feature = "idl-localnet-testing")]
666        allow_localnet: bool,
667    },
668    /// Generates the IDL for the program using the compilation method.
669    #[clap(alias = "b")]
670    Build {
671        // Program name to build the IDL of(current dir's program if not specified)
672        #[clap(short, long)]
673        program_name: Option<String>,
674        /// Output file for the IDL (stdout if not specified)
675        #[clap(short, long)]
676        out: Option<String>,
677        /// Output file for the TypeScript IDL
678        #[clap(short = 't', long)]
679        out_ts: Option<String>,
680        /// Suppress doc strings in output
681        #[clap(long)]
682        no_docs: bool,
683        /// Do not check for safety comments
684        #[clap(long)]
685        skip_lint: bool,
686        /// Arguments to pass to the underlying `cargo test` command
687        #[clap(required = false, last = true)]
688        cargo_args: Vec<String>,
689    },
690    /// Fetches an IDL for the given program from a cluster.
691    Fetch {
692        program_id: Pubkey,
693        /// Output file for the IDL (stdout if not specified).
694        #[clap(short, long)]
695        out: Option<String>,
696        /// Fetch non-canonical metadata account (third-party metadata)
697        #[clap(long)]
698        non_canonical: bool,
699    },
700    /// Convert legacy IDLs (pre Anchor 0.30) to the new IDL spec
701    Convert {
702        /// Path to the IDL file
703        path: String,
704        /// Output file for the IDL (stdout if not specified)
705        #[clap(short, long)]
706        out: Option<String>,
707        /// Program id to initialize IDL for.
708        /// If not provided, discovers program ID from IDL.
709        #[clap(short, long)]
710        program_id: Option<Pubkey>,
711    },
712    /// Generate TypeScript type for the IDL
713    Type {
714        /// Path to the IDL file
715        path: String,
716        /// Output file for the IDL (stdout if not specified)
717        #[clap(short, long)]
718        out: Option<String>,
719    },
720    /// Close a metadata account and recover rent
721    Close {
722        /// The program ID
723        program_id: Pubkey,
724        /// The seed used for the metadata account (default: "idl")
725        #[clap(long, default_value = "idl")]
726        seed: String,
727        /// Priority fees in micro-lamports per compute unit
728        #[clap(long)]
729        priority_fee: Option<u64>,
730    },
731    /// Create a buffer account for metadata
732    CreateBuffer {
733        /// Path to the metadata file
734        #[clap(short, long)]
735        filepath: String,
736        /// Priority fees in micro-lamports per compute unit
737        #[clap(long)]
738        priority_fee: Option<u64>,
739    },
740    /// Set a new authority on a buffer account
741    SetBufferAuthority {
742        /// The buffer account address
743        buffer: Pubkey,
744        /// The new authority
745        #[clap(short, long)]
746        new_authority: Pubkey,
747        /// Priority fees in micro-lamports per compute unit
748        #[clap(long)]
749        priority_fee: Option<u64>,
750    },
751    /// Write metadata using a buffer account
752    WriteBuffer {
753        /// The program ID
754        program_id: Pubkey,
755        /// The buffer account address
756        #[clap(short, long)]
757        buffer: Pubkey,
758        /// The seed to use for the metadata account (default: "idl")
759        #[clap(long, default_value = "idl")]
760        seed: String,
761        /// Close the buffer after writing
762        #[clap(long)]
763        close_buffer: bool,
764        /// Priority fees in micro-lamports per compute unit
765        #[clap(long)]
766        priority_fee: Option<u64>,
767    },
768}
769
// Subcommands under `anchor cluster`.
#[derive(Debug, Parser)]
pub enum ClusterCommand {
    /// Prints common cluster urls.
    List,
}
775
// Subcommands under `anchor config`: read and write provider settings in
// the local Anchor.toml.
#[derive(Debug, Parser)]
pub enum ConfigCommand {
    /// Get configuration settings from the local Anchor.toml
    Get,
    /// Set configuration settings in the local Anchor.toml
    Set {
        /// Cluster to connect to (custom URL). Use -um, -ud, -ut, -ul for standard clusters
        #[clap(short = 'u', long = "url")]
        url: Option<String>,
        /// Path to wallet keypair file to update the Anchor.toml file with
        #[clap(short = 'k', long = "keypair")]
        keypair: Option<String>,
    },
}
790
791fn get_keypair(path: &str) -> Result<Keypair> {
792    solana_keypair::read_keypair_file(path)
793        .map_err(|_| anyhow!("Unable to read keypair file ({path})"))
794}
795
/// Format lamports as SOL with trailing zeros removed.
///
/// Uses exact integer arithmetic (1 SOL = 1_000_000_000 lamports) so all
/// nine decimal places are preserved. The previous float-based `{:.8}`
/// formatting silently dropped the ninth decimal place (1 lamport rendered
/// as "0 SOL") and lost precision for balances above 2^53 lamports.
fn format_sol(lamports: u64) -> String {
    const LAMPORTS_PER_SOL: u64 = 1_000_000_000;
    let whole = lamports / LAMPORTS_PER_SOL;
    let frac = lamports % LAMPORTS_PER_SOL;
    if frac == 0 {
        // No fractional part: print the integer amount only.
        format!("{whole} SOL")
    } else {
        // Zero-pad the fractional lamports to nine digits, then strip the
        // trailing zeros so e.g. 1.500000000 prints as "1.5".
        let frac = format!("{frac:09}");
        format!("{whole}.{} SOL", frac.trim_end_matches('0'))
    }
}
805
806/// Get cluster URL and wallet path from Anchor config, CLI overrides, or Solana CLI config
807fn get_cluster_and_wallet(cfg_override: &ConfigOverride) -> Result<(String, String)> {
808    // Try to get from Anchor workspace config first
809    if let Ok(Some(cfg)) = Config::discover(cfg_override) {
810        return Ok((
811            cfg.provider.cluster.url().to_string(),
812            cfg.provider.wallet.to_string(),
813        ));
814    }
815
816    // Try to load Solana CLI config
817    let (cluster_url, wallet_path) =
818        if let Some(config_file) = solana_cli_config::CONFIG_FILE.as_ref() {
819            match SolanaCliConfig::load(config_file) {
820                Ok(cli_config) => (
821                    cli_config.json_rpc_url.clone(),
822                    cli_config.keypair_path.clone(),
823                ),
824                Err(_) => {
825                    // Fallback to defaults if Solana CLI config doesn't exist
826                    (
827                        "https://api.mainnet-beta.solana.com".to_string(),
828                        dirs::home_dir()
829                            .map(|home| {
830                                home.join(".config/solana/id.json")
831                                    .to_string_lossy()
832                                    .to_string()
833                            })
834                            .unwrap_or_else(|| "~/.config/solana/id.json".to_string()),
835                    )
836                }
837            }
838        } else {
839            // If CONFIG_FILE is None, use defaults
840            (
841                "https://api.mainnet-beta.solana.com".to_string(),
842                dirs::home_dir()
843                    .map(|home| {
844                        home.join(".config/solana/id.json")
845                            .to_string_lossy()
846                            .to_string()
847                    })
848                    .unwrap_or_else(|| "~/.config/solana/id.json".to_string()),
849            )
850        };
851
852    // Apply cluster override if provided
853    let final_cluster = if let Some(cluster) = &cfg_override.cluster {
854        cluster.url().to_string()
855    } else {
856        cluster_url
857    };
858
859    Ok((final_cluster, wallet_path))
860}
861
862/// Get the recommended priority fee from the RPC client, falling back to 0 if unavailable
863pub fn get_recommended_micro_lamport_fee(client: &RpcClient) -> u64 {
864    let mut fees = match client.get_recent_prioritization_fees(&[]) {
865        // Fees may be empty or query may fail, e.g. on localnet
866        Err(e) => {
867            eprintln!("Warning: failed to fetch prioritization fees, defaulting to 0: {e}");
868            return 0;
869        }
870        Ok(f) if f.is_empty() => {
871            return 0;
872        }
873        Ok(f) => f,
874    };
875
876    // Get the median fee from the most recent 150 slots' prioritization fee
877    fees.sort_unstable_by_key(|fee| fee.prioritization_fee);
878    let median_index = fees.len() / 2;
879
880    if fees.len() % 2 == 0 {
881        (fees[median_index - 1].prioritization_fee + fees[median_index].prioritization_fee) / 2
882    } else {
883        fees[median_index].prioritization_fee
884    }
885}
886
887/// Prepend a compute unit ix, if the priority fee is greater than 0.
888pub fn prepend_compute_unit_ix(
889    instructions: Vec<Instruction>,
890    client: &RpcClient,
891    priority_fee: Option<u64>,
892) -> Vec<Instruction> {
893    let priority_fee = priority_fee.unwrap_or_else(|| get_recommended_micro_lamport_fee(client));
894
895    if priority_fee > 0 {
896        let mut instructions_appended = instructions.clone();
897        instructions_appended.insert(
898            0,
899            ComputeBudgetInstruction::set_compute_unit_price(priority_fee),
900        );
901        instructions_appended
902    } else {
903        instructions
904    }
905}
906
907pub fn entry(opts: Opts) -> Result<()> {
908    let restore_cbs = override_toolchain(&opts.cfg_override)?;
909    let result = process_command(opts);
910    restore_toolchain(restore_cbs)?;
911
912    result
913}
914
/// Functions to restore toolchain entries.
///
/// Collected by `override_toolchain` and executed in order by
/// `restore_toolchain` once the command finishes.
type RestoreToolchainCallbacks = Vec<Box<dyn FnOnce() -> Result<()>>>;
917
/// Override the toolchain from `Anchor.toml`.
///
/// Returns the previous versions to restore back to.
fn override_toolchain(cfg_override: &ConfigOverride) -> Result<RestoreToolchainCallbacks> {
    let mut restore_cbs: RestoreToolchainCallbacks = vec![];

    let cfg = Config::discover(cfg_override)?;
    if let Some(cfg) = cfg {
        // Extract the first `major.minor.rest` token from arbitrary version
        // output, e.g. "solana-cli 1.18.17 (src:...)" -> "1.18.17".
        fn parse_version(text: &str) -> Option<String> {
            Some(
                Regex::new(r"(\d+\.\d+\.\S+)")
                    .unwrap()
                    .captures_iter(text)
                    .next()?
                    .get(0)?
                    .as_str()
                    .to_string(),
            )
        }

        // Run `<cmd_name> --version` and parse the version it reports.
        fn get_current_version(cmd_name: &str) -> Result<String> {
            let output = std::process::Command::new(cmd_name)
                .arg("--version")
                .output()?;
            if !output.status.success() {
                return Err(anyhow!("Failed to run `{cmd_name} --version`"));
            }

            let output_version = std::str::from_utf8(&output.stdout)?;
            parse_version(output_version)
                .ok_or_else(|| anyhow!("Failed to parse the version of `{cmd_name}`"))
        }

        if let Some(solana_version) = &cfg.toolchain.solana_version {
            let current_version = get_current_version("solana")?;
            if solana_version != &current_version {
                // We are overriding with `solana-install` command instead of using the binaries
                // from `~/.local/share/solana/install/releases` because we use multiple Solana
                // binaries in various commands.
                //
                // Returns Ok(true) when the switch succeeded, Ok(false) when
                // the installer ran but reported failure.
                fn override_solana_version(version: String) -> Result<bool> {
                    // There is a deprecation warning message starting with `1.18.19` which causes
                    // parsing problems https://github.com/solana-foundation/anchor/issues/3147
                    let (cmd_name, domain) =
                        if Version::parse(&version)? < Version::parse("1.18.19")? {
                            ("solana-install", "solana.com")
                        } else {
                            ("agave-install", "anza.xyz")
                        };

                    // Install the command if it's not installed
                    if get_current_version(cmd_name).is_err() {
                        // `solana-install` and `agave-install` are not usable at the same time i.e.
                        // using one of them makes the other unusable with the default installation,
                        // causing the installation process to run each time users switch between
                        // `agave` supported versions. For example, if the user's active Solana
                        // version is `1.18.17`, and he specifies `solana_version = "2.0.6"`, this
                        // code path will run each time an Anchor command gets executed.
                        eprintln!(
                            "Command not installed: `{cmd_name}`. \
                            See https://github.com/anza-xyz/agave/wiki/Agave-Transition, \
                            installing..."
                        );
                        // Download the install script and pipe it to `sh`.
                        let install_script = std::process::Command::new("curl")
                            .args([
                                "-sSfL",
                                &format!("https://release.{domain}/v{version}/install"),
                            ])
                            .output()?;
                        let is_successful = std::process::Command::new("sh")
                            .args(["-c", std::str::from_utf8(&install_script.stdout)?])
                            .spawn()?
                            .wait_with_output()?
                            .status
                            .success();
                        if !is_successful {
                            return Err(anyhow!("Failed to install `{cmd_name}`"));
                        }
                    }

                    let output = std::process::Command::new(cmd_name).arg("list").output()?;
                    if !output.status.success() {
                        return Err(anyhow!("Failed to list installed `solana` versions"));
                    }

                    // Hide the installation progress if the version is already installed
                    let is_installed = std::str::from_utf8(&output.stdout)?
                        .lines()
                        .filter_map(parse_version)
                        .any(|line_version| line_version == version);
                    let (stderr, stdout) = if is_installed {
                        (Stdio::null(), Stdio::null())
                    } else {
                        (Stdio::inherit(), Stdio::inherit())
                    };

                    std::process::Command::new(cmd_name)
                        .arg("init")
                        .arg(&version)
                        .stderr(stderr)
                        .stdout(stdout)
                        .spawn()?
                        .wait()
                        .map(|status| status.success())
                        .map_err(|err| anyhow!("Failed to run `{cmd_name}` command: {err}"))
                }

                // On success, register a callback that switches back to the
                // previously active version after the command completes.
                match override_solana_version(solana_version.to_owned())? {
                    true => restore_cbs.push(Box::new(|| {
                        match override_solana_version(current_version)? {
                            true => Ok(()),
                            false => Err(anyhow!("Failed to restore `solana` version")),
                        }
                    })),
                    false => eprintln!(
                        "Failed to override `solana` version to {solana_version}, using \
                         {current_version} instead"
                    ),
                }
            }
        }

        // Anchor version override should be handled last
        if let Some(anchor_version) = &cfg.toolchain.anchor_version {
            // Anchor binary name prefix(applies to binaries that are installed via `avm`)
            const ANCHOR_BINARY_PREFIX: &str = "anchor-";

            // Get the current version from the executing binary name if possible because commit
            // based toolchain overrides do not have version information.
            let current_version = std::env::args()
                .next()
                .expect("First arg should exist")
                .parse::<PathBuf>()?
                .file_name()
                .and_then(|name| name.to_str())
                .expect("File name should be valid Unicode")
                .split_once(ANCHOR_BINARY_PREFIX)
                .map(|(_, version)| version)
                .unwrap_or(VERSION)
                .to_owned();
            if anchor_version != &current_version {
                let binary_path = home_dir()
                    .unwrap()
                    .join(".avm")
                    .join("bin")
                    .join(format!("{ANCHOR_BINARY_PREFIX}{anchor_version}"));

                if !binary_path.exists() {
                    eprintln!(
                        "`anchor` {anchor_version} is not installed with `avm`. Installing...\n"
                    );

                    if let Err(e) = install_with_avm(anchor_version, false) {
                        eprintln!(
                            "Failed to install `anchor`: {e}, using {current_version} instead"
                        );
                        return Ok(restore_cbs);
                    }
                }

                // Delegate the entire invocation to the requested `anchor`
                // binary, restore the toolchain, and exit with its status.
                let exit_code = std::process::Command::new(binary_path)
                    .args(std::env::args_os().skip(1))
                    .spawn()?
                    .wait()?
                    .code()
                    .unwrap_or(1);
                restore_toolchain(restore_cbs)?;
                std::process::exit(exit_code);
            }
        }
    }

    Ok(restore_cbs)
}
1091
1092/// Installs Anchor using AVM, passing `--force` (and optionally) installing
1093/// `solana-verify`.
1094fn install_with_avm(version: &str, verify: bool) -> Result<()> {
1095    let mut cmd = std::process::Command::new("avm");
1096    cmd.arg("install");
1097    cmd.arg(version);
1098    cmd.arg("--force");
1099    if verify {
1100        cmd.arg("--verify");
1101    }
1102    let status = cmd.status().context("running AVM")?;
1103    if !status.success() {
1104        bail!("failed to install `anchor` {version} with avm");
1105    }
1106    Ok(())
1107}
1108
1109/// Restore toolchain to how it was before the command was run.
1110fn restore_toolchain(restore_cbs: RestoreToolchainCallbacks) -> Result<()> {
1111    for restore_toolchain in restore_cbs {
1112        if let Err(e) = restore_toolchain() {
1113            eprintln!("Toolchain error: {e}");
1114        }
1115    }
1116
1117    Ok(())
1118}
1119
1120/// Get the system's default license - what 'npm init' would use.
1121fn get_npm_init_license() -> Result<String> {
1122    let npm_init_license_output = std::process::Command::new("npm")
1123        .arg("config")
1124        .arg("get")
1125        .arg("init-license")
1126        .output()?;
1127
1128    if !npm_init_license_output.status.success() {
1129        return Err(anyhow!("Failed to get npm init license"));
1130    }
1131
1132    let license = String::from_utf8(npm_init_license_output.stdout)?;
1133    Ok(license.trim().to_string())
1134}
1135
/// Dispatch the parsed CLI command to its handler.
///
/// Thin routing layer: each arm destructures the command's options and
/// forwards them (plus the shared `cfg_override`) to the matching function.
fn process_command(opts: Opts) -> Result<()> {
    match opts.command {
        Command::Init {
            name,
            javascript,
            no_install,
            package_manager,
            no_git,
            template,
            test_template,
            force,
            install_agent_skills,
        } => init(
            &opts.cfg_override,
            name,
            javascript,
            no_install,
            package_manager,
            no_git,
            template,
            test_template,
            force,
            install_agent_skills,
        ),
        Command::New {
            name,
            template,
            force,
        } => new(&opts.cfg_override, name, template, force),
        Command::Build {
            no_idl,
            idl,
            idl_ts,
            verifiable,
            program_name,
            solana_version,
            docker_image,
            bootstrap,
            cargo_args,
            env,
            skip_lint,
            ignore_keys,
            no_docs,
        } => build(
            &opts.cfg_override,
            no_idl,
            idl,
            idl_ts,
            verifiable,
            skip_lint,
            ignore_keys,
            program_name,
            solana_version,
            docker_image,
            bootstrap,
            None,
            None,
            env,
            cargo_args,
            no_docs,
        ),
        Command::Verify {
            program_id,
            repo_url,
            commit_hash,
            current_dir,
            program_name,
            args,
        } => verify(
            program_id,
            repo_url,
            commit_hash,
            current_dir,
            program_name,
            args,
        ),
        Command::Clean => clean(&opts.cfg_override),
        // Deprecated top-level alias: warn, then forward to the same handler
        // as `anchor program deploy`.
        #[allow(deprecated)]
        Command::Deploy {
            program_name,
            program_keypair,
            verifiable,
            no_idl,
            solana_args,
        } => {
            eprintln!(
                "Warning: 'anchor deploy' is deprecated. Use 'anchor program deploy' instead."
            );
            deploy(
                &opts.cfg_override,
                program_name,
                program_keypair,
                verifiable,
                no_idl,
                solana_args,
            )
        }
        Command::Expand {
            program_name,
            cargo_args,
        } => expand(&opts.cfg_override, program_name, &cargo_args),
        // Deprecated top-level alias: warn, then forward to the same handler
        // as `anchor program upgrade`.
        #[allow(deprecated)]
        Command::Upgrade {
            program_id,
            program_filepath,
            max_retries,
            solana_args,
        } => {
            eprintln!(
                "Warning: 'anchor upgrade' is deprecated. Use 'anchor program upgrade' instead."
            );
            upgrade(
                &opts.cfg_override,
                program_id,
                program_filepath,
                max_retries,
                solana_args,
            )
        }
        Command::Idl { subcmd } => idl(&opts.cfg_override, subcmd),
        Command::Migrate => migrate(&opts.cfg_override),
        Command::Test {
            program_name,
            skip_deploy,
            skip_local_validator,
            skip_build,
            no_idl,
            detach,
            run,
            validator,
            args,
            env,
            cargo_args,
            skip_lint,
        } => test(
            &opts.cfg_override,
            program_name,
            skip_deploy,
            skip_local_validator,
            skip_build,
            skip_lint,
            no_idl,
            detach,
            run,
            validator,
            args,
            env,
            cargo_args,
        ),
        Command::Airdrop { amount, pubkey } => airdrop(&opts.cfg_override, amount, pubkey),
        Command::Cluster { subcmd } => cluster(subcmd),
        Command::Config { subcmd } => config_cmd(&opts.cfg_override, subcmd),
        Command::Shell => shell(&opts.cfg_override),
        Command::Run {
            script,
            script_args,
        } => run(&opts.cfg_override, script, script_args),
        Command::Keys { subcmd } => keys(&opts.cfg_override, subcmd),
        Command::Localnet {
            skip_build,
            skip_deploy,
            skip_lint,
            ignore_keys,
            validator,
            env,
            cargo_args,
        } => localnet(
            &opts.cfg_override,
            skip_build,
            skip_deploy,
            skip_lint,
            ignore_keys,
            validator,
            env,
            cargo_args,
        ),
        Command::Account {
            account_type,
            address,
            idl,
        } => account(&opts.cfg_override, account_type, address, idl),
        // Shell completion generation is handled inline; everything else is
        // delegated to a dedicated function or submodule.
        Command::Completions { shell } => {
            clap_complete::generate(
                shell,
                &mut Opts::command(),
                "anchor",
                &mut std::io::stdout(),
            );
            Ok(())
        }
        Command::Address => address(&opts.cfg_override),
        Command::Balance { pubkey, lamports } => balance(&opts.cfg_override, pubkey, lamports),
        Command::Epoch => epoch(&opts.cfg_override),
        Command::EpochInfo => epoch_info(&opts.cfg_override),
        Command::Logs {
            include_votes,
            address,
        } => logs_subscribe(&opts.cfg_override, include_votes, address),
        Command::ShowAccount { cmd } => account::show_account(&opts.cfg_override, cmd),
        Command::Keygen { subcmd } => keygen::keygen(&opts.cfg_override, subcmd),
        Command::Program { subcmd } => program::program(&opts.cfg_override, subcmd),
    }
}
1339
1340#[allow(clippy::too_many_arguments)]
1341fn init(
1342    cfg_override: &ConfigOverride,
1343    name: String,
1344    javascript: bool,
1345    no_install: bool,
1346    package_manager: PackageManager,
1347    no_git: bool,
1348    template: ProgramTemplate,
1349    test_template: TestTemplate,
1350    force: bool,
1351    install_agent_skills: bool,
1352) -> Result<()> {
1353    if !force && Config::discover(cfg_override)?.is_some() {
1354        return Err(anyhow!("Workspace already initialized"));
1355    }
1356
1357    // We need to format different cases for the dir and the name
1358    let rust_name = name.to_snake_case();
1359    let project_name = if name == rust_name {
1360        rust_name.clone()
1361    } else {
1362        name.to_kebab_case()
1363    };
1364
1365    // Additional keywords that have not been added to the `syn` crate as reserved words
1366    // https://github.com/dtolnay/syn/pull/1098
1367    let extra_keywords = ["async", "await", "try"];
1368    // Anchor converts to snake case before writing the program name
1369    if syn::parse_str::<syn::Ident>(&rust_name).is_err()
1370        || extra_keywords.contains(&rust_name.as_str())
1371    {
1372        return Err(anyhow!(
1373            "Anchor workspace name must be a valid Rust identifier. It may not be a Rust reserved word, start with a digit, or include certain disallowed characters. See https://doc.rust-lang.org/reference/identifiers.html for more detail.",
1374        ));
1375    }
1376
1377    if force {
1378        fs::create_dir_all(&project_name)?;
1379    } else {
1380        fs::create_dir(&project_name)?;
1381    }
1382    std::env::set_current_dir(&project_name)?;
1383    fs::create_dir_all("app")?;
1384
1385    let mut cfg = Config::default();
1386
1387    let test_script = test_template.get_test_script(javascript, &package_manager);
1388    cfg.scripts.insert("test".to_owned(), test_script);
1389
1390    let package_manager_cmd = package_manager.to_string();
1391    cfg.toolchain.package_manager = Some(package_manager);
1392
1393    let mut localnet = BTreeMap::new();
1394    let program_id = rust_template::get_or_create_program_id(&rust_name);
1395    localnet.insert(
1396        rust_name,
1397        ProgramDeployment {
1398            address: program_id,
1399            path: None,
1400            idl: None,
1401        },
1402    );
1403    cfg.programs.insert(Cluster::Localnet, localnet);
1404    let toml = cfg.to_string();
1405    fs::write("Anchor.toml", toml)?;
1406
1407    // Initialize .gitignore file
1408    fs::write(".gitignore", rust_template::git_ignore())?;
1409
1410    // Initialize .prettierignore file
1411    fs::write(".prettierignore", rust_template::prettier_ignore())?;
1412
1413    // Remove the default program if `--force` is passed
1414    if force {
1415        fs::remove_dir_all(
1416            std::env::current_dir()?
1417                .join("programs")
1418                .join(&project_name),
1419        )?;
1420    }
1421
1422    // Build the program.
1423    rust_template::create_program(&project_name, template, Some(&test_template))?;
1424
1425    // Build the migrations directory.
1426    let migrations_path = Path::new("migrations");
1427    fs::create_dir_all(migrations_path)?;
1428
1429    let license = get_npm_init_license()?;
1430
1431    let jest = TestTemplate::Jest == test_template;
1432    if javascript {
1433        // Build javascript config
1434        let mut package_json = File::create("package.json")?;
1435        package_json.write_all(rust_template::package_json(jest, license).as_bytes())?;
1436
1437        let mut deploy = File::create(migrations_path.join("deploy.js"))?;
1438        deploy.write_all(rust_template::deploy_script().as_bytes())?;
1439    } else {
1440        // Build typescript config
1441        let mut ts_config = File::create("tsconfig.json")?;
1442        ts_config.write_all(rust_template::ts_config(jest).as_bytes())?;
1443
1444        let mut ts_package_json = File::create("package.json")?;
1445        ts_package_json.write_all(rust_template::ts_package_json(jest, license).as_bytes())?;
1446
1447        let mut deploy = File::create(migrations_path.join("deploy.ts"))?;
1448        deploy.write_all(rust_template::ts_deploy_script().as_bytes())?;
1449    }
1450
1451    test_template.create_test_files(&project_name, javascript, &program_id.to_string())?;
1452
1453    if !no_install {
1454        let package_manager_result = install_node_modules(&package_manager_cmd)?;
1455
1456        if !package_manager_result.status.success() && package_manager_cmd != "npm" {
1457            println!("Failed {package_manager_cmd} install will attempt to npm install");
1458            install_node_modules("npm")?;
1459        } else {
1460            eprintln!("Failed to install node modules");
1461        }
1462    }
1463
1464    if !no_git {
1465        let git_result = std::process::Command::new("git")
1466            .arg("init")
1467            .stdout(Stdio::inherit())
1468            .stderr(Stdio::inherit())
1469            .output()
1470            .map_err(|e| anyhow::format_err!("git init failed: {}", e))?;
1471        if !git_result.status.success() {
1472            eprintln!("Failed to automatically initialize a new git repository");
1473        }
1474    }
1475
1476    if install_agent_skills {
1477        install_solana_skill();
1478    }
1479
1480    println!("{project_name} initialized");
1481
1482    Ok(())
1483}
1484
1485fn install_solana_skill() {
1486    const SKILL_REPO: &str = "https://github.com/solana-foundation/solana-dev-skill";
1487    const SKILL_NAME: &str = "solana-dev";
1488
1489    // Skip if globally installed (active across all projects already)
1490    let global_path = home_dir()
1491        .unwrap_or_default()
1492        .join(".agents")
1493        .join("skills")
1494        .join(SKILL_NAME);
1495    if global_path.exists() {
1496        return;
1497    }
1498
1499    // Skip if already project-scoped (could be anchor init --force on existing folder)
1500    let project_path = Path::new(".agents").join("skills").join(SKILL_NAME);
1501    if project_path.exists() {
1502        return;
1503    }
1504
1505    println!("Installing Solana dev skill for Agents from {SKILL_REPO}");
1506
1507    let status = std::process::Command::new("npx")
1508        .args([
1509            "--yes",
1510            "skills@1.4.4",
1511            "add",
1512            SKILL_REPO,
1513            "--skill",
1514            "*",
1515            "-y",
1516        ])
1517        .stdout(Stdio::inherit())
1518        .stderr(Stdio::inherit())
1519        .status();
1520
1521    match status {
1522        Ok(s) if s.success() => {
1523            println!("Solana dev skill installed successfully");
1524        }
1525        _ => {
1526            eprintln!(
1527                "Warning: Failed to install Solana dev skill. Install manually with:\n  npx \
1528                 skills add {SKILL_REPO}"
1529            );
1530        }
1531    }
1532}
1533
1534fn install_node_modules(cmd: &str) -> Result<std::process::Output> {
1535    if cfg!(target_os = "windows") {
1536        std::process::Command::new("cmd")
1537            .arg(format!("/C {cmd} install"))
1538            .stdout(Stdio::inherit())
1539            .stderr(Stdio::inherit())
1540            .output()
1541            .map_err(|e| anyhow::format_err!("{} install failed: {}", cmd, e))
1542    } else {
1543        std::process::Command::new(cmd)
1544            .arg("install")
1545            .stdout(Stdio::inherit())
1546            .stderr(Stdio::inherit())
1547            .output()
1548            .map_err(|e| anyhow::format_err!("{} install failed: {}", cmd, e))
1549    }
1550}
1551
1552// Creates a new program crate in the `programs/<name>` directory.
1553fn new(
1554    cfg_override: &ConfigOverride,
1555    name: String,
1556    template: ProgramTemplate,
1557    force: bool,
1558) -> Result<()> {
1559    with_workspace(cfg_override, |cfg| -> Result<()> {
1560        match cfg.path().parent() {
1561            None => {
1562                println!("Unable to make new program");
1563            }
1564            Some(parent) => {
1565                std::env::set_current_dir(parent)?;
1566
1567                let cluster = cfg.provider.cluster.clone();
1568                let programs = cfg.programs.entry(cluster).or_default();
1569                if programs.contains_key(&name) {
1570                    if !force {
1571                        return Err(anyhow!("Program already exists"));
1572                    }
1573
1574                    // Delete all files within the program folder
1575                    fs::remove_dir_all(std::env::current_dir()?.join("programs").join(&name))?;
1576                }
1577
1578                rust_template::create_program(&name, template, None)?;
1579
1580                programs.insert(
1581                    name.clone(),
1582                    ProgramDeployment {
1583                        address: rust_template::get_or_create_program_id(&name),
1584                        path: None,
1585                        idl: None,
1586                    },
1587                );
1588
1589                let toml = cfg.to_string();
1590                fs::write("Anchor.toml", toml)?;
1591
1592                println!("Created new program.");
1593            }
1594        };
1595        Ok(())
1596    })?
1597}
1598
/// Array of (path, content) tuples.
pub type Files = Vec<(PathBuf, String)>;
1601
1602/// Create files from the given (path, content) tuple array.
1603///
1604/// # Example
1605///
1606/// ```ignore
1607/// crate_files(vec![("programs/my_program/src/lib.rs".into(), "// Content".into())])?;
1608/// ```
1609pub fn create_files(files: &Files) -> Result<()> {
1610    for (path, content) in files {
1611        let path = path
1612            .display()
1613            .to_string()
1614            .replace('/', std::path::MAIN_SEPARATOR_STR);
1615        let path = Path::new(&path);
1616        if path.exists() {
1617            continue;
1618        }
1619
1620        match path.extension() {
1621            Some(_) => {
1622                fs::create_dir_all(path.parent().unwrap())?;
1623                fs::write(path, content)?;
1624            }
1625            None => fs::create_dir_all(path)?,
1626        }
1627    }
1628
1629    Ok(())
1630}
1631
1632/// Override or create files from the given (path, content) tuple array.
1633///
1634/// # Example
1635///
1636/// ```ignore
1637/// override_or_create_files(vec![("programs/my_program/src/lib.rs".into(), "// Content".into())])?;
1638/// ```
1639pub fn override_or_create_files(files: &Files) -> Result<()> {
1640    for (path, content) in files {
1641        let path = Path::new(path);
1642        if path.exists() {
1643            let mut f = fs::OpenOptions::new()
1644                .write(true)
1645                .truncate(true)
1646                .open(path)?;
1647            f.write_all(content.as_bytes())?;
1648            f.flush()?;
1649        } else {
1650            fs::create_dir_all(path.parent().unwrap())?;
1651            fs::write(path, content)?;
1652        }
1653    }
1654
1655    Ok(())
1656}
1657
1658pub fn expand(
1659    cfg_override: &ConfigOverride,
1660    program_name: Option<String>,
1661    cargo_args: &[String],
1662) -> Result<()> {
1663    // Change to the workspace member directory, if needed.
1664    if let Some(program_name) = program_name.as_ref() {
1665        cd_member(cfg_override, program_name)?;
1666    }
1667
1668    let workspace_cfg = Config::discover(cfg_override)?
1669        .ok_or_else(|| anyhow!("The 'anchor expand' command requires an Anchor workspace."))?;
1670    let cfg_parent = workspace_cfg.path().parent().expect("Invalid Anchor.toml");
1671    let cargo = Manifest::discover()?;
1672
1673    let expansions_path = cfg_parent.join(".anchor").join("expanded-macros");
1674    fs::create_dir_all(&expansions_path)?;
1675
1676    match cargo {
1677        // No Cargo.toml found, expand entire workspace
1678        None => expand_all(&workspace_cfg, expansions_path, cargo_args),
1679        // Cargo.toml is at root of workspace, expand entire workspace
1680        Some(cargo) if cargo.path().parent() == workspace_cfg.path().parent() => {
1681            expand_all(&workspace_cfg, expansions_path, cargo_args)
1682        }
1683        // Reaching this arm means Cargo.toml belongs to a single package. Expand it.
1684        Some(cargo) => expand_program(
1685            // If we found Cargo.toml, it must be in a directory so unwrap is safe
1686            cargo.path().parent().unwrap().to_path_buf(),
1687            expansions_path,
1688            cargo_args,
1689        ),
1690    }
1691}
1692
1693fn expand_all(
1694    workspace_cfg: &WithPath<Config>,
1695    expansions_path: PathBuf,
1696    cargo_args: &[String],
1697) -> Result<()> {
1698    let cur_dir = std::env::current_dir()?;
1699    for p in workspace_cfg.get_rust_program_list()? {
1700        expand_program(p, expansions_path.clone(), cargo_args)?;
1701    }
1702    std::env::set_current_dir(cur_dir)?;
1703    Ok(())
1704}
1705
1706fn expand_program(
1707    program_path: PathBuf,
1708    expansions_path: PathBuf,
1709    cargo_args: &[String],
1710) -> Result<()> {
1711    let cargo = Manifest::from_path(program_path.join("Cargo.toml"))
1712        .map_err(|_| anyhow!("Could not find Cargo.toml for program"))?;
1713
1714    let target_dir_arg = {
1715        let mut target_dir_arg = OsString::from("--target-dir=");
1716        target_dir_arg.push(expansions_path.join("expand-target"));
1717        target_dir_arg
1718    };
1719
1720    let package_name = &cargo
1721        .package
1722        .as_ref()
1723        .ok_or_else(|| anyhow!("Cargo config is missing a package"))?
1724        .name;
1725    let program_expansions_path = expansions_path.join(package_name);
1726    fs::create_dir_all(&program_expansions_path)?;
1727
1728    let exit = std::process::Command::new("cargo")
1729        .arg("expand")
1730        .arg(target_dir_arg)
1731        .arg(format!("--package={package_name}"))
1732        .args(cargo_args)
1733        .stderr(Stdio::inherit())
1734        .output()
1735        .map_err(|e| anyhow::format_err!("{}", e))?;
1736    if !exit.status.success() {
1737        eprintln!("'anchor expand' failed. Perhaps you have not installed 'cargo-expand'? https://github.com/dtolnay/cargo-expand#installation");
1738        std::process::exit(exit.status.code().unwrap_or(1));
1739    }
1740
1741    let version = cargo.version();
1742    let time = chrono::Utc::now().to_string().replace(' ', "_");
1743    let file_path = program_expansions_path.join(format!("{package_name}-{version}-{time}.rs"));
1744    fs::write(&file_path, &exit.stdout).map_err(|e| anyhow::format_err!("{}", e))?;
1745
1746    println!(
1747        "Expanded {} into file {}\n",
1748        package_name,
1749        file_path.to_string_lossy()
1750    );
1751    Ok(())
1752}
1753
1754#[allow(clippy::too_many_arguments)]
1755pub fn build(
1756    cfg_override: &ConfigOverride,
1757    no_idl: bool,
1758    idl: Option<String>,
1759    idl_ts: Option<String>,
1760    verifiable: bool,
1761    skip_lint: bool,
1762    ignore_keys: bool,
1763    program_name: Option<String>,
1764    solana_version: Option<String>,
1765    docker_image: Option<String>,
1766    bootstrap: BootstrapMode,
1767    stdout: Option<File>, // Used for the package registry server.
1768    stderr: Option<File>, // Used for the package registry server.
1769    env_vars: Vec<String>,
1770    cargo_args: Vec<String>,
1771    no_docs: bool,
1772) -> Result<()> {
1773    // Change to the workspace member directory, if needed.
1774    if let Some(program_name) = program_name.as_ref() {
1775        cd_member(cfg_override, program_name)?;
1776    }
1777    let cfg = Config::discover(cfg_override)?
1778        .ok_or_else(|| anyhow!("The 'anchor build' command requires an Anchor workspace."))?;
1779    let cfg_parent = cfg.path().parent().expect("Invalid Anchor.toml");
1780
1781    // Require overflow checks
1782    let workspace_cargo_toml_path = cfg_parent.join("Cargo.toml");
1783    if workspace_cargo_toml_path.exists() {
1784        check_overflow(workspace_cargo_toml_path)?;
1785    }
1786
1787    // Check whether there is a mismatch between CLI and crate/package versions
1788    check_anchor_version(&cfg).ok();
1789    check_deps(&cfg).ok();
1790
1791    // Check for program ID mismatches before building (skip if --ignore-keys is used), Always skipped in anchor test
1792    if !ignore_keys {
1793        check_program_id_mismatch(&cfg, program_name.clone())?;
1794    }
1795
1796    let idl_out = match idl {
1797        Some(idl) => Some(PathBuf::from(idl)),
1798        None => Some(cfg_parent.join("target").join("idl")),
1799    };
1800    fs::create_dir_all(idl_out.as_ref().unwrap())?;
1801
1802    let idl_ts_out = match idl_ts {
1803        Some(idl_ts) => Some(PathBuf::from(idl_ts)),
1804        None => Some(cfg_parent.join("target").join("types")),
1805    };
1806    fs::create_dir_all(idl_ts_out.as_ref().unwrap())?;
1807
1808    if !cfg.workspace.types.is_empty() {
1809        fs::create_dir_all(cfg_parent.join(&cfg.workspace.types))?;
1810    };
1811
1812    cfg.run_hooks(HookType::PreBuild)?;
1813
1814    let cargo = Manifest::discover()?;
1815    let build_config = BuildConfig {
1816        verifiable,
1817        solana_version: solana_version.or_else(|| cfg.toolchain.solana_version.clone()),
1818        docker_image: docker_image.unwrap_or_else(|| cfg.docker()),
1819        bootstrap,
1820    };
1821    match cargo {
1822        // No Cargo.toml so build the entire workspace.
1823        None => build_all(
1824            &cfg,
1825            cfg.path(),
1826            no_idl,
1827            idl_out,
1828            idl_ts_out,
1829            &build_config,
1830            stdout,
1831            stderr,
1832            env_vars,
1833            cargo_args,
1834            skip_lint,
1835            no_docs,
1836        )?,
1837        // If the Cargo.toml is at the root, build the entire workspace.
1838        Some(cargo) if cargo.path().parent() == cfg.path().parent() => build_all(
1839            &cfg,
1840            cfg.path(),
1841            no_idl,
1842            idl_out,
1843            idl_ts_out,
1844            &build_config,
1845            stdout,
1846            stderr,
1847            env_vars,
1848            cargo_args,
1849            skip_lint,
1850            no_docs,
1851        )?,
1852        // Cargo.toml represents a single package. Build it.
1853        Some(cargo) => build_rust_cwd(
1854            &cfg,
1855            cargo.path().to_path_buf(),
1856            no_idl,
1857            idl_out,
1858            idl_ts_out,
1859            &build_config,
1860            stdout,
1861            stderr,
1862            env_vars,
1863            cargo_args,
1864            skip_lint,
1865            no_docs,
1866        )?,
1867    }
1868    cfg.run_hooks(HookType::PostBuild)?;
1869
1870    set_workspace_dir_or_exit();
1871
1872    Ok(())
1873}
1874
1875#[allow(clippy::too_many_arguments)]
1876fn build_all(
1877    cfg: &WithPath<Config>,
1878    cfg_path: &Path,
1879    no_idl: bool,
1880    idl_out: Option<PathBuf>,
1881    idl_ts_out: Option<PathBuf>,
1882    build_config: &BuildConfig,
1883    stdout: Option<File>, // Used for the package registry server.
1884    stderr: Option<File>, // Used for the package registry server.
1885    env_vars: Vec<String>,
1886    cargo_args: Vec<String>,
1887    skip_lint: bool,
1888    no_docs: bool,
1889) -> Result<()> {
1890    let cur_dir = std::env::current_dir()?;
1891    let r = match cfg_path.parent() {
1892        None => Err(anyhow!("Invalid Anchor.toml at {}", cfg_path.display())),
1893        Some(_parent) => {
1894            for p in cfg.get_rust_program_list()? {
1895                build_rust_cwd(
1896                    cfg,
1897                    p.join("Cargo.toml"),
1898                    no_idl,
1899                    idl_out.clone(),
1900                    idl_ts_out.clone(),
1901                    build_config,
1902                    stdout.as_ref().map(|f| f.try_clone()).transpose()?,
1903                    stderr.as_ref().map(|f| f.try_clone()).transpose()?,
1904                    env_vars.clone(),
1905                    cargo_args.clone(),
1906                    skip_lint,
1907                    no_docs,
1908                )?;
1909            }
1910            Ok(())
1911        }
1912    };
1913    std::env::set_current_dir(cur_dir)?;
1914    r
1915}
1916
1917// Runs the build command outside of a workspace.
1918#[allow(clippy::too_many_arguments)]
1919fn build_rust_cwd(
1920    cfg: &WithPath<Config>,
1921    cargo_toml: PathBuf,
1922    no_idl: bool,
1923    idl_out: Option<PathBuf>,
1924    idl_ts_out: Option<PathBuf>,
1925    build_config: &BuildConfig,
1926    stdout: Option<File>,
1927    stderr: Option<File>,
1928    env_vars: Vec<String>,
1929    cargo_args: Vec<String>,
1930    skip_lint: bool,
1931    no_docs: bool,
1932) -> Result<()> {
1933    match cargo_toml.parent() {
1934        None => return Err(anyhow!("Unable to find parent")),
1935        Some(p) => std::env::set_current_dir(p)?,
1936    };
1937    match build_config.verifiable {
1938        false => _build_rust_cwd(
1939            cfg, no_idl, idl_out, idl_ts_out, skip_lint, no_docs, cargo_args,
1940        ),
1941        true => build_cwd_verifiable(
1942            cfg,
1943            cargo_toml,
1944            build_config,
1945            stdout,
1946            stderr,
1947            skip_lint,
1948            env_vars,
1949            cargo_args,
1950            no_docs,
1951        ),
1952    }
1953}
1954
1955// Builds an anchor program in a docker image and copies the build artifacts
1956// into the `target/` directory.
1957#[allow(clippy::too_many_arguments)]
1958fn build_cwd_verifiable(
1959    cfg: &WithPath<Config>,
1960    cargo_toml: PathBuf,
1961    build_config: &BuildConfig,
1962    stdout: Option<File>,
1963    stderr: Option<File>,
1964    skip_lint: bool,
1965    env_vars: Vec<String>,
1966    cargo_args: Vec<String>,
1967    no_docs: bool,
1968) -> Result<()> {
1969    // Create output dirs.
1970    let workspace_dir = cfg.path().parent().unwrap().canonicalize()?;
1971    let target_dir = workspace_dir.join("target");
1972    fs::create_dir_all(target_dir.join("verifiable"))?;
1973    fs::create_dir_all(target_dir.join("idl"))?;
1974    fs::create_dir_all(target_dir.join("types"))?;
1975    if !&cfg.workspace.types.is_empty() {
1976        fs::create_dir_all(workspace_dir.join(&cfg.workspace.types))?;
1977    }
1978
1979    let container_name = "anchor-program";
1980
1981    // Build the binary in docker.
1982    let result = docker_build(
1983        cfg,
1984        container_name,
1985        cargo_toml,
1986        build_config,
1987        stdout,
1988        stderr,
1989        env_vars,
1990        cargo_args.clone(),
1991    );
1992
1993    match &result {
1994        Err(e) => {
1995            eprintln!("Error during Docker build: {e:?}");
1996        }
1997        Ok(_) => {
1998            // Build the idl.
1999            println!("Extracting the IDL");
2000            let idl = generate_idl(cfg, skip_lint, no_docs, &cargo_args)?;
2001            // Write out the JSON file.
2002            println!("Writing the IDL file");
2003            let out_file = workspace_dir
2004                .join("target")
2005                .join("idl")
2006                .join(&idl.metadata.name)
2007                .with_extension("json");
2008            write_idl(&idl, OutFile::File(out_file))?;
2009
2010            // Write out the TypeScript type.
2011            println!("Writing the .ts file");
2012            let ts_file = workspace_dir
2013                .join("target")
2014                .join("types")
2015                .join(&idl.metadata.name)
2016                .with_extension("ts");
2017            fs::write(&ts_file, idl_ts(&idl)?)?;
2018
2019            // Copy out the TypeScript type.
2020            if !&cfg.workspace.types.is_empty() {
2021                fs::copy(
2022                    ts_file,
2023                    workspace_dir
2024                        .join(&cfg.workspace.types)
2025                        .join(idl.metadata.name)
2026                        .with_extension("ts"),
2027                )?;
2028            }
2029
2030            println!("Build success");
2031        }
2032    }
2033
2034    result
2035}
2036
/// Build a program inside a docker container.
///
/// Starts a detached container with the workspace mounted at `/workdir`, runs
/// the bootstrap/build steps inside it, and always tears the container down
/// afterwards — even when the build itself failed. The docker invocation is
/// order-sensitive: flags must precede the image name and the command.
#[allow(clippy::too_many_arguments)]
fn docker_build(
    cfg: &WithPath<Config>,
    container_name: &str,
    cargo_toml: PathBuf,
    build_config: &BuildConfig,
    stdout: Option<File>,
    stderr: Option<File>,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
) -> Result<()> {
    let binary_name = Manifest::from_path(&cargo_toml)?.lib_name()?;

    // Docker vars.
    let workdir = Path::new("/workdir");
    // Bind-mount the (canonicalized) workspace root into the container.
    let volume_mount = format!(
        "{}:{}",
        cfg.path().parent().unwrap().canonicalize()?.display(),
        workdir.to_str().unwrap(),
    );
    println!("Using image {:?}", build_config.docker_image);

    // Start the docker image running detached in the background.
    // Cargo output goes to a dedicated dir inside the mount so the host's
    // own target/ directory is left untouched.
    let target_dir = workdir.join("docker-target");
    println!("Run docker image");
    let exit = std::process::Command::new("docker")
        .args([
            "run",
            "-it",
            "-d",
            "--name",
            container_name,
            "--env",
            &format!(
                "CARGO_TARGET_DIR={}",
                target_dir.as_path().to_str().unwrap()
            ),
            "-v",
            &volume_mount,
            "-w",
            workdir.to_str().unwrap(),
            &build_config.docker_image,
            "bash",
        ])
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .output()
        .map_err(|e| anyhow::format_err!("Docker build failed: {}", e))?;
    if !exit.status.success() {
        return Err(anyhow!("Failed to build program"));
    }

    // Prep (toolchain bootstrap) and build; `and_then` short-circuits the
    // build when prep fails.
    let result = docker_prep(container_name, build_config).and_then(|_| {
        let cfg_parent = cfg.path().parent().unwrap();
        docker_build_bpf(
            container_name,
            cargo_toml.as_path(),
            cfg_parent,
            target_dir.as_path(),
            binary_name,
            stdout,
            stderr,
            env_vars,
            cargo_args,
        )
    });

    // Cleanup regardless of errors
    docker_cleanup(container_name, target_dir.as_path())?;

    // Done.
    result
}
2110
2111fn docker_prep(container_name: &str, build_config: &BuildConfig) -> Result<()> {
2112    // Set the solana version in the container, if given. Otherwise use the
2113    // default.
2114    match build_config.bootstrap {
2115        BootstrapMode::Debian => {
2116            // Install build requirements
2117            docker_exec(container_name, &["apt", "update"])?;
2118            docker_exec(
2119                container_name,
2120                &["apt", "install", "-y", "curl", "build-essential"],
2121            )?;
2122
2123            // Install Rust
2124            docker_exec(
2125                container_name,
2126                &["curl", "https://sh.rustup.rs", "-sfo", "rustup.sh"],
2127            )?;
2128            docker_exec(container_name, &["sh", "rustup.sh", "-y"])?;
2129            docker_exec(container_name, &["rm", "-f", "rustup.sh"])?;
2130        }
2131        BootstrapMode::None => {}
2132    }
2133
2134    if let Some(solana_version) = &build_config.solana_version {
2135        println!("Using solana version: {solana_version}");
2136
2137        // Install Solana CLI
2138        docker_exec(
2139            container_name,
2140            &[
2141                "curl",
2142                "-sSfL",
2143                &format!("https://release.anza.xyz/v{solana_version}/install",),
2144                "-o",
2145                "solana_installer.sh",
2146            ],
2147        )?;
2148        docker_exec(container_name, &["sh", "solana_installer.sh"])?;
2149        docker_exec(container_name, &["rm", "-f", "solana_installer.sh"])?;
2150    }
2151    Ok(())
2152}
2153
/// Run the SBF build inside the container and copy the resulting `.so` out
/// to `target/verifiable/` on the host.
///
/// The `docker exec` arguments are order-sensitive (env flags must precede the
/// container name) and the PATH literal must stay exactly as written, so the
/// invocation is left untouched.
#[allow(clippy::too_many_arguments)]
fn docker_build_bpf(
    container_name: &str,
    cargo_toml: &Path,
    cfg_parent: &Path,
    target_dir: &Path,
    binary_name: String,
    stdout: Option<File>,
    stderr: Option<File>,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
) -> Result<()> {
    // Manifest path relative to the workspace root, which is what is mounted
    // at the container's working directory.
    let manifest_path =
        pathdiff::diff_paths(cargo_toml.canonicalize()?, cfg_parent.canonicalize()?)
            .ok_or_else(|| anyhow!("Unable to diff paths"))?;
    println!(
        "Building {} manifest: {:?}",
        binary_name,
        manifest_path.display()
    );

    // Execute the build.
    let exit = std::process::Command::new("docker")
        .args([
            "exec",
            "--env",
            "PATH=/root/.local/share/solana/install/active_release/bin:/root/.cargo/bin:/usr/\
             local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
        ])
        // Forward each caller-supplied VAR=VALUE pair as its own --env flag.
        .args(
            env_vars
                .iter()
                .map(|x| ["--env", x.as_str()])
                .collect::<Vec<[&str; 2]>>()
                .concat(),
        )
        .args([container_name, "cargo"])
        .args(BUILD_SUBCOMMAND)
        .args(["--manifest-path", &manifest_path.display().to_string()])
        .args(cargo_args)
        // Inherit the terminal unless the caller redirected output (package
        // registry server case).
        .stdout(match stdout {
            None => Stdio::inherit(),
            Some(f) => f.into(),
        })
        .stderr(match stderr {
            None => Stdio::inherit(),
            Some(f) => f.into(),
        })
        .output()
        .map_err(|e| anyhow::format_err!("Docker build failed: {}", e))?;
    if !exit.status.success() {
        return Err(anyhow!("Failed to build program"));
    }

    // Copy the binary out of the docker image.
    println!("Copying out the build artifacts");
    let out_file = cfg_parent
        .canonicalize()?
        .join(
            Path::new("target")
                .join("verifiable")
                .join(&binary_name)
                .with_extension("so"),
        )
        .display()
        .to_string();

    // This requires the target directory of any built program to be located at
    // the root of the workspace.
    let mut bin_path = target_dir.join("deploy");
    bin_path.push(format!("{binary_name}.so"));
    // docker cp source takes the form container:path.
    let bin_artifact = format!(
        "{}:{}",
        container_name,
        bin_path.as_path().to_str().unwrap()
    );
    let exit = std::process::Command::new("docker")
        .args(["cp", &bin_artifact, &out_file])
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .output()
        .map_err(|e| anyhow::format_err!("{}", e))?;
    if !exit.status.success() {
        Err(anyhow!(
            "Failed to copy binary out of docker. Is the target directory set correctly?"
        ))
    } else {
        Ok(())
    }
}
2244
2245fn docker_cleanup(container_name: &str, target_dir: &Path) -> Result<()> {
2246    // Wipe the generated docker-target dir.
2247    println!("Cleaning up the docker target directory");
2248    docker_exec(container_name, &["rm", "-rf", target_dir.to_str().unwrap()])?;
2249
2250    // Remove the docker image.
2251    println!("Removing the docker container");
2252    let exit = std::process::Command::new("docker")
2253        .args(["rm", "-f", container_name])
2254        .stdout(Stdio::inherit())
2255        .stderr(Stdio::inherit())
2256        .output()
2257        .map_err(|e| anyhow::format_err!("{}", e))?;
2258    if !exit.status.success() {
2259        println!("Unable to remove the docker container");
2260        std::process::exit(exit.status.code().unwrap_or(1));
2261    }
2262    Ok(())
2263}
2264
2265fn docker_exec(container_name: &str, args: &[&str]) -> Result<()> {
2266    let exit = std::process::Command::new("docker")
2267        .args([&["exec", container_name], args].concat())
2268        .stdout(Stdio::inherit())
2269        .stderr(Stdio::inherit())
2270        .output()
2271        .map_err(|e| anyhow!("Failed to run command \"{:?}\": {:?}", args, e))?;
2272    if !exit.status.success() {
2273        Err(anyhow!("Failed to run command: {:?}", args))
2274    } else {
2275        Ok(())
2276    }
2277}
2278
2279#[allow(clippy::too_many_arguments)]
2280fn _build_rust_cwd(
2281    cfg: &WithPath<Config>,
2282    no_idl: bool,
2283    idl_out: Option<PathBuf>,
2284    idl_ts_out: Option<PathBuf>,
2285    skip_lint: bool,
2286    no_docs: bool,
2287    cargo_args: Vec<String>,
2288) -> Result<()> {
2289    let exit = std::process::Command::new("cargo")
2290        .args(BUILD_SUBCOMMAND)
2291        .args(cargo_args.clone())
2292        .stdout(Stdio::inherit())
2293        .stderr(Stdio::inherit())
2294        .output()
2295        .map_err(|e| anyhow::format_err!("{}", e))?;
2296    if !exit.status.success() {
2297        std::process::exit(exit.status.code().unwrap_or(1));
2298    }
2299
2300    // Generate IDL
2301    if !no_idl {
2302        let idl = generate_idl(cfg, skip_lint, no_docs, &cargo_args)?;
2303
2304        // JSON out path.
2305        let out = match idl_out {
2306            None => PathBuf::from(".")
2307                .join(&idl.metadata.name)
2308                .with_extension("json"),
2309            Some(o) => PathBuf::from(&o.join(&idl.metadata.name).with_extension("json")),
2310        };
2311        // TS out path.
2312        let ts_out = match idl_ts_out {
2313            None => PathBuf::from(".")
2314                .join(&idl.metadata.name)
2315                .with_extension("ts"),
2316            Some(o) => PathBuf::from(&o.join(&idl.metadata.name).with_extension("ts")),
2317        };
2318
2319        // Write out the JSON file.
2320        write_idl(&idl, OutFile::File(out))?;
2321        // Write out the TypeScript type.
2322        fs::write(&ts_out, idl_ts(&idl)?)?;
2323
2324        // Copy out the TypeScript type.
2325        let cfg_parent = cfg.path().parent().expect("Invalid Anchor.toml");
2326        if !&cfg.workspace.types.is_empty() {
2327            fs::copy(
2328                &ts_out,
2329                cfg_parent
2330                    .join(&cfg.workspace.types)
2331                    .join(&idl.metadata.name)
2332                    .with_extension("ts"),
2333            )?;
2334        }
2335    }
2336
2337    Ok(())
2338}
2339
/// Subcommand and any arguments to be passed to cargo:
/// `cargo build-sbf` with the platform-tools version pinned to v1.52 so
/// builds are reproducible across environments.
const BUILD_SUBCOMMAND: &[&str] = &["build-sbf", "--tools-version", "v1.52"];
2342
2343pub fn verify(
2344    program_id: Pubkey,
2345    repo_url: Option<String>,
2346    commit_hash: Option<String>,
2347    current_dir: bool,
2348    program_name: Option<String>,
2349    args: Vec<String>,
2350) -> Result<()> {
2351    let mut command_args = Vec::new();
2352
2353    match (current_dir, repo_url) {
2354        (true, _) => {
2355            let current_path = std::env::current_dir()?
2356                .to_str()
2357                .ok_or_else(|| anyhow!("Invalid current directory path"))?
2358                .to_owned();
2359            command_args.push(current_path);
2360            command_args.push("--current-dir".into());
2361        }
2362        (false, Some(url)) => {
2363            command_args.push(url);
2364        }
2365        (false, None) => {
2366            return Err(anyhow!(
2367                "You must provide either --repo-url or --current-dir"
2368            ));
2369        }
2370    }
2371
2372    if let Some(commit) = commit_hash {
2373        command_args.push("--commit-hash".into());
2374        command_args.push(commit);
2375    }
2376
2377    if let Some(name) = program_name {
2378        command_args.push("--library-name".into());
2379        command_args.push(name);
2380    }
2381
2382    command_args.push("--program-id".into());
2383    command_args.push(program_id.to_string());
2384
2385    command_args.extend(args);
2386
2387    println!("Verifying program {program_id}");
2388    let verify_path = AVM_HOME.join("bin").join("solana-verify");
2389    if !verify_path.exists() {
2390        install_with_avm(env!("CARGO_PKG_VERSION"), true)
2391            .context("installing Anchor with solana-verify")?;
2392    }
2393
2394    let status = std::process::Command::new(verify_path)
2395        .arg("verify-from-repo")
2396        .args(&command_args)
2397        .stdout(std::process::Stdio::inherit())
2398        .stderr(std::process::Stdio::inherit())
2399        .status()
2400        .with_context(|| "Failed to run `solana-verify`")?;
2401
2402    if !status.success() {
2403        return Err(anyhow!("Failed to verify program"));
2404    }
2405
2406    Ok(())
2407}
2408
2409fn cd_member(cfg_override: &ConfigOverride, program_name: &str) -> Result<()> {
2410    // Change directories to the given `program_name`, using either Anchor or Cargo workspace
2411    let programs = program::get_programs_from_workspace(cfg_override, None)?;
2412
2413    for program in programs {
2414        let cargo_toml = program.path.join("Cargo.toml");
2415        if !cargo_toml.exists() {
2416            return Err(anyhow!(
2417                "Did not find Cargo.toml at the path: {}",
2418                program.path.display()
2419            ));
2420        }
2421
2422        let manifest = Manifest::from_path(&cargo_toml)?;
2423        let pkg_name = manifest.package().name();
2424        if program_name == pkg_name || program_name == program.lib_name {
2425            std::env::set_current_dir(&program.path)?;
2426            return Ok(());
2427        }
2428    }
2429
2430    Err(anyhow!("{} is not part of the workspace", program_name,))
2431}
2432
/// Dispatch an `anchor idl` subcommand to its handler.
///
/// Several match patterns carry `#[cfg(feature = "idl-localnet-testing")]`
/// attributes, so the pattern shapes themselves change with the feature set;
/// the `allow_localnet` flag defaults to `false` when the feature is off.
fn idl(cfg_override: &ConfigOverride, subcmd: IdlCommand) -> Result<()> {
    match subcmd {
        IdlCommand::Init {
            program_id,
            filepath,
            priority_fee,
            non_canonical,
            #[cfg(feature = "idl-localnet-testing")]
            allow_localnet,
        } => {
            // Without the testing feature there is no CLI flag; hard-code
            // `false` so localnet writes stay disabled.
            #[cfg(feature = "idl-localnet-testing")]
            let allow_localnet = allow_localnet;
            #[cfg(not(feature = "idl-localnet-testing"))]
            let allow_localnet = false;
            idl_init(
                program_id,
                cfg_override,
                filepath,
                priority_fee,
                non_canonical,
                allow_localnet,
            )
        }
        IdlCommand::Upgrade {
            program_id,
            filepath,
            priority_fee,
            #[cfg(feature = "idl-localnet-testing")]
            allow_localnet,
        } => {
            // Same feature-gated default as for Init above.
            #[cfg(feature = "idl-localnet-testing")]
            let allow_localnet = allow_localnet;
            #[cfg(not(feature = "idl-localnet-testing"))]
            let allow_localnet = false;
            idl_upgrade(
                program_id,
                cfg_override,
                filepath,
                priority_fee,
                allow_localnet,
            )
        }
        IdlCommand::Build {
            program_name,
            out,
            out_ts,
            no_docs,
            skip_lint,
            cargo_args,
        } => idl_build(
            cfg_override,
            program_name,
            out,
            out_ts,
            no_docs,
            skip_lint,
            cargo_args,
        ),
        IdlCommand::Fetch {
            program_id: address,
            out,
            non_canonical,
        } => idl_fetch(cfg_override, address, out, non_canonical),
        IdlCommand::Convert {
            path,
            out,
            program_id,
        } => idl_convert(path, out, program_id),
        IdlCommand::Type { path, out } => idl_type(path, out),
        IdlCommand::Close {
            program_id,
            seed,
            priority_fee,
        } => idl_close_metadata(cfg_override, program_id, seed, priority_fee),
        IdlCommand::CreateBuffer {
            filepath,
            priority_fee,
        } => idl_create_buffer(cfg_override, filepath, priority_fee),
        IdlCommand::SetBufferAuthority {
            buffer,
            new_authority,
            priority_fee,
        } => idl_set_buffer_authority(cfg_override, buffer, new_authority, priority_fee),
        IdlCommand::WriteBuffer {
            program_id,
            buffer,
            seed,
            close_buffer,
            priority_fee,
        } => idl_write_buffer_metadata(
            cfg_override,
            program_id,
            buffer,
            seed,
            close_buffer,
            priority_fee,
        ),
    }
}
2532
2533fn idl_init(
2534    program_id: Option<Pubkey>,
2535    cfg_override: &ConfigOverride,
2536    idl_filepath: String,
2537    priority_fee: Option<u64>,
2538    non_canonical: bool,
2539    allow_localnet: bool,
2540) -> Result<()> {
2541    // Get cluster URL and wallet path from Anchor config
2542    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
2543
2544    let is_localnet = cluster_url.contains("localhost") || cluster_url.contains("127.0.0.1");
2545    if is_localnet && !allow_localnet {
2546        #[cfg(feature = "idl-localnet-testing")]
2547        println!(
2548            "Skipping IDL initialization on localnet. To deploy on localnet, use --allow-localnet"
2549        );
2550        #[cfg(not(feature = "idl-localnet-testing"))]
2551        println!("Skipping IDL initialization on localnet");
2552        return Ok(());
2553    }
2554
2555    let program_id = match program_id {
2556        Some(id) => id.to_string(),
2557        _ => {
2558            let idl = fs::read(&idl_filepath)?;
2559            let idl = convert_idl(&idl)?;
2560            idl.address
2561        }
2562    };
2563
2564    let command = metadata::IdlCommand::funded(
2565        cluster_url,
2566        wallet_path,
2567        priority_fee,
2568        metadata::FundedIdlSubcommand::Write {
2569            program_id,
2570            idl_filepath,
2571            non_canonical,
2572        },
2573    );
2574
2575    if !command.status()?.success() {
2576        return Err(anyhow!("Failed to initialize IDL"));
2577    }
2578
2579    println!("IDL initialized.");
2580    Ok(())
2581}
2582
2583// Currently identical to `idl_init`, other than not accepting `non_canonical`
2584fn idl_upgrade(
2585    program_id: Option<Pubkey>,
2586    cfg_override: &ConfigOverride,
2587    idl_filepath: String,
2588    priority_fee: Option<u64>,
2589    allow_localnet: bool,
2590) -> Result<()> {
2591    // Get cluster URL and wallet path from Anchor config
2592    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
2593
2594    let is_localnet = cluster_url.contains("localhost") || cluster_url.contains("127.0.0.1");
2595    if is_localnet && !allow_localnet {
2596        #[cfg(feature = "idl-localnet-testing")]
2597        println!("Skipping IDL upgrade on localnet. To deploy on localnet, use --allow-localnet");
2598        #[cfg(not(feature = "idl-localnet-testing"))]
2599        println!("Skipping IDL upgrade on localnet");
2600        return Ok(());
2601    }
2602
2603    let program_id = match program_id {
2604        Some(id) => id.to_string(),
2605        _ => {
2606            let idl = fs::read(&idl_filepath)?;
2607            let idl = convert_idl(&idl)?;
2608            idl.address
2609        }
2610    };
2611
2612    let command = metadata::IdlCommand::funded(
2613        cluster_url,
2614        wallet_path,
2615        priority_fee,
2616        metadata::FundedIdlSubcommand::Write {
2617            program_id,
2618            idl_filepath,
2619            non_canonical: false,
2620        },
2621    );
2622    if !command.status()?.success() {
2623        return Err(anyhow!("Failed to initialize IDL"));
2624    }
2625
2626    println!("IDL upgraded.");
2627    Ok(())
2628}
2629
2630fn idl_build(
2631    cfg_override: &ConfigOverride,
2632    program_name: Option<String>,
2633    out: Option<String>,
2634    out_ts: Option<String>,
2635    no_docs: bool,
2636    skip_lint: bool,
2637    cargo_args: Vec<String>,
2638) -> Result<()> {
2639    let cfg = Config::discover(cfg_override)?
2640        .ok_or_else(|| anyhow!("The 'anchor idl build' command requires an Anchor workspace."))?;
2641    let current_dir = std::env::current_dir()?;
2642    let program_path = match program_name {
2643        Some(name) => cfg.get_program(&name)?.path,
2644        None => {
2645            let programs = cfg.read_all_programs()?;
2646            if programs.len() == 1 {
2647                programs.into_iter().next().unwrap().path
2648            } else {
2649                programs
2650                    .into_iter()
2651                    .find(|program| program.path == current_dir)
2652                    .ok_or_else(|| anyhow!("Not in a program directory"))?
2653                    .path
2654            }
2655        }
2656    };
2657    std::env::set_current_dir(program_path)?;
2658    let idl = generate_idl(&cfg, skip_lint, no_docs, &cargo_args)?;
2659    std::env::set_current_dir(current_dir)?;
2660
2661    let out = match out {
2662        Some(path) => OutFile::File(PathBuf::from(path)),
2663        None => OutFile::Stdout,
2664    };
2665    write_idl(&idl, out)?;
2666
2667    if let Some(path) = out_ts {
2668        fs::write(path, idl_ts(&idl)?)?;
2669    }
2670
2671    Ok(())
2672}
2673
2674/// Generate IDL with method decided by whether manifest file has `idl-build` feature or not.
2675fn generate_idl(
2676    cfg: &WithPath<Config>,
2677    skip_lint: bool,
2678    no_docs: bool,
2679    cargo_args: &[String],
2680) -> Result<Idl> {
2681    check_idl_build_feature()?;
2682
2683    anchor_lang_idl::build::IdlBuilder::new()
2684        .resolution(cfg.features.resolution)
2685        .skip_lint(cfg.features.skip_lint || skip_lint)
2686        .no_docs(no_docs)
2687        .cargo_args(cargo_args.into())
2688        .build()
2689}
2690
2691fn idl_fetch(
2692    cfg_override: &ConfigOverride,
2693    address: Pubkey,
2694    out: Option<String>,
2695    non_canonical: bool,
2696) -> Result<()> {
2697    let (cluster_url, _) = get_cluster_and_wallet(cfg_override)?;
2698    let command = metadata::IdlCommand::unfunded(
2699        cluster_url,
2700        metadata::UnfundedIdlSubcommand::Fetch {
2701            program_id: address.to_string(),
2702            out,
2703            non_canonical,
2704        },
2705    );
2706
2707    if !command.status()?.success() {
2708        return Err(anyhow!("Failed to fetch IDL"));
2709    }
2710    Ok(())
2711}
2712
2713fn idl_convert(path: String, out: Option<String>, program_id: Option<Pubkey>) -> Result<()> {
2714    let idl = fs::read(path)?;
2715
2716    // Set the `metadata.address` field based on the given `program_id`
2717    let idl = match program_id {
2718        Some(program_id) => {
2719            let mut idl = serde_json::from_slice::<serde_json::Value>(&idl)?;
2720            idl.as_object_mut()
2721                .ok_or_else(|| anyhow!("IDL must be an object"))?
2722                .insert(
2723                    "metadata".into(),
2724                    serde_json::json!({ "address": program_id.to_string() }),
2725                );
2726            serde_json::to_vec(&idl)?
2727        }
2728        _ => idl,
2729    };
2730
2731    let idl = convert_idl(&idl)?;
2732    let out = match out {
2733        None => OutFile::Stdout,
2734        Some(out) => OutFile::File(PathBuf::from(out)),
2735    };
2736    write_idl(&idl, out)
2737}
2738
2739fn idl_type(path: String, out: Option<String>) -> Result<()> {
2740    let idl = fs::read(path)?;
2741    let idl = convert_idl(&idl)?;
2742    let types = idl_ts(&idl)?;
2743    match out {
2744        Some(out) => fs::write(out, types)?,
2745        _ => println!("{types}"),
2746    };
2747    Ok(())
2748}
2749
2750fn idl_close_metadata(
2751    cfg_override: &ConfigOverride,
2752    program_id: Pubkey,
2753    seed: String,
2754    priority_fee: Option<u64>,
2755) -> Result<()> {
2756    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
2757    let command = metadata::IdlCommand::funded(
2758        cluster_url,
2759        wallet_path,
2760        priority_fee,
2761        metadata::FundedIdlSubcommand::Close {
2762            program_id: program_id.to_string(),
2763            seed,
2764        },
2765    );
2766
2767    if !command.status()?.success() {
2768        return Err(anyhow!("Failed to close metadata account"));
2769    }
2770
2771    println!("Metadata account closed successfully.");
2772    Ok(())
2773}
2774
2775fn idl_create_buffer(
2776    cfg_override: &ConfigOverride,
2777    filepath: String,
2778    priority_fee: Option<u64>,
2779) -> Result<()> {
2780    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
2781    let command = metadata::IdlCommand::funded(
2782        cluster_url,
2783        wallet_path,
2784        priority_fee,
2785        metadata::FundedIdlSubcommand::CreateBuffer { filepath },
2786    );
2787
2788    if !command.status()?.success() {
2789        return Err(anyhow!("Failed to create buffer"));
2790    }
2791
2792    println!("Buffer created successfully.");
2793    Ok(())
2794}
2795
2796fn idl_set_buffer_authority(
2797    cfg_override: &ConfigOverride,
2798    buffer: Pubkey,
2799    new_authority: Pubkey,
2800    priority_fee: Option<u64>,
2801) -> Result<()> {
2802    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
2803    let command = metadata::IdlCommand::funded(
2804        cluster_url,
2805        wallet_path,
2806        priority_fee,
2807        metadata::FundedIdlSubcommand::SetBufferAuthority {
2808            buffer: buffer.to_string(),
2809            new_authority: new_authority.to_string(),
2810        },
2811    );
2812
2813    if !command.status()?.success() {
2814        return Err(anyhow!("Failed to set buffer authority"));
2815    }
2816
2817    println!("Buffer authority set successfully.");
2818    Ok(())
2819}
2820
2821fn idl_write_buffer_metadata(
2822    cfg_override: &ConfigOverride,
2823    program_id: Pubkey,
2824    buffer: Pubkey,
2825    seed: String,
2826    close_buffer: bool,
2827    priority_fee: Option<u64>,
2828) -> Result<()> {
2829    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
2830    let command = metadata::IdlCommand::funded(
2831        cluster_url,
2832        wallet_path,
2833        priority_fee,
2834        metadata::FundedIdlSubcommand::WriteBuffer {
2835            program_id: program_id.to_string(),
2836            buffer: buffer.to_string(),
2837            seed,
2838            close_buffer,
2839        },
2840    );
2841
2842    if !command.status()?.success() {
2843        return Err(anyhow!("Failed to write metadata using buffer"));
2844    }
2845
2846    println!("Metadata written successfully using buffer.");
2847    Ok(())
2848}
2849
/// Renders an IDL as a TypeScript type-helper module string.
///
/// All identifier-like JSON string values are converted to camelCase (values
/// that parse as base58 pubkeys are left untouched) and the result is emitted
/// as `export type <PascalCaseName> = <idl>;`.
fn idl_ts(idl: &Idl) -> Result<String> {
    let idl_name = &idl.metadata.name;
    let type_name = idl_name.to_pascal_case();
    let idl = serde_json::to_string(idl)?;

    // Convert every field of the IDL to camelCase.
    // NOTE(review): each fold step replaces ALL occurrences of the quoted
    // name in the accumulator, not just the captured one — relies on the
    // same name always wanting the same camelCase conversion.
    let camel_idl = Regex::new(r#""\w+":"([\w\d]+)""#)?
        .captures_iter(&idl)
        .fold(idl.clone(), |acc, cur| {
            let name = cur.get(1).unwrap().as_str();

            // Do not modify pubkeys
            if Pubkey::try_from(name).is_ok() {
                return acc;
            }

            let camel_name = name.to_lower_camel_case();
            acc.replace(&format!(r#""{name}""#), &format!(r#""{camel_name}""#))
        });

    // Pretty format — round-trip through `Idl` also validates the result
    // still parses after the renames.
    let camel_idl = serde_json::to_string_pretty(&serde_json::from_str::<Idl>(&camel_idl)?)?;

    Ok(format!(
        r#"/**
 * Program IDL in camelCase format in order to be used in JS/TS.
 *
 * Note that this is only a type helper and is not the actual IDL. The original
 * IDL can be found at `target/idl/{idl_name}.json`.
 */
export type {type_name} = {camel_idl};
"#
    ))
}
2884
2885fn write_idl(idl: &Idl, out: OutFile) -> Result<()> {
2886    let idl_json = serde_json::to_string_pretty(idl)?;
2887    match out {
2888        OutFile::Stdout => println!("{idl_json}"),
2889        OutFile::File(out) => fs::write(out, idl_json)?,
2890    };
2891
2892    Ok(())
2893}
2894fn account(
2895    cfg_override: &ConfigOverride,
2896    account_type: String,
2897    address: Pubkey,
2898    idl_filepath: Option<String>,
2899) -> Result<()> {
2900    let (program_name, account_type_name) = account_type
2901        .split_once('.') // Split at first occurrence of dot
2902        .and_then(|(x, y)| y.find('.').map_or_else(|| Some((x, y)), |_| None)) // ensures no dots in second substring
2903        .ok_or_else(|| {
2904            anyhow!(
2905                "Please enter the account struct in the following format: <program_name>.<Account>",
2906            )
2907        })?;
2908
2909    let idl = idl_filepath.map_or_else(
2910        || {
2911            Config::discover(cfg_override)?
2912                .ok_or_else(|| {
2913                    anyhow!(
2914                        "The 'anchor account' command requires an Anchor workspace with \
2915                         Anchor.toml for IDL type generation."
2916                    )
2917                })?
2918                .read_all_programs()
2919                .expect("Workspace must contain atleast one program.")
2920                .into_iter()
2921                .find(|p| p.lib_name == *program_name)
2922                .ok_or_else(|| anyhow!("Program {program_name} not found in workspace."))
2923                .map(|p| p.idl)?
2924                .ok_or_else(|| {
2925                    anyhow!(
2926                        "IDL not found. Please build the program atleast once to generate the IDL."
2927                    )
2928                })
2929        },
2930        |idl_path| {
2931            let idl = fs::read(idl_path)?;
2932            let idl = convert_idl(&idl)?;
2933            if idl.metadata.name != program_name {
2934                return Err(anyhow!("IDL does not match program {program_name}."));
2935            }
2936
2937            Ok(idl)
2938        },
2939    )?;
2940
2941    let cluster = match &cfg_override.cluster {
2942        Some(cluster) => cluster.clone(),
2943        None => Config::discover(cfg_override)?
2944            .map(|cfg| cfg.provider.cluster.clone())
2945            .unwrap_or(Cluster::Localnet),
2946    };
2947
2948    let data = create_client(cluster.url()).get_account_data(&address)?;
2949    let disc_len = idl
2950        .accounts
2951        .iter()
2952        .find(|acc| acc.name == account_type_name)
2953        .map(|acc| acc.discriminator.len())
2954        .ok_or_else(|| anyhow!("Account `{account_type_name}` not found in IDL"))?;
2955    let mut data_view = &data[disc_len..];
2956
2957    let deserialized_json =
2958        deserialize_idl_defined_type_to_json(&idl, account_type_name, &mut data_view)?;
2959
2960    println!(
2961        "{}",
2962        serde_json::to_string_pretty(&deserialized_json).unwrap()
2963    );
2964
2965    Ok(())
2966}
2967
// Deserializes a user-defined IDL type (struct, enum, or type alias) from the
// front of `data`, advancing the slice as bytes are consumed (recursively for
// nested defined types).
fn deserialize_idl_defined_type_to_json(
    idl: &Idl,
    defined_type_name: &str,
    data: &mut &[u8],
) -> Result<JsonValue, anyhow::Error> {
    // Prefer the type definition that backs an account of this name; fall
    // back to a plain lookup in `idl.types`.
    let defined_type = &idl
        .accounts
        .iter()
        .find(|acc| acc.name == defined_type_name)
        .and_then(|acc| idl.types.iter().find(|ty| ty.name == acc.name))
        .or_else(|| idl.types.iter().find(|ty| ty.name == defined_type_name))
        .ok_or_else(|| anyhow!("Type `{}` not found in IDL.", defined_type_name))?
        .ty;

    let mut deserialized_fields = Map::new();

    match defined_type {
        IdlTypeDefTy::Struct { fields } => {
            if let Some(fields) = fields {
                match fields {
                    // Named struct fields become one JSON object entry each,
                    // deserialized in declaration order.
                    IdlDefinedFields::Named(fields) => {
                        for field in fields {
                            deserialized_fields.insert(
                                field.name.clone(),
                                deserialize_idl_type_to_json(&field.ty, data, idl)?,
                            );
                        }
                    }
                    // Tuple structs become a single JSON array keyed by the
                    // type's name.
                    IdlDefinedFields::Tuple(fields) => {
                        let mut values = Vec::new();
                        for field in fields {
                            values.push(deserialize_idl_type_to_json(field, data, idl)?);
                        }
                        deserialized_fields
                            .insert(defined_type_name.to_owned(), JsonValue::Array(values));
                    }
                }
            }
        }
        IdlTypeDefTy::Enum { variants } => {
            // The variant index is encoded as a single leading byte.
            let repr = <u8 as AnchorDeserialize>::deserialize(data)?;

            let variant = variants
                .get(repr as usize)
                .ok_or_else(|| anyhow!("Error while deserializing enum variant {repr}"))?;

            // Unit variants render as an empty object payload.
            let mut value = json!({});

            if let Some(enum_field) = &variant.fields {
                match enum_field {
                    IdlDefinedFields::Named(fields) => {
                        let mut values = Map::new();
                        for field in fields {
                            values.insert(
                                field.name.clone(),
                                deserialize_idl_type_to_json(&field.ty, data, idl)?,
                            );
                        }
                        value = JsonValue::Object(values);
                    }
                    IdlDefinedFields::Tuple(fields) => {
                        let mut values = Vec::new();
                        for field in fields {
                            values.push(deserialize_idl_type_to_json(field, data, idl)?);
                        }
                        value = JsonValue::Array(values);
                    }
                }
            }

            // Enums render as `{ "VariantName": <payload> }`.
            deserialized_fields.insert(variant.name.clone(), value);
        }
        IdlTypeDefTy::Type { alias } => {
            // Type aliases delegate directly to the aliased type.
            return deserialize_idl_type_to_json(alias, data, idl);
        }
    }

    Ok(JsonValue::Object(deserialized_fields))
}
3048
// Deserializes a single IDL type from the front of `data` via
// `AnchorDeserialize`, advancing the slice as bytes are consumed. Defined
// types recurse through `deserialize_idl_defined_type_to_json`.
fn deserialize_idl_type_to_json(
    idl_type: &IdlType,
    data: &mut &[u8],
    parent_idl: &Idl,
) -> Result<JsonValue, anyhow::Error> {
    // Fail early rather than letting a nested deserialize hit EOF.
    if data.is_empty() {
        return Err(anyhow::anyhow!("Unable to parse from empty bytes"));
    }

    Ok(match idl_type {
        IdlType::Bool => json!(<bool as AnchorDeserialize>::deserialize(data)?),
        IdlType::U8 => {
            json!(<u8 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I8 => {
            json!(<i8 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::U16 => {
            json!(<u16 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I16 => {
            json!(<i16 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::U32 => {
            json!(<u32 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I32 => {
            json!(<i32 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::F32 => json!(<f32 as AnchorDeserialize>::deserialize(data)?),
        IdlType::U64 => {
            json!(<u64 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I64 => {
            json!(<i64 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::F64 => json!(<f64 as AnchorDeserialize>::deserialize(data)?),
        IdlType::U128 => {
            json!(<u128 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::I128 => {
            json!(<i128 as AnchorDeserialize>::deserialize(data)?)
        }
        IdlType::U256 => todo!("Upon completion of u256 IDL standard"),
        IdlType::I256 => todo!("Upon completion of i256 IDL standard"),
        // Byte vectors render as a JSON array of numbers.
        IdlType::Bytes => JsonValue::Array(
            <Vec<u8> as AnchorDeserialize>::deserialize(data)?
                .iter()
                .map(|i| json!(*i))
                .collect(),
        ),
        IdlType::String => json!(<String as AnchorDeserialize>::deserialize(data)?),
        // Pubkeys render as their base58 string form.
        IdlType::Pubkey => {
            json!(<Pubkey as AnchorDeserialize>::deserialize(data)?.to_string())
        }
        IdlType::Array(ty, size) => match size {
            // Fixed-size arrays have no length prefix: read exactly `size`
            // elements.
            IdlArrayLen::Value(size) => {
                let mut array_data: Vec<JsonValue> = Vec::with_capacity(*size);

                for _ in 0..*size {
                    array_data.push(deserialize_idl_type_to_json(ty, data, parent_idl)?);
                }

                JsonValue::Array(array_data)
            }
            // TODO:
            IdlArrayLen::Generic(_) => unimplemented!("Generic array length is not yet supported"),
        },
        // Options are encoded as a one-byte presence tag followed by the
        // value when the tag is non-zero.
        IdlType::Option(ty) => {
            let is_present = <u8 as AnchorDeserialize>::deserialize(data)?;

            if is_present == 0 {
                JsonValue::String("None".to_string())
            } else {
                deserialize_idl_type_to_json(ty, data, parent_idl)?
            }
        }
        // Vecs are encoded as a u32 length prefix followed by the elements.
        IdlType::Vec(ty) => {
            let size: usize = <u32 as AnchorDeserialize>::deserialize(data)?
                .try_into()
                .unwrap();

            let mut vec_data: Vec<JsonValue> = Vec::with_capacity(size);

            for _ in 0..size {
                vec_data.push(deserialize_idl_type_to_json(ty, data, parent_idl)?);
            }

            JsonValue::Array(vec_data)
        }
        IdlType::Defined {
            name,
            generics: _generics,
        } => {
            // TODO: Generics
            deserialize_idl_defined_type_to_json(parent_idl, name, data)?
        }
        // NOTE(review): emits the generic's *name* as a string rather than
        // deserializing a value — a concrete type is not available here.
        IdlType::Generic(generic) => json!(generic),
        _ => unimplemented!("{idl_type:?}"),
    })
}
3151
/// Destination for emitting a generated IDL: standard output or a file path.
enum OutFile {
    Stdout,
    File(PathBuf),
}
3156
// Builds, deploys, and tests all workspace programs in a single command.
//
// Flow: optionally build, optionally deploy (non-localnet or external
// validator), run `pre_test` hooks, execute the root `test` script (if any)
// followed by each configured test suite, then run `post_test` hooks. Local
// validators are started per suite by `run_test_suite` when on localnet.
#[allow(clippy::too_many_arguments)]
fn test(
    cfg_override: &ConfigOverride,
    program_name: Option<String>,
    skip_deploy: bool,
    skip_local_validator: bool,
    skip_build: bool,
    skip_lint: bool,
    no_idl: bool,
    detach: bool,
    tests_to_run: Vec<String>,
    validator_type: ValidatorType,
    extra_args: Vec<String>,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
) -> Result<()> {
    // Canonicalize all requested test paths up front so a bad path fails fast.
    let test_paths = tests_to_run
        .iter()
        .map(|path| {
            PathBuf::from(path)
                .canonicalize()
                .map_err(|_| anyhow!("Wrong path {}", path))
        })
        .collect::<Result<Vec<_>, _>>()?;

    with_workspace(cfg_override, |cfg| -> Result<()> {
        // Set validator type based on CLI choice
        cfg.validator = Some(validator_type);

        // Build if needed.
        if !skip_build {
            build(
                cfg_override,
                no_idl,
                None,
                None,
                false,
                skip_lint,
                true,
                program_name.clone(),
                None,
                None,
                BootstrapMode::None,
                None,
                None,
                env_vars,
                cargo_args,
                false,
            )?;
        }

        let root = cfg.path().parent().unwrap().to_owned();
        cfg.add_test_config(root, test_paths)?;

        // Run the deploy against the cluster in two cases:
        //
        // 1. The cluster is not localnet.
        // 2. The cluster is localnet, but we're not booting a local validator.
        //
        // In either case, skip the deploy if the user specifies.
        let is_localnet = cfg.provider.cluster == Cluster::Localnet;
        if (!is_localnet || skip_local_validator) && !skip_deploy {
            deploy(cfg_override, None, None, false, true, vec![])?;
        }

        cfg.run_hooks(HookType::PreTest)?;

        // Tracks whether a suite has already run, so later suites can wait
        // for the previous validator to shut down.
        let mut is_first_suite = true;
        if let Some(test_script) = cfg.scripts.get_mut("test") {
            is_first_suite = false;

            match program_name {
                Some(program_name) => {
                    // When testing a single program, rewrite the last
                    // `tests/<file>.(js|ts)` occurrence in the script to point
                    // at that program's test file.
                    if let Some((from, to)) = Regex::new("\\s(tests/\\S+\\.(js|ts))")
                        .unwrap()
                        .captures_iter(&test_script.clone())
                        .last()
                        .and_then(|c| c.get(1).and_then(|mtch| c.get(2).map(|ext| (mtch, ext))))
                        .map(|(mtch, ext)| {
                            (
                                mtch.as_str(),
                                format!("tests/{program_name}.{}", ext.as_str()),
                            )
                        })
                    {
                        println!("\nRunning tests of program `{program_name}`!");
                        // Replace the last path to the program name's path
                        *test_script = test_script.replace(from, &to);
                    }
                }
                _ => println!(
                    "\nFound a 'test' script in the Anchor.toml. Running it as a test suite!"
                ),
            }

            run_test_suite(
                cfg,
                cfg.path(),
                is_localnet,
                skip_local_validator,
                skip_deploy,
                detach,
                validator_type,
                &cfg.test_validator,
                &cfg.scripts,
                &extra_args,
                &cfg.surfpool_config,
            )?;
        }
        if let Some(test_config) = &cfg.test_config {
            for test_suite in test_config.iter() {
                // Wait between suites (per-suite `shutdown_wait`, defaulting
                // to SHUTDOWN_WAIT) so the previous validator can shut down.
                if !is_first_suite {
                    std::thread::sleep(std::time::Duration::from_millis(
                        test_suite
                            .1
                            .test
                            .as_ref()
                            .map(|val| val.shutdown_wait)
                            .unwrap_or(SHUTDOWN_WAIT) as u64,
                    ));
                } else {
                    is_first_suite = false;
                }

                run_test_suite(
                    cfg,
                    test_suite.0,
                    is_localnet,
                    skip_local_validator,
                    skip_deploy,
                    detach,
                    validator_type,
                    &test_suite.1.test,
                    &test_suite.1.scripts,
                    &extra_args,
                    &cfg.surfpool_config,
                )?;
            }
        }
        cfg.run_hooks(HookType::PostTest)?;
        Ok(())
    })?
}
3301
3302#[allow(clippy::too_many_arguments)]
3303fn run_test_suite(
3304    cfg: &WithPath<Config>,
3305    test_suite_path: impl AsRef<Path>,
3306    is_localnet: bool,
3307    skip_local_validator: bool,
3308    skip_deploy: bool,
3309    detach: bool,
3310    validator_type: ValidatorType,
3311    test_validator: &Option<TestValidator>,
3312    scripts: &ScriptsConfig,
3313    extra_args: &[String],
3314    surfpool_config: &Option<SurfpoolConfig>,
3315) -> Result<()> {
3316    println!("\nRunning test suite: {:#?}\n", test_suite_path.as_ref());
3317    let mut validator_handle = None;
3318    if is_localnet && !skip_local_validator {
3319        match validator_type {
3320            ValidatorType::Surfpool => {
3321                let full_simnet_mode = false;
3322                let flags = Some(surfpool_flags(
3323                    cfg,
3324                    surfpool_config,
3325                    full_simnet_mode,
3326                    skip_deploy,
3327                    Some(test_suite_path.as_ref()),
3328                )?);
3329                validator_handle = Some(start_surfpool_validator(
3330                    flags,
3331                    surfpool_config,
3332                    full_simnet_mode,
3333                )?);
3334            }
3335            ValidatorType::Legacy => {
3336                let flags = match skip_deploy {
3337                    true => None,
3338                    false => Some(validator_flags(cfg, test_validator)?),
3339                };
3340                validator_handle = Some(start_solana_test_validator(
3341                    cfg,
3342                    test_validator,
3343                    flags,
3344                    true,
3345                )?);
3346            }
3347        }
3348    }
3349    let url = cluster_url(cfg, test_validator, surfpool_config);
3350
3351    let node_options = format!(
3352        "{} {}",
3353        match std::env::var_os("NODE_OPTIONS") {
3354            Some(value) => value
3355                .into_string()
3356                .map_err(std::env::VarError::NotUnicode)?,
3357            None => "".to_owned(),
3358        },
3359        get_node_dns_option()?,
3360    );
3361
3362    // Setup log reader - kept alive until end of scope
3363    let log_streams = match stream_logs(cfg, &url) {
3364        Ok(streams) => Some(streams),
3365        Err(e) => {
3366            eprintln!("Warning: Failed to setup program log streaming: {:#}", e);
3367            eprintln!("Program logs will still be visible in the test output.");
3368            None
3369        }
3370    };
3371
3372    // Run the tests.
3373    let test_result = {
3374        let cmd = scripts
3375            .get("test")
3376            .expect("Not able to find script for `test`")
3377            .clone();
3378        let script_args = format!("{cmd} {}", extra_args.join(" "));
3379
3380        std::process::Command::new("bash")
3381            .arg("-c")
3382            .arg(script_args)
3383            .env("ANCHOR_PROVIDER_URL", url)
3384            .env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
3385            .env("NODE_OPTIONS", node_options)
3386            .stdout(Stdio::inherit())
3387            .stderr(Stdio::inherit())
3388            .output()
3389            .map_err(anyhow::Error::from)
3390            .context(cmd)
3391    };
3392
3393    // Keep validator running if needed.
3394    if test_result.is_ok() && detach {
3395        println!("Local validator still running. Press Ctrl + C quit.");
3396        std::io::stdin().lock().lines().next().unwrap().unwrap();
3397    }
3398
3399    // Check all errors and shut down.
3400    if let Some(mut child) = validator_handle {
3401        if let Err(err) = child.kill() {
3402            println!("Failed to kill subprocess {}: {}", child.id(), err);
3403        }
3404    }
3405
3406    // Explicitly shutdown log streams - closes WebSocket subscriptions
3407    if let Some(log_streams) = log_streams {
3408        for handle in log_streams {
3409            handle.shutdown();
3410        }
3411    }
3412
3413    // Must exist *after* shutting down the validator and log streams.
3414    match test_result {
3415        Ok(exit) => {
3416            if !exit.status.success() {
3417                std::process::exit(exit.status.code().unwrap());
3418            }
3419        }
3420        Err(err) => {
3421            println!("Failed to run test: {err:#}");
3422            return Err(err);
3423        }
3424    }
3425
3426    Ok(())
3427}
3428
// Returns the solana-test-validator flags. This will embed the workspace
// programs in the genesis block so we don't have to deploy every time. It also
// allows control of other solana-test-validator features.
//
// Flags are assembled from three sources, in order:
// 1. Every workspace program (`--bpf-program`, or `--upgradeable-program`
//    when [test.upgradeable] is set); the resolved address is also written
//    back into the program's IDL file under target/idl/.
// 2. Extra programs listed under the test's `genesis` entries.
// 3. The fields of the test's `validator` table, serialized to JSON and
//    mapped onto CLI flags, with special handling for `ledger`, `account`,
//    `account_dir`, `clone` and `deactivate_feature`.
fn validator_flags(
    cfg: &WithPath<Config>,
    test_validator: &Option<TestValidator>,
) -> Result<Vec<String>> {
    let programs = cfg.programs.get(&Cluster::Localnet);

    // Whether workspace programs should be embedded as upgradeable programs
    // (upgrade authority = configured wallet) rather than plain BPF programs.
    let test_upgradeable_program = test_validator
        .as_ref()
        .map(|test_validator| test_validator.upgradeable)
        .unwrap_or(false);

    let mut flags = Vec::new();
    for mut program in cfg.read_all_programs()? {
        // Local test runs always use the non-verifiable build output.
        let verifiable = false;
        let binary_path = program.binary_path(verifiable).display().to_string();
        // Use the [programs.cluster] override and fallback to the keypair
        // files if no override is given.
        let address = programs
            .and_then(|m| m.get(&program.lib_name))
            .map(|deployment| Ok(deployment.address.to_string()))
            .unwrap_or_else(|| program.pubkey().map(|p| p.to_string()))?;

        if test_upgradeable_program {
            // --upgradeable-program takes: address, binary, upgrade authority.
            flags.push("--upgradeable-program".to_string());
            flags.push(address.clone());
            flags.push(binary_path);
            flags.push(cfg.wallet_kp()?.pubkey().to_string());
        } else {
            flags.push("--bpf-program".to_string());
            flags.push(address.clone());
            flags.push(binary_path);
        }

        if let Some(idl) = program.idl.as_mut() {
            // Add program address to the IDL.
            idl.address = address;

            // Persist it.
            let idl_out = Path::new("target")
                .join("idl")
                .join(&idl.metadata.name)
                .with_extension("json");
            write_idl(idl, OutFile::File(idl_out))?;
        }
    }

    if let Some(test) = test_validator.as_ref() {
        if let Some(genesis) = &test.genesis {
            for entry in genesis {
                // Each genesis entry points at a prebuilt binary on disk.
                let program_path = Path::new(&entry.program);
                if !program_path.exists() {
                    return Err(anyhow!(
                        "Program in genesis configuration does not exist at path: {}",
                        program_path.display()
                    ));
                }
                if entry.upgradeable.unwrap_or(false) {
                    flags.push("--upgradeable-program".to_string());
                    flags.push(entry.address.clone());
                    flags.push(entry.program.clone());
                    flags.push(cfg.wallet_kp()?.pubkey().to_string());
                } else {
                    flags.push("--bpf-program".to_string());
                    flags.push(entry.address.clone());
                    flags.push(entry.program.clone());
                }
            }
        }
        if let Some(validator) = &test.validator {
            // Serialize the validator config so its fields can be iterated
            // generically as key/value pairs.
            // NOTE(review): the `unwrap()`s below assume the serialized shape
            // (top-level object; array-valued `account`/`account_dir`/`clone`
            // entries with string fields) — guaranteed by the config structs'
            // serde derives, not checked here.
            let entries = serde_json::to_value(validator)?;
            for (key, value) in entries.as_object().unwrap() {
                if key == "ledger" {
                    // Ledger flag is a special case as it is passed separately to the rest of
                    // these validator flags.
                    continue;
                };
                if key == "account" {
                    for entry in value.as_array().unwrap() {
                        // Push the account flag for each array entry
                        flags.push("--account".to_string());
                        flags.push(entry["address"].as_str().unwrap().to_string());
                        flags.push(entry["filename"].as_str().unwrap().to_string());
                    }
                } else if key == "account_dir" {
                    for entry in value.as_array().unwrap() {
                        flags.push("--account-dir".to_string());
                        flags.push(entry["directory"].as_str().unwrap().to_string());
                    }
                } else if key == "clone" {
                    // Client for fetching accounts data
                    let client = if let Some(url) = entries["url"].as_str() {
                        create_client(url)
                    } else {
                        return Err(anyhow!(
                            "Validator url for Solana's JSON RPC should be provided in order to \
                             clone accounts from it"
                        ));
                    };

                    // Collect into a HashSet first to de-duplicate addresses
                    // before the batched RPC fetch.
                    let pubkeys = value
                        .as_array()
                        .unwrap()
                        .iter()
                        .map(|entry| {
                            let address = entry["address"].as_str().unwrap();
                            Pubkey::try_from(address)
                                .map_err(|_| anyhow!("Invalid pubkey {}", address))
                        })
                        .collect::<Result<HashSet<Pubkey>>>()?
                        .into_iter()
                        .collect::<Vec<_>>();
                    let accounts = client.get_multiple_accounts(&pubkeys)?;

                    for (pubkey, account) in pubkeys.into_iter().zip(accounts) {
                        match account {
                            Some(account) => {
                                // Use a different flag for program accounts to fix the problem
                                // described in https://github.com/anza-xyz/agave/issues/522
                                if account.owner == bpf_loader_upgradeable::id()
                                    // Only programs are supported with `--clone-upgradeable-program`
                                    && matches!(
                                        account.deserialize_data::<UpgradeableLoaderState>()?,
                                        UpgradeableLoaderState::Program { .. }
                                    )
                                {
                                    flags.push("--clone-upgradeable-program".to_string());
                                    flags.push(pubkey.to_string());
                                } else {
                                    flags.push("--clone".to_string());
                                    flags.push(pubkey.to_string());
                                }
                            }
                            _ => return Err(anyhow!("Account {} not found", pubkey)),
                        }
                    }
                } else if key == "deactivate_feature" {
                    // Verify that the feature flags are valid pubkeys
                    let pubkeys_result: Result<Vec<Pubkey>, _> = value
                        .as_array()
                        .unwrap()
                        .iter()
                        .map(|entry| {
                            let feature_flag = entry.as_str().unwrap();
                            Pubkey::try_from(feature_flag).map_err(|_| {
                                anyhow!("Invalid pubkey (feature flag) {}", feature_flag)
                            })
                        })
                        .collect();
                    let features = pubkeys_result?;
                    for feature in features {
                        flags.push("--deactivate-feature".to_string());
                        flags.push(feature.to_string());
                    }
                } else {
                    // Remaining validator flags are non-array types
                    // (snake_case config keys become kebab-case CLI flags).
                    flags.push(format!("--{}", key.replace('_', "-")));
                    if let serde_json::Value::String(v) = value {
                        // Strings are pushed raw; `value.to_string()` would
                        // include the surrounding JSON quotes.
                        flags.push(v.to_string());
                    } else {
                        flags.push(value.to_string());
                    }
                }
            }
        }
    }

    Ok(flags)
}
3600
3601// Returns Surfpool flags.
3602// This flags will be passed to the Surfpool, it allows to configure the validator.
3603fn surfpool_flags(
3604    cfg: &WithPath<Config>,
3605    surfpool_config: &Option<SurfpoolConfig>,
3606    full_simnet_mode: bool,
3607    skip_deploy: bool,
3608    test_suite_path: Option<&Path>,
3609) -> Result<Vec<String>> {
3610    let programs = cfg.programs.get(&Cluster::Localnet);
3611    let mut flags = Vec::new();
3612
3613    for mut program in cfg.read_all_programs()? {
3614        let address = programs
3615            .and_then(|m| m.get(&program.lib_name))
3616            .map(|deployment| Ok(deployment.address.to_string()))
3617            .unwrap_or_else(|| program.pubkey().map(|p| p.to_string()))?;
3618        if let Some(idl) = program.idl.as_mut() {
3619            // Creating the idl files
3620            idl.address = address;
3621            let idl_out = Path::new("target")
3622                .join("idl")
3623                .join(&idl.metadata.name)
3624                .with_extension("json");
3625            write_idl(idl, OutFile::File(idl_out))?;
3626        }
3627    }
3628
3629    if let Some(config) = &surfpool_config {
3630        if let Some(airdrop_addresses) = &config.airdrop_addresses {
3631            for address in airdrop_addresses {
3632                flags.push("--airdrop".to_string());
3633                flags.push(address.to_string());
3634            }
3635        }
3636        if let Some(datasource_rpc_url) = &config.datasource_rpc_url {
3637            flags.push("--rpc-url".to_string());
3638            flags.push(datasource_rpc_url.to_string());
3639        }
3640
3641        let host = &config.host;
3642        flags.push("--host".to_string());
3643        flags.push(host.to_string());
3644
3645        let rpc_port = &config.rpc_port;
3646        flags.push("--port".to_string());
3647        flags.push(rpc_port.to_string());
3648
3649        if let Some(ws_port) = &config.ws_port {
3650            flags.push("--ws-port".to_string());
3651            flags.push(ws_port.to_string());
3652        }
3653
3654        if let Some(manifest_file_path) = &config.manifest_file_path {
3655            flags.push("--manifest-file-path".to_string());
3656            flags.push(manifest_file_path.to_string());
3657        }
3658
3659        if let Some(runbooks) = &config.runbooks {
3660            for runbook in runbooks {
3661                flags.push("--runbook".to_string());
3662                flags.push(runbook.to_string());
3663            }
3664        }
3665
3666        if let Some(slot_time) = &config.slot_time {
3667            flags.push("--slot-time".to_string());
3668            flags.push(slot_time.to_string());
3669        }
3670    }
3671
3672    let online = surfpool_config
3673        .as_ref()
3674        .and_then(|c| c.online)
3675        .unwrap_or(false);
3676    if !online {
3677        flags.push("--offline".to_string());
3678    }
3679
3680    let block_production_mode = surfpool_config
3681        .as_ref()
3682        .and_then(|c| c.block_production_mode.clone())
3683        .unwrap_or("transaction".into());
3684    flags.push("--block-production-mode".to_string());
3685    flags.push(block_production_mode);
3686
3687    flags.push("--log-level".to_string());
3688    flags.push(
3689        surfpool_config
3690            .as_ref()
3691            .and_then(|c| c.log_level.clone())
3692            .unwrap_or("none".into()),
3693    );
3694
3695    if !full_simnet_mode {
3696        flags.push("--no-tui".to_string());
3697        flags.push("--disable-instruction-profiling".to_string());
3698        flags.push("--max-profiles".to_string());
3699        flags.push("1".to_string());
3700        flags.push("--no-studio".to_string());
3701    }
3702
3703    match skip_deploy {
3704        true => flags.push("--no-deploy".to_string()),
3705        false => {
3706            // automatically generate in-memory runbooks
3707            flags.push("--legacy-anchor-compatibility".to_string());
3708            if let Some(test_suite_path) = test_suite_path {
3709                flags.push("--anchor-test-config-path".to_string());
3710                flags.push(test_suite_path.display().to_string());
3711            }
3712        }
3713    }
3714
3715    Ok(flags)
3716}
3717
/// Handle for a log streaming thread.
///
/// Manages a WebSocket subscription and its associated receiver thread.
/// Call `shutdown()` to cleanly stop the thread.
struct LogStreamHandle {
    // Active `logsSubscribe` WebSocket subscription; `shutdown` sends the
    // unsubscribe, which lets the paired receiver thread exit.
    subscription: PubsubClientSubscription<RpcResponse<RpcLogsResponse>>,
}
3725
impl LogStreamHandle {
    /// Explicitly shutdown the log stream.
    ///
    /// Consumes the handle; the unsubscribe result is intentionally ignored
    /// (shutdown is best-effort) and the spawned thread is detached.
    fn shutdown(self) {
        // Send unsubscribe in a background thread to avoid blocking
        // PubsubClientSubscription::send_unsubscribe() can block indefinitely if WebSocket is stuck
        // The receiver threads will exit when the subscription closes
        std::thread::spawn(move || {
            let _ = self.subscription.send_unsubscribe();
        });
    }
}
3737
3738/// Spawns a thread to receive logs from a subscription and write them to a file
3739fn spawn_log_receiver_thread<R>(receiver: R, log_file_path: PathBuf)
3740where
3741    R: IntoIterator<Item = RpcResponse<RpcLogsResponse>> + Send + 'static,
3742{
3743    std::thread::spawn(move || {
3744        if let Ok(mut file) = File::create(&log_file_path) {
3745            for response in receiver {
3746                let _ = writeln!(
3747                    file,
3748                    "Transaction executed in slot {}:",
3749                    response.context.slot
3750                );
3751                let _ = writeln!(file, "  Signature: {}", response.value.signature);
3752                let _ = writeln!(
3753                    file,
3754                    "  Status: {}",
3755                    response
3756                        .value
3757                        .err
3758                        .map(|err| err.to_string())
3759                        .unwrap_or_else(|| "Ok".to_string())
3760                );
3761                let _ = writeln!(file, "  Log Messages:");
3762                for log in response.value.logs {
3763                    let _ = writeln!(file, "    {}", log);
3764                }
3765                let _ = writeln!(file); // Empty line between transactions
3766                let _ = file.flush();
3767            }
3768        } else {
3769            eprintln!("Failed to create log file: {:?}", log_file_path);
3770        }
3771    });
3772}
3773
3774fn stream_logs(config: &WithPath<Config>, rpc_url: &str) -> Result<Vec<LogStreamHandle>> {
3775    // Determine validator type to use appropriate logging
3776    match &config.validator {
3777        Some(ValidatorType::Surfpool) => {
3778            // For Surfpool, we don't need to stream logs via external commands
3779            // Surfpool handles its own logging to .surfpool/logs/ directory
3780            if config
3781                .surfpool_config
3782                .as_ref()
3783                .and_then(|s| {
3784                    s.log_level
3785                        .as_ref()
3786                        .map(|l| l.to_ascii_lowercase().ne("none"))
3787                })
3788                .unwrap_or(false)
3789            {
3790                println!("Surfpool validator logs: .surfpool/logs/ directory");
3791            }
3792            Ok(vec![])
3793        }
3794        Some(ValidatorType::Legacy) | None => stream_solana_logs(config, rpc_url),
3795    }
3796}
3797
/// Streams transaction logs for every workspace program (and any genesis
/// programs) into per-program files under `.anchor/program-logs/`, returning
/// one `LogStreamHandle` per successful subscription.
fn stream_solana_logs(config: &WithPath<Config>, rpc_url: &str) -> Result<Vec<LogStreamHandle>> {
    // Recreate the log directory so each run starts with fresh files.
    let program_logs_dir = Path::new(".anchor").join("program-logs");
    if program_logs_dir.exists() {
        fs::remove_dir_all(&program_logs_dir)?;
    }
    fs::create_dir_all(&program_logs_dir)?;

    // For solana-test-validator, the WebSocket port is RPC port + WEBSOCKET_PORT_OFFSET
    // Extract port from rpc_url and construct WebSocket URL
    let ws_url = if rpc_url.contains("127.0.0.1") || rpc_url.contains("localhost") {
        // Local validator: increment port by 1 for WebSocket
        // NOTE(review): `rpc_port + WEBSOCKET_PORT_OFFSET` can overflow u16
        // for ports near 65535 — confirm acceptable for local configs.
        let rpc_port = rpc_url
            .rsplit_once(':')
            .and_then(|(_, port)| port.parse::<u16>().ok())
            .unwrap_or(DEFAULT_RPC_PORT);

        let ws_port = rpc_port + WEBSOCKET_PORT_OFFSET;
        let url = format!("ws://127.0.0.1:{}", ws_port);
        url
    } else {
        // Remote cluster: use same URL but replace http(s) with ws(s)
        rpc_url
            .replace("https://", "wss://")
            .replace("http://", "ws://")
    };

    // Give the WebSocket endpoint a moment to be ready (especially for local validators)
    std::thread::sleep(std::time::Duration::from_millis(1500));

    let mut handles = vec![];

    // Subscribe to logs for all workspace programs
    for program in config.read_all_programs()? {
        // The on-disk IDL supplies the program's deployed address.
        let idl_path = Path::new("target")
            .join("idl")
            .join(&program.lib_name)
            .with_extension("json");
        let idl = fs::read(&idl_path)?;
        let idl = convert_idl(&idl)?;

        let log_file_path =
            program_logs_dir.join(format!("{}.{}.log", idl.address, program.lib_name));
        let program_address = idl.address.clone();

        // Subscribe to logs using PubsubClient
        let (client, receiver) = match PubsubClient::logs_subscribe(
            &ws_url,
            RpcTransactionLogsFilter::Mentions(vec![program_address.clone()]),
            RpcTransactionLogsConfig {
                commitment: Some(CommitmentConfig::confirmed()),
            },
        ) {
            Ok(result) => result,
            Err(e) => {
                // A failed subscription is non-fatal: warn and keep going so
                // remaining programs still get their log files.
                eprintln!(
                    "Warning: Failed to subscribe to logs for program {}: {}",
                    program.lib_name, e
                );
                continue;
            }
        };

        // Spawn thread to write logs to file
        spawn_log_receiver_thread(receiver, log_file_path);

        handles.push(LogStreamHandle {
            subscription: client,
        });
    }

    // Also subscribe to logs for genesis programs
    if let Some(test) = config.test_validator.as_ref() {
        if let Some(genesis) = &test.genesis {
            for entry in genesis {
                // Genesis programs have no IDL file; the log file is named by
                // address alone.
                let log_file_path = program_logs_dir.join(&entry.address).with_extension("log");
                let address = entry.address.clone();

                // Subscribe to logs using PubsubClient
                let (client, receiver) = match PubsubClient::logs_subscribe(
                    &ws_url,
                    RpcTransactionLogsFilter::Mentions(vec![address.clone()]),
                    RpcTransactionLogsConfig {
                        commitment: Some(CommitmentConfig::confirmed()),
                    },
                ) {
                    Ok(result) => result,
                    Err(e) => {
                        eprintln!(
                            "Warning: Failed to subscribe to logs for genesis program {}: {}",
                            &entry.address, e
                        );
                        continue;
                    }
                };

                // Spawn thread to write logs to file
                spawn_log_receiver_thread(receiver, log_file_path);

                handles.push(LogStreamHandle {
                    subscription: client,
                });
            }
        }
    }

    Ok(handles)
}
3905
/// Spawns `surfpool start` with the given flags, waits (up to the configured
/// startup budget) for its RPC endpoint to answer, then blocks until all
/// runbook executions report completion before returning the child process.
fn start_surfpool_validator(
    flags: Option<Vec<String>>,
    surfpool_config: &Option<SurfpoolConfig>,
    full_simnet_mode: bool,
) -> Result<Child> {
    let rpc_url = surfpool_rpc_url(surfpool_config);

    // In full simnet mode surfpool inherits our stdio (interactive TUI);
    // otherwise its output is suppressed entirely.
    let (test_validator_stdout, test_validator_stderr) = match full_simnet_mode {
        true => (Stdio::inherit(), Stdio::inherit()),
        false => (Stdio::null(), Stdio::null()),
    };

    let mut validator_handle = std::process::Command::new("surfpool")
        .arg("start")
        .args(flags.unwrap_or_default())
        .stdout(test_validator_stdout)
        .stderr(test_validator_stderr)
        .spawn()
        .map_err(|e| anyhow!("Failed to spawn `surfpool`: {e}"))?;

    let client = create_client(rpc_url.clone());

    let mut count = 0;

    // Startup budget in milliseconds, polled in 100ms steps below.
    let ms_wait = surfpool_config
        .as_ref()
        .map(|surfpool| surfpool.startup_wait)
        .unwrap_or(STARTUP_WAIT);

    while count < ms_wait {
        let r = client.get_latest_blockhash();
        if r.is_ok() {
            break;
        }
        std::thread::sleep(std::time::Duration::from_millis(100));
        count += 100;
    }

    // Budget exhausted without a blockhash: kill the child and abort the CLI.
    if count >= ms_wait {
        eprintln!(
            "Unable to get latest blockhash. Surfpool validator does not look started. Check \
             .surfpool/logs/ directory for errors. Consider increasing [surfpool.startup_wait] in \
             Anchor.toml."
        );
        validator_handle.kill()?;
        std::process::exit(1);
    }

    // Wait for surfpool to finish executing its runbooks before handing the
    // validator back to the caller.
    // NOTE(review): this loop has no timeout, and an RPC failure returns via
    // `?` without killing the spawned validator — confirm both are intended.
    loop {
        let resp = client
            .send::<RpcResponse<SurfnetInfoResponse>>(
                RpcRequest::Custom {
                    method: "surfnet_getSurfnetInfo",
                },
                serde_json::Value::Null,
            )?
            .value;

        // break out if all runbooks are completed
        if resp
            .runbook_executions
            .iter()
            .all(|ex| ex.completed_at.is_some())
        {
            break;
        }
        std::thread::sleep(std::time::Duration::from_millis(500));
    }
    Ok(validator_handle)
}
3976
/// Starts `solana-test-validator` with a fresh ledger directory, waits (up to
/// the configured startup budget) for its RPC endpoint to answer, and returns
/// the child process handle.
fn start_solana_test_validator(
    cfg: &Config,
    test_validator: &Option<TestValidator>,
    flags: Option<Vec<String>>,
    test_log_stdout: bool,
) -> Result<Child> {
    // Fresh ledger directory plus the path of the validator log file.
    let (test_ledger_directory, test_ledger_log_filename) =
        test_validator_file_paths(test_validator)?;

    // Start a validator for testing.
    // When logging to file, stdout and stderr share a single file handle
    // (via try_clone); otherwise the validator inherits our stdio.
    let (test_validator_stdout, test_validator_stderr) = match test_log_stdout {
        true => {
            let test_validator_stdout_file =
                File::create(&test_ledger_log_filename).with_context(|| {
                    format!(
                        "Failed to create validator log file {}",
                        test_ledger_log_filename.display()
                    )
                })?;
            let test_validator_sterr_file = test_validator_stdout_file.try_clone()?;
            (
                Stdio::from(test_validator_stdout_file),
                Stdio::from(test_validator_sterr_file),
            )
        }
        false => (Stdio::inherit(), Stdio::inherit()),
    };

    let rpc_url = test_validator_rpc_url(test_validator);

    // Fail fast if the configured RPC or faucet ports are already taken,
    // before spawning the validator.
    let rpc_port = cfg
        .test_validator
        .as_ref()
        .and_then(|test| test.validator.as_ref().map(|v| v.rpc_port))
        .unwrap_or(DEFAULT_RPC_PORT);
    if !portpicker::is_free(rpc_port) {
        return Err(anyhow!(
            "Your configured rpc port: {rpc_port} is already in use"
        ));
    }
    let faucet_port = cfg
        .test_validator
        .as_ref()
        .and_then(|test| test.validator.as_ref().and_then(|v| v.faucet_port))
        .unwrap_or(solana_faucet::faucet::FAUCET_PORT);
    if !portpicker::is_free(faucet_port) {
        return Err(anyhow!(
            "Your configured faucet port: {faucet_port} is already in use"
        ));
    }

    // The configured wallet is the mint authority, so tests start funded.
    let mut validator_handle = std::process::Command::new("solana-test-validator")
        .arg("--ledger")
        .arg(test_ledger_directory)
        .arg("--mint")
        .arg(cfg.wallet_kp()?.pubkey().to_string())
        .args(flags.unwrap_or_default())
        .stdout(test_validator_stdout)
        .stderr(test_validator_stderr)
        .spawn()
        .map_err(|e| anyhow!("Failed to spawn `solana-test-validator`: {e}"))?;

    // Wait for the validator to be ready.
    let client = create_client(rpc_url);
    let mut count = 0;
    // Startup budget in milliseconds, polled in 100ms steps.
    let ms_wait = test_validator
        .as_ref()
        .map(|test| test.startup_wait)
        .unwrap_or(STARTUP_WAIT);
    while count < ms_wait {
        let r = client.get_latest_blockhash();
        if r.is_ok() {
            break;
        }
        std::thread::sleep(std::time::Duration::from_millis(100));
        count += 100;
    }
    // Budget exhausted: kill the half-started validator and abort the CLI.
    if count >= ms_wait {
        eprintln!(
            "Unable to get latest blockhash. Test validator does not look started. Check \
             {test_ledger_log_filename:?} for errors. Consider increasing [test.startup_wait] in \
             Anchor.toml."
        );
        validator_handle.kill()?;
        std::process::exit(1);
    }
    Ok(validator_handle)
}
4065
4066// Return the URL that solana-test-validator should be running on given the
4067// configuration
4068fn test_validator_rpc_url(test_validator: &Option<TestValidator>) -> String {
4069    match test_validator {
4070        Some(TestValidator {
4071            validator: Some(validator),
4072            ..
4073        }) => format!("http://{}:{}", validator.bind_address, validator.rpc_port),
4074        _ => "http://127.0.0.1:8899".to_string(),
4075    }
4076}
4077
4078// Returns the URL that surfpool should be running for the given configuration
4079fn surfpool_rpc_url(surfpool_config: &Option<SurfpoolConfig>) -> String {
4080    match surfpool_config {
4081        Some(SurfpoolConfig { host, rpc_port, .. }) => format!("http://{}:{}", host, rpc_port),
4082        _ => format!("http://{}:{}", SURFPOOL_HOST, DEFAULT_RPC_PORT),
4083    }
4084}
4085
4086// Setup and return paths to the solana-test-validator ledger directory and log
4087// files given the configuration
4088fn test_validator_file_paths(test_validator: &Option<TestValidator>) -> Result<(PathBuf, PathBuf)> {
4089    let ledger_path = match test_validator {
4090        Some(TestValidator {
4091            validator: Some(validator),
4092            ..
4093        }) => PathBuf::from(&validator.ledger),
4094        _ => get_default_ledger_path(),
4095    };
4096
4097    if !ledger_path.is_relative() {
4098        // Prevent absolute paths to avoid someone using / or similar, as the
4099        // directory gets removed
4100        eprintln!("Ledger directory {ledger_path:?} must be relative");
4101        std::process::exit(1);
4102    }
4103    if ledger_path.exists() {
4104        fs::remove_dir_all(&ledger_path).with_context(|| {
4105            format!(
4106                "Failed to remove ledger directory {}",
4107                ledger_path.display()
4108            )
4109        })?;
4110    }
4111
4112    fs::create_dir_all(&ledger_path).with_context(|| {
4113        format!(
4114            "Failed to create ledger directory {}",
4115            ledger_path.display()
4116        )
4117    })?;
4118
4119    let log_path = ledger_path.join("test-ledger-log.txt");
4120    Ok((ledger_path, log_path))
4121}
4122
4123fn cluster_url(
4124    cfg: &Config,
4125    test_validator: &Option<TestValidator>,
4126    surfpool_config: &Option<SurfpoolConfig>,
4127) -> String {
4128    let is_localnet = cfg.provider.cluster == Cluster::Localnet;
4129    match is_localnet {
4130        // Cluster is Localnet, determine which validator to use
4131        true => match &cfg.validator {
4132            Some(ValidatorType::Surfpool) => surfpool_rpc_url(surfpool_config),
4133            Some(ValidatorType::Legacy) | None => test_validator_rpc_url(test_validator),
4134        },
4135        false => cfg.provider.cluster.url().to_string(),
4136    }
4137}
4138
4139fn clean(cfg_override: &ConfigOverride) -> Result<()> {
4140    // Get workspace root - either from Anchor.toml or use current directory
4141    let workspace_root = if let Ok(Some(cfg)) = Config::discover(cfg_override) {
4142        cfg.path()
4143            .parent()
4144            .expect("Invalid Anchor.toml")
4145            .to_path_buf()
4146    } else {
4147        // No Anchor.toml - use current directory for Cargo workspace
4148        std::env::current_dir()?
4149    };
4150
4151    let dot_anchor_dir = workspace_root.join(".anchor");
4152    let target_dir = workspace_root.join("target");
4153    let deploy_dir = target_dir.join("deploy");
4154
4155    if dot_anchor_dir.exists() {
4156        fs::remove_dir_all(&dot_anchor_dir)
4157            .map_err(|e| anyhow!("Could not remove directory {:?}: {}", dot_anchor_dir, e))?;
4158    }
4159
4160    if target_dir.exists() {
4161        for entry in fs::read_dir(target_dir)? {
4162            let path = entry?.path();
4163            if path.is_dir() && path != deploy_dir {
4164                fs::remove_dir_all(&path)
4165                    .map_err(|e| anyhow!("Could not remove directory {}: {}", path.display(), e))?;
4166            } else if path.is_file() {
4167                fs::remove_file(&path)
4168                    .map_err(|e| anyhow!("Could not remove file {}: {}", path.display(), e))?;
4169            }
4170        }
4171    } else {
4172        println!("skipping target directory: not found")
4173    }
4174
4175    if deploy_dir.exists() {
4176        for file in fs::read_dir(deploy_dir)? {
4177            let path = file?.path();
4178            if path.extension() != Some(&OsString::from("json")) {
4179                fs::remove_file(&path)
4180                    .map_err(|e| anyhow!("Could not remove file {}: {}", path.display(), e))?;
4181            }
4182        }
4183    } else {
4184        println!("skipping deploy directory: not found")
4185    }
4186
4187    Ok(())
4188}
4189
/// Deploys the workspace programs (optionally just `program_name`) to the
/// configured cluster, running the `PreDeploy`/`PostDeploy` hooks around the
/// deployment. Each program is deployed via the native
/// `program::program_deploy` implementation.
fn deploy(
    cfg_override: &ConfigOverride,
    program_name: Option<String>,
    program_keypair: Option<String>,
    verifiable: bool,
    no_idl: bool,
    solana_args: Vec<String>,
) -> Result<()> {
    // Execute the code within the workspace
    with_workspace(cfg_override, |cfg| -> Result<()> {
        let url = cluster_url(cfg, &cfg.test_validator, &cfg.surfpool_config);
        let keypair = cfg.provider.wallet.to_string();

        // Augment the given solana args with recommended defaults.
        let client = create_client(&url);
        let solana_args = add_recommended_deployment_solana_args(&client, solana_args)?;

        cfg.run_hooks(HookType::PreDeploy)?;
        // Deploy the programs.
        println!("Deploying cluster: {url}");
        println!("Upgrade authority: {keypair}");

        for program in cfg.get_programs(program_name)? {
            // `verifiable` selects which build output directory is deployed.
            let binary_path = program.binary_path(verifiable).display().to_string();

            println!("Deploying program {:?}...", program.lib_name);
            println!("Program path: {binary_path}...");

            // An explicitly provided keypair path applies to every program in
            // this loop; otherwise each program's own keypair file is used.
            let program_keypair_filepath = match &program_keypair {
                Some(path) => path.clone(),
                None => program.keypair_file()?.path().display().to_string(),
            };

            // Deploy using our native implementation
            program::program_deploy(
                cfg_override,
                Some(strip_workspace_prefix(binary_path)),
                None, // program_name - not needed since we have filepath
                Some(strip_workspace_prefix(program_keypair_filepath)),
                None, // upgrade_authority - uses wallet from config
                None, // program_id - derived from program_keypair
                None, // buffer
                None, // max_len
                no_idl,
                false, // make_final
                solana_args.clone(),
            )?;
        }

        println!("Deploy success");
        cfg.run_hooks(HookType::PostDeploy)?;

        Ok(())
    })?
}
4245
/// Upgrades the program at `program_id` in place with the binary at
/// `program_filepath`, retrying up to `max_retries` times.
///
/// Thin wrapper around `program::program_upgrade`; the buffer and upgrade
/// authority fall back to the defaults resolved from the workspace config.
fn upgrade(
    cfg_override: &ConfigOverride,
    program_id: Pubkey,
    program_filepath: String,
    max_retries: u32,
    solana_args: Vec<String>,
) -> Result<()> {
    // Use our native upgrade implementation
    program::program_upgrade(
        cfg_override,
        program_id,
        Some(program_filepath),
        None, // program_name - not needed since we have filepath
        None, // buffer
        None, // upgrade_authority - uses wallet from config
        max_retries,
        solana_args,
    )
}
4265
4266fn migrate(cfg_override: &ConfigOverride) -> Result<()> {
4267    with_workspace(cfg_override, |cfg| -> Result<()> {
4268        println!("Running migration deploy script");
4269
4270        let url = cluster_url(cfg, &cfg.test_validator, &cfg.surfpool_config);
4271        let cur_dir = std::env::current_dir()?;
4272        let migrations_dir = cur_dir.join("migrations");
4273        let deploy_ts = Path::new("deploy.ts");
4274
4275        let use_ts = Path::new("tsconfig.json").exists() && migrations_dir.join(deploy_ts).exists();
4276
4277        if !Path::new(".anchor").exists() {
4278            fs::create_dir(".anchor")?;
4279        }
4280        std::env::set_current_dir(".anchor")?;
4281
4282        let exit = if use_ts {
4283            let module_path = migrations_dir.join(deploy_ts);
4284            let deploy_script_host_str =
4285                rust_template::deploy_ts_script_host(&url, &module_path.display().to_string());
4286            fs::write(deploy_ts, deploy_script_host_str)?;
4287
4288            let pkg_manager_cmd = match &cfg.toolchain.package_manager {
4289                Some(pkg_manager) => pkg_manager.to_string(),
4290                None => PackageManager::default().to_string(),
4291            };
4292
4293            std::process::Command::new(pkg_manager_cmd)
4294                .args([
4295                    "run",
4296                    "ts-node",
4297                    &fs::canonicalize(deploy_ts)?.to_string_lossy(),
4298                ])
4299                .env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
4300                .stdout(Stdio::inherit())
4301                .stderr(Stdio::inherit())
4302                .output()?
4303        } else {
4304            let deploy_js = deploy_ts.with_extension("js");
4305            let module_path = migrations_dir.join(&deploy_js);
4306            let deploy_script_host_str =
4307                rust_template::deploy_js_script_host(&url, &module_path.display().to_string());
4308            fs::write(&deploy_js, deploy_script_host_str)?;
4309
4310            std::process::Command::new("node")
4311                .arg(&deploy_js)
4312                .env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
4313                .stdout(Stdio::inherit())
4314                .stderr(Stdio::inherit())
4315                .output()?
4316        };
4317
4318        if !exit.status.success() {
4319            eprintln!("Deploy failed.");
4320            std::process::exit(exit.status.code().unwrap());
4321        }
4322
4323        println!("Deploy complete.");
4324        Ok(())
4325    })?
4326}
4327
4328fn set_workspace_dir_or_exit() {
4329    // First try to find Anchor workspace
4330    let d = match Config::discover(&ConfigOverride::default()) {
4331        Err(err) => {
4332            println!("Workspace configuration error: {err}");
4333            std::process::exit(1);
4334        }
4335        Ok(d) => d,
4336    };
4337
4338    match d {
4339        None => {
4340            // No Anchor.toml found - check for Cargo workspace with Solana programs
4341            let current_dir = match std::env::current_dir() {
4342                Ok(dir) => dir,
4343                Err(_) => {
4344                    println!("Unable to determine current directory");
4345                    std::process::exit(1);
4346                }
4347            };
4348
4349            let cargo_toml_path = current_dir.join("Cargo.toml");
4350            if !cargo_toml_path.exists() {
4351                println!(
4352                    "Not in a Solana workspace. This command requires either Anchor.toml or a \
4353                     Cargo workspace with Solana programs."
4354                );
4355                std::process::exit(1);
4356            }
4357
4358            // Check if this is a workspace and has Solana programs
4359            match program::discover_solana_programs(None) {
4360                Ok(programs) if !programs.is_empty() => {
4361                    // Found Solana programs in Cargo workspace - stay in current directory
4362                    // (already in the right place)
4363                }
4364                _ => {
4365                    println!(
4366                        "Not in a Solana workspace. This command requires either Anchor.toml or a \
4367                         Cargo workspace with Solana programs."
4368                    );
4369                    std::process::exit(1);
4370                }
4371            }
4372        }
4373        Some(cfg) => {
4374            // Found Anchor.toml - change to workspace root
4375            match cfg.path().parent() {
4376                None => {
4377                    println!("Unable to make new program");
4378                }
4379                Some(parent) => {
4380                    if std::env::set_current_dir(parent).is_err() {
4381                        println!(
4382                            "Not in a Solana workspace. This command requires either Anchor.toml \
4383                             or a Cargo workspace with Solana programs."
4384                        );
4385                        std::process::exit(1);
4386                    }
4387                }
4388            };
4389        }
4390    }
4391}
4392
4393fn airdrop(cfg_override: &ConfigOverride, amount: f64, pubkey: Option<Pubkey>) -> Result<()> {
4394    // Get cluster URL and wallet path
4395    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
4396
4397    // Create RPC client
4398    let client = RpcClient::new(cluster_url);
4399
4400    // Determine recipient
4401    let recipient_pubkey = if let Some(pubkey) = pubkey {
4402        pubkey
4403    } else {
4404        // Load keypair from wallet path and get pubkey
4405        let keypair = Keypair::read_from_file(&wallet_path)
4406            .map_err(|e| anyhow!("Failed to read keypair from {}: {}", wallet_path, e))?;
4407        keypair.pubkey()
4408    };
4409
4410    // Convert SOL to lamports
4411    let lamports = (amount * 1_000_000_000.0) as u64;
4412
4413    // Request airdrop
4414    println!("Requesting airdrop of {} SOL...", amount);
4415    let signature = client
4416        .request_airdrop(&recipient_pubkey, lamports)
4417        .map_err(|e| anyhow!("Airdrop request failed: {}", e))?;
4418
4419    println!("Signature: {}", signature);
4420    println!("Waiting for confirmation...");
4421
4422    // Wait for confirmation
4423    client
4424        .confirm_transaction(&signature)
4425        .map_err(|e| anyhow!("Transaction confirmation failed: {}", e))?;
4426
4427    // Get and display the new balance
4428    let balance = client.get_balance(&recipient_pubkey)?;
4429    println!("{}", format_sol(balance));
4430
4431    Ok(())
4432}
4433
4434fn cluster(_cmd: ClusterCommand) -> Result<()> {
4435    println!("Cluster Endpoints:\n");
4436    println!("* Mainnet - https://api.mainnet-beta.solana.com");
4437    println!("* Devnet  - https://api.devnet.solana.com");
4438    println!("* Testnet - https://api.testnet.solana.com");
4439    Ok(())
4440}
4441
/// Dispatch `anchor config` subcommands to their handlers.
fn config_cmd(cfg_override: &ConfigOverride, cmd: ConfigCommand) -> Result<()> {
    match cmd {
        ConfigCommand::Get => config_get(cfg_override),
        ConfigCommand::Set { url, keypair } => config_set(cfg_override, url, keypair),
    }
}
4448
4449fn config_get(cfg_override: &ConfigOverride) -> Result<()> {
4450    with_workspace(cfg_override, |cfg| -> Result<()> {
4451        println!("Anchor Configuration:");
4452        println!();
4453        println!("Cluster: {}", cfg.provider.cluster.url());
4454        println!("Wallet:  {}", cfg.provider.wallet);
4455        Ok(())
4456    })?
4457}
4458
4459fn config_set(
4460    cfg_override: &ConfigOverride,
4461    url: Option<String>,
4462    keypair: Option<String>,
4463) -> Result<()> {
4464    // Find the Anchor.toml file
4465    let anchor_toml_path = match Config::discover(cfg_override)? {
4466        Some(cfg) => cfg.path().parent().unwrap().join("Anchor.toml"),
4467        None => bail!("Not in an Anchor workspace"),
4468    };
4469
4470    // Read the current Anchor.toml
4471    let mut toml_content =
4472        fs::read_to_string(&anchor_toml_path).context("Failed to read Anchor.toml")?;
4473    let mut toml_doc: toml::Value =
4474        toml::from_str(&toml_content).context("Failed to parse Anchor.toml")?;
4475
4476    let mut updated = false;
4477
4478    // Update cluster URL if provided
4479    if let Some(cluster_url) = url {
4480        let expanded_url = match cluster_url.as_str() {
4481            "m" => "https://api.mainnet-beta.solana.com".to_string(),
4482            "d" => "https://api.devnet.solana.com".to_string(),
4483            "t" => "https://api.testnet.solana.com".to_string(),
4484            "l" => "http://127.0.0.1:8899".to_string(),
4485            _ => cluster_url,
4486        };
4487
4488        if let Some(provider) = toml_doc.get_mut("provider").and_then(|v| v.as_table_mut()) {
4489            provider.insert(
4490                "cluster".to_string(),
4491                toml::Value::String(expanded_url.clone()),
4492            );
4493            println!("Updated cluster to: {}", expanded_url);
4494            updated = true;
4495        }
4496    }
4497
4498    // Update wallet path if provided
4499    if let Some(keypair_path) = keypair {
4500        let expanded_path = shellexpand::tilde(&keypair_path).to_string();
4501
4502        // Check if the wallet file exists
4503        if !Path::new(&expanded_path).exists() {
4504            eprintln!("Warning: Wallet file does not exist: {}", expanded_path);
4505        }
4506
4507        if let Some(provider) = toml_doc.get_mut("provider").and_then(|v| v.as_table_mut()) {
4508            provider.insert(
4509                "wallet".to_string(),
4510                toml::Value::String(expanded_path.clone()),
4511            );
4512            println!("Updated wallet to: {}", expanded_path);
4513            updated = true;
4514        }
4515    }
4516
4517    if updated {
4518        // Write the updated config back to Anchor.toml
4519        toml_content =
4520            toml::to_string_pretty(&toml_doc).context("Failed to serialize Anchor.toml")?;
4521        fs::write(&anchor_toml_path, toml_content).context("Failed to write Anchor.toml")?;
4522        println!("\nConfiguration updated successfully!");
4523    } else {
4524        println!("No changes made. Use --url or --keypair to update settings.");
4525    }
4526
4527    Ok(())
4528}
4529
4530fn shell(cfg_override: &ConfigOverride) -> Result<()> {
4531    with_workspace(cfg_override, |cfg| -> Result<()> {
4532        let programs = {
4533            // Create idl map from all workspace programs.
4534            let mut idls: HashMap<String, Idl> = cfg
4535                .read_all_programs()?
4536                .iter()
4537                .filter(|program| program.idl.is_some())
4538                .map(|program| {
4539                    (
4540                        program.idl.as_ref().unwrap().metadata.name.clone(),
4541                        program.idl.clone().unwrap(),
4542                    )
4543                })
4544                .collect();
4545            // Insert all manually specified idls into the idl map.
4546            if let Some(programs) = cfg.programs.get(&cfg.provider.cluster) {
4547                let _ = programs
4548                    .iter()
4549                    .map(|(name, pd)| {
4550                        if let Some(idl_fp) = &pd.idl {
4551                            let file_str =
4552                                fs::read_to_string(idl_fp).expect("Unable to read IDL file");
4553                            let idl = serde_json::from_str(&file_str).expect("Idl not readable");
4554                            idls.insert(name.clone(), idl);
4555                        }
4556                    })
4557                    .collect::<Vec<_>>();
4558            }
4559
4560            // Finalize program list with all programs with IDLs.
4561            match cfg.programs.get(&cfg.provider.cluster) {
4562                None => Vec::new(),
4563                Some(programs) => programs
4564                    .iter()
4565                    .filter_map(|(name, program_deployment)| {
4566                        Some(ProgramWorkspace {
4567                            name: name.to_string(),
4568                            program_id: program_deployment.address,
4569                            idl: match idls.get(name) {
4570                                None => return None,
4571                                Some(idl) => idl.clone(),
4572                            },
4573                        })
4574                    })
4575                    .collect::<Vec<ProgramWorkspace>>(),
4576            }
4577        };
4578        let url = cluster_url(cfg, &cfg.test_validator, &cfg.surfpool_config);
4579        let js_code = rust_template::node_shell(&url, &cfg.provider.wallet.to_string(), programs)?;
4580        let mut child = std::process::Command::new("node")
4581            .args(["-e", &js_code, "-i", "--experimental-repl-await"])
4582            .stdout(Stdio::inherit())
4583            .stderr(Stdio::inherit())
4584            .spawn()
4585            .map_err(|e| anyhow::format_err!("{}", e))?;
4586
4587        if !child.wait()?.success() {
4588            println!("Error running node shell");
4589            return Ok(());
4590        }
4591        Ok(())
4592    })?
4593}
4594
4595fn run(cfg_override: &ConfigOverride, script: String, script_args: Vec<String>) -> Result<()> {
4596    with_workspace(cfg_override, |cfg| -> Result<()> {
4597        let url = cluster_url(cfg, &cfg.test_validator, &cfg.surfpool_config);
4598        let script = cfg
4599            .scripts
4600            .get(&script)
4601            .ok_or_else(|| anyhow!("Unable to find script"))?;
4602        let script_with_args = format!("{script} {}", script_args.join(" "));
4603        let exit = std::process::Command::new("bash")
4604            .arg("-c")
4605            .arg(&script_with_args)
4606            .env("ANCHOR_PROVIDER_URL", url)
4607            .env("ANCHOR_WALLET", cfg.provider.wallet.to_string())
4608            .stdout(Stdio::inherit())
4609            .stderr(Stdio::inherit())
4610            .output()
4611            .unwrap();
4612        if !exit.status.success() {
4613            std::process::exit(exit.status.code().unwrap_or(1));
4614        }
4615        Ok(())
4616    })?
4617}
4618
/// Dispatch `anchor keys` subcommands to their handlers.
fn keys(cfg_override: &ConfigOverride, cmd: KeysCommand) -> Result<()> {
    match cmd {
        KeysCommand::List => keys_list(cfg_override),
        KeysCommand::Sync { program_name } => keys_sync(cfg_override, program_name),
    }
}
4625
4626fn keys_list(cfg_override: &ConfigOverride) -> Result<()> {
4627    with_workspace(cfg_override, |cfg| -> Result<()> {
4628        for program in cfg.read_all_programs()? {
4629            let pubkey = program.pubkey()?;
4630            println!("{}: {}", program.lib_name, pubkey);
4631        }
4632        Ok(())
4633    })?
4634}
4635
/// Sync program `declare_id!` pubkeys with the pubkey from `target/deploy/<KEYPAIR>.json`.
///
/// For every selected program this updates, when out of date:
/// 1. the `declare_id!("...")` declaration in `src/lib.rs` or `src/id.rs`
///    (first file containing one), and
/// 2. the matching `[programs.<cluster>]` entry in `Anchor.toml`, but only
///    for the currently configured cluster.
fn keys_sync(cfg_override: &ConfigOverride, program_name: Option<String>) -> Result<()> {
    with_workspace(cfg_override, |cfg| -> Result<()> {
        // Capture group 3 is the program id inside an (optionally
        // module-qualified) `declare_id!("...")` invocation at line start.
        let declare_id_regex = RegexBuilder::new(r#"^(([\w]+::)*)declare_id!\("(\w*)"\)"#)
            .multi_line(true)
            .build()
            .unwrap();

        let cfg_cluster = cfg.provider.cluster.to_owned();
        println!("Syncing program ids for the configured cluster ({cfg_cluster})\n");

        // Tracks whether any source file was rewritten so we can remind the
        // user to rebuild afterwards.
        let mut changed_src = false;
        for program in cfg.get_programs(program_name)? {
            // Get the pubkey from the keypair file; this is the source of truth.
            let actual_program_id = program.pubkey()?.to_string();

            // Handle declaration in program files
            let src_path = program.path.join("src");
            let files_to_check = vec![src_path.join("lib.rs"), src_path.join("id.rs")];

            for path in files_to_check {
                // Unreadable/missing files (e.g. no id.rs) are simply skipped.
                let mut content = match fs::read_to_string(&path) {
                    Ok(content) => content,
                    Err(_) => continue,
                };

                // Only the first `declare_id!` match per file is inspected, and
                // only when its id differs from the keypair-derived one.
                let incorrect_program_id = declare_id_regex
                    .captures(&content)
                    .and_then(|captures| captures.get(3))
                    .filter(|program_id_match| program_id_match.as_str() != actual_program_id);
                if let Some(program_id_match) = incorrect_program_id {
                    println!("Found incorrect program id declaration in {path:?}");

                    // Update the program id in place using the match's byte range.
                    content.replace_range(program_id_match.range(), &actual_program_id);
                    fs::write(&path, content)?;

                    changed_src = true;
                    println!("Updated to {actual_program_id}\n");
                    // Stop after the first file that needed fixing.
                    break;
                }
            }

            // Handle declaration in Anchor.toml
            'outer: for (cluster, programs) in &mut cfg.programs {
                // Only change if the configured cluster matches the program's cluster
                if cluster != &cfg_cluster {
                    continue;
                }

                for (name, deployment) in programs {
                    // Skip other programs
                    if name != &program.lib_name {
                        continue;
                    }

                    if deployment.address.to_string() != actual_program_id {
                        println!(
                            "Found incorrect program id declaration in Anchor.toml for the \
                             program `{name}`"
                        );

                        // Update the program id and persist the whole config file.
                        deployment.address = Pubkey::try_from(actual_program_id.as_str()).unwrap();
                        fs::write(cfg.path(), cfg.to_string())?;

                        println!("Updated to {actual_program_id}\n");
                        // At most one Anchor.toml entry is updated per program.
                        break 'outer;
                    }
                }
            }
        }

        println!("All program id declarations are synced.");
        if changed_src {
            println!("Please rebuild the program to update the generated artifacts.")
        }

        Ok(())
    })?
}
4717
4718/// Check if there's a mismatch between the program keypair and the `declare_id!` in the source code.
4719/// Returns an error if a mismatch is detected, prompting the user to run `anchor keys sync`.
4720fn check_program_id_mismatch(cfg: &WithPath<Config>, program_name: Option<String>) -> Result<()> {
4721    let declare_id_regex = RegexBuilder::new(r#"^(([\w]+::)*)declare_id!\("(\w*)"\)"#)
4722        .multi_line(true)
4723        .build()
4724        .unwrap();
4725
4726    for program in cfg.get_programs(program_name)? {
4727        // Get the pubkey from the keypair file
4728        let actual_program_id = program.pubkey()?.to_string();
4729
4730        // Check declaration in program files
4731        let src_path = program.path.join("src");
4732        let files_to_check = vec![src_path.join("lib.rs"), src_path.join("id.rs")];
4733
4734        for path in files_to_check {
4735            let content = match fs::read_to_string(&path) {
4736                Ok(content) => content,
4737                Err(_) => continue,
4738            };
4739
4740            let incorrect_program_id = declare_id_regex
4741                .captures(&content)
4742                .and_then(|captures| captures.get(3))
4743                .filter(|program_id_match| program_id_match.as_str() != actual_program_id);
4744
4745            if let Some(program_id_match) = incorrect_program_id {
4746                let declared_id = program_id_match.as_str();
4747                return Err(anyhow!(
4748                    "Program ID mismatch detected for program '{}':\n  Keypair file has: {}\n  \
4749                     Source code has:  {}\n\nPlease run 'anchor keys sync' to update the program \
4750                     ID in your source code or use the '--ignore-keys' flag to skip this check.",
4751                    program.lib_name,
4752                    actual_program_id,
4753                    declared_id
4754                ));
4755            }
4756        }
4757    }
4758
4759    Ok(())
4760}
4761
/// Build the workspace (unless skipped) and run a local validator until the
/// user presses Enter, then tear everything down.
///
/// `validator_type` selects between a Surfpool simnet and the legacy
/// `solana-test-validator`. Program logs are streamed over a websocket
/// subscription when one can be established; otherwise logs remain visible in
/// the validator's own output.
#[allow(clippy::too_many_arguments)]
fn localnet(
    cfg_override: &ConfigOverride,
    skip_build: bool,
    skip_deploy: bool,
    skip_lint: bool,
    ignore_keys: bool,
    validator_type: ValidatorType,
    env_vars: Vec<String>,
    cargo_args: Vec<String>,
) -> Result<()> {
    with_workspace(cfg_override, |cfg| -> Result<()> {
        // Build if needed.
        if !skip_build {
            build(
                cfg_override,
                false,
                None,
                None,
                false,
                skip_lint,
                ignore_keys,
                None,
                None,
                None,
                BootstrapMode::None,
                None,
                None,
                env_vars,
                cargo_args,
                false,
            )?;
        }

        // Spawn the selected validator; deployment flags are omitted when
        // `skip_deploy` is set.
        let validator_handle: Option<Child> = match validator_type {
            ValidatorType::Surfpool => {
                let full_simnet_mode = true;
                let flags = Some(surfpool_flags(
                    cfg,
                    &cfg.surfpool_config,
                    full_simnet_mode,
                    skip_deploy,
                    None,
                )?);
                Some(start_surfpool_validator(
                    flags,
                    &cfg.surfpool_config,
                    full_simnet_mode,
                )?)
            }
            ValidatorType::Legacy => {
                let flags = match skip_deploy {
                    true => None,
                    false => Some(validator_flags(cfg, &cfg.test_validator)?),
                };
                Some(start_solana_test_validator(
                    cfg,
                    &cfg.test_validator,
                    flags,
                    false,
                )?)
            }
        };

        // Setup log reader. Failure here is non-fatal: the validator's own
        // output still contains the program logs.
        let url = test_validator_rpc_url(&cfg.test_validator);
        let log_streams = match stream_logs(cfg, &url) {
            Ok(streams) => {
                println!(
                    "Log streams set up successfully ({} streams)",
                    streams.len()
                );
                Some(streams)
            }
            Err(e) => {
                eprintln!("Warning: Failed to setup program log streaming: {:#}", e);
                eprintln!("  Program logs will still be visible in the validator output.");
                None
            }
        };

        // Block until the user hits <Enter>, then shut everything down.
        std::io::stdin().lock().lines().next().unwrap().unwrap();

        // Check all errors and shut down.
        if let Some(mut handle) = validator_handle {
            if let Err(err) = handle.kill() {
                println!("Failed to kill subprocess {}: {}", handle.id(), err);
            }
        }

        // Explicitly shutdown log streams - closes WebSocket subscriptions
        if let Some(log_streams) = log_streams {
            for handle in log_streams {
                handle.shutdown();
            }
        }

        Ok(())
    })?
}
4862
4863// with_workspace ensures the current working directory is always the top level
4864// workspace directory, i.e., where the `Anchor.toml` file is located, before
4865// and after the closure invocation.
4866//
4867// The closure passed into this function must never change the working directory
4868// to be outside the workspace. Doing so will have undefined behavior.
4869fn with_workspace<R>(
4870    cfg_override: &ConfigOverride,
4871    f: impl FnOnce(&mut WithPath<Config>) -> R,
4872) -> Result<R> {
4873    set_workspace_dir_or_exit();
4874
4875    let mut cfg = Config::discover(cfg_override)
4876        .map_err(|e| anyhow!("Workspace configuration error: {}", e))?
4877        .ok_or_else(|| anyhow!("This command requires an Anchor workspace."))?;
4878
4879    let r = f(&mut cfg);
4880
4881    set_workspace_dir_or_exit();
4882
4883    Ok(r)
4884}
4885
4886fn is_hidden(entry: &walkdir::DirEntry) -> bool {
4887    entry
4888        .file_name()
4889        .to_str()
4890        .map(|s| s == "." || s.starts_with('.') || s == "target")
4891        .unwrap_or(false)
4892}
4893
4894fn get_node_version() -> Result<Version> {
4895    let node_version = std::process::Command::new("node")
4896        .arg("--version")
4897        .stderr(Stdio::inherit())
4898        .output()
4899        .map_err(|e| anyhow::format_err!("node failed: {}", e))?;
4900    let output = std::str::from_utf8(&node_version.stdout)?
4901        .strip_prefix('v')
4902        .unwrap()
4903        .trim();
4904    Version::parse(output).map_err(Into::into)
4905}
4906
4907fn add_recommended_deployment_solana_args(
4908    client: &RpcClient,
4909    args: Vec<String>,
4910) -> Result<Vec<String>> {
4911    let mut augmented_args = args.clone();
4912
4913    // If no priority fee is provided, calculate a recommended fee based on recent txs.
4914    if !args.contains(&"--with-compute-unit-price".to_string()) {
4915        let priority_fee = get_recommended_micro_lamport_fee(client);
4916        augmented_args.push("--with-compute-unit-price".to_string());
4917        augmented_args.push(priority_fee.to_string());
4918    }
4919
4920    const DEFAULT_MAX_SIGN_ATTEMPTS: u8 = 30;
4921    if !args.contains(&"--max-sign-attempts".to_string()) {
4922        augmented_args.push("--max-sign-attempts".to_string());
4923        augmented_args.push(DEFAULT_MAX_SIGN_ATTEMPTS.to_string());
4924    }
4925
4926    // If no buffer keypair is provided, create a temporary one to reuse across deployments.
4927    // This is particularly useful for upgrading larger programs, which suffer from an increased
4928    // likelihood of some write transactions failing during any single deployment.
4929    if !args.contains(&"--buffer".to_owned()) {
4930        let tmp_keypair_path = std::env::temp_dir().join("anchor-upgrade-buffer.json");
4931        if !tmp_keypair_path.exists() {
4932            if let Err(err) = Keypair::new().write_to_file(&tmp_keypair_path) {
4933                return Err(anyhow!(
4934                    "Error creating keypair for buffer account, {:?}",
4935                    err
4936                ));
4937            }
4938        }
4939
4940        augmented_args.push("--buffer".to_owned());
4941        augmented_args.push(tmp_keypair_path.to_string_lossy().to_string());
4942    }
4943
4944    Ok(augmented_args)
4945}
4946
4947fn get_node_dns_option() -> Result<&'static str> {
4948    let version = get_node_version()?;
4949    let req = VersionReq::parse(">=16.4.0").unwrap();
4950    let option = match req.matches(&version) {
4951        true => "--dns-result-order=ipv4first",
4952        false => "",
4953    };
4954    Ok(option)
4955}
4956
// Remove the current workspace directory if it prefixes a string.
// This is used as a workaround for the Solana CLI using the uriparse crate to
// parse args but not handling percent encoding/decoding when using the path as
// a local filesystem path. Removing the workspace prefix handles most/all cases
// of spaces in keypair/binary paths, but this should be fixed in the Solana CLI
// and removed here.
fn strip_workspace_prefix(absolute_path: String) -> String {
    // Current directory plus a trailing path separator.
    let mut workspace_prefix = std::env::current_dir().unwrap().display().to_string();
    workspace_prefix.push_str(std::path::MAIN_SEPARATOR_STR);
    match absolute_path.strip_prefix(&workspace_prefix) {
        Some(stripped) => stripped.to_string(),
        None => absolute_path,
    }
}
4971
/// Create a new [`RpcClient`] with `confirmed` commitment level instead of the default(finalized).
///
/// Callers polling deployment/transaction state want to observe transactions
/// as soon as they are confirmed rather than waiting for finalization.
fn create_client<U: ToString>(url: U) -> RpcClient {
    RpcClient::new_with_commitment(url, CommitmentConfig::confirmed())
}
4976
4977fn address(cfg_override: &ConfigOverride) -> Result<()> {
4978    let (_cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
4979
4980    // Load keypair and get pubkey
4981    let keypair = Keypair::read_from_file(&wallet_path)
4982        .map_err(|e| anyhow!("Failed to read keypair from {}: {}", wallet_path, e))?;
4983
4984    // Print the public key
4985    println!("{}", keypair.pubkey());
4986
4987    Ok(())
4988}
4989
4990fn balance(cfg_override: &ConfigOverride, pubkey: Option<Pubkey>, lamports: bool) -> Result<()> {
4991    let (cluster_url, wallet_path) = get_cluster_and_wallet(cfg_override)?;
4992
4993    // Create RPC client
4994    let client = RpcClient::new(cluster_url);
4995
4996    // Determine which account to check
4997    let account_pubkey = if let Some(pubkey) = pubkey {
4998        pubkey
4999    } else {
5000        // Load keypair from wallet path and get pubkey
5001        let keypair = Keypair::read_from_file(&wallet_path)
5002            .map_err(|e| anyhow!("Failed to read keypair from {}: {}", wallet_path, e))?;
5003        keypair.pubkey()
5004    };
5005
5006    // Get balance
5007    let balance = client.get_balance(&account_pubkey)?;
5008
5009    // Format and display output
5010    if lamports {
5011        println!("{}", balance);
5012    } else {
5013        println!("{}", format_sol(balance));
5014    }
5015
5016    Ok(())
5017}
5018
5019fn epoch(cfg_override: &ConfigOverride) -> Result<()> {
5020    let (cluster_url, _wallet_path) = get_cluster_and_wallet(cfg_override)?;
5021
5022    // Create RPC client
5023    let client = RpcClient::new(cluster_url);
5024
5025    // Get epoch info
5026    let epoch_info = client.get_epoch_info()?;
5027
5028    // Print just the epoch number
5029    println!("{}", epoch_info.epoch);
5030
5031    Ok(())
5032}
5033
5034fn epoch_info(cfg_override: &ConfigOverride) -> Result<()> {
5035    let (cluster_url, _wallet_path) = get_cluster_and_wallet(cfg_override)?;
5036
5037    // Create RPC client
5038    let client = RpcClient::new(cluster_url);
5039
5040    // Get epoch info
5041    let epoch_info = client.get_epoch_info()?;
5042
5043    // Calculate epoch slot range
5044    let first_slot_in_epoch = epoch_info.absolute_slot - epoch_info.slot_index;
5045    let last_slot_in_epoch = first_slot_in_epoch + epoch_info.slots_in_epoch;
5046
5047    // Calculate completion stats
5048    let epoch_completed_percent =
5049        epoch_info.slot_index as f64 / epoch_info.slots_in_epoch as f64 * 100.0;
5050    let remaining_slots = epoch_info.slots_in_epoch - epoch_info.slot_index;
5051
5052    // Display epoch information (matching Solana CLI format)
5053    println!("Block height: {}", epoch_info.block_height);
5054    println!("Slot: {}", epoch_info.absolute_slot);
5055    println!("Epoch: {}", epoch_info.epoch);
5056
5057    if let Some(tx_count) = epoch_info.transaction_count {
5058        println!("Transaction Count: {}", tx_count);
5059    }
5060
5061    println!(
5062        "Epoch Slot Range: [{}..{})",
5063        first_slot_in_epoch, last_slot_in_epoch
5064    );
5065    println!("Epoch Completed Percent: {:>3.3}%", epoch_completed_percent);
5066    println!(
5067        "Epoch Completed Slots: {}/{} ({} remaining)",
5068        epoch_info.slot_index, epoch_info.slots_in_epoch, remaining_slots
5069    );
5070
5071    // Try to calculate epoch completed time
5072    // Get average slot time from performance samples (aggregate up to 60 samples)
5073    if let Ok(samples) = client.get_recent_performance_samples(Some(60)) {
5074        // Aggregate all samples to calculate average slot time
5075        let (total_slots, total_secs) =
5076            samples.iter().fold((0u64, 0u64), |(slots, secs), sample| {
5077                (
5078                    slots.saturating_add(sample.num_slots),
5079                    secs.saturating_add(sample.sample_period_secs as u64),
5080                )
5081            });
5082
5083        if total_slots > 0 {
5084            let avg_slot_time_ms = (total_secs * 1000) / total_slots;
5085
5086            // Calculate time_remaining using average slot time (always estimated)
5087            let remaining_secs = (remaining_slots * avg_slot_time_ms) / 1000;
5088
5089            // Calculate time_elapsed - try actual block times first, then estimate
5090            // Get the first actual block in the epoch and adjust for slot differences
5091            let start_block_time = client
5092                .get_blocks_with_limit(first_slot_in_epoch, 1)
5093                .ok()
5094                .and_then(|slots| slots.first().cloned())
5095                .and_then(|first_actual_block| {
5096                    client.get_block_time(first_actual_block).ok().map(|time| {
5097                        // Adjust backwards if first actual block is after expected start
5098                        let slot_diff = first_actual_block.saturating_sub(first_slot_in_epoch);
5099                        let time_adjustment = (slot_diff * avg_slot_time_ms / 1000) as i64;
5100                        time.saturating_sub(time_adjustment)
5101                    })
5102                });
5103
5104            let current_block_time = client.get_block_time(epoch_info.absolute_slot).ok();
5105
5106            let (elapsed_secs, is_estimated) = if let (Some(start_time), Some(current_time)) =
5107                (start_block_time, current_block_time)
5108            {
5109                // Use actual block times for elapsed
5110                ((current_time - start_time) as u64, false)
5111            } else {
5112                // Estimate elapsed using average slot time
5113                ((epoch_info.slot_index * avg_slot_time_ms) / 1000, true)
5114            };
5115
5116            // Total time = elapsed + remaining
5117            let total_secs = elapsed_secs + remaining_secs;
5118
5119            let estimated_marker = if is_estimated { "*" } else { "" };
5120            println!(
5121                "Epoch Completed Time: {}{}/{} ({} remaining)",
5122                format_duration_secs(elapsed_secs),
5123                estimated_marker,
5124                format_duration_secs(total_secs),
5125                format_duration_secs(remaining_secs)
5126            );
5127        }
5128    }
5129
5130    Ok(())
5131}
5132
/// Format seconds into a human-readable duration (e.g., "1day 5h 49m 8s").
///
/// Zero-valued day/hour/minute components are omitted; the seconds
/// component is omitted only when it is zero AND some larger component is
/// present, so a zero duration renders as "0s".
fn format_duration_secs(total_seconds: u64) -> String {
    let days = total_seconds / 86_400;
    let hours = (total_seconds / 3_600) % 24;
    let minutes = (total_seconds / 60) % 60;
    let seconds = total_seconds % 60;

    let mut out = String::new();
    // Append the non-zero day/hour/minute components, space-separated.
    for (value, unit) in [(days, "day"), (hours, "h"), (minutes, "m")] {
        if value > 0 {
            if !out.is_empty() {
                out.push(' ');
            }
            out.push_str(&format!("{}{}", value, unit));
        }
    }
    // Seconds always appear when non-zero, or as the sole "0s" fallback.
    if seconds > 0 || out.is_empty() {
        if !out.is_empty() {
            out.push(' ');
        }
        out.push_str(&format!("{}s", seconds));
    }

    out
}
5158
5159fn logs_subscribe(
5160    cfg_override: &ConfigOverride,
5161    include_votes: bool,
5162    address: Option<Vec<Pubkey>>,
5163) -> Result<()> {
5164    let (cluster_url, _wallet_path) = get_cluster_and_wallet(cfg_override)?;
5165
5166    // Convert HTTP(S) URL to WebSocket URL
5167    let ws_url = if cluster_url.contains("localhost") || cluster_url.contains("127.0.0.1") {
5168        // Parse the URL to extract and increment the port
5169        cluster_url
5170            .replace("https://", "wss://")
5171            .replace("http://", "ws://")
5172            .replace(":8899", ":8900") // Default test validator ports
5173    } else {
5174        cluster_url
5175            .replace("https://", "wss://")
5176            .replace("http://", "ws://")
5177    };
5178
5179    println!("Connecting to {}", ws_url);
5180
5181    let filter = match (include_votes, address) {
5182        (true, Some(address)) => {
5183            RpcTransactionLogsFilter::Mentions(address.iter().map(|p| p.to_string()).collect())
5184        }
5185        (true, None) => RpcTransactionLogsFilter::AllWithVotes,
5186        (false, Some(address)) => {
5187            RpcTransactionLogsFilter::Mentions(address.iter().map(|p| p.to_string()).collect())
5188        }
5189        (false, None) => RpcTransactionLogsFilter::All,
5190    };
5191
5192    let (_client, receiver) = PubsubClient::logs_subscribe(
5193        &ws_url,
5194        filter,
5195        RpcTransactionLogsConfig {
5196            commitment: cfg_override.commitment.map(|c| CommitmentConfig {
5197                commitment: c.into(),
5198            }),
5199        },
5200    )?;
5201
5202    loop {
5203        match receiver.recv() {
5204            Ok(logs) => {
5205                println!("Transaction executed in slot {}:", logs.context.slot);
5206                println!("  Signature: {}", logs.value.signature);
5207                println!(
5208                    "  Status: {}",
5209                    logs.value
5210                        .err
5211                        .map(|err| err.to_string())
5212                        .unwrap_or_else(|| "Ok".to_string())
5213                );
5214                println!("  Log Messages:");
5215                for log in logs.value.logs {
5216                    println!("    {log}");
5217                }
5218            }
5219            Err(err) => {
5220                return Err(anyhow!("Disconnected: {err}"));
5221            }
5222        }
5223    }
5224}
5225
#[cfg(test)]
mod tests {
    use super::*;

    /// Runs `init` with default options and the given workspace name.
    /// Callers wrap this in `#[should_panic]` to assert that invalid Rust
    /// identifiers are rejected.
    fn init_with_name(name: &str) {
        init(
            &ConfigOverride {
                cluster: None,
                wallet: None,
                commitment: None,
            },
            name.to_string(),
            true,
            true,
            PackageManager::default(),
            false,
            ProgramTemplate::default(),
            TestTemplate::default(),
            false,
            true,
        )
        .unwrap();
    }

    #[test]
    #[should_panic(expected = "Anchor workspace name must be a valid Rust identifier.")]
    fn test_init_reserved_word() {
        // `await` is a Rust reserved word and must be rejected.
        init_with_name("await");
    }

    #[test]
    #[should_panic(expected = "Anchor workspace name must be a valid Rust identifier.")]
    fn test_init_reserved_word_from_syn() {
        // `fn` is a keyword recognized via syn's identifier validation.
        init_with_name("fn");
    }

    #[test]
    #[should_panic(expected = "Anchor workspace name must be a valid Rust identifier.")]
    fn test_init_starting_with_digit() {
        // Identifiers may not begin with a digit.
        init_with_name("1project");
    }
}