// mobench_macros/lib.rs
//! # mobench-macros
//!
//! [![Crates.io](https://img.shields.io/crates/v/mobench-macros.svg)](https://crates.io/crates/mobench-macros)
//! [![Documentation](https://docs.rs/mobench-macros/badge.svg)](https://docs.rs/mobench-macros)
//! [![MIT License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/worldcoin/mobile-bench-rs/blob/main/LICENSE)
//!
//! Procedural macros for the mobench mobile benchmarking SDK.
//!
//! This crate ships as part of the mobench `0.1.16` ecosystem release.
//!
//! This crate provides the [`#[benchmark]`](macro@benchmark) attribute macro
//! that marks functions for mobile benchmarking. Functions annotated with this
//! macro are automatically registered in a global registry and can be discovered
//! and executed at runtime.
//!
//! ## Usage
//!
//! Most users should import the macro via [`mobench-sdk`](https://crates.io/crates/mobench-sdk)
//! rather than using this crate directly:
//!
//! ```ignore
//! use mobench_sdk::benchmark;
//!
//! #[benchmark]
//! fn my_benchmark() {
//!     // Your benchmark code here
//!     let result = expensive_computation();
//!     std::hint::black_box(result);
//! }
//! ```
//!
//! ## Setup and Teardown
//!
//! For benchmarks that need expensive setup that shouldn't be measured:
//!
//! ```ignore
//! use mobench_sdk::benchmark;
//!
//! fn setup_data() -> Vec<u8> {
//!     vec![0u8; 1_000_000]  // Not measured
//! }
//!
//! #[benchmark(setup = setup_data)]
//! fn hash_benchmark(data: &Vec<u8>) {
//!     std::hint::black_box(compute_hash(data));  // Only this is measured
//! }
//! ```
//!
//! ## How It Works
//!
//! The `#[benchmark]` macro:
//!
//! 1. **Preserves the original function** - The function remains callable as normal
//! 2. **Registers with inventory** - Creates a static registration that the SDK discovers at runtime
//! 3. **Captures the fully-qualified name** - Uses `module_path!()` to generate unique names like `my_crate::my_module::my_benchmark`
//! 4. **Handles setup/teardown** - If specified, wraps the benchmark with setup/teardown that aren't timed
//!
//! ## Requirements
//!
//! - The [`inventory`](https://crates.io/crates/inventory) crate must be in your dependency tree
//! - Simple benchmarks: no parameters, returns `()`
//! - With setup: exactly one parameter (reference to setup result), returns `()`
//! - The function should not panic during normal execution
//!
//! ## Crate Ecosystem
//!
//! This crate is part of the mobench ecosystem:
//!
//! - **[`mobench-sdk`](https://crates.io/crates/mobench-sdk)** - Core SDK with timing harness (re-exports this macro)
//! - **[`mobench`](https://crates.io/crates/mobench)** - CLI tool
//! - **`mobench-macros`** (this crate) - Proc macros

use proc_macro::TokenStream;
use quote::quote;
use syn::{
    Ident, ItemFn, ReturnType, Token,
    parse::{Parse, ParseStream},
    parse_macro_input,
    punctuated::Punctuated,
};

/// Arguments to the benchmark attribute
struct BenchmarkArgs {
    /// Optional setup function; its return value is passed to the benchmark
    /// and the setup call itself is not timed.
    setup: Option<Ident>,
    /// Optional teardown function that consumes the setup value after the
    /// run; only valid together with `setup` (enforced during parsing).
    teardown: Option<Ident>,
    /// When true, setup runs for every iteration and the benchmark takes the
    /// value by move (per-iteration runner); incompatible with `teardown`.
    per_iteration: bool,
}

89impl Parse for BenchmarkArgs {
90    fn parse(input: ParseStream) -> syn::Result<Self> {
91        let mut setup = None;
92        let mut teardown = None;
93        let mut per_iteration = false;
94
95        if input.is_empty() {
96            return Ok(Self {
97                setup,
98                teardown,
99                per_iteration,
100            });
101        }
102
103        // Parse key = value pairs separated by commas
104        let args = Punctuated::<BenchmarkArg, Token![,]>::parse_terminated(input)?;
105
106        for arg in args {
107            match arg {
108                BenchmarkArg::Setup(ident) => {
109                    if setup.is_some() {
110                        return Err(syn::Error::new_spanned(ident, "duplicate setup argument"));
111                    }
112                    setup = Some(ident);
113                }
114                BenchmarkArg::Teardown(ident) => {
115                    if teardown.is_some() {
116                        return Err(syn::Error::new_spanned(
117                            ident,
118                            "duplicate teardown argument",
119                        ));
120                    }
121                    teardown = Some(ident);
122                }
123                BenchmarkArg::PerIteration => {
124                    per_iteration = true;
125                }
126            }
127        }
128
129        // Validate: teardown without setup is invalid
130        if teardown.is_some() && setup.is_none() {
131            return Err(syn::Error::new(
132                proc_macro2::Span::call_site(),
133                "teardown requires setup to be specified",
134            ));
135        }
136
137        // Validate: per_iteration with teardown is not supported
138        if per_iteration && teardown.is_some() {
139            return Err(syn::Error::new(
140                proc_macro2::Span::call_site(),
141                "per_iteration mode is not compatible with teardown",
142            ));
143        }
144
145        Ok(Self {
146            setup,
147            teardown,
148            per_iteration,
149        })
150    }
151}
152
/// A single parsed argument of the `#[benchmark(...)]` attribute list.
enum BenchmarkArg {
    /// `setup = <fn name>`
    Setup(Ident),
    /// `teardown = <fn name>`
    Teardown(Ident),
    /// Bare `per_iteration` flag (takes no value).
    PerIteration,
}

159impl Parse for BenchmarkArg {
160    fn parse(input: ParseStream) -> syn::Result<Self> {
161        let name: Ident = input.parse()?;
162
163        match name.to_string().as_str() {
164            "setup" => {
165                input.parse::<Token![=]>()?;
166                let value: Ident = input.parse()?;
167                Ok(BenchmarkArg::Setup(value))
168            }
169            "teardown" => {
170                input.parse::<Token![=]>()?;
171                let value: Ident = input.parse()?;
172                Ok(BenchmarkArg::Teardown(value))
173            }
174            "per_iteration" => Ok(BenchmarkArg::PerIteration),
175            _ => Err(syn::Error::new_spanned(
176                name,
177                "expected 'setup', 'teardown', or 'per_iteration'",
178            )),
179        }
180    }
181}
182
183/// Marks a function as a benchmark for mobile execution.
184///
185/// This attribute macro registers the function in the global benchmark registry,
186/// making it discoverable and executable by the mobench runtime.
187///
188/// # Basic Usage
189///
190/// ```ignore
191/// use mobench_sdk::benchmark;
192///
193/// #[benchmark]
194/// fn fibonacci_bench() {
195///     let result = fibonacci(30);
196///     std::hint::black_box(result);
197/// }
198/// ```
199///
200/// # With Setup (setup runs once, not measured)
201///
202/// ```ignore
203/// use mobench_sdk::benchmark;
204///
205/// fn setup_proof() -> ProofInput {
206///     ProofInput::generate()  // Expensive, not measured
207/// }
208///
209/// #[benchmark(setup = setup_proof)]
210/// fn verify_proof(input: &ProofInput) {
211///     verify(&input.proof);  // Only this is measured
212/// }
213/// ```
214///
215/// # With Per-Iteration Setup (for mutating benchmarks)
216///
217/// ```ignore
218/// use mobench_sdk::benchmark;
219///
220/// fn generate_random_vec() -> Vec<i32> {
221///     (0..1000).map(|_| rand::random()).collect()
222/// }
223///
224/// #[benchmark(setup = generate_random_vec, per_iteration)]
225/// fn sort_benchmark(data: Vec<i32>) {
226///     let mut data = data;
227///     data.sort();
228///     std::hint::black_box(data);
229/// }
230/// ```
231///
232/// # With Setup and Teardown
233///
234/// ```ignore
235/// use mobench_sdk::benchmark;
236///
237/// fn setup_db() -> Database { Database::connect("test.db") }
238/// fn cleanup_db(db: Database) { db.close(); }
239///
240/// #[benchmark(setup = setup_db, teardown = cleanup_db)]
241/// fn db_query(db: &Database) {
242///     db.query("SELECT * FROM users");
243/// }
244/// ```
245///
246/// # Function Requirements
247///
248/// **Without setup:**
249/// - Take no parameters
250/// - Return `()` (unit type)
251///
252/// **With setup:**
253/// - Take exactly one parameter (reference to setup result, or owned for per_iteration)
254/// - Return `()` (unit type)
255///
256/// # Best Practices
257///
258/// ## Use `black_box` to Prevent Optimization
259///
260/// Always wrap results with [`std::hint::black_box`] to prevent the compiler
261/// from optimizing away the computation:
262///
263/// ```ignore
264/// #[benchmark]
265/// fn good_benchmark() {
266///     let result = compute_something();
267///     std::hint::black_box(result);  // Prevents optimization
268/// }
269/// ```
270#[proc_macro_attribute]
271pub fn benchmark(attr: TokenStream, item: TokenStream) -> TokenStream {
272    let args = parse_macro_input!(attr as BenchmarkArgs);
273    let input_fn = parse_macro_input!(item as ItemFn);
274
275    let fn_name = &input_fn.sig.ident;
276    let fn_name_str = fn_name.to_string();
277    let vis = &input_fn.vis;
278    let sig = &input_fn.sig;
279    let block = &input_fn.block;
280    let attrs = &input_fn.attrs;
281
282    // Validate based on whether setup is provided
283    if args.setup.is_some() {
284        // With setup: must have exactly one parameter
285        if input_fn.sig.inputs.len() != 1 {
286            let param_count = input_fn.sig.inputs.len();
287            return syn::Error::new_spanned(
288                &input_fn.sig,
289                format!(
290                    "#[benchmark(setup = ...)] functions must take exactly one parameter.\n\
291                     Found {} parameter(s).\n\n\
292                     Example:\n\
293                     fn setup_data() -> MyData {{ ... }}\n\n\
294                     #[benchmark(setup = setup_data)]\n\
295                     fn {}(input: &MyData) {{\n\
296                         // input is the result of setup_data()\n\
297                     }}",
298                    param_count, fn_name_str
299                ),
300            )
301            .to_compile_error()
302            .into();
303        }
304    } else {
305        // No setup: must have no parameters
306        if !input_fn.sig.inputs.is_empty() {
307            let param_count = input_fn.sig.inputs.len();
308            let param_names: Vec<String> = input_fn
309                .sig
310                .inputs
311                .iter()
312                .map(|arg| match arg {
313                    syn::FnArg::Receiver(_) => "self".to_string(),
314                    syn::FnArg::Typed(pat) => quote!(#pat).to_string(),
315                })
316                .collect();
317            return syn::Error::new_spanned(
318                &input_fn.sig.inputs,
319                format!(
320                    "#[benchmark] functions must take no parameters.\n\
321                     Found {} parameter(s): {}\n\n\
322                     If you need setup data, use the setup attribute:\n\n\
323                     fn setup_data() -> MyData {{ ... }}\n\n\
324                     #[benchmark(setup = setup_data)]\n\
325                     fn {}(input: &MyData) {{\n\
326                         // Your benchmark code using input\n\
327                     }}",
328                    param_count,
329                    param_names.join(", "),
330                    fn_name_str
331                ),
332            )
333            .to_compile_error()
334            .into();
335        }
336    }
337
338    // Validate: function must return () (unit type)
339    match &input_fn.sig.output {
340        ReturnType::Default => {} // () return type is OK
341        ReturnType::Type(_, return_type) => {
342            let type_str = quote!(#return_type).to_string();
343            if type_str.trim() != "()" {
344                return syn::Error::new_spanned(
345                    return_type,
346                    format!(
347                        "#[benchmark] functions must return () (unit type).\n\
348                         Found return type: {}\n\n\
349                         Benchmark results should be consumed with std::hint::black_box() \
350                         rather than returned:\n\n\
351                         #[benchmark]\n\
352                         fn {}() {{\n\
353                             let result = compute_something();\n\
354                             std::hint::black_box(result);  // Prevents optimization\n\
355                         }}",
356                        type_str, fn_name_str
357                    ),
358                )
359                .to_compile_error()
360                .into();
361            }
362        }
363    }
364
365    // Generate the runner based on configuration
366    let runner = generate_runner(fn_name, &args);
367
368    let expanded = quote! {
369        // Preserve the original function
370        #(#attrs)*
371        #vis #sig {
372            #block
373        }
374
375        // Register the function with inventory
376        ::inventory::submit! {
377            ::mobench_sdk::registry::BenchFunction {
378                name: ::std::concat!(::std::module_path!(), "::", #fn_name_str),
379                runner: #runner,
380            }
381        }
382    };
383
384    TokenStream::from(expanded)
385}
386
/// Builds the runner closure token stream that `benchmark` registers with
/// inventory.
///
/// Each generated closure takes a `::mobench_sdk::timing::BenchSpec` and
/// returns `Result<BenchReport, TimingError>`, matching the `runner` field of
/// `BenchFunction`. The (setup, teardown, per_iteration) combination parsed
/// from the attribute selects which timing-harness entry point is invoked.
fn generate_runner(fn_name: &Ident, args: &BenchmarkArgs) -> proc_macro2::TokenStream {
    match (&args.setup, &args.teardown, args.per_iteration) {
        // No setup - simple benchmark
        // (per_iteration is meaningless without setup, hence the `_`)
        (None, None, _) => quote! {
            |spec: ::mobench_sdk::timing::BenchSpec| -> ::std::result::Result<::mobench_sdk::timing::BenchReport, ::mobench_sdk::timing::TimingError> {
                ::mobench_sdk::timing::run_closure(spec, || {
                    #fn_name();
                    Ok(())
                })
            }
        },

        // Setup only, runs once before all iterations
        (Some(setup), None, false) => quote! {
            |spec: ::mobench_sdk::timing::BenchSpec| -> ::std::result::Result<::mobench_sdk::timing::BenchReport, ::mobench_sdk::timing::TimingError> {
                ::mobench_sdk::timing::run_closure_with_setup(
                    spec,
                    || #setup(),
                    |input| {
                        #fn_name(input);
                        Ok(())
                    },
                )
            }
        },

        // Setup only, per iteration (for mutating benchmarks)
        (Some(setup), None, true) => quote! {
            |spec: ::mobench_sdk::timing::BenchSpec| -> ::std::result::Result<::mobench_sdk::timing::BenchReport, ::mobench_sdk::timing::TimingError> {
                ::mobench_sdk::timing::run_closure_with_setup_per_iter(
                    spec,
                    || #setup(),
                    |input| {
                        #fn_name(input);
                        Ok(())
                    },
                )
            }
        },

        // Setup + teardown (per_iteration with teardown is rejected during parsing)
        (Some(setup), Some(teardown), false) => quote! {
            |spec: ::mobench_sdk::timing::BenchSpec| -> ::std::result::Result<::mobench_sdk::timing::BenchReport, ::mobench_sdk::timing::TimingError> {
                ::mobench_sdk::timing::run_closure_with_setup_teardown(
                    spec,
                    || #setup(),
                    |input| {
                        #fn_name(input);
                        Ok(())
                    },
                    |input| #teardown(input),
                )
            }
        },

        // These cases are rejected during parsing, but the match must still be
        // exhaustive; emit a compile error in case they ever slip through.
        (None, Some(_), _) | (Some(_), Some(_), true) => {
            quote! { compile_error!("invalid benchmark configuration") }
        }
    }
}