// mobench_macros/lib.rs
//! # mobench-macros
//!
//! [crates.io](https://crates.io/crates/mobench-macros)
//! [docs.rs](https://docs.rs/mobench-macros)
//! [license](https://github.com/worldcoin/mobile-bench-rs/blob/main/LICENSE)
//!
//! Procedural macros for the mobench mobile benchmarking SDK.
//!
//! This crate provides the [`#[benchmark]`](macro@benchmark) attribute macro
//! that marks functions for mobile benchmarking. Functions annotated with this
//! macro are automatically registered in a global registry and can be discovered
//! and executed at runtime.
//!
//! ## Usage
//!
//! Most users should import the macro via [`mobench-sdk`](https://crates.io/crates/mobench-sdk)
//! rather than using this crate directly:
//!
//! ```ignore
//! use mobench_sdk::benchmark;
//!
//! #[benchmark]
//! fn my_benchmark() {
//!     // Your benchmark code here
//!     let result = expensive_computation();
//!     std::hint::black_box(result);
//! }
//! ```
//!
//! ## Setup and Teardown
//!
//! For benchmarks that need expensive setup that shouldn't be measured:
//!
//! ```ignore
//! use mobench_sdk::benchmark;
//!
//! fn setup_data() -> Vec<u8> {
//!     vec![0u8; 1_000_000] // Not measured
//! }
//!
//! #[benchmark(setup = setup_data)]
//! fn hash_benchmark(data: &Vec<u8>) {
//!     std::hint::black_box(compute_hash(data)); // Only this is measured
//! }
//! ```
//!
//! ## How It Works
//!
//! The `#[benchmark]` macro:
//!
//! 1. **Preserves the original function** - The function remains callable as normal
//! 2. **Registers with inventory** - Creates a static registration that the SDK discovers at runtime
//! 3. **Captures the fully-qualified name** - Uses `module_path!()` to generate unique names like `my_crate::my_module::my_benchmark`
//! 4. **Handles setup/teardown** - If specified, wraps the benchmark with setup/teardown that aren't timed
//!
//! ## Requirements
//!
//! - The [`inventory`](https://crates.io/crates/inventory) crate must be in your dependency tree
//! - Simple benchmarks: no parameters, returns `()`
//! - With setup: exactly one parameter (reference to setup result), returns `()`
//! - The function should not panic during normal execution
//!
//! ## Crate Ecosystem
//!
//! This crate is part of the mobench ecosystem:
//!
//! - **[`mobench-sdk`](https://crates.io/crates/mobench-sdk)** - Core SDK with timing harness (re-exports this macro)
//! - **[`mobench`](https://crates.io/crates/mobench)** - CLI tool
//! - **`mobench-macros`** (this crate) - Proc macros

use proc_macro::TokenStream;
use quote::quote;
use syn::{
    Ident, ItemFn, ReturnType, Token,
    parse::{Parse, ParseStream},
    parse_macro_input,
    punctuated::Punctuated,
};

80/// Arguments to the benchmark attribute
81struct BenchmarkArgs {
82 setup: Option<Ident>,
83 teardown: Option<Ident>,
84 per_iteration: bool,
85}
86
87impl Parse for BenchmarkArgs {
88 fn parse(input: ParseStream) -> syn::Result<Self> {
89 let mut setup = None;
90 let mut teardown = None;
91 let mut per_iteration = false;
92
93 if input.is_empty() {
94 return Ok(Self {
95 setup,
96 teardown,
97 per_iteration,
98 });
99 }
100
101 // Parse key = value pairs separated by commas
102 let args = Punctuated::<BenchmarkArg, Token![,]>::parse_terminated(input)?;
103
104 for arg in args {
105 match arg {
106 BenchmarkArg::Setup(ident) => {
107 if setup.is_some() {
108 return Err(syn::Error::new_spanned(ident, "duplicate setup argument"));
109 }
110 setup = Some(ident);
111 }
112 BenchmarkArg::Teardown(ident) => {
113 if teardown.is_some() {
114 return Err(syn::Error::new_spanned(
115 ident,
116 "duplicate teardown argument",
117 ));
118 }
119 teardown = Some(ident);
120 }
121 BenchmarkArg::PerIteration => {
122 per_iteration = true;
123 }
124 }
125 }
126
127 // Validate: teardown without setup is invalid
128 if teardown.is_some() && setup.is_none() {
129 return Err(syn::Error::new(
130 proc_macro2::Span::call_site(),
131 "teardown requires setup to be specified",
132 ));
133 }
134
135 // Validate: per_iteration with teardown is not supported
136 if per_iteration && teardown.is_some() {
137 return Err(syn::Error::new(
138 proc_macro2::Span::call_site(),
139 "per_iteration mode is not compatible with teardown",
140 ));
141 }
142
143 Ok(Self {
144 setup,
145 teardown,
146 per_iteration,
147 })
148 }
149}
150
151enum BenchmarkArg {
152 Setup(Ident),
153 Teardown(Ident),
154 PerIteration,
155}
156
157impl Parse for BenchmarkArg {
158 fn parse(input: ParseStream) -> syn::Result<Self> {
159 let name: Ident = input.parse()?;
160
161 match name.to_string().as_str() {
162 "setup" => {
163 input.parse::<Token![=]>()?;
164 let value: Ident = input.parse()?;
165 Ok(BenchmarkArg::Setup(value))
166 }
167 "teardown" => {
168 input.parse::<Token![=]>()?;
169 let value: Ident = input.parse()?;
170 Ok(BenchmarkArg::Teardown(value))
171 }
172 "per_iteration" => Ok(BenchmarkArg::PerIteration),
173 _ => Err(syn::Error::new_spanned(
174 name,
175 "expected 'setup', 'teardown', or 'per_iteration'",
176 )),
177 }
178 }
179}
180
181/// Marks a function as a benchmark for mobile execution.
182///
183/// This attribute macro registers the function in the global benchmark registry,
184/// making it discoverable and executable by the mobench runtime.
185///
186/// # Basic Usage
187///
188/// ```ignore
189/// use mobench_sdk::benchmark;
190///
191/// #[benchmark]
192/// fn fibonacci_bench() {
193/// let result = fibonacci(30);
194/// std::hint::black_box(result);
195/// }
196/// ```
197///
198/// # With Setup (setup runs once, not measured)
199///
200/// ```ignore
201/// use mobench_sdk::benchmark;
202///
203/// fn setup_proof() -> ProofInput {
204/// ProofInput::generate() // Expensive, not measured
205/// }
206///
207/// #[benchmark(setup = setup_proof)]
208/// fn verify_proof(input: &ProofInput) {
209/// verify(&input.proof); // Only this is measured
210/// }
211/// ```
212///
213/// # With Per-Iteration Setup (for mutating benchmarks)
214///
215/// ```ignore
216/// use mobench_sdk::benchmark;
217///
218/// fn generate_random_vec() -> Vec<i32> {
219/// (0..1000).map(|_| rand::random()).collect()
220/// }
221///
222/// #[benchmark(setup = generate_random_vec, per_iteration)]
223/// fn sort_benchmark(data: Vec<i32>) {
224/// let mut data = data;
225/// data.sort();
226/// std::hint::black_box(data);
227/// }
228/// ```
229///
230/// # With Setup and Teardown
231///
232/// ```ignore
233/// use mobench_sdk::benchmark;
234///
235/// fn setup_db() -> Database { Database::connect("test.db") }
236/// fn cleanup_db(db: Database) { db.close(); }
237///
238/// #[benchmark(setup = setup_db, teardown = cleanup_db)]
239/// fn db_query(db: &Database) {
240/// db.query("SELECT * FROM users");
241/// }
242/// ```
243///
244/// # Function Requirements
245///
246/// **Without setup:**
247/// - Take no parameters
248/// - Return `()` (unit type)
249///
250/// **With setup:**
251/// - Take exactly one parameter (reference to setup result, or owned for per_iteration)
252/// - Return `()` (unit type)
253///
254/// # Best Practices
255///
256/// ## Use `black_box` to Prevent Optimization
257///
258/// Always wrap results with [`std::hint::black_box`] to prevent the compiler
259/// from optimizing away the computation:
260///
261/// ```ignore
262/// #[benchmark]
263/// fn good_benchmark() {
264/// let result = compute_something();
265/// std::hint::black_box(result); // Prevents optimization
266/// }
267/// ```
268#[proc_macro_attribute]
269pub fn benchmark(attr: TokenStream, item: TokenStream) -> TokenStream {
270 let args = parse_macro_input!(attr as BenchmarkArgs);
271 let input_fn = parse_macro_input!(item as ItemFn);
272
273 let fn_name = &input_fn.sig.ident;
274 let fn_name_str = fn_name.to_string();
275 let vis = &input_fn.vis;
276 let sig = &input_fn.sig;
277 let block = &input_fn.block;
278 let attrs = &input_fn.attrs;
279
280 // Validate based on whether setup is provided
281 if args.setup.is_some() {
282 // With setup: must have exactly one parameter
283 if input_fn.sig.inputs.len() != 1 {
284 let param_count = input_fn.sig.inputs.len();
285 return syn::Error::new_spanned(
286 &input_fn.sig,
287 format!(
288 "#[benchmark(setup = ...)] functions must take exactly one parameter.\n\
289 Found {} parameter(s).\n\n\
290 Example:\n\
291 fn setup_data() -> MyData {{ ... }}\n\n\
292 #[benchmark(setup = setup_data)]\n\
293 fn {}(input: &MyData) {{\n\
294 // input is the result of setup_data()\n\
295 }}",
296 param_count, fn_name_str
297 ),
298 )
299 .to_compile_error()
300 .into();
301 }
302 } else {
303 // No setup: must have no parameters
304 if !input_fn.sig.inputs.is_empty() {
305 let param_count = input_fn.sig.inputs.len();
306 let param_names: Vec<String> = input_fn
307 .sig
308 .inputs
309 .iter()
310 .map(|arg| match arg {
311 syn::FnArg::Receiver(_) => "self".to_string(),
312 syn::FnArg::Typed(pat) => quote!(#pat).to_string(),
313 })
314 .collect();
315 return syn::Error::new_spanned(
316 &input_fn.sig.inputs,
317 format!(
318 "#[benchmark] functions must take no parameters.\n\
319 Found {} parameter(s): {}\n\n\
320 If you need setup data, use the setup attribute:\n\n\
321 fn setup_data() -> MyData {{ ... }}\n\n\
322 #[benchmark(setup = setup_data)]\n\
323 fn {}(input: &MyData) {{\n\
324 // Your benchmark code using input\n\
325 }}",
326 param_count,
327 param_names.join(", "),
328 fn_name_str
329 ),
330 )
331 .to_compile_error()
332 .into();
333 }
334 }
335
336 // Validate: function must return () (unit type)
337 match &input_fn.sig.output {
338 ReturnType::Default => {} // () return type is OK
339 ReturnType::Type(_, return_type) => {
340 let type_str = quote!(#return_type).to_string();
341 if type_str.trim() != "()" {
342 return syn::Error::new_spanned(
343 return_type,
344 format!(
345 "#[benchmark] functions must return () (unit type).\n\
346 Found return type: {}\n\n\
347 Benchmark results should be consumed with std::hint::black_box() \
348 rather than returned:\n\n\
349 #[benchmark]\n\
350 fn {}() {{\n\
351 let result = compute_something();\n\
352 std::hint::black_box(result); // Prevents optimization\n\
353 }}",
354 type_str, fn_name_str
355 ),
356 )
357 .to_compile_error()
358 .into();
359 }
360 }
361 }
362
363 // Generate the runner based on configuration
364 let runner = generate_runner(fn_name, &args);
365
366 let expanded = quote! {
367 // Preserve the original function
368 #(#attrs)*
369 #vis #sig {
370 #block
371 }
372
373 // Register the function with inventory
374 ::inventory::submit! {
375 ::mobench_sdk::registry::BenchFunction {
376 name: ::std::concat!(::std::module_path!(), "::", #fn_name_str),
377 runner: #runner,
378 }
379 }
380 };
381
382 TokenStream::from(expanded)
383}
384
385fn generate_runner(fn_name: &Ident, args: &BenchmarkArgs) -> proc_macro2::TokenStream {
386 match (&args.setup, &args.teardown, args.per_iteration) {
387 // No setup - simple benchmark
388 (None, None, _) => quote! {
389 |spec: ::mobench_sdk::timing::BenchSpec| -> ::std::result::Result<::mobench_sdk::timing::BenchReport, ::mobench_sdk::timing::TimingError> {
390 ::mobench_sdk::timing::run_closure(spec, || {
391 #fn_name();
392 Ok(())
393 })
394 }
395 },
396
397 // Setup only, runs once before all iterations
398 (Some(setup), None, false) => quote! {
399 |spec: ::mobench_sdk::timing::BenchSpec| -> ::std::result::Result<::mobench_sdk::timing::BenchReport, ::mobench_sdk::timing::TimingError> {
400 ::mobench_sdk::timing::run_closure_with_setup(
401 spec,
402 || #setup(),
403 |input| {
404 #fn_name(input);
405 Ok(())
406 },
407 )
408 }
409 },
410
411 // Setup only, per iteration (for mutating benchmarks)
412 (Some(setup), None, true) => quote! {
413 |spec: ::mobench_sdk::timing::BenchSpec| -> ::std::result::Result<::mobench_sdk::timing::BenchReport, ::mobench_sdk::timing::TimingError> {
414 ::mobench_sdk::timing::run_closure_with_setup_per_iter(
415 spec,
416 || #setup(),
417 |input| {
418 #fn_name(input);
419 Ok(())
420 },
421 )
422 }
423 },
424
425 // Setup + teardown (per_iteration with teardown is rejected during parsing)
426 (Some(setup), Some(teardown), false) => quote! {
427 |spec: ::mobench_sdk::timing::BenchSpec| -> ::std::result::Result<::mobench_sdk::timing::BenchReport, ::mobench_sdk::timing::TimingError> {
428 ::mobench_sdk::timing::run_closure_with_setup_teardown(
429 spec,
430 || #setup(),
431 |input| {
432 #fn_name(input);
433 Ok(())
434 },
435 |input| #teardown(input),
436 )
437 }
438 },
439
440 // These cases are rejected during parsing, but we need to handle them
441 (None, Some(_), _) | (Some(_), Some(_), true) => {
442 quote! { compile_error!("invalid benchmark configuration") }
443 }
444 }
445}