// mobench_macros/lib.rs
1//! # mobench-macros
2//!
//! [crates.io](https://crates.io/crates/mobench-macros)
//! [docs.rs](https://docs.rs/mobench-macros)
//! [license](https://github.com/worldcoin/mobile-bench-rs/blob/main/LICENSE)
6//!
7//! Procedural macros for the mobench mobile benchmarking SDK.
8//!
9//! This crate provides the [`#[benchmark]`](macro@benchmark) attribute macro
10//! that marks functions for mobile benchmarking. Functions annotated with this
11//! macro are automatically registered in a global registry and can be discovered
12//! and executed at runtime.
13//!
14//! ## Usage
15//!
16//! Most users should import the macro via [`mobench-sdk`](https://crates.io/crates/mobench-sdk)
17//! rather than using this crate directly:
18//!
19//! ```ignore
20//! use mobench_sdk::benchmark;
21//!
22//! #[benchmark]
23//! fn my_benchmark() {
24//! // Your benchmark code here
25//! let result = expensive_computation();
26//! std::hint::black_box(result);
27//! }
28//! ```
29//!
30//! ## Setup and Teardown
31//!
32//! For benchmarks that need expensive setup that shouldn't be measured:
33//!
34//! ```ignore
35//! use mobench_sdk::benchmark;
36//!
37//! fn setup_data() -> Vec<u8> {
38//! vec![0u8; 1_000_000] // Not measured
39//! }
40//!
41//! #[benchmark(setup = setup_data)]
42//! fn hash_benchmark(data: &Vec<u8>) {
43//! std::hint::black_box(compute_hash(data)); // Only this is measured
44//! }
45//! ```
46//!
47//! ## How It Works
48//!
49//! The `#[benchmark]` macro:
50//!
51//! 1. **Preserves the original function** - The function remains callable as normal
52//! 2. **Registers with inventory** - Creates a static registration that the SDK discovers at runtime
53//! 3. **Captures the fully-qualified name** - Uses `module_path!()` to generate unique names like `my_crate::my_module::my_benchmark`
54//! 4. **Handles setup/teardown** - If specified, wraps the benchmark with setup/teardown that aren't timed
55//!
56//! ## Requirements
57//!
58//! - The [`inventory`](https://crates.io/crates/inventory) crate must be in your dependency tree
59//! - Simple benchmarks: no parameters, returns `()`
60//! - With setup: exactly one parameter (reference to setup result), returns `()`
61//! - The function should not panic during normal execution
62//!
63//! ## Crate Ecosystem
64//!
65//! This crate is part of the mobench ecosystem:
66//!
67//! - **[`mobench-sdk`](https://crates.io/crates/mobench-sdk)** - Core SDK with timing harness (re-exports this macro)
68//! - **[`mobench`](https://crates.io/crates/mobench)** - CLI tool
69//! - **`mobench-macros`** (this crate) - Proc macros
70
71use proc_macro::TokenStream;
72use quote::quote;
73use syn::{
74 Ident, ItemFn, ReturnType, Token,
75 parse::{Parse, ParseStream},
76 parse_macro_input,
77 punctuated::Punctuated,
78};
79
/// Arguments to the benchmark attribute
///
/// Parsed from the attribute's argument list, e.g.
/// `#[benchmark(setup = f, teardown = g)]` or
/// `#[benchmark(setup = f, per_iteration)]`.
struct BenchmarkArgs {
    // Identifier of a setup function whose result is fed to the benchmark;
    // setup runs outside the timed region.
    setup: Option<Ident>,
    // Identifier of a teardown function that consumes the setup value after
    // the run; only meaningful together with `setup` (enforced in `Parse`).
    teardown: Option<Ident>,
    // When true, setup is re-run for each iteration instead of once up front
    // (for benchmarks that consume or mutate their input).
    per_iteration: bool,
}
86
87impl Parse for BenchmarkArgs {
88 fn parse(input: ParseStream) -> syn::Result<Self> {
89 let mut setup = None;
90 let mut teardown = None;
91 let mut per_iteration = false;
92
93 if input.is_empty() {
94 return Ok(Self {
95 setup,
96 teardown,
97 per_iteration,
98 });
99 }
100
101 // Parse key = value pairs separated by commas
102 let args = Punctuated::<BenchmarkArg, Token![,]>::parse_terminated(input)?;
103
104 for arg in args {
105 match arg {
106 BenchmarkArg::Setup(ident) => {
107 if setup.is_some() {
108 return Err(syn::Error::new_spanned(ident, "duplicate setup argument"));
109 }
110 setup = Some(ident);
111 }
112 BenchmarkArg::Teardown(ident) => {
113 if teardown.is_some() {
114 return Err(syn::Error::new_spanned(
115 ident,
116 "duplicate teardown argument",
117 ));
118 }
119 teardown = Some(ident);
120 }
121 BenchmarkArg::PerIteration => {
122 per_iteration = true;
123 }
124 }
125 }
126
127 // Validate: teardown without setup is invalid
128 if teardown.is_some() && setup.is_none() {
129 return Err(syn::Error::new(
130 proc_macro2::Span::call_site(),
131 "teardown requires setup to be specified",
132 ));
133 }
134
135 // Validate: per_iteration with teardown is not supported
136 if per_iteration && teardown.is_some() {
137 return Err(syn::Error::new(
138 proc_macro2::Span::call_site(),
139 "per_iteration mode is not compatible with teardown",
140 ));
141 }
142
143 Ok(Self {
144 setup,
145 teardown,
146 per_iteration,
147 })
148 }
149}
150
/// A single `key` or `key = value` argument inside `#[benchmark(...)]`,
/// produced by `BenchmarkArg::parse` and folded into `BenchmarkArgs`.
enum BenchmarkArg {
    // `setup = ident`
    Setup(Ident),
    // `teardown = ident`
    Teardown(Ident),
    // bare `per_iteration` flag
    PerIteration,
}
156
157impl Parse for BenchmarkArg {
158 fn parse(input: ParseStream) -> syn::Result<Self> {
159 let name: Ident = input.parse()?;
160
161 match name.to_string().as_str() {
162 "setup" => {
163 input.parse::<Token![=]>()?;
164 let value: Ident = input.parse()?;
165 Ok(BenchmarkArg::Setup(value))
166 }
167 "teardown" => {
168 input.parse::<Token![=]>()?;
169 let value: Ident = input.parse()?;
170 Ok(BenchmarkArg::Teardown(value))
171 }
172 "per_iteration" => Ok(BenchmarkArg::PerIteration),
173 _ => Err(syn::Error::new_spanned(
174 name,
175 "expected 'setup', 'teardown', or 'per_iteration'",
176 )),
177 }
178 }
179}
180
181/// Marks a function as a benchmark for mobile execution.
182///
183/// This attribute macro registers the function in the global benchmark registry,
184/// making it discoverable and executable by the mobench runtime.
185///
186/// # Basic Usage
187///
188/// ```ignore
189/// use mobench_sdk::benchmark;
190///
191/// #[benchmark]
192/// fn fibonacci_bench() {
193/// let result = fibonacci(30);
194/// std::hint::black_box(result);
195/// }
196/// ```
197///
198/// # With Setup (setup runs once, not measured)
199///
200/// ```ignore
201/// use mobench_sdk::benchmark;
202///
203/// fn setup_proof() -> ProofInput {
204/// ProofInput::generate() // Expensive, not measured
205/// }
206///
207/// #[benchmark(setup = setup_proof)]
208/// fn verify_proof(input: &ProofInput) {
209/// verify(&input.proof); // Only this is measured
210/// }
211/// ```
212///
213/// # With Per-Iteration Setup (for mutating benchmarks)
214///
215/// ```ignore
216/// use mobench_sdk::benchmark;
217///
218/// fn generate_random_vec() -> Vec<i32> {
219/// (0..1000).map(|_| rand::random()).collect()
220/// }
221///
222/// #[benchmark(setup = generate_random_vec, per_iteration)]
223/// fn sort_benchmark(data: Vec<i32>) {
224/// let mut data = data;
225/// data.sort();
226/// std::hint::black_box(data);
227/// }
228/// ```
229///
230/// # With Setup and Teardown
231///
232/// ```ignore
233/// use mobench_sdk::benchmark;
234///
235/// fn setup_db() -> Database { Database::connect("test.db") }
236/// fn cleanup_db(db: Database) { db.close(); }
237///
238/// #[benchmark(setup = setup_db, teardown = cleanup_db)]
239/// fn db_query(db: &Database) {
240/// db.query("SELECT * FROM users");
241/// }
242/// ```
243///
244/// # Function Requirements
245///
246/// **Without setup:**
247/// - Take no parameters
248/// - Return `()` (unit type)
249///
250/// **With setup:**
251/// - Take exactly one parameter (reference to setup result, or owned for per_iteration)
252/// - Return `()` (unit type)
253///
254/// # Best Practices
255///
256/// ## Use `black_box` to Prevent Optimization
257///
258/// Always wrap results with [`std::hint::black_box`] to prevent the compiler
259/// from optimizing away the computation:
260///
261/// ```ignore
262/// #[benchmark]
263/// fn good_benchmark() {
264/// let result = compute_something();
265/// std::hint::black_box(result); // Prevents optimization
266/// }
267/// ```
268#[proc_macro_attribute]
269pub fn benchmark(attr: TokenStream, item: TokenStream) -> TokenStream {
270 let args = parse_macro_input!(attr as BenchmarkArgs);
271 let input_fn = parse_macro_input!(item as ItemFn);
272
273 let fn_name = &input_fn.sig.ident;
274 let fn_name_str = fn_name.to_string();
275 let vis = &input_fn.vis;
276 let sig = &input_fn.sig;
277 let block = &input_fn.block;
278 let attrs = &input_fn.attrs;
279
280 if input_fn.sig.asyncness.is_some() {
281 return syn::Error::new_spanned(
282 input_fn.sig.asyncness,
283 "#[benchmark] does not support async fn. Move async work into a synchronous runtime boundary so the benchmark measures execution instead of future creation.",
284 )
285 .to_compile_error()
286 .into();
287 }
288
289 // Validate based on whether setup is provided
290 if args.setup.is_some() {
291 // With setup: must have exactly one parameter
292 if input_fn.sig.inputs.len() != 1 {
293 let param_count = input_fn.sig.inputs.len();
294 return syn::Error::new_spanned(
295 &input_fn.sig,
296 format!(
297 "#[benchmark(setup = ...)] functions must take exactly one parameter.\n\
298 Found {} parameter(s).\n\n\
299 Example:\n\
300 fn setup_data() -> MyData {{ ... }}\n\n\
301 #[benchmark(setup = setup_data)]\n\
302 fn {}(input: &MyData) {{\n\
303 // input is the result of setup_data()\n\
304 }}",
305 param_count, fn_name_str
306 ),
307 )
308 .to_compile_error()
309 .into();
310 }
311 } else {
312 // No setup: must have no parameters
313 if !input_fn.sig.inputs.is_empty() {
314 let param_count = input_fn.sig.inputs.len();
315 let param_names: Vec<String> = input_fn
316 .sig
317 .inputs
318 .iter()
319 .map(|arg| match arg {
320 syn::FnArg::Receiver(_) => "self".to_string(),
321 syn::FnArg::Typed(pat) => quote!(#pat).to_string(),
322 })
323 .collect();
324 return syn::Error::new_spanned(
325 &input_fn.sig.inputs,
326 format!(
327 "#[benchmark] functions must take no parameters.\n\
328 Found {} parameter(s): {}\n\n\
329 If you need setup data, use the setup attribute:\n\n\
330 fn setup_data() -> MyData {{ ... }}\n\n\
331 #[benchmark(setup = setup_data)]\n\
332 fn {}(input: &MyData) {{\n\
333 // Your benchmark code using input\n\
334 }}",
335 param_count,
336 param_names.join(", "),
337 fn_name_str
338 ),
339 )
340 .to_compile_error()
341 .into();
342 }
343 }
344
345 // Validate: function must return () (unit type)
346 match &input_fn.sig.output {
347 ReturnType::Default => {} // () return type is OK
348 ReturnType::Type(_, return_type) => {
349 let type_str = quote!(#return_type).to_string();
350 if type_str.trim() != "()" {
351 return syn::Error::new_spanned(
352 return_type,
353 format!(
354 "#[benchmark] functions must return () (unit type).\n\
355 Found return type: {}\n\n\
356 Benchmark results should be consumed with std::hint::black_box() \
357 rather than returned:\n\n\
358 #[benchmark]\n\
359 fn {}() {{\n\
360 let result = compute_something();\n\
361 std::hint::black_box(result); // Prevents optimization\n\
362 }}",
363 type_str, fn_name_str
364 ),
365 )
366 .to_compile_error()
367 .into();
368 }
369 }
370 }
371
372 // Generate the runner based on configuration
373 let runner = generate_runner(fn_name, &args);
374
375 let expanded = quote! {
376 // Preserve the original function
377 #(#attrs)*
378 #vis #sig {
379 #block
380 }
381
382 // Register the function with inventory
383 ::inventory::submit! {
384 ::mobench_sdk::registry::BenchFunction {
385 name: ::std::concat!(::std::module_path!(), "::", #fn_name_str),
386 runner: #runner,
387 }
388 }
389 };
390
391 TokenStream::from(expanded)
392}
393
394fn generate_runner(fn_name: &Ident, args: &BenchmarkArgs) -> proc_macro2::TokenStream {
395 match (&args.setup, &args.teardown, args.per_iteration) {
396 // No setup - simple benchmark
397 (None, None, _) => quote! {
398 |spec: ::mobench_sdk::timing::BenchSpec| -> ::std::result::Result<::mobench_sdk::timing::BenchReport, ::mobench_sdk::timing::TimingError> {
399 ::mobench_sdk::timing::run_closure(spec, || {
400 #fn_name();
401 Ok(())
402 })
403 }
404 },
405
406 // Setup only, runs once before all iterations
407 (Some(setup), None, false) => quote! {
408 |spec: ::mobench_sdk::timing::BenchSpec| -> ::std::result::Result<::mobench_sdk::timing::BenchReport, ::mobench_sdk::timing::TimingError> {
409 ::mobench_sdk::timing::run_closure_with_setup(
410 spec,
411 || #setup(),
412 |input| {
413 #fn_name(input);
414 Ok(())
415 },
416 )
417 }
418 },
419
420 // Setup only, per iteration (for mutating benchmarks)
421 (Some(setup), None, true) => quote! {
422 |spec: ::mobench_sdk::timing::BenchSpec| -> ::std::result::Result<::mobench_sdk::timing::BenchReport, ::mobench_sdk::timing::TimingError> {
423 ::mobench_sdk::timing::run_closure_with_setup_per_iter(
424 spec,
425 || #setup(),
426 |input| {
427 #fn_name(input);
428 Ok(())
429 },
430 )
431 }
432 },
433
434 // Setup + teardown (per_iteration with teardown is rejected during parsing)
435 (Some(setup), Some(teardown), false) => quote! {
436 |spec: ::mobench_sdk::timing::BenchSpec| -> ::std::result::Result<::mobench_sdk::timing::BenchReport, ::mobench_sdk::timing::TimingError> {
437 ::mobench_sdk::timing::run_closure_with_setup_teardown(
438 spec,
439 || #setup(),
440 |input| {
441 #fn_name(input);
442 Ok(())
443 },
444 |input| #teardown(input),
445 )
446 }
447 },
448
449 // These cases are rejected during parsing, but we need to handle them
450 (None, Some(_), _) | (Some(_), Some(_), true) => {
451 quote! { compile_error!("invalid benchmark configuration") }
452 }
453 }
454}