iai_callgrind_macros/lib.rs
1//! The library of iai-callgrind-macros
2
3#![cfg_attr(docsrs, feature(doc_auto_cfg))]
4#![doc(test(attr(warn(unused))))]
5#![doc(test(attr(allow(unused_extern_crates))))]
6
7mod bin_bench;
8mod common;
9pub(crate) mod defaults;
10mod derive_macros;
11mod lib_bench;
12
13use proc_macro::TokenStream;
14use proc_macro_error2::proc_macro_error;
15use serde::Deserialize;
16
/// Deserialized subset of the JSON document emitted by `cargo metadata`.
///
/// Only the fields this crate actually needs are declared; serde ignores the
/// rest of the document by default.
#[derive(Debug, Deserialize)]
struct CargoMetadata {
    /// Absolute path of the workspace root as reported by cargo. Used to
    /// resolve relative paths (e.g. the `file` parameter of `#[benches]`).
    workspace_root: String,
}
21
22impl CargoMetadata {
23 fn try_new() -> Option<Self> {
24 std::process::Command::new(option_env!("CARGO").unwrap_or("cargo"))
25 .args(["metadata", "--no-deps", "--format-version", "1"])
26 .output()
27 .ok()
28 .and_then(|output| serde_json::de::from_slice(&output.stdout).ok())
29 }
30}
31
32/// The `#[library_benchmark]` attribute lets you define a benchmark function which you can later
33/// use in the `library_benchmark_groups!` macro.
34///
35/// This attribute accepts the following parameters:
36/// * `config`: Accepts a `LibraryBenchmarkConfig`
37/// * `setup`: A global setup function which is applied to all following [`#[bench]`][bench] and
38/// [`#[benches]`][benches] attributes if not overwritten by a `setup` parameter of these
39/// attributes.
40/// * `teardown`: Similar to `setup` but takes a global `teardown` function.
41///
42/// A short introductory example on the usage including the `setup` parameter:
43///
44/// ```rust
45/// # use iai_callgrind_macros::library_benchmark;
46/// # mod iai_callgrind {
47/// # pub mod client_requests { pub mod cachegrind {
48/// # pub fn start_instrumentation() {}
49/// # pub fn stop_instrumentation() {}
50/// # }}
51/// # pub struct LibraryBenchmarkConfig {}
52/// # pub mod __internal {
53/// # pub struct InternalMacroLibBench {
54/// # pub id_display: Option<&'static str>,
55/// # pub args_display: Option<&'static str>,
56/// # pub func: fn(),
57/// # pub config: Option<fn() -> InternalLibraryBenchmarkConfig>
58/// # }
59/// # pub struct InternalLibraryBenchmarkConfig {}
60/// # }
61/// # }
62/// fn my_setup(value: u64) -> String {
63/// format!("{value}")
64/// }
65///
66/// fn my_other_setup(value: u64) -> String {
67/// format!("{}", value + 10)
68/// }
69///
70/// #[library_benchmark(setup = my_setup)]
71/// #[bench::first(21)]
72/// #[benches::multiple(42, 84)]
73/// #[bench::last(args = (102), setup = my_other_setup)]
74/// fn my_bench(value: String) {
75/// println!("{value}");
76/// }
77/// # fn main() {}
78/// ```
79///
80/// The `#[library_benchmark]` attribute can be applied in two ways.
81///
82/// 1. Using the `#[library_benchmark]` attribute as a standalone without [`#[bench]`][bench] or
83/// [`#[benches]`][benches] is fine for simple function calls without parameters.
/// 2. We mostly need to benchmark cases which would need to be set up, for example with a vector, but
85/// everything we set up within the benchmark function itself would be attributed to the event
86/// counts. The second form of this attribute macro uses the [`#[bench]`][bench] and
87/// [`#[benches]`][benches] attributes to set up benchmarks with different cases. The main
/// advantage is that the setup costs and their event counts aren't attributed to the benchmark
/// (and, as opposed to the old api, we don't have to deal with callgrind arguments, toggles,
/// inline(never), ...)
91///
92/// # The `#[bench]` attribute
93///
94/// The basic structure is `#[bench::some_id(/* parameters */)]`. The part after the `::` must be an
95/// id unique within the same `#[library_benchmark]`. This attribute accepts the following
96/// parameters:
97///
98/// * __`args`__: A tuple with a list of arguments which are passed to the benchmark function. The
99/// parentheses also need to be present if there is only a single argument (`#[bench::my_id(args =
100/// (10))]`).
101/// * __`config`__: Accepts a `LibraryBenchmarkConfig`
102/// * __`setup`__: A function which takes the arguments specified in the `args` parameter and passes
103/// its return value to the benchmark function.
104/// * __`teardown`__: A function which takes the return value of the benchmark function.
105///
106/// If no other parameters besides `args` are present you can simply pass the arguments as a list of
107/// values. Instead of `#[bench::my_id(args = (10, 20))]`, you could also use the shorter
108/// `#[bench::my_id(10, 20)]`.
109///
110/// ```rust
111/// # use iai_callgrind_macros::library_benchmark;
112/// # mod iai_callgrind {
113/// # pub mod client_requests { pub mod cachegrind {
114/// # pub fn start_instrumentation() {}
115/// # pub fn stop_instrumentation() {}
116/// # }}
117/// # pub struct LibraryBenchmarkConfig {}
118/// # pub mod __internal {
119/// # pub struct InternalMacroLibBench {
120/// # pub id_display: Option<&'static str>,
121/// # pub args_display: Option<&'static str>,
122/// # pub func: fn(),
123/// # pub config: Option<fn() -> InternalLibraryBenchmarkConfig>
124/// # }
125/// # pub struct InternalLibraryBenchmarkConfig {}
126/// # }
127/// # }
128/// // Assume this is a function in your library which you want to benchmark
129/// fn some_func(value: u64) -> u64 {
130/// 42
131/// }
132///
133/// #[library_benchmark]
134/// #[bench::some_id(42)]
135/// fn bench_some_func(value: u64) -> u64 {
136/// std::hint::black_box(some_func(value))
137/// }
138/// # fn main() {}
139/// ```
140///
141/// # The `#[benches]` attribute
142///
143/// The `#[benches]` attribute lets you define multiple benchmarks in one go. This attribute accepts
144/// the same parameters as the [`#[bench]`][bench] attribute: `args`, `config`, `setup` and
145/// `teardown` and additionally the `file` parameter. In contrast to the `args` parameter in
/// [`#[bench]`][bench], `args` takes an array of arguments. The id (`#[benches::id(/* parameters
/// */)]`) is suffixed with the index of the current element of the `args` array.
148///
149/// ```rust
150/// # use iai_callgrind_macros::library_benchmark;
151/// # mod my_lib { pub fn bubble_sort(_: Vec<i32>) -> Vec<i32> { vec![] } }
152/// # mod iai_callgrind {
153/// # pub mod client_requests { pub mod cachegrind {
154/// # pub fn start_instrumentation() {}
155/// # pub fn stop_instrumentation() {}
156/// # }}
157/// # pub struct LibraryBenchmarkConfig {}
158/// # pub mod __internal {
159/// # pub struct InternalMacroLibBench {
160/// # pub id_display: Option<&'static str>,
161/// # pub args_display: Option<&'static str>,
162/// # pub func: fn(),
163/// # pub config: Option<fn() -> InternalLibraryBenchmarkConfig>
164/// # }
165/// # pub struct InternalLibraryBenchmarkConfig {}
166/// # }
167/// # }
168/// use std::hint::black_box;
169///
170/// fn setup_worst_case_array(start: i32) -> Vec<i32> {
171/// if start.is_negative() {
172/// (start..0).rev().collect()
173/// } else {
174/// (0..start).rev().collect()
175/// }
176/// }
177///
178/// #[library_benchmark]
179/// #[benches::multiple(vec![1], vec![5])]
180/// #[benches::with_setup(args = [1, 5], setup = setup_worst_case_array)]
181/// fn bench_bubble_sort_with_benches_attribute(input: Vec<i32>) -> Vec<i32> {
182/// black_box(my_lib::bubble_sort(input))
183/// }
184/// # fn main() {}
185/// ```
186///
187/// Usually the `arguments` are passed directly to the benchmarking function as it can be seen in
188/// the `#[benches::multiple(...)]` case. In `#[benches::with_setup(...)]`, the arguments are passed
189/// to the `setup` function and the return value of the `setup` function is passed as argument to
190/// the benchmark function. The above `#[library_benchmark]` is pretty much the same as
191///
192/// ```rust
193/// # use iai_callgrind_macros::library_benchmark;
194/// # mod iai_callgrind {
195/// # pub struct LibraryBenchmarkConfig {}
196/// # pub mod client_requests { pub mod cachegrind {
197/// # pub fn start_instrumentation() {}
198/// # pub fn stop_instrumentation() {}
199/// # }}
200/// # pub mod __internal {
201/// # pub struct InternalMacroLibBench {
202/// # pub id_display: Option<&'static str>,
203/// # pub args_display: Option<&'static str>,
204/// # pub func: fn(),
205/// # pub config: Option<fn() -> InternalLibraryBenchmarkConfig>
206/// # }
207/// # pub struct InternalLibraryBenchmarkConfig {}
208/// # }
209/// # }
210/// # fn bubble_sort(_: Vec<i32>) -> Vec<i32> { vec![] }
211/// # fn setup_worst_case_array(_: i32) -> Vec<i32> { vec![] }
212/// use std::hint::black_box;
213///
214/// #[library_benchmark]
215/// #[bench::multiple_0(vec![1])]
216/// #[bench::multiple_1(vec![5])]
217/// #[bench::with_setup_0(setup_worst_case_array(1))]
218/// #[bench::with_setup_1(setup_worst_case_array(5))]
219/// fn bench_bubble_sort_with_benches_attribute(input: Vec<i32>) -> Vec<i32> {
220/// black_box(bubble_sort(input))
221/// }
222/// # fn main() {}
223/// ```
224///
225/// but a lot more concise especially if a lot of values are passed to the same `setup` function.
226///
227/// The `file` parameter goes a step further and reads the specified file line by line creating a
228/// benchmark from each line. The line is passed to the benchmark function as `String` or if the
229/// `setup` parameter is also present to the `setup` function. A small example assuming you have a
230/// file `benches/inputs` (relative paths are interpreted to the workspace root) with the following
231/// content
232///
233/// ```text
234/// 1
235/// 11
236/// 111
237/// ```
238///
239/// then
240///
241/// ```rust
242/// # use iai_callgrind_macros::library_benchmark;
243/// # mod iai_callgrind {
244/// # pub mod client_requests { pub mod cachegrind {
245/// # pub fn start_instrumentation() {}
246/// # pub fn stop_instrumentation() {}
247/// # }}
248/// # pub struct LibraryBenchmarkConfig {}
249/// # pub mod __internal {
250/// # pub struct InternalMacroLibBench {
251/// # pub id_display: Option<&'static str>,
252/// # pub args_display: Option<&'static str>,
253/// # pub func: fn(),
254/// # pub config: Option<fn() -> InternalLibraryBenchmarkConfig>
255/// # }
256/// # pub struct InternalLibraryBenchmarkConfig {}
257/// # }
258/// # }
259/// # mod my_lib { pub fn string_to_u64(_line: String) -> Result<u64, String> { Ok(0) } }
260/// use std::hint::black_box;
261/// #[library_benchmark]
262/// #[benches::by_file(file = "iai-callgrind-macros/fixtures/inputs")]
263/// fn some_bench(line: String) -> Result<u64, String> {
264/// black_box(my_lib::string_to_u64(line))
265/// }
266/// # fn main() {}
267/// ```
268///
269/// The above is roughly equivalent to the following but with the `args` parameter
270///
271/// ```rust,ignore
272/// # use iai_callgrind_macros::library_benchmark;
273/// # mod iai_callgrind {
274/// # pub struct LibraryBenchmarkConfig {}
275/// # pub mod __internal {
276/// # pub struct InternalMacroLibBench {
277/// # pub id_display: Option<&'static str>,
278/// # pub args_display: Option<&'static str>,
279/// # pub func: fn(),
280/// # pub config: Option<fn() -> InternalLibraryBenchmarkConfig>
281/// # }
282/// # pub struct InternalLibraryBenchmarkConfig {}
283/// # }
284/// # }
285/// # mod my_lib { pub fn string_to_u64(_line: String) -> Result<u64, String> { Ok(0) } }
286/// use std::hint::black_box;
287/// #[library_benchmark]
288/// #[benches::by_file(args = [1.to_string(), 11.to_string(), 111.to_string()])]
289/// fn some_bench(line: String) -> Result<u64, String> {
290/// black_box(my_lib::string_to_u64(line))
291/// }
292/// # fn main() {}
293/// ```
294///
295/// # More Examples
296///
297/// The `#[library_benchmark]` attribute as a standalone
298///
299/// ```rust
300/// # use iai_callgrind_macros::library_benchmark;
301/// # mod iai_callgrind {
302/// # pub mod client_requests { pub mod cachegrind {
303/// # pub fn start_instrumentation() {}
304/// # pub fn stop_instrumentation() {}
305/// # }}
306/// # pub struct LibraryBenchmarkConfig {}
307/// # pub mod __internal {
308/// # pub struct InternalMacroLibBench {
309/// # pub id_display: Option<&'static str>,
310/// # pub args_display: Option<&'static str>,
311/// # pub func: fn(),
312/// # pub config: Option<fn() -> InternalLibraryBenchmarkConfig>
313/// # }
314/// # pub struct InternalLibraryBenchmarkConfig {}
315/// # }
316/// # }
317/// fn some_func() -> u64 {
318/// 42
319/// }
320///
321/// #[library_benchmark]
322/// // If possible, it's best to return something from a benchmark function
323/// fn bench_my_library_function() -> u64 {
324/// // The `black_box` is needed to tell the compiler to not optimize what's inside the
325/// // black_box or else the benchmarks might return inaccurate results.
326/// std::hint::black_box(some_func())
327/// }
328/// # fn main() {
329/// # }
330/// ```
331///
332/// In the following example we pass a single argument with `Vec<i32>` type to the benchmark. All
333/// arguments are already wrapped in a black box and don't need to be put in a `black_box` again.
334///
335/// ```rust
336/// # use iai_callgrind_macros::library_benchmark;
337/// # mod iai_callgrind {
338/// # pub mod client_requests { pub mod cachegrind {
339/// # pub fn start_instrumentation() {}
340/// # pub fn stop_instrumentation() {}
341/// # }}
342/// # pub struct LibraryBenchmarkConfig {}
343/// # pub mod __internal {
344/// # pub struct InternalMacroLibBench {
345/// # pub id_display: Option<&'static str>,
346/// # pub args_display: Option<&'static str>,
347/// # pub func: fn(),
348/// # pub config: Option<fn() -> InternalLibraryBenchmarkConfig>
349/// # }
350/// # pub struct InternalLibraryBenchmarkConfig {}
351/// # }
352/// # }
353/// // Our function we want to test
354/// fn some_func_with_array(array: Vec<i32>) -> Vec<i32> {
355/// // do something with the array and return a new array
356/// # array
357/// }
358///
359/// // This function is used to create a worst case array for our `some_func_with_array`
360/// fn setup_worst_case_array(start: i32) -> Vec<i32> {
361/// if start.is_negative() {
362/// (start..0).rev().collect()
363/// } else {
364/// (0..start).rev().collect()
365/// }
366/// }
367///
368/// // This benchmark is setting up multiple benchmark cases with the advantage that the setup
369/// // costs for creating a vector (even if it is empty) aren't attributed to the benchmark and
370/// // that the `array` is already wrapped in a black_box.
371/// #[library_benchmark]
372/// #[bench::empty(vec![])]
373/// #[bench::worst_case_6(vec![6, 5, 4, 3, 2, 1])]
374/// // Function calls are fine too
375/// #[bench::worst_case_4000(setup_worst_case_array(4000))]
376/// // The argument of the benchmark function defines the type of the argument from the `bench`
377/// // cases.
378/// fn bench_some_func_with_array(array: Vec<i32>) -> Vec<i32> {
379/// // Note `array` does not need to be put in a `black_box` because that's already done for
380/// // you.
381/// std::hint::black_box(some_func_with_array(array))
382/// }
383///
384/// // The following benchmark uses the `#[benches]` attribute to setup multiple benchmark cases
385/// // in one go
386/// #[library_benchmark]
387/// #[benches::multiple(vec![1], vec![5])]
388/// // Reroute the `args` to a `setup` function and use the setup function's return value as
389/// // input for the benchmarking function
390/// #[benches::with_setup(args = [1, 5], setup = setup_worst_case_array)]
391/// fn bench_using_the_benches_attribute(array: Vec<i32>) -> Vec<i32> {
392/// std::hint::black_box(some_func_with_array(array))
393/// }
394/// # fn main() {
395/// # }
396/// ```
397///
398/// [bench]: #the-bench-attribute
399/// [benches]: #the-benches-attribute
400#[proc_macro_attribute]
401#[proc_macro_error]
402pub fn library_benchmark(args: TokenStream, input: TokenStream) -> TokenStream {
403 match lib_bench::render(args.into(), input.into()) {
404 Ok(stream) => stream.into(),
405 Err(error) => error.to_compile_error().into(),
406 }
407}
408
409/// Used to annotate functions building the to be benchmarked `iai_callgrind::Command`
410///
411/// This macro works almost the same way as the [`macro@crate::library_benchmark`] attribute. Please
412/// see there for the basic usage.
413///
414/// # Differences to the `#[library_benchmark]` attribute
415///
416/// Any `config` parameter takes a `BinaryBenchmarkConfig` instead of a `LibraryBenchmarkConfig`.
417/// All functions annotated with the `#[binary_benchmark]` attribute need to return an
418/// `iai_callgrind::Command`. Also, the annotated function itself is not benchmarked. Instead, this
419/// function serves the purpose of a builder for the `Command` which is getting benchmarked.
420/// So, any code within this function is evaluated only once when all `Commands` in this benchmark
421/// file are collected and built. You can put any code in the function which is necessary to build
422/// the `Command` without attributing any event counts to the benchmark results which is why the
423/// `setup` and `teardown` parameters work differently in binary benchmarks.
424///
425/// The `setup` and `teardown` parameters of `#[binary_benchmark]`, `#[bench]` and of `#[benches]`
426/// take an expression instead of a function pointer. The expression of the `setup` (`teardown`)
427/// parameter is evaluated and executed not until before (after) the `Command` is executed (not
428/// __built__). There's a special case if `setup` or `teardown` are a function pointer like in
429/// library benchmarks. In this case the `args` from `#[bench]` or `#[benches]` are passed to the
430/// function AND `setup` or `teardown` respectively.
431///
432/// For example (Suppose your crate's binary is named `my-foo`)
433///
434/// ```rust
435/// # macro_rules! env { ($m:tt) => {{ "/some/path" }} }
436/// # use iai_callgrind_macros::binary_benchmark;
437/// # pub mod iai_callgrind {
438/// # use std::path::PathBuf;
439/// # #[derive(Clone)]
440/// # pub struct Command {}
441/// # impl Command {
442/// # pub fn new(_a: &str) -> Self { Self {}}
443/// # pub fn stdout(&mut self, _a: Stdio) -> &mut Self {self}
444/// # pub fn arg<T>(&mut self, _a: T) -> &mut Self where T: Into<PathBuf> {self}
445/// # pub fn build(&mut self) -> Self {self.clone()}
446/// # }
447/// # pub enum Stdio { Inherit, File(PathBuf) }
448/// # #[derive(Clone)]
449/// # pub struct Sandbox {}
450/// # impl Sandbox {
451/// # pub fn new(_a: bool) -> Self { Self {}}
452/// # pub fn fixtures(&mut self, _a: [&str; 2]) -> &mut Self { self }
453/// # }
454/// # impl From<&mut Sandbox> for Sandbox { fn from(value: &mut Sandbox) -> Self {value.clone() }}
455/// # #[derive(Default)]
456/// # pub struct BinaryBenchmarkConfig {}
457/// # impl BinaryBenchmarkConfig { pub fn sandbox<T: Into<Sandbox>>(&mut self, _a: T) -> &mut Self {self}}
458/// # impl From<&mut BinaryBenchmarkConfig> for BinaryBenchmarkConfig
459/// # { fn from(_value: &mut BinaryBenchmarkConfig) -> Self { BinaryBenchmarkConfig {}}}
460/// # pub mod __internal {
461/// # use super::*;
462/// # pub struct InternalMacroBinBench {
463/// # pub id_display: Option<&'static str>,
464/// # pub args_display: Option<&'static str>,
465/// # pub func: fn() -> Command,
466/// # pub config: Option<fn() -> InternalBinaryBenchmarkConfig>,
467/// # pub setup: Option<fn()>,
468/// # pub teardown: Option<fn()>,
469/// # }
470/// # pub struct InternalBinaryBenchmarkConfig {}
471/// # impl From<&mut BinaryBenchmarkConfig> for InternalBinaryBenchmarkConfig
472/// # { fn from(_value: &mut BinaryBenchmarkConfig) -> Self { InternalBinaryBenchmarkConfig {}} }
473/// # }
474/// # }
475/// use iai_callgrind::{BinaryBenchmarkConfig, Sandbox};
476/// use std::path::PathBuf;
477///
478/// // In binary benchmarks there's no need to return a value from the setup function
479/// # #[allow(unused)]
480/// fn simple_setup() {
481/// println!("Put code in here which will be run before the actual command");
482/// }
483///
484/// // It is good style to write any setup function idempotent, so it doesn't depend on the
485/// // `teardown` to have run. The `teardown` function isn't executed if the benchmark
486/// // command fails to run successfully.
487/// # #[allow(unused)]
488/// fn create_file(path: &str) {
489/// // You can for example create a file here which should be available for the `Command`
490/// std::fs::File::create(path).unwrap();
491/// }
492///
493/// # #[allow(unused)]
494/// fn teardown() {
495/// // Let's clean up this temporary file after we have used it
496/// std::fs::remove_file("file_from_setup_function.txt").unwrap();
497/// }
498///
499/// #[binary_benchmark]
500/// #[bench::just_a_fixture("benches/fixture.json")]
/// // First big difference to library benchmarks! `simple_setup` is not evaluated right away and the
502/// // return value of `simple_setup` is not used as input for the `bench_foo` function. Instead,
503/// // `simple_setup()` is executed before the execution of the `Command`.
504/// #[bench::with_other_fixture_and_setup(args = ("benches/other_fixture.txt"), setup = simple_setup())]
/// // Here, setup is a function pointer, which tells us to route `args` to `setup` AND `bench_foo`
506/// #[bench::file_from_setup(args = ("file_from_setup_function.txt"), setup = create_file, teardown = teardown())]
/// // Just a small example for the basic usage of the `#[benches]` attribute
508/// #[benches::multiple("benches/fix_1.txt", "benches/fix_2.txt")]
509/// // We're using a `BinaryBenchmarkConfig` in binary benchmarks to configure these benchmarks to
510/// // run in a sandbox.
511/// #[benches::multiple_with_config(
512/// args = ["benches/fix_1.txt", "benches/fix_2.txt"],
513/// config = BinaryBenchmarkConfig::default()
514/// .sandbox(Sandbox::new(true)
515/// .fixtures(["benches/fix_1.txt", "benches/fix_2.txt"])
516/// )
517/// )]
518/// // All functions annotated with `#[binary_benchmark]` need to return a `iai_callgrind::Command`
519/// fn bench_foo(path: &str) -> iai_callgrind::Command {
520/// let path = PathBuf::from(path);
521/// // We can put any code in here which is needed to configure the `Command`.
522/// let stdout = if path.extension().unwrap() == "txt" {
523/// iai_callgrind::Stdio::Inherit
524/// } else {
525/// iai_callgrind::Stdio::File(path.with_extension("out"))
526/// };
527/// // Configure the command depending on the arguments passed to this function and the code
528/// // above
529/// iai_callgrind::Command::new(env!("CARGO_BIN_EXE_my-foo"))
530/// .stdout(stdout)
531/// .arg(path)
532/// .build()
533/// }
534/// # fn main() {
535/// # // To avoid the unused warning
536/// # let _ = (bench_foo::__BENCHES[0].func)();
537/// # }
538/// ```
539#[proc_macro_attribute]
540#[proc_macro_error]
541pub fn binary_benchmark(args: TokenStream, input: TokenStream) -> TokenStream {
542 match bin_bench::render(args.into(), input.into()) {
543 Ok(stream) => stream.into(),
544 Err(error) => error.to_compile_error().into(),
545 }
546}
547
548/// For internal use only.
549///
550/// The old `macro_rules! impl_traits` was easy to overlook in the source code files and this derive
551/// macro is just a much nicer way to do the same.
552///
553/// We use this derive macro to spare us the manual implementation of
554///
555/// * `From<Outer> for Inner`
556/// * `From<&Outer> for Inner` (which clones the value)
557/// * `From<&mut Outer> for Inner` (which also just clones the value)
558///
559/// for our builder tuple structs which wrap the inner type from the iai-callgrind-runner api. So,
560/// our builders don't need a build method, which is just cool.
561#[proc_macro_derive(IntoInner)]
562#[proc_macro_error]
563pub fn into_inner(item: TokenStream) -> TokenStream {
564 match derive_macros::render_into_inner(item.into()) {
565 Ok(stream) => stream.into(),
566 Err(error) => error.to_compile_error().into(),
567 }
568}