// cachelito_macros/lib.rs — procedural macro implementation for cachelito.

1use proc_macro::TokenStream;
2use proc_macro2::TokenStream as TokenStream2;
3use quote::{format_ident, quote};
4use syn::{parse_macro_input, FnArg, ItemFn, ReturnType};
5
6// Import shared utilities
7use cachelito_macro_utils::{
8    generate_key_expr_with_cacheable_key, parse_sync_attributes, SyncCacheAttributes,
9};
10
11/// Parse macro attributes from the attribute token stream
12fn parse_attributes(attr: TokenStream) -> SyncCacheAttributes {
13    let attr_stream: TokenStream2 = attr.into();
14    match parse_sync_attributes(attr_stream) {
15        Ok(attrs) => attrs,
16        Err(err) => {
17            // Return default attributes with the error embedded
18            // This will cause a compile error with a helpful message
19            panic!("Failed to parse attributes: {}", err);
20        }
21    }
22}
23
24/// Generate the appropriate insert call based on memory configuration and result type
25fn generate_insert_call(has_max_memory: bool, is_result: bool) -> TokenStream2 {
26    if has_max_memory {
27        // Use memory-aware insert methods when max_memory is configured
28        if is_result {
29            quote! { __cache.insert_result_with_memory(&__key, &__result); }
30        } else {
31            quote! { __cache.insert_with_memory(&__key, __result.clone()); }
32        }
33    } else {
34        // Use regular insert methods when max_memory is None
35        if is_result {
36            quote! { __cache.insert_result(&__key, &__result); }
37        } else {
38            quote! { __cache.insert(&__key, __result.clone()); }
39        }
40    }
41}
42
/// Generate the thread-local cache branch.
///
/// Emits code that declares per-thread `thread_local!` storage (a value map
/// keyed by `String` plus an insertion-order queue), wraps both in a
/// `ThreadLocalCache`, and performs the lookup → compute → conditional-insert
/// sequence for the memoized function.
///
/// Parameters:
/// * `cache_ident` / `order_ident` - identifiers for the generated statics.
/// * `ret_type` - tokens of the cached value type `T` in `CacheEntry<T>`.
/// * `limit_expr` … `decay_interval_expr` - configuration expressions
///   forwarded verbatim to `ThreadLocalCache::new`.
/// * `key_expr` - expression producing the `String` cache key from the args.
/// * `block` - the original function body, executed on a cache miss.
/// * `is_result` - whether the return type is a `Result` (only `Ok` cached).
/// * `invalidate_on` - optional predicate that can reject a cached entry.
/// * `cache_if` - optional predicate deciding whether to store a result.
fn generate_thread_local_branch(
    cache_ident: &syn::Ident,
    order_ident: &syn::Ident,
    ret_type: &TokenStream2,
    limit_expr: &TokenStream2,
    max_memory_expr: &TokenStream2,
    policy_expr: &TokenStream2,
    ttl_expr: &TokenStream2,
    frequency_weight_expr: &TokenStream2,
    window_ratio_expr: &TokenStream2,
    sketch_width_expr: &TokenStream2,
    sketch_depth_expr: &TokenStream2,
    decay_interval_expr: &TokenStream2,
    key_expr: &TokenStream2,
    block: &syn::Block,
    is_result: bool,
    invalidate_on: &Option<syn::Path>,
    cache_if: &Option<syn::Path>,
) -> TokenStream2 {
    // Check if max_memory is None by comparing the token stream
    let has_max_memory = has_max_memory(max_memory_expr);

    let invalidation_check = generate_invalidation_check(invalidate_on);
    let cache_condition = generate_cache_condition(cache_if, has_max_memory, is_result);

    quote! {
        thread_local! {
            static #cache_ident: RefCell<std::collections::HashMap<String, CacheEntry<#ret_type>>> = RefCell::new(std::collections::HashMap::new());
            static #order_ident: RefCell<VecDeque<String>> = RefCell::new(VecDeque::new());
        }

        let __cache = ThreadLocalCache::<#ret_type>::new(
            &#cache_ident,
            &#order_ident,
            #limit_expr,
            #max_memory_expr,
            #policy_expr,
            #ttl_expr,
            #frequency_weight_expr,
            #window_ratio_expr,
            #sketch_width_expr,
            #sketch_depth_expr,
            #decay_interval_expr
        );

        let __key = #key_expr;

        // Cache hit: either return the value or fall through when the
        // invalidate_on predicate marks the entry stale.
        if let Some(cached) = __cache.get(&__key) {
            #invalidation_check
        }

        // Cache miss (or stale entry): run the original body, then cache
        // the result subject to the cache_if predicate.
        let __result = (|| #block)();
        #cache_condition
        __result
    }
}
100/// Check if max_memory is None by comparing the token stream
101fn has_max_memory(max_memory_expr: &TokenStream2) -> bool {
102    let max_memory_str = max_memory_expr.to_string();
103    let has_max_memory = !max_memory_str.contains("None");
104    has_max_memory
105}
106
107/// Generate invalidation check code if an invalidate_on function is specified
108fn generate_invalidation_check(invalidate_on: &Option<syn::Path>) -> TokenStream2 {
109    if let Some(pred_fn) = invalidate_on {
110        quote! {
111            // Validate cached value with invalidate_on function
112            // If function returns true, entry is stale - don't use it, re-execute
113            if !#pred_fn(&__key, &cached) {
114                // Function returned false, entry is valid
115                return cached;
116            }
117            // If function returned true, entry is stale/invalid - fall through to re-execute and refresh cache
118        }
119    } else {
120        quote! {
121            return cached;
122        }
123    }
124}
125
126/// Generate cache condition check code if a cache_if function is specified
127fn generate_cache_condition(
128    cache_if: &Option<syn::Path>,
129    has_max_memory: bool,
130    is_result: bool,
131) -> TokenStream2 {
132    let insert_call = generate_insert_call(has_max_memory, is_result);
133
134    if let Some(pred_fn) = cache_if {
135        quote! {
136            // Check if result should be cached using cache_if function
137            // Only cache if function returns true
138            if #pred_fn(&__key, &__result) {
139                #insert_call
140            }
141        }
142    } else {
143        // Always cache if no predicate is specified (default behavior)
144        insert_call
145    }
146}
147
/// Generate the global (cross-thread) cache branch.
///
/// Emits `once_cell`-lazily-initialized statics (a `RwLock`ed value map and
/// a `Mutex`ed insertion-order queue, plus a stats counter behind the
/// `stats` feature), one-time registration of stats and invalidation
/// metadata/callbacks, and the lookup → compute → conditional-insert
/// sequence shared with the thread-local branch.
///
/// Parameters mirror `generate_thread_local_branch`, with additionally:
/// * `stats_ident` - identifier for the generated stats static.
/// * `fn_name_str` - registry name (custom `name` attribute or fn name).
/// * `attrs` - full attribute set; `tags`/`events`/`dependencies`,
///   `invalidate_on` and `cache_if` are read here.
fn generate_global_branch(
    cache_ident: &syn::Ident,
    order_ident: &syn::Ident,
    stats_ident: &syn::Ident,
    ret_type: &TokenStream2,
    limit_expr: &TokenStream2,
    max_memory_expr: &TokenStream2,
    policy_expr: &TokenStream2,
    ttl_expr: &TokenStream2,
    frequency_weight_expr: &TokenStream2,
    window_ratio_expr: &TokenStream2,
    sketch_width_expr: &TokenStream2,
    sketch_depth_expr: &TokenStream2,
    decay_interval_expr: &TokenStream2,
    key_expr: &TokenStream2,
    block: &syn::Block,
    fn_name_str: &str,
    is_result: bool,
    attrs: &SyncCacheAttributes,
) -> TokenStream2 {
    // Check if max_memory is None by comparing the token stream
    let has_max_memory = has_max_memory(max_memory_expr);

    let invalidation_check = generate_invalidation_check(&attrs.invalidate_on);
    let cache_condition = generate_cache_condition(&attrs.cache_if, has_max_memory, is_result);

    // Tag/event/dependency metadata is only registered when at least one of
    // the three lists is non-empty; otherwise nothing is emitted.
    let invalidation_registration = if !attrs.tags.is_empty()
        || !attrs.events.is_empty()
        || !attrs.dependencies.is_empty()
    {
        let tags = &attrs.tags;
        let events = &attrs.events;
        let deps = &attrs.dependencies;

        quote! {
            // Register invalidation metadata
            {
                use std::sync::Once;
                static INVALIDATION_REGISTER_ONCE: Once = Once::new();
                INVALIDATION_REGISTER_ONCE.call_once(|| {
                    let metadata = cachelito_core::InvalidationMetadata::new(
                        vec![#(#tags.to_string()),*],
                        vec![#(#events.to_string()),*],
                        vec![#(#deps.to_string()),*],
                    );
                    cachelito_core::InvalidationRegistry::global().register(#fn_name_str, metadata);

                    // Register invalidation callback
                    cachelito_core::InvalidationRegistry::global().register_callback(
                        #fn_name_str,
                        move || {
                            #cache_ident.write().clear();
                            #order_ident.lock().clear();
                        }
                    );
                });
            }
        }
    } else {
        quote! {}
    };

    // Always emitted: a per-function callback that lets the registry remove
    // individual keys matching a runtime predicate.
    let invalidation_callback_registration = quote! {
        // Register callback for runtime invalidation checks
        {
            use std::sync::Once;
            static INVALIDATION_CALLBACK_REGISTER_ONCE: Once = Once::new();
            INVALIDATION_CALLBACK_REGISTER_ONCE.call_once(|| {
                cachelito_core::InvalidationRegistry::global().register_invalidation_callback(
                    #fn_name_str,
                    move |check_fn: &dyn Fn(&str) -> bool| {
                        let mut map_write = #cache_ident.write();
                        let mut order_write = #order_ident.lock();

                        // Collect keys to remove based on check function
                        let keys_to_remove: Vec<String> = map_write
                            .keys()
                            .filter(|k| check_fn(k.as_str()))
                            .cloned()
                            .collect();

                        // Remove matched keys
                        for key in &keys_to_remove {
                            map_write.remove(key);
                            if let Some(pos) = order_write.iter().position(|k| k == key) {
                                order_write.remove(pos);
                            }
                        }
                    }
                );
            });
        }
    };

    quote! {
        // Lazily-initialized shared storage: value map behind a RwLock for
        // concurrent reads, order queue behind a Mutex.
        static #cache_ident: once_cell::sync::Lazy<parking_lot::RwLock<std::collections::HashMap<String, CacheEntry<#ret_type>>>> =
            once_cell::sync::Lazy::new(|| parking_lot::RwLock::new(std::collections::HashMap::new()));
        static #order_ident: once_cell::sync::Lazy<parking_lot::Mutex<VecDeque<String>>> =
            once_cell::sync::Lazy::new(|| parking_lot::Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static #stats_ident: once_cell::sync::Lazy<cachelito_core::CacheStats> =
            once_cell::sync::Lazy::new(|| cachelito_core::CacheStats::new());

        // One-time registration of the stats handle under `fn_name_str`.
        #[cfg(feature = "stats")]
        {
            use std::sync::Once;
            static REGISTER_ONCE: Once = Once::new();
            REGISTER_ONCE.call_once(|| {
                cachelito_core::stats_registry::register(#fn_name_str, &#stats_ident);
            });
        }

        #invalidation_registration
        #invalidation_callback_registration

        // Two constructor calls, selected by the `stats` feature: the stats
        // variant takes an extra `&stats` argument.
        #[cfg(feature = "stats")]
        let __cache = GlobalCache::<#ret_type>::new(
            &#cache_ident,
            &#order_ident,
            #limit_expr,
            #max_memory_expr,
            #policy_expr,
            #ttl_expr,
            #frequency_weight_expr,
            #window_ratio_expr,
            #sketch_width_expr,
            #sketch_depth_expr,
            #decay_interval_expr,
            &#stats_ident,
        );
        #[cfg(not(feature = "stats"))]
        let __cache = GlobalCache::<#ret_type>::new(
            &#cache_ident,
            &#order_ident,
            #limit_expr,
            #max_memory_expr,
            #policy_expr,
            #ttl_expr,
            #frequency_weight_expr,
            #window_ratio_expr,
            #sketch_width_expr,
            #sketch_depth_expr,
            #decay_interval_expr,
        );

        // Cache hit: return or fall through per the invalidate_on predicate.
        let __key = #key_expr;
        if let Some(cached) = __cache.get(&__key) {
            #invalidation_check
        }

        // Cache miss (or stale entry): run the body, then conditionally insert.
        let __result = (|| #block)();
        #cache_condition
        __result
    }
}
310
311/// A procedural macro that adds automatic memoization to functions and methods.
312///
313/// This macro transforms a function into a cached version that stores results
314/// in a thread-local HashMap based on the function arguments. Subsequent calls
315/// with the same arguments will return the cached result instead of re-executing
316/// the function body.
317///
318/// # Requirements
319///
320/// - **Arguments**: Must implement `CacheableKey` (or `DefaultCacheableKey` + `Debug`)
321/// - **Return type**: Must implement `Clone` for cache storage and retrieval
322/// - **Function purity**: For correct behavior, the function should be pure
323///   (same inputs always produce same outputs with no side effects)
324///
325/// # Macro Parameters
326///
327/// - `limit` (optional): Maximum number of entries in the cache. When the limit is reached,
328///   entries are evicted according to the specified policy. Default: unlimited.
329/// - `max_memory` (optional): Maximum memory size for the cache (e.g., `"100MB"`, `"1GB"`).
330///   When specified, entries are evicted based on memory usage. Requires implementing
331///   `MemoryEstimator` trait for cached types. Default: None (no memory limit).
332/// - `policy` (optional): Eviction policy to use when the cache is full. Options:
333///   - `"fifo"` - First In, First Out (default)
334///   - `"lru"` - Least Recently Used
335///   - `"lfu"` - Least Frequently Used
336///   - `"arc"` - Adaptive Replacement Cache (hybrid LRU/LFU)
337///   - `"random"` - Random Replacement
338///   - `"tlru"` - Time-aware Least Recently Used (combines recency, frequency, and age)
339///   - `"w_tinylfu"` - Windowed Tiny LFU (two-segment cache with window and protected segments)
340/// - `ttl` (optional): Time-to-live in seconds. Entries older than this will be
341///   automatically removed when accessed. Default: None (no expiration).
342/// - `frequency_weight` (optional): Weight factor for frequency in TLRU policy.
343///   Controls the balance between recency and frequency in eviction decisions.
344///   - Values < 1.0: Emphasize recency and age over frequency (good for time-sensitive data)
345///   - Value = 1.0 (or omitted): Balanced approach (default TLRU behavior)
346///   - Values > 1.0: Emphasize frequency over recency (good for popular content)
347///   - Formula: `score = frequency^weight × position × age_factor`
348///   - Only applicable when `policy = "tlru"`. Ignored for other policies.
349///   - Example: `frequency_weight = 1.5` makes frequently accessed entries more resistant to eviction
350/// - `window_ratio` (optional): Window segment size ratio for W-TinyLFU policy (0.01-0.99, default: 0.20).
351///   Controls the balance between recency (window segment) and frequency (protected segment).
352///   - Values < 0.2 (e.g., 0.1): Emphasize frequency → good for stable workloads, analytics
353///   - Value = 0.2 (default): Balanced approach
354///   - Values > 0.2 (e.g., 0.3-0.4): Emphasize recency → good for trending content, news
355///   - Only applicable when `policy = "w_tinylfu"`. Ignored for other policies.
356/// - `sketch_width` (optional): Count-Min Sketch width for W-TinyLFU (reserved for future use, v0.17.0+).
357/// - `sketch_depth` (optional): Count-Min Sketch depth for W-TinyLFU (reserved for future use, v0.17.0+).
358/// - `decay_interval` (optional): Decay interval for W-TinyLFU counters (reserved for future use, v0.17.0+)
359/// - `scope` (optional): Cache scope - where the cache is stored. Options:
360///   - `"global"` - Global storage shared across all threads (default, uses RwLock)
361///   - `"thread"` - Thread-local storage (no synchronization overhead)
362/// - `name` (optional): Custom identifier for the cache in the statistics registry.
363///   Default: the function name. Useful when you want a more descriptive name or
364///   when caching multiple versions of a function. Only relevant with `stats` feature.
365/// - `tags` (optional): Array of tags for invalidation grouping (e.g., `tags = ["user_data", "profile"]`).
366///   Enables tag-based cache invalidation. Only relevant with `scope = "global"`.
367/// - `events` (optional): Array of event names that trigger invalidation (e.g., `events = ["user_updated"]`).
368///   Enables event-driven cache invalidation. Only relevant with `scope = "global"`.
369/// - `dependencies` (optional): Array of cache names this cache depends on (e.g., `dependencies = ["get_user"]`).
370///   When dependencies are invalidated, this cache is also invalidated. Only relevant with `scope = "global"`.
371/// - `invalidate_on` (optional): Function that checks if a cached entry should be invalidated.
372///   Signature: `fn(key: &String, value: &T) -> bool`. Return `true` to invalidate.
373///   The check runs on every cache access. Example: `invalidate_on = is_stale`.
374/// - `cache_if` (optional): Function that determines if a result should be cached.
375///   Signature: `fn(key: &String, value: &T) -> bool`. Return `true` to cache the result.
376///   The check runs after computing the result but before caching it. Example: `cache_if = should_cache`.
377///   When not specified, all results are cached (default behavior).
378///
379/// # Cache Behavior
380///
381/// - **Regular functions**: All results are cached
382/// - **Result-returning functions**: Only `Ok` values are cached, `Err` values are not
383/// - **Thread-local storage** (default): Each thread maintains its own independent cache
384/// - **Global storage**: With `scope = "global"`, cache is shared across all threads
385/// - **Methods**: Works with `self`, `&self`, and `&mut self` parameters
386/// - **Eviction**: When limit is reached, entries are removed according to the policy
387/// - **Expiration**: When TTL is set, expired entries are removed on access
388///
389/// # Examples
390///
391/// ## Basic Function Caching (Unlimited)
392///
393/// ```ignore
394/// use cachelito::cache;
395///
396/// #[cache]
397/// fn fibonacci(n: u32) -> u64 {
398///     if n <= 1 {
399///         return n as u64;
400///     }
401///     fibonacci(n - 1) + fibonacci(n - 2)
402/// }
403///
404/// // First call computes and caches the result
405/// let result1 = fibonacci(10);
406/// // Subsequent calls return cached result (instant)
407/// let result2 = fibonacci(10);
408/// ```
409///
410/// ## Cache with Limit and FIFO Policy (Default)
411///
412/// ```ignore
413/// use cachelito::cache;
414///
415/// #[cache(limit = 100)]
416/// fn expensive_computation(x: i32) -> i32 {
417///     // Cache will hold at most 100 entries
418///     // Oldest entries are evicted first (FIFO)
419///     x * x
420/// }
421/// ```
422///
423/// ## Cache with Limit and LRU Policy
424///
425/// ```ignore
426/// use cachelito::cache;
427///
428/// #[cache(limit = 100, policy = "lru")]
429/// fn expensive_computation(x: i32) -> i32 {
430///     // Cache will hold at most 100 entries
431///     // Least recently used entries are evicted first
432///     x * x
433/// }
434/// ```
435///
436/// ## Cache with TTL (Time To Live)
437///
438/// ```ignore
439/// use cachelito::cache;
440///
441/// #[cache(ttl = 60)]
442/// fn fetch_user_data(user_id: u32) -> UserData {
443///     // Cache expires after 60 seconds
444///     // Expired entries are automatically removed
445///     fetch_from_database(user_id)
446/// }
447/// ```
448///
449/// ## Combining All Features
450///
451/// ```ignore
452/// use cachelito::cache;
453///
454/// #[cache(limit = 50, policy = "lru", ttl = 300)]
455/// fn api_call(endpoint: &str) -> Result<Response, Error> {
456///     // - Max 50 entries
457///     // - LRU eviction
458///     // - 5 minute TTL
459///     // - Only Ok values cached
460///     make_http_request(endpoint)
461/// }
462/// ```
463///
464/// ## Method Caching
465///
466/// ```ignore
467/// use cachelito::cache;
468///
469/// #[derive(Debug, Clone)]
470/// struct Calculator;
471///
472/// impl Calculator {
473///     #[cache(limit = 50, policy = "lru", ttl = 60)]
474///     fn compute(&self, x: f64, y: f64) -> f64 {
475///         x.powf(y)
476///     }
477/// }
478/// ```
479///
480/// ## Result Type Caching (Errors NOT Cached)
481///
482/// ```ignore
483/// use cachelito::cache;
484///
485/// #[cache(limit = 10, ttl = 30)]
486/// fn divide(a: i32, b: i32) -> Result<i32, String> {
487///     if b == 0 {
488///         Err("Division by zero".to_string())
489///     } else {
490///         Ok(a / b)
491///     }
492/// }
493/// ```
494///
495/// ## Global Scope Cache (Shared Across Threads)
496///
497/// ```ignore
498/// use cachelito::cache;
499///
500/// // Global cache (default) - shared across all threads
501/// #[cache(limit = 100)]
502/// fn global_computation(x: i32) -> i32 {
503///     // Cache IS shared across all threads
504///     // Uses RwLock for thread-safe access
505///     x * x
506/// }
507///
508/// // Thread-local cache - each thread has its own cache
509/// #[cache(limit = 100, scope = "thread")]
510/// fn thread_local_computation(x: i32) -> i32 {
511///     // Cache is NOT shared across threads
512///     x * x
513/// }
514/// ```
515///
516/// ## Custom Cache Name for Statistics
517///
518/// ```ignore
519/// use cachelito::cache;
520///
521/// // Use a custom name for the cache in the statistics registry
522/// #[cache(scope = "global", name = "user_api_v1")]
523/// fn fetch_user(id: u32) -> User {
524///     // The cache will be registered as "user_api_v1" instead of "fetch_user"
525///     api_call(id)
526/// }
527///
528/// #[cache(scope = "global", name = "user_api_v2")]
529/// fn fetch_user_v2(id: u32) -> UserV2 {
530///     // Different cache with its own statistics
531///     new_api_call(id)
532/// }
533///
534/// // Access statistics using the custom name
535/// #[cfg(feature = "stats")]
536/// {
537///     if let Some(stats) = cachelito::stats_registry::get("user_api_v1") {
538///         println!("V1 hit rate: {:.2}%", stats.hit_rate() * 100.0);
539///     }
540///     if let Some(stats) = cachelito::stats_registry::get("user_api_v2") {
541///         println!("V2 hit rate: {:.2}%", stats.hit_rate() * 100.0);
542///     }
543/// }
544/// ```
545///
546/// ## TLRU with Custom Frequency Weight
547///
548/// ```ignore
549/// use cachelito::cache;
550///
551/// // Low frequency_weight (0.3) - emphasizes recency and age
552/// // Good for time-sensitive data where freshness matters more than popularity
553/// #[cache(
554///     policy = "tlru",
555///     limit = 100,
556///     ttl = 300,
557///     frequency_weight = 0.3
558/// )]
559/// fn fetch_realtime_data(source: String) -> Data {
560///     // Fetch time-sensitive data
561///     // Recent entries are preferred even if less frequently accessed
562///     api_client.fetch(source)
563/// }
564///
565/// // High frequency_weight (1.5) - emphasizes access frequency
566/// // Good for popular content that should stay cached despite age
567/// #[cache(
568///     policy = "tlru",
569///     limit = 100,
570///     ttl = 300,
571///     frequency_weight = 1.5
572/// )]
573/// fn fetch_popular_content(id: u64) -> Content {
574///     // Frequently accessed entries remain cached longer
575///     // Popular content is protected from eviction
576///     database.fetch_content(id)
577/// }
578///
579/// // Default behavior (balanced) - omit frequency_weight
580/// #[cache(policy = "tlru", limit = 100, ttl = 300)]
581/// fn fetch_balanced(key: String) -> Value {
582///     // Balanced approach between recency and frequency
583///     // Neither recency nor frequency dominates eviction decisions
584///     expensive_operation(key)
585/// }
586/// ```
587///
588/// ## W-TinyLFU with Custom Window Ratio
589///
590/// ```ignore
591/// use cachelito::cache;
592///
593/// // Basic W-TinyLFU - default window_ratio (0.2 = 20%)
594/// #[cache(limit = 1000, policy = "w_tinylfu")]
595/// fn fetch_user_data(user_id: u64) -> UserData {
596///     // Window segment (20%): Recent items using FIFO
597///     // Protected segment (80%): Frequently accessed items using LFU
598///     // Excellent hit rates on mixed workloads
599///     database.fetch_user(user_id)
600/// }
601///
602/// // Large window_ratio (0.3) - emphasizes recency
603/// // Good for frequently changing data like news or social media
604/// #[cache(
605///     limit = 1000,
606///     policy = "w_tinylfu",
607///     window_ratio = 0.3
608/// )]
609/// fn fetch_trending_content(content_id: u64) -> Content {
610///     // 30% window segment = more emphasis on recent items
611///     // Good for: news, social media feeds, trending topics
612///     api_client.fetch_trending(content_id)
613/// }
614///
615/// // Small window_ratio (0.1) - emphasizes frequency
616/// // Good for stable data with clear access patterns
617/// #[cache(
618///     limit = 1000,
619///     policy = "w_tinylfu",
620///     window_ratio = 0.1
621/// )]
622/// fn fetch_analytics_query(query_id: u64) -> QueryResult {
623///     // 10% window segment = strong frequency protection
624///     // Good for: analytics, reference data, stable workloads
625///     run_expensive_query(query_id)
626/// }
627/// ```
628///
629/// # Performance Considerations
630///
631/// - **Cache key generation**: Uses `CacheableKey::to_cache_key()` method
632/// - **Thread-local storage**: Each thread has its own cache (no locks needed)
633/// - **Global storage**: With `scope = "global"`, uses `parking_lot::RwLock` for concurrent reads
634/// - **Memory usage**: Controlled by `limit` and/or `max_memory` parameters
635/// - **FIFO overhead**: O(1) for all operations
636/// - **LRU overhead**: O(n) for cache hits (reordering), O(1) for misses and evictions
637/// - **LFU overhead**: O(n) for eviction (finding minimum frequency)
638/// - **ARC overhead**: O(n) for cache operations (scoring and reordering)
639/// - **Random overhead**: O(1) for eviction selection
640/// - **TLRU overhead**: O(n) for cache operations (scoring with frequency, position, and age)
641/// - **W-TinyLFU overhead**: O(n) for eviction (segment management and LFU in protected segment)
642/// - **TTL overhead**: O(1) expiration check on each get()
643/// - **Memory estimation**: O(1) if `MemoryEstimator` is implemented efficiently
644///
#[proc_macro_attribute]
pub fn cache(attr: TokenStream, item: TokenStream) -> TokenStream {
    // Parse macro attributes (panics with a helpful message on bad syntax)
    let attrs = parse_attributes(attr);

    // Parse function
    let input = parse_macro_input!(item as ItemFn);
    let vis = &input.vis;
    let sig = &input.sig;
    let ident = &sig.ident;
    let block = &input.block;

    // Extract return type; a function with no return type caches `()`
    let ret_type = match &sig.output {
        ReturnType::Type(_, ty) => quote! { #ty },
        ReturnType::Default => quote! { () },
    };

    // Parse arguments and detect self. The receiver only flags that this is
    // a method; typed argument patterns feed the cache-key expression.
    let mut arg_pats = Vec::new();
    let mut has_self = false;
    for arg in sig.inputs.iter() {
        match arg {
            FnArg::Receiver(_) => has_self = true,
            FnArg::Typed(pat_type) => {
                let pat = &pat_type.pat;
                arg_pats.push(quote! { #pat });
            }
        }
    }

    // Generate unique identifiers for static storage, derived from the
    // (uppercased) function name so each cached fn gets its own statics.
    let cache_ident = format_ident!(
        "GLOBAL_OR_THREAD_CACHE_{}",
        ident.to_string().to_uppercase()
    );
    let order_ident = format_ident!(
        "GLOBAL_OR_THREAD_ORDER_{}",
        ident.to_string().to_uppercase()
    );
    let stats_ident = format_ident!(
        "GLOBAL_OR_THREAD_STATS_{}",
        ident.to_string().to_uppercase()
    );

    // Generate cache key expression
    let key_expr = generate_key_expr_with_cacheable_key(has_self, &arg_pats);

    // Detect Result type via the rendered type tokens.
    // NOTE(review): textual detection — a type alias for Result (or a custom
    // Result path) will not be recognized; confirm this is acceptable.
    let is_result = {
        let s = quote!(#ret_type).to_string().replace(' ', "");
        s.starts_with("Result<") || s.starts_with("std::result::Result<")
    };

    // Use custom name if provided, otherwise use function name
    let fn_name_str = attrs
        .custom_name
        .clone()
        .unwrap_or_else(|| ident.to_string());

    // Generate thread-local and global cache branches; both are expanded
    // into the wrapper and selected by the runtime scope check below.
    let thread_local_branch = generate_thread_local_branch(
        &cache_ident,
        &order_ident,
        &ret_type,
        &attrs.limit,
        &attrs.max_memory,
        &attrs.policy,
        &attrs.ttl,
        &attrs.frequency_weight,
        &attrs.window_ratio,
        &attrs.sketch_width,
        &attrs.sketch_depth,
        &attrs.decay_interval,
        &key_expr,
        block,
        is_result,
        &attrs.invalidate_on,
        &attrs.cache_if,
    );

    let global_branch = generate_global_branch(
        &cache_ident,
        &order_ident,
        &stats_ident,
        &ret_type,
        &attrs.limit,
        &attrs.max_memory,
        &attrs.policy,
        &attrs.ttl,
        &attrs.frequency_weight,
        &attrs.window_ratio,
        &attrs.sketch_width,
        &attrs.sketch_depth,
        &attrs.decay_interval,
        &key_expr,
        block,
        &fn_name_str,
        is_result,
        &attrs,
    );

    // Generate final expanded code: same visibility and signature as the
    // original fn, with the body replaced by the caching wrapper.
    let scope_expr = &attrs.scope;
    let expanded = quote! {
        #vis #sig {
            use ::std::collections::VecDeque;
            use ::std::cell::RefCell;
            use ::cachelito_core::{CacheEntry, CacheScope, ThreadLocalCache, GlobalCache, CacheableKey};

            let __scope = #scope_expr;

            if __scope == cachelito_core::CacheScope::ThreadLocal {
                #thread_local_branch
            } else {
                #global_branch
            }

        }
    };

    TokenStream::from(expanded)
}